code | apis | extract_api
---|---|---|
# -*- coding: utf-8 -*-
'''
This code calculates changes in the ratio between different population-weighted GDP deciles and quintiles
by <NAME> (<EMAIL>)
'''
import pandas as pd
import numpy as np
from netCDF4 import Dataset
import _env
datasets = _env.datasets
scenarios = _env.scenarios
gdp_year = 2010
sgdp_year = str(gdp_year)
idir_temp = _env.odir_root + '/sim_temperature/'
####summarize global and regional GDP changes####
boot_methods = ['country-lag0','country-lag1','country-lag5','year','year-blocks']
itbl_gdp_baseline = pd.read_csv(_env.odir_root + 'basic_stats' + '/Country_Basic_Stats.csv')
itbl_gdp_baseline.sort_values([sgdp_year + '_gdpcap'],inplace=True)
tot_pop = itbl_gdp_baseline[sgdp_year + '_pop'].sum()
#itbl_gdp_baseline['2010_pop_ratio'] = itbl_gdp_baseline['2010_pop']/tot_pop
itbl_gdp_baseline[sgdp_year + '_gdpsum'] = 0
#itbl_gdp_baseline['2010_popw_gdp'] = 0
itbl_gdp_baseline[sgdp_year + '_popsum'] = 0
#itbl_gdp_baseline['2010_pop_ratio_sum'] = 0
for irow, row in enumerate(itbl_gdp_baseline.index):
if irow == 0:
itbl_gdp_baseline.loc[row,sgdp_year + '_gdpsum'] = itbl_gdp_baseline.loc[row,sgdp_year + '_gdp']
itbl_gdp_baseline.loc[row, sgdp_year + '_popsum'] = itbl_gdp_baseline.loc[row,sgdp_year + '_pop']
else:
itbl_gdp_baseline.loc[row,sgdp_year + '_gdpsum'] = itbl_gdp_baseline[sgdp_year + '_gdpsum'].iloc[irow-1] + itbl_gdp_baseline.loc[row,sgdp_year + '_gdp']
itbl_gdp_baseline.loc[row, sgdp_year + '_popsum'] = itbl_gdp_baseline[sgdp_year + '_popsum'].iloc[irow-1] + itbl_gdp_baseline.loc[row,sgdp_year + '_pop']
itbl_gdp_baseline[sgdp_year + '_pop_ratio_sum'] = itbl_gdp_baseline[sgdp_year + '_popsum']/tot_pop
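# Editor's note (hedged sketch, not part of the original script): the running totals
# built by the loop above are plain cumulative sums over the table sorted by GDP per
# capita, so an equivalent vectorized form would be (assuming the same column names):
#   itbl_gdp_baseline[sgdp_year + '_gdpsum'] = itbl_gdp_baseline[sgdp_year + '_gdp'].cumsum()
#   itbl_gdp_baseline[sgdp_year + '_popsum'] = itbl_gdp_baseline[sgdp_year + '_pop'].cumsum()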
#deciles (<=10% and >=90%)
deciles = {}
ind10 = np.where(itbl_gdp_baseline[sgdp_year + '_pop_ratio_sum']<=0.1)[0]
deciles[10] = itbl_gdp_baseline.iloc[ind10].copy()
ind90 = np.where(itbl_gdp_baseline[sgdp_year + '_pop_ratio_sum']>=0.9)[0]
deciles[90] = itbl_gdp_baseline.iloc[ind90].copy()
#quintiles (<=20% and >=80%)
ind20 = np.where(itbl_gdp_baseline[sgdp_year + '_pop_ratio_sum']<=0.2)[0]
deciles[20] = itbl_gdp_baseline.iloc[ind20].copy()
ind80 = np.where(itbl_gdp_baseline[sgdp_year + '_pop_ratio_sum']>=0.8)[0]
deciles[80] = itbl_gdp_baseline.iloc[ind80].copy()
for ds in datasets:
scens = ['No-Aerosol']
if ds == 'ERA-Interim':
scens = ['No-Aerosol','No-Sulfate']
idir_gdp = _env.odir_root + '/gdp_' + ds + '/'
odir_summary = _env.odir_root + '/summary_' + ds + '/'
_env.mkdirs(odir_summary)
for scen in scens:
writer = pd.ExcelWriter(odir_summary + 'Deciles_and_Quintile_ratio_changes_'+ds+'_'+scen+'_Burke.xls')
otbls_ctry_GDP_stat = {}
otbls = {}
otbl_ineq = pd.DataFrame(index = boot_methods,columns = ['median_ratio','5_ratio','95_ratio','10_ratio','90_ratio','probability_reduced'])
otbls['deciles'] = otbl_ineq.copy()
otbls['quintiles'] = otbl_ineq.copy()
for b_m in boot_methods:
inc_gdp = Dataset(idir_gdp + 'GDP_Changes_Burke_' + b_m + '_' + str(gdp_year) + '_'+ds+'_'+scen+'.nc')
imtrx_gdp = inc_gdp['GDP'][:]
dec_var = {}
dec_base = {}
for perc in [10,20,80,90]:
dec = deciles[perc].copy()
dec_pop_tot = dec[sgdp_year + '_pop'].sum()
dec_gdp_tot = dec[sgdp_year + '_gdp'].sum()
dec_base[perc] = dec_gdp_tot/dec_pop_tot
ind_ctry = dec.index
imtrx_dec = imtrx_gdp[:,ind_ctry,:]
imtrx_dec_sum = dec_gdp_tot-(imtrx_dec.data).sum(axis=1)
# print(perc, np.median(imtrx_dec_sum),dec_gdp_tot,np.median(imtrx_dec_sum)/dec_gdp_tot)
dec_gdpcap = imtrx_dec_sum/dec_pop_tot
dec_var[perc] = dec_gdpcap.copy()
dec_diff = (dec_var[90]/dec_var[10]-dec_base[90]/dec_base[10])/(dec_base[90]/dec_base[10])*100
quin_diff = (dec_var[80]/dec_var[20] - dec_base[80]/dec_base[20])/(dec_base[80]/dec_base[20])*100
otbls['deciles'].loc[b_m,'median_ratio'] = np.median(dec_diff)
otbls['deciles'].loc[b_m,'5_ratio'] = np.percentile(dec_diff,5)
otbls['deciles'].loc[b_m,'95_ratio'] = np.percentile(dec_diff,95)
otbls['deciles'].loc[b_m,'10_ratio'] = np.percentile(dec_diff,10)
otbls['deciles'].loc[b_m,'90_ratio'] = np.percentile(dec_diff,90)
otbls['deciles'].loc[b_m,'probability_reduced'] = len(dec_diff[dec_diff<0])/np.size(dec_diff)
otbls['quintiles'].loc[b_m,'median_ratio'] = np.median(quin_diff)
otbls['quintiles'].loc[b_m,'5_ratio'] = np.percentile(quin_diff,5)
otbls['quintiles'].loc[b_m,'95_ratio'] = np.percentile(quin_diff,95)
otbls['quintiles'].loc[b_m,'10_ratio'] = np.percentile(quin_diff,10)
otbls['quintiles'].loc[b_m,'90_ratio'] = np.percentile(quin_diff,90)
otbls['quintiles'].loc[b_m,'probability_reduced'] = len(quin_diff[quin_diff<0])/np.size(quin_diff)
otbls['deciles'].to_excel(writer,'deciles')
otbls['quintiles'].to_excel(writer,'quintiles')
writer.save()
|
[
"pandas.DataFrame",
"numpy.size",
"_env.mkdirs",
"pandas.read_csv",
"numpy.median",
"numpy.percentile",
"numpy.where",
"pandas.ExcelWriter"
] |
[((599, 671), 'pandas.read_csv', 'pd.read_csv', (["(_env.odir_root + 'basic_stats' + '/Country_Basic_Stats.csv')"], {}), "(_env.odir_root + 'basic_stats' + '/Country_Basic_Stats.csv')\n", (610, 671), True, 'import pandas as pd\n'), ((1834, 1898), 'numpy.where', 'np.where', (["(itbl_gdp_baseline[sgdp_year + '_pop_ratio_sum'] <= 0.1)"], {}), "(itbl_gdp_baseline[sgdp_year + '_pop_ratio_sum'] <= 0.1)\n", (1842, 1898), True, 'import numpy as np\n'), ((1962, 2026), 'numpy.where', 'np.where', (["(itbl_gdp_baseline[sgdp_year + '_pop_ratio_sum'] >= 0.9)"], {}), "(itbl_gdp_baseline[sgdp_year + '_pop_ratio_sum'] >= 0.9)\n", (1970, 2026), True, 'import numpy as np\n'), ((2120, 2184), 'numpy.where', 'np.where', (["(itbl_gdp_baseline[sgdp_year + '_pop_ratio_sum'] <= 0.2)"], {}), "(itbl_gdp_baseline[sgdp_year + '_pop_ratio_sum'] <= 0.2)\n", (2128, 2184), True, 'import numpy as np\n'), ((2246, 2310), 'numpy.where', 'np.where', (["(itbl_gdp_baseline[sgdp_year + '_pop_ratio_sum'] >= 0.8)"], {}), "(itbl_gdp_baseline[sgdp_year + '_pop_ratio_sum'] >= 0.8)\n", (2254, 2310), True, 'import numpy as np\n'), ((2618, 2643), '_env.mkdirs', '_env.mkdirs', (['odir_summary'], {}), '(odir_summary)\n', (2629, 2643), False, 'import _env\n'), ((2690, 2795), 'pandas.ExcelWriter', 'pd.ExcelWriter', (["(odir_summary + 'Deciles_and_Quintile_ratio_changes_' + ds + '_' + scen +\n '_Burke.xls')"], {}), "(odir_summary + 'Deciles_and_Quintile_ratio_changes_' + ds +\n '_' + scen + '_Burke.xls')\n", (2704, 2795), True, 'import pandas as pd\n'), ((2865, 2997), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'boot_methods', 'columns': "['median_ratio', '5_ratio', '95_ratio', '10_ratio', '90_ratio',\n 'probability_reduced']"}), "(index=boot_methods, columns=['median_ratio', '5_ratio',\n '95_ratio', '10_ratio', '90_ratio', 'probability_reduced'])\n", (2877, 2997), True, 'import pandas as pd\n'), ((4387, 4406), 'numpy.median', 'np.median', (['dec_diff'], {}), '(dec_diff)\n', (4396, 4406), True, 'import numpy as np\n'), ((4457, 4483), 'numpy.percentile', 'np.percentile', (['dec_diff', '(5)'], {}), '(dec_diff, 5)\n', (4470, 4483), True, 'import numpy as np\n'), ((4534, 4561), 'numpy.percentile', 'np.percentile', (['dec_diff', '(95)'], {}), '(dec_diff, 95)\n', (4547, 4561), True, 'import numpy as np\n'), ((4625, 4652), 'numpy.percentile', 'np.percentile', (['dec_diff', '(10)'], {}), '(dec_diff, 10)\n', (4638, 4652), True, 'import numpy as np\n'), ((4703, 4730), 'numpy.percentile', 'np.percentile', (['dec_diff', '(90)'], {}), '(dec_diff, 90)\n', (4716, 4730), True, 'import numpy as np\n'), ((4906, 4926), 'numpy.median', 'np.median', (['quin_diff'], {}), '(quin_diff)\n', (4915, 4926), True, 'import numpy as np\n'), ((4979, 5006), 'numpy.percentile', 'np.percentile', (['quin_diff', '(5)'], {}), '(quin_diff, 5)\n', (4992, 5006), True, 'import numpy as np\n'), ((5059, 5087), 'numpy.percentile', 'np.percentile', (['quin_diff', '(95)'], {}), '(quin_diff, 95)\n', (5072, 5087), True, 'import numpy as np\n'), ((5153, 5181), 'numpy.percentile', 'np.percentile', (['quin_diff', '(10)'], {}), '(quin_diff, 10)\n', (5166, 5181), True, 'import numpy as np\n'), ((5234, 5262), 'numpy.percentile', 'np.percentile', (['quin_diff', '(90)'], {}), '(quin_diff, 90)\n', (5247, 5262), True, 'import numpy as np\n'), ((4818, 4835), 'numpy.size', 'np.size', (['dec_diff'], {}), '(dec_diff)\n', (4825, 4835), True, 'import numpy as np\n'), ((5354, 5372), 'numpy.size', 'np.size', (['quin_diff'], {}), '(quin_diff)\n', (5361, 5372), True, 'import numpy as np\n')]
|
__author__ = "<NAME>"
__copyright__ = "2021, Hamilton-Jacobi Analysis in Python"
__license__ = "Molux Licence"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Completed"
import argparse
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import os, sys
from os.path import abspath, dirname, exists, join
sys.path.append(dirname(dirname(abspath(__file__))))
from Grids import createGrid
from InitialConditions import *
from Visualization import *
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
"""
Test Implicit Functions
Lekan Molu, September 07, 2021
"""
parser = argparse.ArgumentParser(description='2D Plotter for Various Implicit Initial Conditions for the Value Function')
parser.add_argument('--delay', '-dl', type=float, default=3, help='pause time between successive updates of plots' )
args = parser.parse_args()
def levelset_viz(g, ax, fig, mesh, title='', savedict=None, fontdict=None, fc='c', ec='k'):
"""
Simultaneously visualize the level sets of a value function
on a 1x3 chart grid:
Chart 131: 2D value function as a surface mesh
Chart 132: 2D value function as colored contour levels
Chart 133: zero level set of the 2D value function as a cyan contour.
Author: <NAME>, October 29, 2021
"""
ax[0].plot_surface(g.xs[0], g.xs[1], mesh, rstride=1, cstride=1,
cmap='viridis', edgecolor=ec, facecolor=fc)
ax[0].set_xlabel('X', fontdict=fontdict)
ax[0].set_ylabel('Y', fontdict=fontdict)
ax[0].set_zlabel('Z', fontdict=fontdict)
ax[0].set_title(f'{title}', fontdict=fontdict)
ax[1].contourf(g.xs[0], g.xs[1], mesh, colors=fc)
ax[1].set_xlabel('X', fontdict=fontdict)
ax[1].set_title(f'Contours', fontdict=fontdict)
ax[2].contour(g.xs[0], g.xs[1], mesh, levels=[0], colors=fc)
ax[2].set_xlabel('X', fontdict=fontdict)
ax[2].set_ylabel('Y', fontdict=fontdict)
ax[2].grid('on')
ax[2].set_title(f'2D Zero level set', fontdict=fontdict)
fig.tight_layout()
if savedict["save"]:
plt.savefig(join(savedict["savepath"],savedict["savename"]),
bbox_inches='tight',facecolor='None')
fig.canvas.draw()
fig.canvas.flush_events()
def get_grid():
g2min = -2*np.ones((2, 1),dtype=np.float64)
g2max = +2*np.ones((2, 1),dtype=np.float64)
g2N = 51*np.ones((2, 1),dtype=np.int64)
g2 = createGrid(g2min, g2max, g2N, process=True)
return g2
def main(savedict):
# generate signed distance function for cylinder
center = np.array(([[-.5,.5]]), np.float64).T
g2 = get_grid()
# shapes generation
axis_align, radius = 2, 1
cylinder = shapeCylinder(g2, axis_align, center, radius)
sphere = shapeSphere(g2, center, radius=1)
sphere2 = shapeSphere(g2, center=np.array(([-0., 0.])).T, radius=1)
rect = shapeRectangleByCorners(g2)
rect2 = shapeRectangleByCorners(g2, np.array([[ -1.0, -np.inf, ]]).T, np.array([[ np.inf, -1.0 ]]).T, )
rect3 = shapeRectangleByCorners(g2, np.array([[ -1.0, -0.5, ]]).T, np.array([[ .5, 1.0 ]]).T)
rect4 = shapeRectangleByCenter(g2, np.array([[ -1.0, -0.5, ]]).T, np.array([[ .5, 1.0 ]]).T)
# Set Ops
sphere_union = shapeUnion(sphere, sphere2)
rect_union = shapeUnion(rect, rect3)
rect_comp = shapeComplement(rect2)
sph_rect_diff = shapeDifference(sphere, rect)
fig = plt.figure(figsize=(16, 9))
gs = gridspec.GridSpec(1, 3, fig)
ax = [plt.subplot(gs[i], projection='3d') for i in range(2)] + [plt.subplot(gs[2])]
savedict["savename"] = "cylinder_2d.jpg"
levelset_viz(g2, ax, fig, cylinder, title='Cylinder', savedict=savedict, fontdict={'fontsize':12, 'fontweight':'bold'})
plt.pause(args.delay)
savedict["savename"] = "sphere_2d.jpg"
plt.clf()
ax = [plt.subplot(gs[i], projection='3d') for i in range(2)] + [plt.subplot(gs[2])]
levelset_viz(g2, ax, fig, sphere, title='Sphere', savedict=savedict, fontdict={'fontsize':12, 'fontweight':'bold'})
plt.pause(args.delay)
savedict["savename"]="sphere2_2d.jpg"
plt.clf()
ax = [plt.subplot(gs[i], projection='3d') for i in range(2)] + [plt.subplot(gs[2])]
levelset_viz(g2, ax, fig, sphere2, title='Sphere, C=(-.5, .5)', savedict=savedict, fontdict={'fontsize':12, 'fontweight':'bold'})
plt.pause(args.delay)
savedict["savename"]="rect_2d.jpg"
plt.clf()
ax = [plt.subplot(gs[i], projection='3d') for i in range(2)] + [plt.subplot(gs[2])]
levelset_viz(g2, ax, fig, rect, title='Unit Square@Origin', savedict=savedict, fontdict={'fontsize':12, 'fontweight':'bold'})
plt.pause(args.delay)
savedict["savename"]="rect2_2d.jpg"
plt.clf()
ax = [plt.subplot(gs[i], projection='3d') for i in range(2)] + [plt.subplot(gs[2])]
levelset_viz(g2, ax, fig, rect2, title='Rect by Corners', savedict=savedict, fontdict={'fontsize':12, 'fontweight':'bold'})
plt.pause(args.delay)
savedict["savename"]="rect3_2d.jpg"
plt.clf()
ax = [plt.subplot(gs[i], projection='3d') for i in range(2)] + [plt.subplot(gs[2])]
levelset_viz(g2, ax, fig, rect3, title='RectCorner: [1,-0.5], W: [0.5,1.0]', savedict=savedict, fontdict={'fontsize':12, 'fontweight':'bold'})
plt.pause(args.delay)
savedict["savename"]="rect4_2d.jpg"
plt.clf()
ax = [plt.subplot(gs[i], projection='3d') for i in range(2)] + [plt.subplot(gs[2])]
levelset_viz(g2, ax, fig, rect4, title='RectCent: [1,-0.5], W: [0.5,1.0]', savedict=savedict, fontdict={'fontsize':12, 'fontweight':'bold'})
plt.pause(args.delay)
# Show Unions
savedict["savename"]="sphere_union_2d.jpg"
plt.clf()
ax = [plt.subplot(gs[i], projection='3d') for i in range(2)] + [plt.subplot(gs[2])]
levelset_viz(g2, ax, fig, sphere_union, title='Spheres+Sphere', savedict=savedict, fontdict={'fontsize':12, 'fontweight':'bold'})
plt.pause(args.delay)
savedict["savename"]="rect_union_2d.jpg"
plt.clf()
ax = [plt.subplot(gs[i], projection='3d') for i in range(2)] + [plt.subplot(gs[2])]
levelset_viz(g2, ax, fig, rect_union, title='Union of 2 Rects', savedict=savedict, fontdict={'fontsize':12, 'fontweight':'bold'})
plt.pause(args.delay)
savedict["savename"]="rect_comp_2d.jpg"
plt.clf()
ax = [plt.subplot(gs[i], projection='3d') for i in range(2)] + [plt.subplot(gs[2])]
levelset_viz(g2, ax, fig, rect_comp, title='Rect Complement', savedict=savedict, fontdict={'fontsize':12, 'fontweight':'bold'})
plt.pause(args.delay)
savedict["savename"]="sph_rect_diff_2d.jpg"
plt.clf()
ax = [plt.subplot(gs[i], projection='3d') for i in range(2)] + [plt.subplot(gs[2])]
levelset_viz(g2, ax, fig, sph_rect_diff, title='Sphere-Rect Diff', savedict=savedict, fontdict={'fontsize':12, 'fontweight':'bold'})
plt.pause(args.delay)
if __name__ == '__main__':
savedict = dict(save=True, savename='cyl_2d.jpg',\
savepath=join("..", "jpeg_dumps"))
plt.ion()
main(savedict)
|
[
"matplotlib.pyplot.subplot",
"os.path.abspath",
"argparse.ArgumentParser",
"os.path.join",
"matplotlib.pyplot.clf",
"numpy.ones",
"Grids.createGrid",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.pause"
] |
[((625, 747), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""2D Plotter for Various Implicit Initial Conditions for the Value Function"""'}), "(description=\n '2D Plotter for Various Implicit Initial Conditions for the Value Function'\n )\n", (648, 747), False, 'import argparse\n'), ((2266, 2309), 'Grids.createGrid', 'createGrid', (['g2min', 'g2max', 'g2N'], {'process': '(True)'}), '(g2min, g2max, g2N, process=True)\n', (2276, 2309), False, 'from Grids import createGrid\n'), ((3196, 3223), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (3206, 3223), True, 'import matplotlib.pyplot as plt\n'), ((3230, 3258), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(3)', 'fig'], {}), '(1, 3, fig)\n', (3247, 3258), True, 'import matplotlib.gridspec as gridspec\n'), ((3509, 3530), 'matplotlib.pyplot.pause', 'plt.pause', (['args.delay'], {}), '(args.delay)\n', (3518, 3530), True, 'import matplotlib.pyplot as plt\n'), ((3573, 3582), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3580, 3582), True, 'import matplotlib.pyplot as plt\n'), ((3787, 3808), 'matplotlib.pyplot.pause', 'plt.pause', (['args.delay'], {}), '(args.delay)\n', (3796, 3808), True, 'import matplotlib.pyplot as plt\n'), ((3850, 3859), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3857, 3859), True, 'import matplotlib.pyplot as plt\n'), ((4078, 4099), 'matplotlib.pyplot.pause', 'plt.pause', (['args.delay'], {}), '(args.delay)\n', (4087, 4099), True, 'import matplotlib.pyplot as plt\n'), ((4138, 4147), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4145, 4147), True, 'import matplotlib.pyplot as plt\n'), ((4362, 4383), 'matplotlib.pyplot.pause', 'plt.pause', (['args.delay'], {}), '(args.delay)\n', (4371, 4383), True, 'import matplotlib.pyplot as plt\n'), ((4423, 4432), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4430, 4432), True, 'import matplotlib.pyplot as plt\n'), ((4645, 4666), 'matplotlib.pyplot.pause', 'plt.pause', (['args.delay'], {}), '(args.delay)\n', (4654, 4666), True, 'import matplotlib.pyplot as plt\n'), ((4706, 4715), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4713, 4715), True, 'import matplotlib.pyplot as plt\n'), ((4947, 4968), 'matplotlib.pyplot.pause', 'plt.pause', (['args.delay'], {}), '(args.delay)\n', (4956, 4968), True, 'import matplotlib.pyplot as plt\n'), ((5008, 5017), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5015, 5017), True, 'import matplotlib.pyplot as plt\n'), ((5247, 5268), 'matplotlib.pyplot.pause', 'plt.pause', (['args.delay'], {}), '(args.delay)\n', (5256, 5268), True, 'import matplotlib.pyplot as plt\n'), ((5330, 5339), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5337, 5339), True, 'import matplotlib.pyplot as plt\n'), ((5558, 5579), 'matplotlib.pyplot.pause', 'plt.pause', (['args.delay'], {}), '(args.delay)\n', (5567, 5579), True, 'import matplotlib.pyplot as plt\n'), ((5624, 5633), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5631, 5633), True, 'import matplotlib.pyplot as plt\n'), ((5852, 5873), 'matplotlib.pyplot.pause', 'plt.pause', (['args.delay'], {}), '(args.delay)\n', (5861, 5873), True, 'import matplotlib.pyplot as plt\n'), ((5917, 5926), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5924, 5926), True, 'import matplotlib.pyplot as plt\n'), ((6143, 6164), 'matplotlib.pyplot.pause', 'plt.pause', (['args.delay'], {}), '(args.delay)\n', (6152, 6164), True, 'import matplotlib.pyplot as plt\n'), ((6212, 6221), 
'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6219, 6221), True, 'import matplotlib.pyplot as plt\n'), ((6443, 6464), 'matplotlib.pyplot.pause', 'plt.pause', (['args.delay'], {}), '(args.delay)\n', (6452, 6464), True, 'import matplotlib.pyplot as plt\n'), ((6589, 6598), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (6596, 6598), True, 'import matplotlib.pyplot as plt\n'), ((2141, 2174), 'numpy.ones', 'np.ones', (['(2, 1)'], {'dtype': 'np.float64'}), '((2, 1), dtype=np.float64)\n', (2148, 2174), True, 'import numpy as np\n'), ((2186, 2219), 'numpy.ones', 'np.ones', (['(2, 1)'], {'dtype': 'np.float64'}), '((2, 1), dtype=np.float64)\n', (2193, 2219), True, 'import numpy as np\n'), ((2229, 2260), 'numpy.ones', 'np.ones', (['(2, 1)'], {'dtype': 'np.int64'}), '((2, 1), dtype=np.int64)\n', (2236, 2260), True, 'import numpy as np\n'), ((2403, 2438), 'numpy.array', 'np.array', (['[[-0.5, 0.5]]', 'np.float64'], {}), '([[-0.5, 0.5]], np.float64)\n', (2411, 2438), True, 'import numpy as np\n'), ((367, 384), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (374, 384), False, 'from os.path import abspath, dirname, exists, join\n'), ((1972, 2020), 'os.path.join', 'join', (["savedict['savepath']", "savedict['savename']"], {}), "(savedict['savepath'], savedict['savename'])\n", (1976, 2020), False, 'from os.path import abspath, dirname, exists, join\n'), ((2749, 2776), 'numpy.array', 'np.array', (['[[-1.0, -np.inf]]'], {}), '([[-1.0, -np.inf]])\n', (2757, 2776), True, 'import numpy as np\n'), ((2785, 2811), 'numpy.array', 'np.array', (['[[np.inf, -1.0]]'], {}), '([[np.inf, -1.0]])\n', (2793, 2811), True, 'import numpy as np\n'), ((2856, 2880), 'numpy.array', 'np.array', (['[[-1.0, -0.5]]'], {}), '([[-1.0, -0.5]])\n', (2864, 2880), True, 'import numpy as np\n'), ((2889, 2911), 'numpy.array', 'np.array', (['[[0.5, 1.0]]'], {}), '([[0.5, 1.0]])\n', (2897, 2911), True, 'import numpy as np\n'), ((2952, 2976), 'numpy.array', 'np.array', (['[[-1.0, -0.5]]'], {}), '([[-1.0, -0.5]])\n', (2960, 2976), True, 'import numpy as np\n'), ((2985, 3007), 'numpy.array', 'np.array', (['[[0.5, 1.0]]'], {}), '([[0.5, 1.0]])\n', (2993, 3007), True, 'import numpy as np\n'), ((3266, 3301), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[i]'], {'projection': '"""3d"""'}), "(gs[i], projection='3d')\n", (3277, 3301), True, 'import matplotlib.pyplot as plt\n'), ((3324, 3342), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[2]'], {}), '(gs[2])\n', (3335, 3342), True, 'import matplotlib.pyplot as plt\n'), ((3590, 3625), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[i]'], {'projection': '"""3d"""'}), "(gs[i], projection='3d')\n", (3601, 3625), True, 'import matplotlib.pyplot as plt\n'), ((3648, 3666), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[2]'], {}), '(gs[2])\n', (3659, 3666), True, 'import matplotlib.pyplot as plt\n'), ((3867, 3902), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[i]'], {'projection': '"""3d"""'}), "(gs[i], projection='3d')\n", (3878, 3902), True, 'import matplotlib.pyplot as plt\n'), ((3925, 3943), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[2]'], {}), '(gs[2])\n', (3936, 3943), True, 'import matplotlib.pyplot as plt\n'), ((4155, 4190), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[i]'], {'projection': '"""3d"""'}), "(gs[i], projection='3d')\n", (4166, 4190), True, 'import matplotlib.pyplot as plt\n'), ((4213, 4231), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[2]'], {}), '(gs[2])\n', (4224, 4231), True, 'import matplotlib.pyplot as plt\n'), ((4440, 
4475), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[i]'], {'projection': '"""3d"""'}), "(gs[i], projection='3d')\n", (4451, 4475), True, 'import matplotlib.pyplot as plt\n'), ((4498, 4516), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[2]'], {}), '(gs[2])\n', (4509, 4516), True, 'import matplotlib.pyplot as plt\n'), ((4723, 4758), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[i]'], {'projection': '"""3d"""'}), "(gs[i], projection='3d')\n", (4734, 4758), True, 'import matplotlib.pyplot as plt\n'), ((4781, 4799), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[2]'], {}), '(gs[2])\n', (4792, 4799), True, 'import matplotlib.pyplot as plt\n'), ((5025, 5060), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[i]'], {'projection': '"""3d"""'}), "(gs[i], projection='3d')\n", (5036, 5060), True, 'import matplotlib.pyplot as plt\n'), ((5083, 5101), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[2]'], {}), '(gs[2])\n', (5094, 5101), True, 'import matplotlib.pyplot as plt\n'), ((5347, 5382), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[i]'], {'projection': '"""3d"""'}), "(gs[i], projection='3d')\n", (5358, 5382), True, 'import matplotlib.pyplot as plt\n'), ((5405, 5423), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[2]'], {}), '(gs[2])\n', (5416, 5423), True, 'import matplotlib.pyplot as plt\n'), ((5641, 5676), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[i]'], {'projection': '"""3d"""'}), "(gs[i], projection='3d')\n", (5652, 5676), True, 'import matplotlib.pyplot as plt\n'), ((5699, 5717), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[2]'], {}), '(gs[2])\n', (5710, 5717), True, 'import matplotlib.pyplot as plt\n'), ((5934, 5969), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[i]'], {'projection': '"""3d"""'}), "(gs[i], projection='3d')\n", (5945, 5969), True, 'import matplotlib.pyplot as plt\n'), ((5992, 6010), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[2]'], {}), '(gs[2])\n', (6003, 6010), True, 'import matplotlib.pyplot as plt\n'), ((6229, 6264), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[i]'], {'projection': '"""3d"""'}), "(gs[i], projection='3d')\n", (6240, 6264), True, 'import matplotlib.pyplot as plt\n'), ((6287, 6305), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[2]'], {}), '(gs[2])\n', (6298, 6305), True, 'import matplotlib.pyplot as plt\n'), ((6562, 6586), 'os.path.join', 'join', (['""".."""', '"""jpeg_dumps"""'], {}), "('..', 'jpeg_dumps')\n", (6566, 6586), False, 'from os.path import abspath, dirname, exists, join\n'), ((2641, 2662), 'numpy.array', 'np.array', (['[-0.0, 0.0]'], {}), '([-0.0, 0.0])\n', (2649, 2662), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import scipy
from statsmodels import robust
class Singular_description(object):
'''
Display statistics for every numerical column in the data set.
Base class for the Mutual_description instance.
Outcomes are shown from the beginning (on hover)
in each histogram plot on the page.
The class covers the most general feature statistics used in data analysis.
'''
def __init__(self):
# Handled by cursor in common.py file in `Mutual_description`
self.column = ""
def histogram(self, plot_number):
# Generate histogram and save as a static file
# size and ticks are adjusted with accordance to display size
sns.set_style("whitegrid")
fig, ax = plt.subplots()
fig.set_size_inches(12, 12)
ax=sns.distplot(self.dataset.iloc[:, [plot_number]], rug=True, color='k')
fig.patch.set_alpha(0.0)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
fig.savefig('static/plot{}.png'.format(plot_number + 1), dpi=fig.dpi)
# return fig
# plt.show()
def measurement(self):
# call for measurement category of the feature
# possible outcomes are:
# -- quantitative continuous
# -- quantitative discrete categorical
# -- quantitative discrete numerical
if self.dataset[self.column].dtypes == 'float64':
for value in self.dataset[self.column].values:
if float(value) != int(value):
return 'quantitative continuous'
if len(pd.unique(self.dataset[self.column])) == 2:
return 'quantitative discrete categorical'
else:
return 'quantitative discrete numerical'
def average(self):
# TODO: remove
return np.average(self.dataset[self.column])
def expected_value(self):
# call for expected value from feature distribution
return np.mean(self.dataset[self.column])
def median(self):
# call for median from feature distribution
return np.median(self.dataset[self.column])
def mode(self):
# call for mode from feature distribution
return scipy.stats.mode(self.dataset[self.column])
def standard_deviation(self):
# call for standard deviation from feature distribution
return np.std(self.dataset[self.column])
def absolute_deviation_from_mean(self):
# call for absolute deviation from mean from feature distribution
return np.mean(np.absolute(self.dataset[self.column] - np.mean(self.dataset[self.column])))
def absolute_deviation_from_median(self):
# call for mode from feature distribution
return scipy.stats.median_absolute_deviation(self.dataset[self.column])
def quarter_deviation(self):
# call for quarter devaition from feature distribution
q75, q25 = np.percentile(self.dataset[self.column], [75 ,25])
return (q75 - q25)
def coefficient_of_variation(self):
# call for coefficient of variation from feature distribution
return scipy.stats.variation(self.dataset[self.column])
def gini_coefficient(self):
# call for gini coefficient from feature distribution
# TODO: refactorize
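# Note (added): this computes the Gini coefficient as half of the relative mean
# absolute difference, i.e. the mean pairwise |x_i - x_j| divided by (2 * mean(x)).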
mad = np.abs(np.subtract.outer(self.dataset[self.column], self.dataset[self.column])).mean()
rmad = mad/np.mean(self.dataset[self.column])
return 0.5 * rmad
def asymmetry_factor(self):
# call for asymmetry factor from feature distribution
return scipy.stats.skew(self.dataset[self.column])
def entropy(self):
# call for entropy from feature distribution
return scipy.stats.entropy(self.dataset[self.column])
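# Hedged usage sketch (added by the editor, not part of the original module): the class
# expects the caller to attach `dataset` (a pandas DataFrame) and set `column` before
# the statistics methods are called (normally handled by the cursor in common.py), e.g.:
#   desc = Singular_description()
#   desc.dataset = pd.read_csv('some_data.csv')   # hypothetical input file
#   desc.column = desc.dataset.columns[0]
#   print(desc.expected_value(), desc.standard_deviation(), desc.entropy())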
|
[
"seaborn.set_style",
"numpy.average",
"scipy.stats.mode",
"numpy.median",
"numpy.std",
"scipy.stats.entropy",
"matplotlib.pyplot.yticks",
"pandas.unique",
"numpy.percentile",
"scipy.stats.variation",
"scipy.stats.skew",
"numpy.mean",
"numpy.subtract.outer",
"seaborn.distplot",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots",
"scipy.stats.median_absolute_deviation"
] |
[((784, 810), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (797, 810), True, 'import seaborn as sns\n'), ((838, 852), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (850, 852), True, 'import matplotlib.pyplot as plt\n'), ((909, 979), 'seaborn.distplot', 'sns.distplot', (['self.dataset.iloc[:, [plot_number]]'], {'rug': '(True)', 'color': '"""k"""'}), "(self.dataset.iloc[:, [plot_number]], rug=True, color='k')\n", (921, 979), True, 'import seaborn as sns\n'), ((1030, 1053), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(25)'}), '(fontsize=25)\n', (1040, 1053), True, 'import matplotlib.pyplot as plt\n'), ((1062, 1085), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(25)'}), '(fontsize=25)\n', (1072, 1085), True, 'import matplotlib.pyplot as plt\n'), ((1958, 1995), 'numpy.average', 'np.average', (['self.dataset[self.column]'], {}), '(self.dataset[self.column])\n', (1968, 1995), True, 'import numpy as np\n'), ((2103, 2137), 'numpy.mean', 'np.mean', (['self.dataset[self.column]'], {}), '(self.dataset[self.column])\n', (2110, 2137), True, 'import numpy as np\n'), ((2228, 2264), 'numpy.median', 'np.median', (['self.dataset[self.column]'], {}), '(self.dataset[self.column])\n', (2237, 2264), True, 'import numpy as np\n'), ((2351, 2394), 'scipy.stats.mode', 'scipy.stats.mode', (['self.dataset[self.column]'], {}), '(self.dataset[self.column])\n', (2367, 2394), False, 'import scipy\n'), ((2509, 2542), 'numpy.std', 'np.std', (['self.dataset[self.column]'], {}), '(self.dataset[self.column])\n', (2515, 2542), True, 'import numpy as np\n'), ((2874, 2938), 'scipy.stats.median_absolute_deviation', 'scipy.stats.median_absolute_deviation', (['self.dataset[self.column]'], {}), '(self.dataset[self.column])\n', (2911, 2938), False, 'import scipy\n'), ((3055, 3105), 'numpy.percentile', 'np.percentile', (['self.dataset[self.column]', '[75, 25]'], {}), '(self.dataset[self.column], [75, 25])\n', (3068, 3105), True, 'import numpy as np\n'), ((3261, 3309), 'scipy.stats.variation', 'scipy.stats.variation', (['self.dataset[self.column]'], {}), '(self.dataset[self.column])\n', (3282, 3309), False, 'import scipy\n'), ((3725, 3768), 'scipy.stats.skew', 'scipy.stats.skew', (['self.dataset[self.column]'], {}), '(self.dataset[self.column])\n', (3741, 3768), False, 'import scipy\n'), ((3864, 3910), 'scipy.stats.entropy', 'scipy.stats.entropy', (['self.dataset[self.column]'], {}), '(self.dataset[self.column])\n', (3883, 3910), False, 'import scipy\n'), ((3554, 3588), 'numpy.mean', 'np.mean', (['self.dataset[self.column]'], {}), '(self.dataset[self.column])\n', (3561, 3588), True, 'import numpy as np\n'), ((1716, 1752), 'pandas.unique', 'pd.unique', (['self.dataset[self.column]'], {}), '(self.dataset[self.column])\n', (1725, 1752), True, 'import pandas as pd\n'), ((2725, 2759), 'numpy.mean', 'np.mean', (['self.dataset[self.column]'], {}), '(self.dataset[self.column])\n', (2732, 2759), True, 'import numpy as np\n'), ((3455, 3526), 'numpy.subtract.outer', 'np.subtract.outer', (['self.dataset[self.column]', 'self.dataset[self.column]'], {}), '(self.dataset[self.column], self.dataset[self.column])\n', (3472, 3526), True, 'import numpy as np\n')]
|
'''
Script to convert a MAF file to a VCF v4.2 file using Python >= 3.6.
Created by <NAME>
8 March 2018
'''
import os
import sys
from optparse import OptionParser
import subprocess
from functools import wraps
import datetime
import time
import numpy as np
def OptionParsing():
usage = 'usage: %prog -i <*.maf> -o <directory> -r <ref.fa>'
parser = OptionParser(usage)
parser.add_option('-i', '--input_maf', dest="maf", default=None, help=".maf file to be converted.")
parser.add_option('-o', '--output_dir', dest="outDir", default=None, help="Output directory for .vcf file")
parser.add_option('-r', '--ref_genome', dest="refGenome", default="/Users/schencro/Desktop/Bioinformatics_Tools/Ref_Genomes/Ensembl/GRCh37.75/GRCh37.75.fa", help="Reference genome to be used for maf2vcf conversion.")
parser.add_option('-s', '--spotCheckMaf', dest='spotcheck', default=False, action='store_true', help="Use this flag to verify reference matching to maf file. Default=False")
parser.add_option('-v', '--verbose', dest='verbose', default=False, action='store_true', help="Use this flag to turn on verbose mode. Default=False")
(options, args) = parser.parse_args()
if options.maf is None or options.outDir is None or options.refGenome is None:
print("ERROR: Please include arguments for maf file, output directory, and reference genome (single fasta file).")
sys.exit()
else:
pass
return (options, parser)
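# Hedged example invocation (added for clarity; file names are hypothetical, samtools
# must be on PATH for the reference checks, and the output directory should end with a
# trailing slash because the script concatenates paths directly):
#   python maf2vcf.py -i tumor_variants.maf -o ./vcf_out/ -r GRCh37.75.fa -s -v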
def fn_timer(function):
'''
Use this as a wrapper at the top of any function you want to get run time information about.
:param function: Function of interest.
:return: A function to wrap around a function.
'''
@wraps(function)
def function_timer(*args, **kwargs):
t0 = time.time()
result = function(*args, **kwargs)
t1 = time.time()
print ("INFO: Total time running %s: %s minutes" %
(function.__name__, str(round((t1-t0)/60.,2)))
)
return result
return function_timer
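# Usage note (added): apply as a decorator to report a function's runtime in minutes,
# as done for ProcessFile() below:
#   @fn_timer
#   def ProcessFile(Options): ...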
def UpdateProgressGetN(fileName):
if fileName[len(fileName)-1]=="z":
cmd = "gzip -cd %s | wc -l" % (fileName)
pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout
else:
cmd = "wc -l %s" % (fileName)
pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout
return(int(pipe.read().decode("utf-8").lstrip(" ").split(" ")[0]))
def UpdateProgress(i, n, DisplayText):
'''
Prints a progress bar where appropriate.
:param i: Current Step
:param n: Total number of steps.
:param DisplayText: A string that you want to print out that is informative.
:return: None
'''
sys.stdout.write('\r')
j = (i + 1) / n
sys.stdout.write("[%-20s] %d%%\t INFO: %s" % ('=' * int(20 * j), 100 * j, DisplayText))
sys.stdout.flush()
def SamtoolsFaidx(refGenome, genomicPos, ref='', check=True):
'''
Obtain reference sequence and perform check if needed.
:param check: If True, compare the fetched sequence against ref and exit on a mismatch.
:param refGenome: Reference Fasta file
:param genomicPos: Genomic Position of interest.
:param ref: Reference sequence to compare to fetched sequence.
:return: Fetched reference sequence.
'''
proc = subprocess.Popen(['samtools','faidx',refGenome, genomicPos], stdout=subprocess.PIPE)
proc.wait()
outInfo = proc.stdout.readlines()
refSeq = ''.join([line.decode('utf-8').rstrip('\n') for line in outInfo[1:]])
if check:
if refSeq == ref:
return(True)
else:
print('ERROR: May not be proper reference genome')
print('ERROR: Improper reference. Found %s at %s. Reference genome shows %s' % (ref, genomicPos, refSeq))
sys.exit()
return(None)
else:
return(refSeq)
def SpotCheckProperReference(mafFile, Options, fileLength):
'''
Randomly samples the maf file to verify that the proper reference genome is being used.
Spot checks 2% of a file with more than 200 variants (otherwise all variants).
:param mafFile: Input mafFile object (opened)
:param Options: Parser Options
:param fileLength: Length of the file being read
:return: None
'''
print("INFO: Verifying maf file.")
if fileLength > 200:
n=0.02
else:
n=1.
a = np.arange(fileLength)
np.random.shuffle(a)
a = list(a[:int(fileLength*n)])
i = 0
count = 0
for line in mafFile:
if i != 0 and line.startswith('Hugo_Symbol Chromosome Start_position') == False:
# checkIt = len([k for k in a if k==i])
# if checkIt==1:
UpdateProgress(count, len(a), "INFO: Verifying maf file")
count+=1
line = line.rstrip('\n').split('\t')
genomicPos = line[1] + ":" + line[2] + "-" + line[3]
ref = line[7]
mutType = line[5]
variantClass = line[6]
if variantClass != "INS" and variantClass != "TNP" and variantClass !="ONP":
toContinue = SamtoolsFaidx(Options.refGenome, genomicPos, ref)
if count == len(a):
print('')
return(toContinue)
# else:
# print(checkIt)
# print(line)
# print([k for k in a])
# sys.exit("Problem here")
elif i != 0 and line.startswith('Hugo_Symbol Chromosome Start_position') == False:
print("")
print("ERROR: No header found in maf file.")
elif line.startswith('Hugo_Symbol Chromosome Start_position') == True:
toContinue = True
else:
sys.exit("What the fuck")
i+=1
print('')
return(toContinue)
def processSNP(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options):
ref = line[7]
tAllele1 = line[8] # Normal Allele
tAllele2 = line[9] # Alt Allele
QUAL = line[42]
if QUAL == 'None' or QUAL == 'NA' or QUAL == '':
QUAL = '.'
if ref == tAllele1:
altAllele = tAllele1
refAllele = tAllele2
else:
altAllele = tAllele2
refAllele = tAllele1
ref_reads = line[39]
alt_reads = line[38]
reportedVAF = line[28]
# Get phasing information and determine reads for vaf==1
if ref_reads == 'NA' or alt_reads == 'NA' and reportedVAF == '1':
GT = "1/1" # Appears to be homozygous for alternative allele (germline unlikely since it is called w.r.t normal?)
vaf = reportedVAF # Sets VAF equal to 1
if ref_reads == 'NA':
ref_reads = '.'
total_reads = alt_reads
else:
alt_reads = '.'
total_reads = ref_reads
sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf])
# Tossing these very strange mutations within the MAF file.
elif ref_reads == 'NA' or alt_reads == 'NA' and reportedVAF == 'NA':
with open(errorFile, 'a') as errerOut:
errerOut.write('\t'.join(line)+'\n')
if Options.verbose:
print("WARNING: %s" % '\t'.join(line))
return(None)
# Simple SNV cases
else:
total_reads = str(int(ref_reads) + int(alt_reads))
vaf = repr(round(int(alt_reads) / float(total_reads), 4))
if vaf != '1.' and strand=="+" or strand=="-":
GT="0|1"
else:
GT="0/1"
sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf])
# Last check for interesting but unresolved MAF line
if (ref != tAllele1 and ref != tAllele2) or (strand != '+' and strand != '-'):
with open(errorFile, 'a') as errerOut:
errerOut.write('\t'.join(line)+'\n')
if Options.verbose:
print("WARNING: %s" % '\t'.join(line))
return(None)
# Create INFO field
INFO = "MAF_Hugo_Symbol=" + line[0] + ";MAF_ref_context=" + line[15].upper() + ";MAF_Genome_Change=" + line[14] + ";MAF_Variant_Type=" + variantType + ";MAF_Variant_Classification=" + mutType +";DCC_Project_Code=" + line[44]
# Normal variant field if anything
if line[41]=="NA":
normalGenotype = ".:.,.:.:."
else:
normalGenotype = ".:.,.:.:%s"%(line[41])
# Final vcf line out
lineOut = [chrom, pos, rsid, refAllele, altAllele, QUAL, '.', INFO, "GT:AD:DP:VF", normalGenotype, sampleField]
return(lineOut)
def processDEL(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options):
ref = line[7]
tAllele1 = line[8] # Normal Allele Typically
tAllele2 = line[9] # Alt Allele Typically
QUAL = line[42]
if QUAL == 'None' or QUAL == 'NA' or QUAL == '':
QUAL = '.'
if ref == tAllele1:
altAllele = tAllele1
refAllele = tAllele2
else:
altAllele = tAllele2
refAllele = tAllele1
# Obtain the reference sequence + 1 preceding base for the DEL
refAnchorPos = str(int(pos)-1) # Fetch the base that precedes the deletion.
refSeq = SamtoolsFaidx(Options.refGenome, chrom + ":" + refAnchorPos + "-" + line[3], check=False)
if refSeq[1:] != altAllele:
print("ERROR: Deletion alternative allele does not match reference sequence. %s" % ('\t'.join(line)))
sys.exit()
# VCF reference is the preceding base plus the reported deletion in the MAF file.
vcfRef = refSeq
# VCF has base directly preceding the deletion as the alternative base and the variant pos
vcfAlt=refSeq[0]
vcfPos=refAnchorPos
# Get read information
iref_reads = line[37]
ialt_reads = line[36]
ref_reads = line[39]
alt_reads = line[38]
reportedVAF = line[28]
i_t_vaf = line[43]
# Get phasing information and determine reads for vaf==1
if (ref_reads != 'NA' or iref_reads!='NA') and (alt_reads != 'NA' or ialt_reads!='NA'):
GT="0/1"
ref_reads = [read for read in [ref_reads, iref_reads] if read != "NA"][0]
alt_reads = [read for read in [alt_reads, ialt_reads] if read != "NA"][0]
total_reads = str(int(ref_reads) + int(alt_reads))
vaf = str(int(alt_reads)/float(total_reads))
elif i_t_vaf!="" and i_t_vaf!="NA" and ref_reads == 'NA' and iref_reads=='NA' and alt_reads == 'NA' and ialt_reads=='NA':
vaf=i_t_vaf
GT="./."
ref_reads = '.'
alt_reads = '.'
total_reads = '.'
elif (i_t_vaf=="" or i_t_vaf=="NA") and ref_reads == 'NA' and iref_reads=='NA' and alt_reads == 'NA' and ialt_reads=='NA':
GT='./.'
ref_reads='.'
alt_reads='.'
total_reads='.'
vaf='.'
else:
sys.exit("ERROR: Problem processing DEL %s"%('\t'.join(line)))
sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf])
# Create INFO field
INFO = "MAF_Hugo_Symbol=" + line[0] + ";MAF_ref_context=" + line[15].upper() + ";MAF_Genome_Change=" + line[
14] + ";MAF_Variant_Type=" + variantType + ";MAF_Variant_Classification=" + mutType + ";DCC_Project_Code=" + \
line[44]
# Normal variant field if anything
if line[41] == "NA":
normalGenotype = ".:.,.:.:."
else:
normalGenotype = ".:.,.:.:%s" % (line[41])
lineOut = [chrom, vcfPos, rsid, vcfRef, vcfAlt, QUAL, '.', INFO, "GT:AD:DP:VF", normalGenotype, sampleField]
return(lineOut)
def processINS(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options):
ref = line[7]
tAllele1 = line[8] # Normal Allele Typically
tAllele2 = line[9] # Alt Allele Typically
QUAL = line[42]
if QUAL == 'None' or QUAL == 'NA' or QUAL == '':
QUAL = '.'
if tAllele1 == '-':
altAllele = tAllele2
else:
altAllele = tAllele1
# Obtain the reference sequence + 1 preceding base for the DEL
refAnchorPos = str(int(pos) - 1) # Fetch the base that precedes the deletion.
refSeq = SamtoolsFaidx(Options.refGenome, chrom + ":" + refAnchorPos + "-" + line[3], check=False)
# VCF reference is the preceding base in the insertion in MAF
vcfRef = refSeq[0]
# VCF has base directly preceding the deletion as the alternative base and the variant pos
vcfAlt = refSeq[0]+altAllele
vcfPos = refAnchorPos
# Get read information
iref_reads = line[37]
ialt_reads = line[36]
ref_reads = line[39]
alt_reads = line[38]
reportedVAF = line[28]
i_t_vaf = line[43]
# Get phasing information and determine reads for vaf==1
if (ref_reads != 'NA' or iref_reads != 'NA') and (alt_reads != 'NA' or ialt_reads != 'NA'):
GT = "0/1"
ref_reads = [read for read in [ref_reads, iref_reads] if read != "NA"][0]
alt_reads = [read for read in [alt_reads, ialt_reads] if read != "NA"][0]
total_reads = str(int(ref_reads) + int(alt_reads))
vaf = str(int(alt_reads) / float(total_reads))
elif i_t_vaf != "" and i_t_vaf != "NA" and ref_reads == 'NA' and iref_reads == 'NA' and alt_reads == 'NA' and ialt_reads == 'NA':
vaf = i_t_vaf
GT = "./."
ref_reads = '.'
alt_reads = '.'
total_reads = '.'
elif (
i_t_vaf == "" or i_t_vaf == "NA") and ref_reads == 'NA' and iref_reads == 'NA' and alt_reads == 'NA' and ialt_reads == 'NA':
GT = './.'
ref_reads = '.'
alt_reads = '.'
total_reads = '.'
vaf = '.'
else:
sys.exit("ERROR: Problem processing INS %s" % ('\t'.join(line)))
sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf])
# Create INFO field
INFO = "MAF_Hugo_Symbol=" + line[0] + ";MAF_ref_context=" + line[15].upper() + ";MAF_Genome_Change=" + line[
14] + ";MAF_Variant_Type=" + variantType + ";MAF_Variant_Classification=" + mutType + ";DCC_Project_Code=" + \
line[44]
# Normal variant field if anything
if line[41] == "NA":
normalGenotype = ".:.,.:.:."
else:
normalGenotype = ".:.,.:.:%s" % (line[41])
lineOut = [chrom, vcfPos, rsid, vcfRef, vcfAlt, QUAL, '.', INFO, "GT:AD:DP:VF", normalGenotype, sampleField]
return (lineOut)
def CreateVCFLine(line, errorFile, Options):
line = line.rstrip('\n').split('\t')
# Genomic Position
chrom, pos, id = line[1], line[2], line[10]
# Get rs ID
rsid = line[10]
if rsid == '':
rsid = '.'
elif rsid.startswith("rs") == False:
if Options.verbose:
print("ERROR: %s"%(line))
sys.exit("ERROR: Problem in id column")
# Strand Information
strand = line[4]
# Variant Classification/Type (Type is SNP, INS, DEL, etc.)
mutType = line[5]
variantType = line[6]
# Create proper vcf formatted information
if mutType == '':
mutType = '.'
if variantType == '':
variantType = '.'
# Determine type of variant to continue processing.
linetowrite = None
if variantType=="SNP":
linetowrite = processSNP(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options)
elif variantType=="DEL":
linetowrite = processDEL(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options)
elif variantType=="INS":
linetowrite = processINS(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options)
elif variantType=="TNP" or variantType=="ONP":
with open(errorFile, 'a') as errerOut:
errerOut.write('\t'.join(line)+'\n')
else: # This may seem redundant, but I explicitly want to know as much of what I'm choosing to filter out as possible...
if Options.verbose:
print("WARNING: Malformed MAF entry. %s"%('\t'.join(line)))
print('')
with open(errorFile, 'a') as errerOut:
errerOut.write('\t'.join(line)+'\n')
# print(line)
# sys.exit("ERROR: Malformed MAF entry.")
return(linetowrite)
def CreateHeader(ioObject, Options, tumorID, normalID):
now = datetime.datetime.now()
ioObject.write("##fileformat=VCFv4.2\n")
ioObject.write("##fileDate=%s\n"%(now.date()))
ioObject.write("##source=maf2vcf.py\n")
ioObject.write("##reference=%s\n"%(Options.refGenome))
ioObject.write("##sampleColumns=Normal.Tumor\n")
ioObject.write("##INFO=<ID=MAF_Hugo_Symbol,Number=1,Type=String,Description=\"HUGO Symbol in original MAF file.\">\n")
ioObject.write("##INFO=<ID=MAF_ref_context,Number=1,Type=String,Description=\"Reference context in original MAF file.\">\n")
ioObject.write("##INFO=<ID=MAF_Genome_Change,Number=1,Type=String,Description=\"Genome change in original MAF file.\">\n")
ioObject.write("##INFO=<ID=MAF_Variant_Type,Number=1,Type=String,Description=\"Variant type (SNP,INS,DEL) in original MAF file.\">\n")
ioObject.write("##INFO=<ID=MAF_Variant_Classification,Number=1,Type=String,Description=\"Variant Classification (if SNP) in original MAF file.\">\n")
ioObject.write("##INFO=<ID=DCC_Project_Code,Number=1,Type=String,Description=\"DCC Project Code in original MAF file.\">\n")
ioObject.write("##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">\n")
ioObject.write("##FORMAT=<ID=AD,Number=2,Type=Integer,Description=\"Allelic depths of REF and ALT(s) in the order listed\">\n")
ioObject.write("##FORMAT=<ID=DP,Number=1,Type=Integer,Description=\"Total read depth across this site\">\n")
ioObject.write("##FORMAT=<ID=VF,Number=1,Type=Float,Description=\"Variant Allele Frequency.\">\n")
ioObject.write("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t%s\t%s\n"%(normalID,tumorID))
@fn_timer
def ProcessFile(Options):
n = UpdateProgressGetN(Options.maf)
if Options.spotcheck:
with open(Options.maf, 'r') as inFile:
SpotCheckProperReference(inFile, Options, n)
with open(Options.maf,'r') as inFile:
i = 0
for line in inFile:
if i == 1:
toPullIDs = line.rstrip('\n').split('\t')
break
else:
header = line
i+=1
tumorID = toPullIDs[12]
normalID = toPullIDs[13]
count = 0
i = 0
with open(Options.maf, 'r') as inFile:
with open(Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.maf','.vcf'), 'w') as outVCF:
errorFile = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/')) - 1].replace('.maf', '.ignoredSNVs.maf')
with open(errorFile, 'w') as errorOut:
errorOut.write(header)
CreateHeader(outVCF, Options, tumorID, normalID)
for line in inFile:
UpdateProgress(i, n, "Processing Maf File")
if line.startswith('Hugo_Symbol Chromosome Start_position'):
count+=1
i += 1
else:
i += 1
linetoWrite = CreateVCFLine(line, errorFile, Options)
if linetoWrite is not None:
outVCF.write('\t'.join(linetoWrite)+'\n')
print('')
print("INFO: Sorting vcf file.")
vcfFile = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.maf','.vcf')
vcfFileSorted = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.head.maf','.sorted.vcf.gz')
os.system("cat %s | awk '$1 ~ /^#/ {print $0;next} {print $0 | \"LC_ALL=C sort -k1,1 -k2,2n\"}' | gzip > %s"%(vcfFile, vcfFileSorted))
os.system("rm %s"%(vcfFile))
os.system("gzip %s"%(errorFile))
def main():
print("INFO: Processing MAF file.")
FilePath = os.path.dirname(os.path.abspath(__file__))
(Options, Parser) = OptionParsing()
ProcessFile(Options)
if __name__=="__main__":
main()
|
[
"sys.stdout.write",
"subprocess.Popen",
"os.path.abspath",
"optparse.OptionParser",
"os.system",
"time.time",
"sys.stdout.flush",
"numpy.arange",
"functools.wraps",
"sys.exit",
"datetime.datetime.now",
"numpy.random.shuffle"
] |
[((349, 368), 'optparse.OptionParser', 'OptionParser', (['usage'], {}), '(usage)\n', (361, 368), False, 'from optparse import OptionParser\n'), ((1695, 1710), 'functools.wraps', 'wraps', (['function'], {}), '(function)\n', (1700, 1710), False, 'from functools import wraps\n'), ((2702, 2724), 'sys.stdout.write', 'sys.stdout.write', (["'\\r'"], {}), "('\\r')\n", (2718, 2724), False, 'import sys\n'), ((2841, 2859), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2857, 2859), False, 'import sys\n'), ((3296, 3387), 'subprocess.Popen', 'subprocess.Popen', (["['samtools', 'faidx', refGenome, genomicPos]"], {'stdout': 'subprocess.PIPE'}), "(['samtools', 'faidx', refGenome, genomicPos], stdout=\n subprocess.PIPE)\n", (3312, 3387), False, 'import subprocess\n'), ((4394, 4415), 'numpy.arange', 'np.arange', (['fileLength'], {}), '(fileLength)\n', (4403, 4415), True, 'import numpy as np\n'), ((4420, 4440), 'numpy.random.shuffle', 'np.random.shuffle', (['a'], {}), '(a)\n', (4437, 4440), True, 'import numpy as np\n'), ((16034, 16057), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (16055, 16057), False, 'import datetime\n'), ((19407, 19553), 'os.system', 'os.system', (['(\'cat %s | awk \\\'$1 ~ /^#/ {print $0;next} {print $0 | "LC_ALL=C sort -k1,1 -k2,2n"}\\\' | gzip > %s\'\n % (vcfFile, vcfFileSorted))'], {}), '(\n \'cat %s | awk \\\'$1 ~ /^#/ {print $0;next} {print $0 | "LC_ALL=C sort -k1,1 -k2,2n"}\\\' | gzip > %s\'\n % (vcfFile, vcfFileSorted))\n', (19416, 19553), False, 'import os\n'), ((19546, 19574), 'os.system', 'os.system', (["('rm %s' % vcfFile)"], {}), "('rm %s' % vcfFile)\n", (19555, 19574), False, 'import os\n'), ((19579, 19611), 'os.system', 'os.system', (["('gzip %s' % errorFile)"], {}), "('gzip %s' % errorFile)\n", (19588, 19611), False, 'import os\n'), ((1394, 1404), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1402, 1404), False, 'import sys\n'), ((1765, 1776), 'time.time', 'time.time', ([], {}), '()\n', (1774, 1776), False, 'import time\n'), ((1833, 1844), 'time.time', 'time.time', ([], {}), '()\n', (1842, 1844), False, 'import time\n'), ((9331, 9341), 'sys.exit', 'sys.exit', ([], {}), '()\n', (9339, 9341), False, 'import sys\n'), ((19696, 19721), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (19711, 19721), False, 'import os\n'), ((2169, 2226), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'shell': '(True)', 'stdout': 'subprocess.PIPE'}), '(cmd, shell=True, stdout=subprocess.PIPE)\n', (2185, 2226), False, 'import subprocess\n'), ((2297, 2354), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'shell': '(True)', 'stdout': 'subprocess.PIPE'}), '(cmd, shell=True, stdout=subprocess.PIPE)\n', (2313, 2354), False, 'import subprocess\n'), ((3790, 3800), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3798, 3800), False, 'import sys\n'), ((14550, 14589), 'sys.exit', 'sys.exit', (['"""ERROR: Problem in id column"""'], {}), "('ERROR: Problem in id column')\n", (14558, 14589), False, 'import sys\n'), ((5725, 5750), 'sys.exit', 'sys.exit', (['"""What the fuck"""'], {}), "('What the fuck')\n", (5733, 5750), False, 'import sys\n')]
|
import numpy as np
from pomegranate import *
import json
################################################################################
# LOGGING
################################################################################
import logging
# Logging format
FORMAT = '%(asctime)s SigMa %(levelname)-10s: %(message)s'
logging.basicConfig(format=FORMAT)
def get_logger(verbosity=logging.INFO):
'''
Returns logger object
'''
logger = logging.getLogger(__name__)
logger.setLevel(verbosity)
return logger
################################################################################
# UTILS
################################################################################
def sample_and_noise(model, noise_dist, n_seqs, seqs_len):
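# (Added note): draws n_seqs sequences of length seqs_len from `model`, then corrupts
# every position where `noise_dist` samples 0 by replacing the symbol with a uniform
# draw over the 96 mutation categories.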
noise_change_dist = DiscreteDistribution(dict(zip(range(96), [1.0 / 96] * 96)))
seqs = []
noised_seqs = []
for i in range(n_seqs):
seq = np.array(model.sample(seqs_len))
seqs.append(seq)
noised_seq = seq.copy()
hits = noise_dist.sample(seqs_len)
for j, hit in enumerate(hits):
if hit == 0:
noised_seq[j] = noise_change_dist.sample()
noised_seqs.append(noised_seq)
return seqs, noised_seqs
def get_emissions(file='data/emissions_for_breast_cancer'):
return np.load(file + '.npy')
def sample_uniform_between_a_b(n_states, a=0.0, b=1.0):
return (b - a) * np.random.sample(n_states) + a
def random_seqs_from_json(file_name, n_seqs=10):
seqs = []
seqs_names = []
json_file = json.load(open(file_name))
samples = json_file[u'samples']
samples_to_seq = json_file[u'sampleToSequence']
samples = np.random.permutation(samples)
for i in range(n_seqs):
seqs.append(samples_to_seq[samples[i]])
seqs_names.append(samples[i])
return seqs, seqs_names
def to_json(file_name, dict_to_save):
with open(file_name + '.json', 'w') as fp:
json.dump(dict_to_save, fp)
def full_sample_to_chromosomes_seqs(sample, dists_sample):
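# (Added note): entries of `dists_sample` that are >= 1e100 act as sentinels marking the
# first mutation of each chromosome, so splitting at those indices yields one
# sub-sequence of mutations per chromosome.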
np_sample = np.array(sample)
starting_chromosome_idxs = np.where(np.array(dists_sample) >= 1e100)[0]
return np.split(np_sample, starting_chromosome_idxs)[1:]
def load_json(file_name):
return json.load(open(file_name))
def get_split_sequences(file_name, sample_numbers=None):
json_file = json.load(open(file_name))
samples = json_file[u'samples']
samples_to_seq = json_file[u'sampleToSequence']
samples_dists = json_file[u'sampleToPrevMutDists']
out_seqs = []
out_names = []
if sample_numbers is None:
sample_numbers = range(len(samples))
for i in sample_numbers:
n = samples[i]
out_names.append(n)
out_seqs.append(full_sample_to_chromosomes_seqs(samples_to_seq[n], samples_dists[n]))
return zip(out_names, out_seqs)
def get_full_sequences(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'):
json_file = json.load(open(file_name))
samples = json_file[u'samples']
samples_to_seq = json_file[u'sampleToSequence']
out_seqs = []
out_names = []
for n in samples:
out_names.append(n)
out_seqs.append(samples_to_seq[n])
return zip(out_names, out_seqs)
def get_count_sequences_as_mat(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'):
json_file = json.load(open(file_name))
samples = json_file[u'samples']
samples_to_seq = json_file[u'sampleToSequence']
# finding num_object + counting
num_objects = 0
samples_objects = []
samples_counts = []
for sample in samples:
objects, counts = np.unique(samples_to_seq[sample], return_counts=True)
samples_objects.append(objects)
samples_counts.append(counts)
num_objects = max(num_objects, np.max(objects))
num_objects += 1
count_mat = np.zeros((len(samples), num_objects))
for i in range(len(samples)):
count_mat[i, samples_objects[i]] = samples_counts[i]
return count_mat
def get_samples_names(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'):
json_file = json.load(open(file_name))
samples = json_file[u'samples']
return samples
def get_split_sequences_by_threshold(file_name, threshold, sample_numbers=None):
json_file = json.load(open(file_name))
samples = json_file[u'samples']
samples_to_seq = json_file[u'sampleToSequence']
samples_dists = json_file[u'sampleToPrevMutDists']
out_seqs = []
out_names = []
if sample_numbers is None:
sample_numbers = range(len(samples))
for i in sample_numbers:
n = samples[i]
out_names.append(n)
out_seqs.append(full_sample_to_chromosomes_seqs_by_threshold(samples_to_seq[n], samples_dists[n], threshold))
return zip(out_names, out_seqs)
def full_sample_to_chromosomes_seqs_by_threshold(sample, dists_sample, threshold):
np_sample = np.array(sample)
np_dists = np.array(dists_sample)
starting_chromosome_idxs = np.where(np_dists >= 1e100)[0]
chromosomes = np.split(np_sample, starting_chromosome_idxs)[1:]
chromosomes_dists = np.split(np_dists, starting_chromosome_idxs)[1:]
out = []
for i in range(len(chromosomes)):
chromosome = chromosomes[i]
chromosome_dists = chromosomes_dists[i]
starting_seqs_idxs = np.where(chromosome_dists >= threshold)[0]
seqs = np.split(chromosome, starting_seqs_idxs)[1:]
out.append(seqs)
return out
def seqs_to_seq(seqs):
out = []
for seq in seqs:
out.extend(seq)
return np.array(out)
def seqs_to_seq_of_prefix(seqs):
out = []
for seq in seqs:
out.append(seq[0])
return np.array(out)
def sample_indices_not_in_dir(dir_path):
import os
samples_in_dir = [f[:-5] for f in os.listdir(dir_path)]
samples = get_samples_names()
missing_indices = []
for i in range(len(samples)):
if samples[i] not in samples_in_dir:
missing_indices.append(i)
return missing_indices
|
[
"json.dump",
"numpy.load",
"logging.basicConfig",
"numpy.unique",
"numpy.split",
"numpy.max",
"numpy.where",
"numpy.array",
"numpy.random.permutation",
"os.listdir",
"logging.getLogger",
"numpy.random.sample"
] |
[((322, 356), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': 'FORMAT'}), '(format=FORMAT)\n', (341, 356), False, 'import logging\n'), ((453, 480), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (470, 480), False, 'import logging\n'), ((1318, 1340), 'numpy.load', 'np.load', (["(file + '.npy')"], {}), "(file + '.npy')\n", (1325, 1340), True, 'import numpy as np\n'), ((1681, 1711), 'numpy.random.permutation', 'np.random.permutation', (['samples'], {}), '(samples)\n', (1702, 1711), True, 'import numpy as np\n'), ((2054, 2070), 'numpy.array', 'np.array', (['sample'], {}), '(sample)\n', (2062, 2070), True, 'import numpy as np\n'), ((4908, 4924), 'numpy.array', 'np.array', (['sample'], {}), '(sample)\n', (4916, 4924), True, 'import numpy as np\n'), ((4940, 4962), 'numpy.array', 'np.array', (['dists_sample'], {}), '(dists_sample)\n', (4948, 4962), True, 'import numpy as np\n'), ((5571, 5584), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (5579, 5584), True, 'import numpy as np\n'), ((5692, 5705), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (5700, 5705), True, 'import numpy as np\n'), ((1949, 1976), 'json.dump', 'json.dump', (['dict_to_save', 'fp'], {}), '(dict_to_save, fp)\n', (1958, 1976), False, 'import json\n'), ((2158, 2203), 'numpy.split', 'np.split', (['np_sample', 'starting_chromosome_idxs'], {}), '(np_sample, starting_chromosome_idxs)\n', (2166, 2203), True, 'import numpy as np\n'), ((3621, 3674), 'numpy.unique', 'np.unique', (['samples_to_seq[sample]'], {'return_counts': '(True)'}), '(samples_to_seq[sample], return_counts=True)\n', (3630, 3674), True, 'import numpy as np\n'), ((4995, 5023), 'numpy.where', 'np.where', (['(np_dists >= 1e+100)'], {}), '(np_dists >= 1e+100)\n', (5003, 5023), True, 'import numpy as np\n'), ((5045, 5090), 'numpy.split', 'np.split', (['np_sample', 'starting_chromosome_idxs'], {}), '(np_sample, starting_chromosome_idxs)\n', (5053, 5090), True, 'import numpy as np\n'), ((5119, 5163), 'numpy.split', 'np.split', (['np_dists', 'starting_chromosome_idxs'], {}), '(np_dists, starting_chromosome_idxs)\n', (5127, 5163), True, 'import numpy as np\n'), ((1420, 1446), 'numpy.random.sample', 'np.random.sample', (['n_states'], {}), '(n_states)\n', (1436, 1446), True, 'import numpy as np\n'), ((3792, 3807), 'numpy.max', 'np.max', (['objects'], {}), '(objects)\n', (3798, 3807), True, 'import numpy as np\n'), ((5334, 5373), 'numpy.where', 'np.where', (['(chromosome_dists >= threshold)'], {}), '(chromosome_dists >= threshold)\n', (5342, 5373), True, 'import numpy as np\n'), ((5392, 5432), 'numpy.split', 'np.split', (['chromosome', 'starting_seqs_idxs'], {}), '(chromosome, starting_seqs_idxs)\n', (5400, 5432), True, 'import numpy as np\n'), ((5801, 5821), 'os.listdir', 'os.listdir', (['dir_path'], {}), '(dir_path)\n', (5811, 5821), False, 'import os\n'), ((2111, 2133), 'numpy.array', 'np.array', (['dists_sample'], {}), '(dists_sample)\n', (2119, 2133), True, 'import numpy as np\n')]
|
import os
import datetime
import psycopg2
import numpy as np
import pandas as pd
#import statsmodels.api as sm
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod.families import Binomial
from statsmodels.genmod.families.links import probit
DATABASE_URL = os.environ['DATABASE_URL']
conn = psycopg2.connect(DATABASE_URL, sslmode='require')
cur = conn.cursor() # cursor needed to perform db ops
cur.execute("SELECT * FROM Iris;")
iris_df = pd.DataFrame(cur.fetchall())
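# first four columns are the feature measurements; the fifth is the species label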
X = np.array(iris_df.iloc[:, 0:4])
y = np.array(iris_df.iloc[:, 4] == 'virginica', dtype=int)
weight = np.ones(150)
probit_link = probit()
bin_family = Binomial(probit_link)
my_glm = GLM(y, X, freq_weights = weight, family = bin_family)
my_glm_fit = my_glm.fit()
theta = my_glm_fit.params
current_dt = datetime.datetime.now()
cur.execute("INSERT INTO scores VALUES (%s, %s, %s, %s, %s)",
(str(current_dt), theta[0], theta[1], theta[2], theta[3]))
conn.commit()
cur.close()
conn.close()
|
[
"statsmodels.genmod.generalized_linear_model.GLM",
"statsmodels.genmod.families.links.probit",
"numpy.ones",
"numpy.array",
"statsmodels.genmod.families.Binomial",
"datetime.datetime.now",
"psycopg2.connect"
] |
[((327, 376), 'psycopg2.connect', 'psycopg2.connect', (['DATABASE_URL'], {'sslmode': '"""require"""'}), "(DATABASE_URL, sslmode='require')\n", (343, 376), False, 'import psycopg2\n'), ((511, 541), 'numpy.array', 'np.array', (['iris_df.iloc[:, 0:4]'], {}), '(iris_df.iloc[:, 0:4])\n', (519, 541), True, 'import numpy as np\n'), ((546, 600), 'numpy.array', 'np.array', (["(iris_df.iloc[:, 4] == 'virginica')"], {'dtype': 'int'}), "(iris_df.iloc[:, 4] == 'virginica', dtype=int)\n", (554, 600), True, 'import numpy as np\n'), ((611, 623), 'numpy.ones', 'np.ones', (['(150)'], {}), '(150)\n', (618, 623), True, 'import numpy as np\n'), ((640, 648), 'statsmodels.genmod.families.links.probit', 'probit', ([], {}), '()\n', (646, 648), False, 'from statsmodels.genmod.families.links import probit\n'), ((662, 683), 'statsmodels.genmod.families.Binomial', 'Binomial', (['probit_link'], {}), '(probit_link)\n', (670, 683), False, 'from statsmodels.genmod.families import Binomial\n'), ((693, 742), 'statsmodels.genmod.generalized_linear_model.GLM', 'GLM', (['y', 'X'], {'freq_weights': 'weight', 'family': 'bin_family'}), '(y, X, freq_weights=weight, family=bin_family)\n', (696, 742), False, 'from statsmodels.genmod.generalized_linear_model import GLM\n'), ((813, 836), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (834, 836), False, 'import datetime\n')]
|
import numpy
from sympy import Rational as frac
from sympy import pi, sqrt
from ..helpers import article, fsd, pm, pm_roll, untangle
from ._helpers import E3rScheme
citation = article(
authors=["<NAME>", "<NAME>"],
title="Approximate integration formulas for certain spherically symmetric regions",
journal="Math. Comp.",
volume="17",
year="1963",
pages="105-135",
url="https://doi.org/10.1090/S0025-5718-1963-0161473-0",
)
def stroud_secrest_07():
nu, xi = [sqrt(15 - p_m * 3 * sqrt(5)) for p_m in [+1, -1]]
A = frac(3, 5)
B = frac(1, 30)
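    # each (weight, point-generator) pair below is expanded by untangle into the full symmetric point set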
data = [(A, numpy.array([[0, 0, 0]])), (B, pm_roll(3, [xi, nu]))]
points, weights = untangle(data)
weights *= 8 * pi
return E3rScheme("Stroud-Secrest VII", weights, points, 5, citation)
def stroud_secrest_08():
nu = sqrt(30)
eta = sqrt(10)
A = frac(3, 5)
B = frac(2, 75)
C = frac(3, 100)
data = [(A, numpy.array([[0, 0, 0]])), (B, fsd(3, (nu, 1))), (C, pm(3, eta))]
points, weights = untangle(data)
weights *= 8 * pi
return E3rScheme("Stroud-Secrest VIII", weights, points, 5, citation)
def stroud_secrest_09():
eta = sqrt(10)
xi, nu = [sqrt(15 - p_m * 5 * sqrt(5)) for p_m in [+1, -1]]
A = frac(3, 5)
B = frac(1, 50)
data = [(A, numpy.array([[0, 0, 0]])), (B, pm(3, eta)), (B, pm_roll(3, [xi, nu]))]
points, weights = untangle(data)
weights *= 8 * pi
return E3rScheme("Stroud-Secrest IX", weights, points, 5, citation)
def stroud_secrest_10():
sqrt130 = sqrt(130)
nu = sqrt((720 - 24 * sqrt130) / 11)
xi = sqrt(288 + 24 * sqrt130)
eta = sqrt((-216 + 24 * sqrt130) / 7)
A = (5175 - 13 * sqrt130) / 8820
B = (3870 + 283 * sqrt130) / 493920
C = (3204 - 281 * sqrt130) / 197568
# ERR in Stroud's book: 917568 vs. 197568
D = (4239 + 373 * sqrt130) / 197568
data = [
(A, numpy.array([[0, 0, 0]])),
(B, fsd(3, (nu, 1))),
(C, fsd(3, (xi, 2))),
(D, pm(3, eta)),
]
points, weights = untangle(data)
weights *= 8 * pi
return E3rScheme("Stroud-Secrest X", weights, points, 7, citation)
def stroud_secrest_11():
sqrt5 = sqrt(5)
sqrt39 = sqrt(39)
sqrt195 = sqrt(195)
nu, xi = [
sqrt(-50 + p_m * 10 * sqrt5 + 10 * sqrt39 - p_m * 2 * sqrt195)
for p_m in [+1, -1]
]
eta = sqrt(36 + 4 * sqrt39)
mu, lmbda = [
sqrt(54 + p_m * 18 * sqrt5 + 6 * sqrt39 + p_m * 2 * sqrt195) for p_m in [+1, -1]
]
A = (1725 - 26 * sqrt39) / 2940
B = (1065 + 171 * sqrt39) / 54880
C = (297 - 47 * sqrt39) / 32928
data = [
(A, numpy.array([[0, 0, 0]])),
(B, pm_roll(3, [xi, nu])),
(C, pm(3, eta)),
(C, pm_roll(3, [lmbda, mu])),
]
points, weights = untangle(data)
weights *= 8 * pi
return E3rScheme("Stroud-Secrest XI", weights, points, 7, citation)
|
[
"numpy.array",
"sympy.sqrt",
"sympy.Rational"
] |
[((553, 563), 'sympy.Rational', 'frac', (['(3)', '(5)'], {}), '(3, 5)\n', (557, 563), True, 'from sympy import Rational as frac\n'), ((572, 583), 'sympy.Rational', 'frac', (['(1)', '(30)'], {}), '(1, 30)\n', (576, 583), True, 'from sympy import Rational as frac\n'), ((824, 832), 'sympy.sqrt', 'sqrt', (['(30)'], {}), '(30)\n', (828, 832), False, 'from sympy import pi, sqrt\n'), ((843, 851), 'sympy.sqrt', 'sqrt', (['(10)'], {}), '(10)\n', (847, 851), False, 'from sympy import pi, sqrt\n'), ((860, 870), 'sympy.Rational', 'frac', (['(3)', '(5)'], {}), '(3, 5)\n', (864, 870), True, 'from sympy import Rational as frac\n'), ((879, 890), 'sympy.Rational', 'frac', (['(2)', '(75)'], {}), '(2, 75)\n', (883, 890), True, 'from sympy import Rational as frac\n'), ((899, 911), 'sympy.Rational', 'frac', (['(3)', '(100)'], {}), '(3, 100)\n', (903, 911), True, 'from sympy import Rational as frac\n'), ((1165, 1173), 'sympy.sqrt', 'sqrt', (['(10)'], {}), '(10)\n', (1169, 1173), False, 'from sympy import pi, sqrt\n'), ((1246, 1256), 'sympy.Rational', 'frac', (['(3)', '(5)'], {}), '(3, 5)\n', (1250, 1256), True, 'from sympy import Rational as frac\n'), ((1265, 1276), 'sympy.Rational', 'frac', (['(1)', '(50)'], {}), '(1, 50)\n', (1269, 1276), True, 'from sympy import Rational as frac\n'), ((1537, 1546), 'sympy.sqrt', 'sqrt', (['(130)'], {}), '(130)\n', (1541, 1546), False, 'from sympy import pi, sqrt\n'), ((1557, 1588), 'sympy.sqrt', 'sqrt', (['((720 - 24 * sqrt130) / 11)'], {}), '((720 - 24 * sqrt130) / 11)\n', (1561, 1588), False, 'from sympy import pi, sqrt\n'), ((1598, 1622), 'sympy.sqrt', 'sqrt', (['(288 + 24 * sqrt130)'], {}), '(288 + 24 * sqrt130)\n', (1602, 1622), False, 'from sympy import pi, sqrt\n'), ((1633, 1664), 'sympy.sqrt', 'sqrt', (['((-216 + 24 * sqrt130) / 7)'], {}), '((-216 + 24 * sqrt130) / 7)\n', (1637, 1664), False, 'from sympy import pi, sqrt\n'), ((2183, 2190), 'sympy.sqrt', 'sqrt', (['(5)'], {}), '(5)\n', (2187, 2190), False, 'from sympy import pi, sqrt\n'), ((2204, 2212), 'sympy.sqrt', 'sqrt', (['(39)'], {}), '(39)\n', (2208, 2212), False, 'from sympy import pi, sqrt\n'), ((2227, 2236), 'sympy.sqrt', 'sqrt', (['(195)'], {}), '(195)\n', (2231, 2236), False, 'from sympy import pi, sqrt\n'), ((2368, 2389), 'sympy.sqrt', 'sqrt', (['(36 + 4 * sqrt39)'], {}), '(36 + 4 * sqrt39)\n', (2372, 2389), False, 'from sympy import pi, sqrt\n'), ((2261, 2323), 'sympy.sqrt', 'sqrt', (['(-50 + p_m * 10 * sqrt5 + 10 * sqrt39 - p_m * 2 * sqrt195)'], {}), '(-50 + p_m * 10 * sqrt5 + 10 * sqrt39 - p_m * 2 * sqrt195)\n', (2265, 2323), False, 'from sympy import pi, sqrt\n'), ((2416, 2476), 'sympy.sqrt', 'sqrt', (['(54 + p_m * 18 * sqrt5 + 6 * sqrt39 + p_m * 2 * sqrt195)'], {}), '(54 + p_m * 18 * sqrt5 + 6 * sqrt39 + p_m * 2 * sqrt195)\n', (2420, 2476), False, 'from sympy import pi, sqrt\n'), ((601, 625), 'numpy.array', 'numpy.array', (['[[0, 0, 0]]'], {}), '([[0, 0, 0]])\n', (612, 625), False, 'import numpy\n'), ((929, 953), 'numpy.array', 'numpy.array', (['[[0, 0, 0]]'], {}), '([[0, 0, 0]])\n', (940, 953), False, 'import numpy\n'), ((1294, 1318), 'numpy.array', 'numpy.array', (['[[0, 0, 0]]'], {}), '([[0, 0, 0]])\n', (1305, 1318), False, 'import numpy\n'), ((1894, 1918), 'numpy.array', 'numpy.array', (['[[0, 0, 0]]'], {}), '([[0, 0, 0]])\n', (1905, 1918), False, 'import numpy\n'), ((2639, 2663), 'numpy.array', 'numpy.array', (['[[0, 0, 0]]'], {}), '([[0, 0, 0]])\n', (2650, 2663), False, 'import numpy\n'), ((515, 522), 'sympy.sqrt', 'sqrt', (['(5)'], {}), '(5)\n', (519, 522), False, 'from sympy import pi, 
sqrt\n'), ((1208, 1215), 'sympy.sqrt', 'sqrt', (['(5)'], {}), '(5)\n', (1212, 1215), False, 'from sympy import pi, sqrt\n')]
|
import numpy as np
import pickle
import os
from pathlib import Path
from metrics.class_imbalance import get_classes, class_proportion
from metrics.phi_div import average_dkl
from metrics.wasserstein import wasserstein_2
def compute_metrics(ds,
split,
inv_temp,
num_parties,
num_classes,
alpha,
lengthscale,
party_datasets,
party_labels,
reference_dataset,
candidate_datasets,
candidate_labels,
rewards,
deltas,
mus):
print("Computing metrics")
party_datasets_with_rewards = []
for i in range(num_parties):
party_datasets_with_rewards.append(np.concatenate([party_datasets[i], rewards[i]], axis=0))
print("Length of rewards: {}".format([len(r) for r in rewards]))
print("alpha:\n{}".format(alpha))
print("Calculating average DKLs before")
dkls_before = average_dkl(party_datasets, reference_dataset)
print(dkls_before)
print("Calculating average DKLs after")
dkls_after = average_dkl(party_datasets_with_rewards, reference_dataset)
print(dkls_after)
print("Correlation coefficient with alpha: \n{}".format(np.corrcoef(alpha, dkls_after)[0, 1]))
class_props = []
for result in rewards:
class_props.append(
class_proportion(get_classes(np.array(result), candidate_datasets[0], candidate_labels), num_classes))
print("Class proportions and class imbalance of rewards: {}".format(class_props))
print("Calculating Wasserstein-2 before")
wass_before = [wasserstein_2(party_datasets[i], reference_dataset) for i in range(num_parties)]
wass_after = [wasserstein_2(np.concatenate([party_datasets[i], np.array(rewards[i])], axis=0), reference_dataset)
for i in range(num_parties)]
print("Wasserstein-2 before: \n{}".format(wass_before))
print("Wasserstein-2 after: \n{}".format(wass_after))
print("Correlation coefficient with alpha: \n{}".format(np.corrcoef(alpha, wass_after)[0, 1]))
#Save metrics
Path(os.getcwd() + '/data/metrics').mkdir(parents=True, exist_ok=True)
pickle.dump((party_datasets, party_labels, reference_dataset, candidate_datasets, candidate_labels,
rewards, deltas, mus, alpha, lengthscale, class_props, wass_before, wass_after, dkls_before, dkls_after),
open("data/metrics/metrics-{}-{}-{}.p".format(ds, split, inv_temp), 'wb'))
|
[
"os.getcwd",
"numpy.corrcoef",
"metrics.wasserstein.wasserstein_2",
"numpy.array",
"metrics.phi_div.average_dkl",
"numpy.concatenate"
] |
[((1107, 1153), 'metrics.phi_div.average_dkl', 'average_dkl', (['party_datasets', 'reference_dataset'], {}), '(party_datasets, reference_dataset)\n', (1118, 1153), False, 'from metrics.phi_div import average_dkl\n'), ((1243, 1302), 'metrics.phi_div.average_dkl', 'average_dkl', (['party_datasets_with_rewards', 'reference_dataset'], {}), '(party_datasets_with_rewards, reference_dataset)\n', (1254, 1302), False, 'from metrics.phi_div import average_dkl\n'), ((1779, 1830), 'metrics.wasserstein.wasserstein_2', 'wasserstein_2', (['party_datasets[i]', 'reference_dataset'], {}), '(party_datasets[i], reference_dataset)\n', (1792, 1830), False, 'from metrics.wasserstein import wasserstein_2\n'), ((870, 925), 'numpy.concatenate', 'np.concatenate', (['[party_datasets[i], rewards[i]]'], {'axis': '(0)'}), '([party_datasets[i], rewards[i]], axis=0)\n', (884, 925), True, 'import numpy as np\n'), ((1387, 1417), 'numpy.corrcoef', 'np.corrcoef', (['alpha', 'dkls_after'], {}), '(alpha, dkls_after)\n', (1398, 1417), True, 'import numpy as np\n'), ((2208, 2238), 'numpy.corrcoef', 'np.corrcoef', (['alpha', 'wass_after'], {}), '(alpha, wass_after)\n', (2219, 2238), True, 'import numpy as np\n'), ((1549, 1565), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (1557, 1565), True, 'import numpy as np\n'), ((1928, 1948), 'numpy.array', 'np.array', (['rewards[i]'], {}), '(rewards[i])\n', (1936, 1948), True, 'import numpy as np\n'), ((2278, 2289), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2287, 2289), False, 'import os\n')]
|
import unittest
from typing import List
import numpy as np
from py_headless_daw.processing.stream.stream_gain import StreamGain
from py_headless_daw.schema.dto.time_interval import TimeInterval
from py_headless_daw.schema.events.event import Event
from py_headless_daw.schema.events.parameter_value_event import ParameterValueEvent
class StreamGainStrategyTest(unittest.TestCase):
def test_stream_gain_strategy(self):
strategy = StreamGain(np.float32(0.25))
interval = TimeInterval()
interval.start_in_bars = 0
interval.end_in_bars = 1
in_stream_buffer = np.ones(shape=(100,), dtype=np.float32)
out_stream_buffer = np.zeros(shape=(100,), dtype=np.float32)
setter_event: ParameterValueEvent = ParameterValueEvent(0, StreamGain.PARAMETER_GAIN, 0.55)
input_event_buffer: List[Event] = [setter_event]
output_event_buffer: List[Event] = []
strategy.render(interval, [in_stream_buffer], [out_stream_buffer], [input_event_buffer], [output_event_buffer])
# the first few samples are closer to the initial value
for x in range(0, 3):
self.assertTrue(0.24 < out_stream_buffer[x] < 0.26)
# while the last few are closer to the target one
for x in range(out_stream_buffer.shape[0] - 3, out_stream_buffer.shape[0]):
            self.assertTrue(out_stream_buffer[x] > 0.45)
strategy.render(interval, [in_stream_buffer], [out_stream_buffer], [[]], [[]])
        # now we render without any events in the input; the logic in the
# strategy is slightly different in this case
for x in range(out_stream_buffer.shape[0] - 3, out_stream_buffer.shape[0]):
self.assertTrue(out_stream_buffer[x] > 0.45)
|
[
"py_headless_daw.schema.dto.time_interval.TimeInterval",
"py_headless_daw.schema.events.parameter_value_event.ParameterValueEvent",
"numpy.float32",
"numpy.zeros",
"numpy.ones"
] |
[((495, 509), 'py_headless_daw.schema.dto.time_interval.TimeInterval', 'TimeInterval', ([], {}), '()\n', (507, 509), False, 'from py_headless_daw.schema.dto.time_interval import TimeInterval\n'), ((606, 645), 'numpy.ones', 'np.ones', ([], {'shape': '(100,)', 'dtype': 'np.float32'}), '(shape=(100,), dtype=np.float32)\n', (613, 645), True, 'import numpy as np\n'), ((674, 714), 'numpy.zeros', 'np.zeros', ([], {'shape': '(100,)', 'dtype': 'np.float32'}), '(shape=(100,), dtype=np.float32)\n', (682, 714), True, 'import numpy as np\n'), ((760, 815), 'py_headless_daw.schema.events.parameter_value_event.ParameterValueEvent', 'ParameterValueEvent', (['(0)', 'StreamGain.PARAMETER_GAIN', '(0.55)'], {}), '(0, StreamGain.PARAMETER_GAIN, 0.55)\n', (779, 815), False, 'from py_headless_daw.schema.events.parameter_value_event import ParameterValueEvent\n'), ((457, 473), 'numpy.float32', 'np.float32', (['(0.25)'], {}), '(0.25)\n', (467, 473), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
# import pydicom
import os
from pydicom.filereader import dcmread, read_dicomdir
from glob import glob
import cv2
import numpy as np
cv2.destroyAllWindows()
# window prop
screensize = ((-1440,0),(0,900))
screenwidth = screensize[0][1]-screensize[0][0]
screenheight = screensize[1][1]-screensize[1][0]
headertop= 30
headerbottom = 8
headerside = 8
n = 3
m = 2
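# lay the windows out in an n x m grid within the screen area defined above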
windowwidth = int((screenwidth - n * headerside*2)/ n)
windowheight = int((screenheight - m * (headertop + headerbottom)) /m)
# input directory
dicom_dir = r"E:\BTSynchSGH\datasets\necklysis\input\dicom"
fps = glob(os.path.join(dicom_dir,"*.dcm"))
ds_list = [dcmread(filename) for filename in fps]
# select image
image = ds_list[10].pixel_array
# image details
image_height, image_width = image.shape
# image pre-processing
image_norm = cv2.normalize(image, dst=None, alpha=0, beta=65536, norm_type=cv2.NORM_MINMAX)  # rescale intensities so the image is easier to view
image_norm_uint8 = cv2.convertScaleAbs(image_norm)
min_head_thresh = 10000
max_head_thresh = 65535
# get outline of head
ret, image_thresh = cv2.threshold(image_norm,min_head_thresh, max_head_thresh, cv2.THRESH_TOZERO)
image_thresh_uint8 = cv2.convertScaleAbs(image_thresh)
image_canny = cv2.Canny(image_thresh_uint8,100,150)
# get contour
im2, contours, hierarchy = cv2.findContours(image_canny,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
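# replicate the grayscale image into 3 channels so contours can be drawn in color on top of it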
image_norm_3chan = np.stack([image_norm]*3,axis=-1)
# get largest contour
perimeter = [cv2.arcLength(cnt,True) for cnt in contours]
idx_max = np.argmax(np.array(perimeter))
image_contours = cv2.drawContours(image_norm_3chan.copy(), [contours[idx_max]], 0, (0,65535,0), 3)
# display process images
# original image
cv2.namedWindow("image_norm",cv2.WINDOW_NORMAL)
cv2.moveWindow("image_norm",screensize[0][0],0)
cv2.resizeWindow("image_norm",(windowwidth,windowheight))
cv2.imshow("image_norm", image_norm)
# canny
cv2.namedWindow("image_canny",cv2.WINDOW_NORMAL)
cv2.imshow("image_canny", image_canny)
cv2.resizeWindow("image_canny",(windowwidth,windowheight))
cv2.moveWindow("image_canny",screensize[0][0]+(windowwidth+headerside*2),0)
# contours
cv2.namedWindow("contours",cv2.WINDOW_NORMAL)
cv2.imshow("contours", image_contours)
cv2.resizeWindow("contours",(windowwidth,windowheight))
cv2.moveWindow("contours",screensize[0][0]+(windowwidth+headerside)*2,0)
# cv2.waitKey(1)
# cv2.destroyAllWindows()
|
[
"numpy.stack",
"cv2.Canny",
"pydicom.filereader.dcmread",
"cv2.arcLength",
"cv2.threshold",
"cv2.imshow",
"cv2.resizeWindow",
"cv2.namedWindow",
"numpy.array",
"cv2.convertScaleAbs",
"cv2.normalize",
"cv2.moveWindow",
"cv2.destroyAllWindows",
"os.path.join",
"cv2.findContours"
] |
[((166, 189), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (187, 189), False, 'import cv2\n'), ((838, 916), 'cv2.normalize', 'cv2.normalize', (['image'], {'dst': 'None', 'alpha': '(0)', 'beta': '(65536)', 'norm_type': 'cv2.NORM_MINMAX'}), '(image, dst=None, alpha=0, beta=65536, norm_type=cv2.NORM_MINMAX)\n', (851, 916), False, 'import cv2\n'), ((961, 992), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['image_norm'], {}), '(image_norm)\n', (980, 992), False, 'import cv2\n'), ((1085, 1163), 'cv2.threshold', 'cv2.threshold', (['image_norm', 'min_head_thresh', 'max_head_thresh', 'cv2.THRESH_TOZERO'], {}), '(image_norm, min_head_thresh, max_head_thresh, cv2.THRESH_TOZERO)\n', (1098, 1163), False, 'import cv2\n'), ((1184, 1217), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['image_thresh'], {}), '(image_thresh)\n', (1203, 1217), False, 'import cv2\n'), ((1232, 1271), 'cv2.Canny', 'cv2.Canny', (['image_thresh_uint8', '(100)', '(150)'], {}), '(image_thresh_uint8, 100, 150)\n', (1241, 1271), False, 'import cv2\n'), ((1312, 1379), 'cv2.findContours', 'cv2.findContours', (['image_canny', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_NONE'], {}), '(image_canny, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n', (1328, 1379), False, 'import cv2\n'), ((1397, 1432), 'numpy.stack', 'np.stack', (['([image_norm] * 3)'], {'axis': '(-1)'}), '([image_norm] * 3, axis=-1)\n', (1405, 1432), True, 'import numpy as np\n'), ((1696, 1744), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image_norm"""', 'cv2.WINDOW_NORMAL'], {}), "('image_norm', cv2.WINDOW_NORMAL)\n", (1711, 1744), False, 'import cv2\n'), ((1744, 1793), 'cv2.moveWindow', 'cv2.moveWindow', (['"""image_norm"""', 'screensize[0][0]', '(0)'], {}), "('image_norm', screensize[0][0], 0)\n", (1758, 1793), False, 'import cv2\n'), ((1792, 1851), 'cv2.resizeWindow', 'cv2.resizeWindow', (['"""image_norm"""', '(windowwidth, windowheight)'], {}), "('image_norm', (windowwidth, windowheight))\n", (1808, 1851), False, 'import cv2\n'), ((1850, 1886), 'cv2.imshow', 'cv2.imshow', (['"""image_norm"""', 'image_norm'], {}), "('image_norm', image_norm)\n", (1860, 1886), False, 'import cv2\n'), ((1896, 1945), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image_canny"""', 'cv2.WINDOW_NORMAL'], {}), "('image_canny', cv2.WINDOW_NORMAL)\n", (1911, 1945), False, 'import cv2\n'), ((1945, 1983), 'cv2.imshow', 'cv2.imshow', (['"""image_canny"""', 'image_canny'], {}), "('image_canny', image_canny)\n", (1955, 1983), False, 'import cv2\n'), ((1984, 2044), 'cv2.resizeWindow', 'cv2.resizeWindow', (['"""image_canny"""', '(windowwidth, windowheight)'], {}), "('image_canny', (windowwidth, windowheight))\n", (2000, 2044), False, 'import cv2\n'), ((2043, 2130), 'cv2.moveWindow', 'cv2.moveWindow', (['"""image_canny"""', '(screensize[0][0] + (windowwidth + headerside * 2))', '(0)'], {}), "('image_canny', screensize[0][0] + (windowwidth + headerside *\n 2), 0)\n", (2057, 2130), False, 'import cv2\n'), ((2131, 2177), 'cv2.namedWindow', 'cv2.namedWindow', (['"""contours"""', 'cv2.WINDOW_NORMAL'], {}), "('contours', cv2.WINDOW_NORMAL)\n", (2146, 2177), False, 'import cv2\n'), ((2177, 2215), 'cv2.imshow', 'cv2.imshow', (['"""contours"""', 'image_contours'], {}), "('contours', image_contours)\n", (2187, 2215), False, 'import cv2\n'), ((2216, 2273), 'cv2.resizeWindow', 'cv2.resizeWindow', (['"""contours"""', '(windowwidth, windowheight)'], {}), "('contours', (windowwidth, windowheight))\n", (2232, 2273), False, 'import cv2\n'), ((2272, 2357), 'cv2.moveWindow', 'cv2.moveWindow', (['"""contours"""', 
'(screensize[0][0] + (windowwidth + headerside) * 2)', '(0)'], {}), "('contours', screensize[0][0] + (windowwidth + headerside) * 2, 0\n )\n", (2286, 2357), False, 'import cv2\n'), ((612, 644), 'os.path.join', 'os.path.join', (['dicom_dir', '"""*.dcm"""'], {}), "(dicom_dir, '*.dcm')\n", (624, 644), False, 'import os\n'), ((657, 674), 'pydicom.filereader.dcmread', 'dcmread', (['filename'], {}), '(filename)\n', (664, 674), False, 'from pydicom.filereader import dcmread, read_dicomdir\n'), ((1467, 1491), 'cv2.arcLength', 'cv2.arcLength', (['cnt', '(True)'], {}), '(cnt, True)\n', (1480, 1491), False, 'import cv2\n'), ((1532, 1551), 'numpy.array', 'np.array', (['perimeter'], {}), '(perimeter)\n', (1540, 1551), True, 'import numpy as np\n')]
|
import glob
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
import xarray as xr
from mpl_toolkits.basemap import Basemap
import gc
import matplotlib
matplotlib.rc('font', size=12)
data_path = 'processed_netcdf'
multibeam_files = glob.glob(data_path + '/*.nc')
multibeam_files.sort()
lon0, lon1 = -122.2, -121.7
lat0, lat1 = 36.6, 37.
parallels = np.arange(lat0, lat1 + 0.1, 0.1)
meridians = np.arange(lon0, lon1 + 0.1, 0.1)
fig = plt.figure(figsize=(8, 6))
map = Basemap(llcrnrlon=lon0, llcrnrlat=lat0, urcrnrlon=lon1, urcrnrlat=lat1, \
resolution='f')
map.drawcoastlines()
map.drawparallels(parallels, labels=~np.isnan(parallels))
map.drawmeridians(meridians, labels=~np.isnan(meridians))
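# plot every skip-th point of each grid to keep pcolor manageable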
skip = 4
for f in multibeam_files:
print('Plotting ', f)
ds = xr.open_dataset(f)
lon = np.array(ds.longitude[::skip,::skip])
lat = np.array(ds.latitude[::skip,::skip])
depth = np.array(ds.depth[::skip,::skip])
plt.pcolor(lon, lat, depth, vmin=0, vmax=100, cmap=cm.viridis_r)
del lon, lat, depth, ds
gc.collect()
plt.colorbar()
fig.suptitle('Monterey Bay bathymetry from shipboard Multibeam EM-712')
plt.savefig('monterey_bay_multibeam_bathymetry.png', dpi=300)
plt.close(fig)
|
[
"matplotlib.pyplot.pcolor",
"matplotlib.rc",
"matplotlib.pyplot.close",
"xarray.open_dataset",
"numpy.isnan",
"matplotlib.pyplot.colorbar",
"gc.collect",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.array",
"glob.glob",
"mpl_toolkits.basemap.Basemap",
"matplotlib.pyplot.savefig"
] |
[((180, 210), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {'size': '(12)'}), "('font', size=12)\n", (193, 210), False, 'import matplotlib\n'), ((262, 292), 'glob.glob', 'glob.glob', (["(data_path + '/*.nc')"], {}), "(data_path + '/*.nc')\n", (271, 292), False, 'import glob\n'), ((381, 413), 'numpy.arange', 'np.arange', (['lat0', '(lat1 + 0.1)', '(0.1)'], {}), '(lat0, lat1 + 0.1, 0.1)\n', (390, 413), True, 'import numpy as np\n'), ((426, 458), 'numpy.arange', 'np.arange', (['lon0', '(lon1 + 0.1)', '(0.1)'], {}), '(lon0, lon1 + 0.1, 0.1)\n', (435, 458), True, 'import numpy as np\n'), ((466, 492), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (476, 492), True, 'import matplotlib.pyplot as plt\n'), ((499, 590), 'mpl_toolkits.basemap.Basemap', 'Basemap', ([], {'llcrnrlon': 'lon0', 'llcrnrlat': 'lat0', 'urcrnrlon': 'lon1', 'urcrnrlat': 'lat1', 'resolution': '"""f"""'}), "(llcrnrlon=lon0, llcrnrlat=lat0, urcrnrlon=lon1, urcrnrlat=lat1,\n resolution='f')\n", (506, 590), False, 'from mpl_toolkits.basemap import Basemap\n'), ((1087, 1101), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1099, 1101), True, 'import matplotlib.pyplot as plt\n'), ((1175, 1236), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""monterey_bay_multibeam_bathymetry.png"""'], {'dpi': '(300)'}), "('monterey_bay_multibeam_bathymetry.png', dpi=300)\n", (1186, 1236), True, 'import matplotlib.pyplot as plt\n'), ((1237, 1251), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (1246, 1251), True, 'import matplotlib.pyplot as plt\n'), ((812, 830), 'xarray.open_dataset', 'xr.open_dataset', (['f'], {}), '(f)\n', (827, 830), True, 'import xarray as xr\n'), ((841, 879), 'numpy.array', 'np.array', (['ds.longitude[::skip, ::skip]'], {}), '(ds.longitude[::skip, ::skip])\n', (849, 879), True, 'import numpy as np\n'), ((889, 926), 'numpy.array', 'np.array', (['ds.latitude[::skip, ::skip]'], {}), '(ds.latitude[::skip, ::skip])\n', (897, 926), True, 'import numpy as np\n'), ((938, 972), 'numpy.array', 'np.array', (['ds.depth[::skip, ::skip]'], {}), '(ds.depth[::skip, ::skip])\n', (946, 972), True, 'import numpy as np\n'), ((976, 1040), 'matplotlib.pyplot.pcolor', 'plt.pcolor', (['lon', 'lat', 'depth'], {'vmin': '(0)', 'vmax': '(100)', 'cmap': 'cm.viridis_r'}), '(lon, lat, depth, vmin=0, vmax=100, cmap=cm.viridis_r)\n', (986, 1040), True, 'import matplotlib.pyplot as plt\n'), ((1073, 1085), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1083, 1085), False, 'import gc\n'), ((661, 680), 'numpy.isnan', 'np.isnan', (['parallels'], {}), '(parallels)\n', (669, 680), True, 'import numpy as np\n'), ((719, 738), 'numpy.isnan', 'np.isnan', (['meridians'], {}), '(meridians)\n', (727, 738), True, 'import numpy as np\n')]
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
import math
import numpy as np
def quadratic(**kwargs) -> float:
return sum(x_i ** 2 for _, x_i in kwargs.items())
def ackley(x_1=None, x_2=None, a=20, b=0.2, c=2*math.pi):
d = 2
    return -a * np.exp(-b * np.sqrt((x_1**2 + x_2**2) / d)) - np.exp((np.cos(c * x_1) + np.cos(c * x_2)) / d) + a + np.exp(1)
def flower(**kwargs):
a = 1
b = 2
c = 4
x_1 = kwargs['x_1']
x_2 = kwargs['x_2']
x_norm = np.sqrt(x_1**2 + x_2**2)
return a * x_norm + b * np.sin(c * np.arctan2(x_1, x_2))
|
[
"numpy.arctan2",
"numpy.exp",
"numpy.cos",
"numpy.sqrt"
] |
[((526, 554), 'numpy.sqrt', 'np.sqrt', (['(x_1 ** 2 + x_2 ** 2)'], {}), '(x_1 ** 2 + x_2 ** 2)\n', (533, 554), True, 'import numpy as np\n'), ((390, 399), 'numpy.exp', 'np.exp', (['(1)'], {}), '(1)\n', (396, 399), True, 'import numpy as np\n'), ((593, 613), 'numpy.arctan2', 'np.arctan2', (['x_1', 'x_2'], {}), '(x_1, x_2)\n', (603, 613), True, 'import numpy as np\n'), ((349, 364), 'numpy.cos', 'np.cos', (['(c * x_1)'], {}), '(c * x_1)\n', (355, 364), True, 'import numpy as np\n'), ((367, 382), 'numpy.cos', 'np.cos', (['(c * x_2)'], {}), '(c * x_2)\n', (373, 382), True, 'import numpy as np\n'), ((308, 342), 'numpy.sqrt', 'np.sqrt', (['((x_1 ** 2 + x_2 ** 2) / d)'], {}), '((x_1 ** 2 + x_2 ** 2) / d)\n', (315, 342), True, 'import numpy as np\n')]
|
import sys
import numpy as np
import quaternionic
import pytest
def test_self_return():
def f1(a, b, c):
d = np.asarray(a).copy()
assert isinstance(a, np.ndarray) and isinstance(a, quaternionic.array)
assert isinstance(b, np.ndarray) and isinstance(b, quaternionic.array)
assert isinstance(c, np.ndarray) and isinstance(c, quaternionic.array)
assert isinstance(d, np.ndarray) and not isinstance(d, quaternionic.array)
return d
a = quaternionic.array.random((17, 3, 4))
b = quaternionic.array.random((13, 3, 4))
c = quaternionic.array.random((11, 3, 4))
d1 = f1(a, b, c)
assert isinstance(d1, np.ndarray) and not isinstance(d1, quaternionic.array)
f2 = quaternionic.utilities.type_self_return(f1)
d2 = f2(a, b, c)
assert isinstance(d2, np.ndarray) and isinstance(d2, quaternionic.array)
f1.nin = 3
f3 = quaternionic.utilities.type_self_return(f1)
d3 = f3(a, b, c)
assert isinstance(d3, np.ndarray) and isinstance(d3, quaternionic.array)
def test_ndarray_args():
def f1(a, b, c):
d = np.asarray(a).copy()
assert isinstance(a, np.ndarray) and not isinstance(a, quaternionic.array)
assert isinstance(b, np.ndarray) and not isinstance(b, quaternionic.array)
assert isinstance(c, np.ndarray) and not isinstance(c, quaternionic.array)
assert isinstance(d, np.ndarray) and not isinstance(d, quaternionic.array)
return d
a = quaternionic.array.random((17, 3, 4))
b = quaternionic.array.random((13, 3, 4))
c = quaternionic.array.random((11, 3, 4))
f2 = quaternionic.utilities.ndarray_args(f1)
d2 = f2(a, b, c)
assert isinstance(d2, np.ndarray) and not isinstance(d2, quaternionic.array)
f1.nin = 3
f3 = quaternionic.utilities.ndarray_args(f1)
d3 = f3(a, b, c)
assert isinstance(d3, np.ndarray) and not isinstance(d3, quaternionic.array)
def test_ndarray_args_and_return():
def f1(a, b, c):
d = np.asarray(a).copy()
assert isinstance(a, np.ndarray) and not isinstance(a, quaternionic.array)
assert isinstance(b, np.ndarray) and not isinstance(b, quaternionic.array)
assert isinstance(c, np.ndarray) and not isinstance(c, quaternionic.array)
assert isinstance(d, np.ndarray) and not isinstance(d, quaternionic.array)
return d
a = quaternionic.array.random((17, 3, 4))
b = quaternionic.array.random((13, 3, 4))
c = quaternionic.array.random((11, 3, 4))
f2 = quaternionic.utilities.ndarray_args_and_return(f1)
d2 = f2(a, b, c)
assert isinstance(d2, np.ndarray) and isinstance(d2, quaternionic.array)
f1.nin = 3
f3 = quaternionic.utilities.ndarray_args_and_return(f1)
d3 = f3(a, b, c)
assert isinstance(d3, np.ndarray) and isinstance(d3, quaternionic.array)
@pytest.mark.skipif(sys.implementation.name.lower() == 'pypy', reason="No numba on pypy")
def test_types_to_ftylist():
import numba
types_to_ftylist = quaternionic.utilities.convert_numpy_ufunc_type_to_numba_ftylist
types = '?bhilqpBHILQPfdgF->D'
ftylist = numba.complex128(
numba.boolean,
numba.byte,
numba.short,
numba.intc,
numba.int_,
numba.longlong,
numba.intp,
numba.char,
numba.ushort,
numba.uintc,
numba.uint,
numba.ulonglong,
numba.uintp,
numba.float32,
numba.float_,
numba.double,
numba.complex64,
)
assert types_to_ftylist([types]) == [ftylist]
def test_pyguvectorize():
_quaternion_resolution = 10 * np.finfo(float).resolution
np.random.seed(1234)
one = quaternionic.array(1, 0, 0, 0)
x = quaternionic.array.random((7, 13, 4))
y = quaternionic.array.random((13, 4))
z = np.random.rand(13)
arg0s = [one, -(1+2*_quaternion_resolution)*one, -one, x]
for k in dir(quaternionic.algebra_ufuncs):
if not k.startswith('__'):
f1 = getattr(quaternionic.algebra_ufuncs, k)
f2 = getattr(quaternionic.algebra, k)
sig = f2.signature
inputs = sig.split('->')[0].split(',')
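            # signature entries of '(n)' take quaternion component arrays; scalar entries take the float vector z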
for arg0 in arg0s:
args = [arg0.ndarray] if inputs[0] == '(n)' else [z,]
if len(inputs) > 1:
args.append(y.ndarray if inputs[1] == '(n)' else z)
assert np.allclose(
f1(*args),
quaternionic.utilities.pyguvectorize(f2.types, f2.signature)(f2)(*args),
atol=0.0,
rtol=_quaternion_resolution
)
|
[
"quaternionic.utilities.ndarray_args",
"numpy.random.seed",
"sys.implementation.name.lower",
"numpy.random.rand",
"numba.complex128",
"numpy.asarray",
"numpy.finfo",
"quaternionic.array",
"quaternionic.array.random",
"quaternionic.utilities.pyguvectorize",
"quaternionic.utilities.type_self_return",
"quaternionic.utilities.ndarray_args_and_return"
] |
[((489, 526), 'quaternionic.array.random', 'quaternionic.array.random', (['(17, 3, 4)'], {}), '((17, 3, 4))\n', (514, 526), False, 'import quaternionic\n'), ((535, 572), 'quaternionic.array.random', 'quaternionic.array.random', (['(13, 3, 4)'], {}), '((13, 3, 4))\n', (560, 572), False, 'import quaternionic\n'), ((581, 618), 'quaternionic.array.random', 'quaternionic.array.random', (['(11, 3, 4)'], {}), '((11, 3, 4))\n', (606, 618), False, 'import quaternionic\n'), ((730, 773), 'quaternionic.utilities.type_self_return', 'quaternionic.utilities.type_self_return', (['f1'], {}), '(f1)\n', (769, 773), False, 'import quaternionic\n'), ((896, 939), 'quaternionic.utilities.type_self_return', 'quaternionic.utilities.type_self_return', (['f1'], {}), '(f1)\n', (935, 939), False, 'import quaternionic\n'), ((1476, 1513), 'quaternionic.array.random', 'quaternionic.array.random', (['(17, 3, 4)'], {}), '((17, 3, 4))\n', (1501, 1513), False, 'import quaternionic\n'), ((1522, 1559), 'quaternionic.array.random', 'quaternionic.array.random', (['(13, 3, 4)'], {}), '((13, 3, 4))\n', (1547, 1559), False, 'import quaternionic\n'), ((1568, 1605), 'quaternionic.array.random', 'quaternionic.array.random', (['(11, 3, 4)'], {}), '((11, 3, 4))\n', (1593, 1605), False, 'import quaternionic\n'), ((1615, 1654), 'quaternionic.utilities.ndarray_args', 'quaternionic.utilities.ndarray_args', (['f1'], {}), '(f1)\n', (1650, 1654), False, 'import quaternionic\n'), ((1781, 1820), 'quaternionic.utilities.ndarray_args', 'quaternionic.utilities.ndarray_args', (['f1'], {}), '(f1)\n', (1816, 1820), False, 'import quaternionic\n'), ((2372, 2409), 'quaternionic.array.random', 'quaternionic.array.random', (['(17, 3, 4)'], {}), '((17, 3, 4))\n', (2397, 2409), False, 'import quaternionic\n'), ((2418, 2455), 'quaternionic.array.random', 'quaternionic.array.random', (['(13, 3, 4)'], {}), '((13, 3, 4))\n', (2443, 2455), False, 'import quaternionic\n'), ((2464, 2501), 'quaternionic.array.random', 'quaternionic.array.random', (['(11, 3, 4)'], {}), '((11, 3, 4))\n', (2489, 2501), False, 'import quaternionic\n'), ((2511, 2561), 'quaternionic.utilities.ndarray_args_and_return', 'quaternionic.utilities.ndarray_args_and_return', (['f1'], {}), '(f1)\n', (2557, 2561), False, 'import quaternionic\n'), ((2684, 2734), 'quaternionic.utilities.ndarray_args_and_return', 'quaternionic.utilities.ndarray_args_and_return', (['f1'], {}), '(f1)\n', (2730, 2734), False, 'import quaternionic\n'), ((3108, 3370), 'numba.complex128', 'numba.complex128', (['numba.boolean', 'numba.byte', 'numba.short', 'numba.intc', 'numba.int_', 'numba.longlong', 'numba.intp', 'numba.char', 'numba.ushort', 'numba.uintc', 'numba.uint', 'numba.ulonglong', 'numba.uintp', 'numba.float32', 'numba.float_', 'numba.double', 'numba.complex64'], {}), '(numba.boolean, numba.byte, numba.short, numba.intc, numba.\n int_, numba.longlong, numba.intp, numba.char, numba.ushort, numba.uintc,\n numba.uint, numba.ulonglong, numba.uintp, numba.float32, numba.float_,\n numba.double, numba.complex64)\n', (3124, 3370), False, 'import numba\n'), ((3644, 3664), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (3658, 3664), True, 'import numpy as np\n'), ((3675, 3705), 'quaternionic.array', 'quaternionic.array', (['(1)', '(0)', '(0)', '(0)'], {}), '(1, 0, 0, 0)\n', (3693, 3705), False, 'import quaternionic\n'), ((3714, 3751), 'quaternionic.array.random', 'quaternionic.array.random', (['(7, 13, 4)'], {}), '((7, 13, 4))\n', (3739, 3751), False, 'import quaternionic\n'), ((3760, 3794), 
'quaternionic.array.random', 'quaternionic.array.random', (['(13, 4)'], {}), '((13, 4))\n', (3785, 3794), False, 'import quaternionic\n'), ((3803, 3821), 'numpy.random.rand', 'np.random.rand', (['(13)'], {}), '(13)\n', (3817, 3821), True, 'import numpy as np\n'), ((2855, 2886), 'sys.implementation.name.lower', 'sys.implementation.name.lower', ([], {}), '()\n', (2884, 2886), False, 'import sys\n'), ((3613, 3628), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (3621, 3628), True, 'import numpy as np\n'), ((123, 136), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (133, 136), True, 'import numpy as np\n'), ((1098, 1111), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (1108, 1111), True, 'import numpy as np\n'), ((1994, 2007), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (2004, 2007), True, 'import numpy as np\n'), ((4453, 4513), 'quaternionic.utilities.pyguvectorize', 'quaternionic.utilities.pyguvectorize', (['f2.types', 'f2.signature'], {}), '(f2.types, f2.signature)\n', (4489, 4513), False, 'import quaternionic\n')]
|
from unittest import TestCase
import numpy as np
import os
from xaitk_saliency.impls.gen_detector_prop_sal.drise_scoring import DetectorRISE
from xaitk_saliency import GenerateDetectorProposalSaliency
from smqtk_core.configuration import configuration_test_helper
from tests import DATA_DIR, EXPECTED_MASKS_4x6
class TestSimilarityScoring (TestCase):
def test_init_(self) -> None:
"""
Test if implementation is usable.
"""
impl = DetectorRISE()
assert impl.is_usable() and isinstance(impl, GenerateDetectorProposalSaliency)
def test_default_param(self) -> None:
"""
Test default construction.
"""
impl = DetectorRISE()
assert impl.proximity_metric == 'cosine'
def test_get_config(self) -> None:
"""
        Test expected configuration behavior.
"""
impl = DetectorRISE('euclidean')
for i in configuration_test_helper(impl):
assert i.proximity_metric == 'euclidean'
def test_metric_args(self) -> None:
"""
Test non-default metric type.
"""
impl = DetectorRISE('hamming')
assert impl.proximity_metric == 'hamming'
def test_shape_sanity(self) -> None:
"""
Test basic scoring with a single feature for broadcasting sanity check.
"""
impl = DetectorRISE()
np.random.seed(2)
image1_dets = np.random.rand(2, (7))
pertb_dets = np.random.rand(10, 2, (7))
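        # ten random binary perturbation masks over a 15x25 image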
pertb_mask = np.random.randint(low=0, high=2, size=(10, 15, 25), dtype='int')
sal = impl.generate(image1_dets, pertb_dets, pertb_mask)
assert sal.shape == (2, 15, 25)
def test_standard_detection(self) -> None:
"""
Test basic scoring on known values and non-square masks.
"""
impl = DetectorRISE()
image1_dets = np.array([[1, 1, 4, 3, 0, 1, 0.89]])
pertb_dets = np.array([[[1, 2, 6, 6, 0.3, 1, 0.995]],
[[0, 1, 2, 2, 0.2, 2, 0.03]],
[[1, 0, 2, 2, 0.45, 1, 0.81]],
[[1, 1, 6, 6, 0.5, 1, 0.625]],
[[0, 2, 3, 5, 0.03, 1, 0.56]],
[[1, 2, 6, 3, 0.01, 1, 0.07]],])
sal = impl.generate(image1_dets, pertb_dets, EXPECTED_MASKS_4x6)
standard_sal = np.load(os.path.join(DATA_DIR, 'drisesal.npy'))
assert sal.shape == (1, 4, 6)
assert np.allclose(standard_sal, sal)
|
[
"xaitk_saliency.impls.gen_detector_prop_sal.drise_scoring.DetectorRISE",
"numpy.random.seed",
"numpy.allclose",
"smqtk_core.configuration.configuration_test_helper",
"numpy.random.randint",
"numpy.array",
"numpy.random.rand",
"os.path.join"
] |
[((472, 486), 'xaitk_saliency.impls.gen_detector_prop_sal.drise_scoring.DetectorRISE', 'DetectorRISE', ([], {}), '()\n', (484, 486), False, 'from xaitk_saliency.impls.gen_detector_prop_sal.drise_scoring import DetectorRISE\n'), ((691, 705), 'xaitk_saliency.impls.gen_detector_prop_sal.drise_scoring.DetectorRISE', 'DetectorRISE', ([], {}), '()\n', (703, 705), False, 'from xaitk_saliency.impls.gen_detector_prop_sal.drise_scoring import DetectorRISE\n'), ((879, 904), 'xaitk_saliency.impls.gen_detector_prop_sal.drise_scoring.DetectorRISE', 'DetectorRISE', (['"""euclidean"""'], {}), "('euclidean')\n", (891, 904), False, 'from xaitk_saliency.impls.gen_detector_prop_sal.drise_scoring import DetectorRISE\n'), ((922, 953), 'smqtk_core.configuration.configuration_test_helper', 'configuration_test_helper', (['impl'], {}), '(impl)\n', (947, 953), False, 'from smqtk_core.configuration import configuration_test_helper\n'), ((1126, 1149), 'xaitk_saliency.impls.gen_detector_prop_sal.drise_scoring.DetectorRISE', 'DetectorRISE', (['"""hamming"""'], {}), "('hamming')\n", (1138, 1149), False, 'from xaitk_saliency.impls.gen_detector_prop_sal.drise_scoring import DetectorRISE\n'), ((1361, 1375), 'xaitk_saliency.impls.gen_detector_prop_sal.drise_scoring.DetectorRISE', 'DetectorRISE', ([], {}), '()\n', (1373, 1375), False, 'from xaitk_saliency.impls.gen_detector_prop_sal.drise_scoring import DetectorRISE\n'), ((1384, 1401), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (1398, 1401), True, 'import numpy as np\n'), ((1424, 1444), 'numpy.random.rand', 'np.random.rand', (['(2)', '(7)'], {}), '(2, 7)\n', (1438, 1444), True, 'import numpy as np\n'), ((1468, 1492), 'numpy.random.rand', 'np.random.rand', (['(10)', '(2)', '(7)'], {}), '(10, 2, 7)\n', (1482, 1492), True, 'import numpy as np\n'), ((1516, 1580), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(2)', 'size': '(10, 15, 25)', 'dtype': '"""int"""'}), "(low=0, high=2, size=(10, 15, 25), dtype='int')\n", (1533, 1580), True, 'import numpy as np\n'), ((1839, 1853), 'xaitk_saliency.impls.gen_detector_prop_sal.drise_scoring.DetectorRISE', 'DetectorRISE', ([], {}), '()\n', (1851, 1853), False, 'from xaitk_saliency.impls.gen_detector_prop_sal.drise_scoring import DetectorRISE\n'), ((1876, 1912), 'numpy.array', 'np.array', (['[[1, 1, 4, 3, 0, 1, 0.89]]'], {}), '([[1, 1, 4, 3, 0, 1, 0.89]])\n', (1884, 1912), True, 'import numpy as np\n'), ((1934, 2138), 'numpy.array', 'np.array', (['[[[1, 2, 6, 6, 0.3, 1, 0.995]], [[0, 1, 2, 2, 0.2, 2, 0.03]], [[1, 0, 2, 2,\n 0.45, 1, 0.81]], [[1, 1, 6, 6, 0.5, 1, 0.625]], [[0, 2, 3, 5, 0.03, 1, \n 0.56]], [[1, 2, 6, 3, 0.01, 1, 0.07]]]'], {}), '([[[1, 2, 6, 6, 0.3, 1, 0.995]], [[0, 1, 2, 2, 0.2, 2, 0.03]], [[1,\n 0, 2, 2, 0.45, 1, 0.81]], [[1, 1, 6, 6, 0.5, 1, 0.625]], [[0, 2, 3, 5, \n 0.03, 1, 0.56]], [[1, 2, 6, 3, 0.01, 1, 0.07]]])\n', (1942, 2138), True, 'import numpy as np\n'), ((2483, 2513), 'numpy.allclose', 'np.allclose', (['standard_sal', 'sal'], {}), '(standard_sal, sal)\n', (2494, 2513), True, 'import numpy as np\n'), ((2390, 2428), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""drisesal.npy"""'], {}), "(DATA_DIR, 'drisesal.npy')\n", (2402, 2428), False, 'import os\n')]
|
import numpy as np
import autoeap
from numpy.testing import assert_array_almost_equal
import os
PACKAGEDIR = os.path.abspath(os.path.dirname(__file__))
def test_raw_lightcurve():
time,flux,flux_err = autoeap.createlightcurve('EPIC220198696',campaign=8)
lc = np.genfromtxt(os.path.join(PACKAGEDIR,"EPIC220198696_c8_autoEAP.lc"),skip_header=1).T
assert_array_almost_equal(time,lc[0])
assert_array_almost_equal(flux,lc[1].astype(np.float32))
assert_array_almost_equal(flux_err,lc[2].astype(np.float32))
|
[
"os.path.dirname",
"os.path.join",
"autoeap.createlightcurve",
"numpy.testing.assert_array_almost_equal"
] |
[((126, 151), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (141, 151), False, 'import os\n'), ((206, 259), 'autoeap.createlightcurve', 'autoeap.createlightcurve', (['"""EPIC220198696"""'], {'campaign': '(8)'}), "('EPIC220198696', campaign=8)\n", (230, 259), False, 'import autoeap\n'), ((360, 398), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['time', 'lc[0]'], {}), '(time, lc[0])\n', (385, 398), False, 'from numpy.testing import assert_array_almost_equal\n'), ((283, 338), 'os.path.join', 'os.path.join', (['PACKAGEDIR', '"""EPIC220198696_c8_autoEAP.lc"""'], {}), "(PACKAGEDIR, 'EPIC220198696_c8_autoEAP.lc')\n", (295, 338), False, 'import os\n')]
|
# ------------------------------------------------------------------------------
# Hippocampus segmentation task for the HarP dataset
# (http://www.hippocampal-protocol.net/SOPs/index.php)
# ------------------------------------------------------------------------------
import os
import re
import SimpleITK as sitk
import nibabel as nib
import numpy as np
import mp.data.datasets.dataset_utils as du
from mp.data.datasets.dataset_segmentation import SegmentationDataset, SegmentationInstance
from mp.paths import storage_data_path
from mp.utils.mask_bounding_box import mask_bbox_3D
from mp.utils.load_restore import join_path
class HarP(SegmentationDataset):
r"""Class for the segmentation of the HarP dataset,
found at http://www.hippocampal-protocol.net/SOPs/index.php
with the masks as .nii files and the scans as .mnc files.
"""
def __init__(self, subset=None, hold_out_ixs=None):
# Part is either: "Training", "Validation" or "All"
default = {"Part": "All"}
if subset is not None:
default.update(subset)
subset = default
else:
subset = default
if hold_out_ixs is None:
hold_out_ixs = []
global_name = 'HarP'
name = du.get_dataset_name(global_name, subset)
dataset_path = os.path.join(storage_data_path, global_name)
original_data_path = du.get_original_data_path(global_name)
# Build instances
instances = []
folders = []
if subset["Part"] in ["Training", "All"]:
folders.append(("100", "Training"))
if subset["Part"] in ["Validation", "All"]:
folders.append(("35", "Validation"))
for orig_folder, dst_folder in folders:
# Paths with the sub-folder for the current subset
dst_folder_path = os.path.join(dataset_path, dst_folder)
# Copy the images if not done already
if not os.path.isdir(dst_folder_path):
_extract_images(original_data_path, dst_folder_path, orig_folder)
# Fetch all patient/study names
study_names = set(file_name.split('.nii')[0].split('_gt')[0] for file_name
in os.listdir(os.path.join(dataset_path, dst_folder)))
for study_name in study_names:
instances.append(SegmentationInstance(
x_path=os.path.join(dataset_path, dst_folder, study_name + '.nii.gz'),
y_path=os.path.join(dataset_path, dst_folder, study_name + '_gt.nii.gz'),
name=study_name,
group_id=None
))
label_names = ['background', 'hippocampus']
super().__init__(instances, name=name, label_names=label_names,
modality='T1w MRI', nr_channels=1, hold_out_ixs=hold_out_ixs)
def _extract_images(source_path, target_path, subset):
r"""Extracts images, merges mask labels (if specified) and saves the
modified images.
"""
    # Folder 100 is for training (100 subjects); the remaining 35 subjects are used for validation
affine = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
images_path = os.path.join(source_path, subset)
labels_path = os.path.join(source_path, f'Labels_{subset}_NIFTI')
# Create directories
if not os.path.isdir(target_path):
os.makedirs(target_path)
files_with_swapped_masks = {"ADNI_007_S_1304_74384_ACPC.mnc",
"ADNI_016_S_4121_280306_ACPC.mnc",
"ADNI_029_S_4279_265980_ACPC.mnc",
"ADNI_136_S_0429_109839_ACPC.mnc"}
    # For each MRI, there are two segmentations (left and right hippocampus)
for filename in os.listdir(images_path):
# Loading the .mnc file and converting it to a .nii.gz file
minc = nib.load(os.path.join(images_path, filename))
x: np.array = nib.Nifti1Image(np.asarray(minc.dataobj), affine=affine).get_data()
        # We need to recover the study name from the image file name to construct the names of the segmentation files
match = re.match(r"ADNI_[0-9]+_S_[0-9]+_[0-9]+", filename)
if match is None:
raise Exception(f"A file ({filename}) does not match the expected file naming format")
# For each side of the brain
for side in ("_L", "_R"):
study_name = match[0] + side
y = sitk.ReadImage(os.path.join(labels_path, study_name + ".nii"))
y = sitk.GetArrayFromImage(y)
# Shape expected: (189, 233, 197)
assert x.shape == y.shape
# BUGFIX: Some segmentation have some weird values eg {26896.988, 26897.988} instead of {0, 1}
y = (y - np.min(y.flat)).astype(np.uint32)
# Cropping bounds computed to fit the ground truth
if (side == "_L") ^ (filename in files_with_swapped_masks):
y = y[40: 104, 78: 142, 49: 97]
x_cropped = x[40: 104, 78: 142, 49: 97]
else:
y = y[40: 104, 78: 142, 97: 145]
x_cropped = x[40: 104, 78: 142, 97: 145]
# Need to do move an axis as numpy coordinates are [z, y, x] and SimpleITK's are [x, y, z]
x_cropped = np.moveaxis(x_cropped, [0, 2], [2, 0])
# Changing the study name if needed
if filename in files_with_swapped_masks:
study_name = match[0] + ("_R" if side == "_L" else "_L")
# Save new images so they can be loaded directly
sitk.WriteImage(sitk.GetImageFromArray(y),
join_path([target_path, study_name + "_gt.nii.gz"]))
nib.save(nib.Nifti1Image(x_cropped, affine),
join_path([target_path, study_name + ".nii.gz"]))
|
[
"nibabel.Nifti1Image",
"numpy.moveaxis",
"os.makedirs",
"mp.data.datasets.dataset_utils.get_dataset_name",
"os.path.isdir",
"numpy.asarray",
"re.match",
"SimpleITK.GetArrayFromImage",
"mp.data.datasets.dataset_utils.get_original_data_path",
"numpy.min",
"numpy.array",
"SimpleITK.GetImageFromArray",
"mp.utils.load_restore.join_path",
"os.path.join",
"os.listdir"
] |
[((3132, 3198), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\n', (3140, 3198), True, 'import numpy as np\n'), ((3287, 3320), 'os.path.join', 'os.path.join', (['source_path', 'subset'], {}), '(source_path, subset)\n', (3299, 3320), False, 'import os\n'), ((3339, 3390), 'os.path.join', 'os.path.join', (['source_path', 'f"""Labels_{subset}_NIFTI"""'], {}), "(source_path, f'Labels_{subset}_NIFTI')\n", (3351, 3390), False, 'import os\n'), ((3852, 3875), 'os.listdir', 'os.listdir', (['images_path'], {}), '(images_path)\n', (3862, 3875), False, 'import os\n'), ((1253, 1293), 'mp.data.datasets.dataset_utils.get_dataset_name', 'du.get_dataset_name', (['global_name', 'subset'], {}), '(global_name, subset)\n', (1272, 1293), True, 'import mp.data.datasets.dataset_utils as du\n'), ((1317, 1361), 'os.path.join', 'os.path.join', (['storage_data_path', 'global_name'], {}), '(storage_data_path, global_name)\n', (1329, 1361), False, 'import os\n'), ((1391, 1429), 'mp.data.datasets.dataset_utils.get_original_data_path', 'du.get_original_data_path', (['global_name'], {}), '(global_name)\n', (1416, 1429), True, 'import mp.data.datasets.dataset_utils as du\n'), ((3428, 3454), 'os.path.isdir', 'os.path.isdir', (['target_path'], {}), '(target_path)\n', (3441, 3454), False, 'import os\n'), ((3464, 3488), 'os.makedirs', 'os.makedirs', (['target_path'], {}), '(target_path)\n', (3475, 3488), False, 'import os\n'), ((4223, 4272), 're.match', 're.match', (['"""ADNI_[0-9]+_S_[0-9]+_[0-9]+"""', 'filename'], {}), "('ADNI_[0-9]+_S_[0-9]+_[0-9]+', filename)\n", (4231, 4272), False, 'import re\n'), ((1842, 1880), 'os.path.join', 'os.path.join', (['dataset_path', 'dst_folder'], {}), '(dataset_path, dst_folder)\n', (1854, 1880), False, 'import os\n'), ((3969, 4004), 'os.path.join', 'os.path.join', (['images_path', 'filename'], {}), '(images_path, filename)\n', (3981, 4004), False, 'import os\n'), ((4608, 4633), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['y'], {}), '(y)\n', (4630, 4633), True, 'import SimpleITK as sitk\n'), ((5373, 5411), 'numpy.moveaxis', 'np.moveaxis', (['x_cropped', '[0, 2]', '[2, 0]'], {}), '(x_cropped, [0, 2], [2, 0])\n', (5384, 5411), True, 'import numpy as np\n'), ((1951, 1981), 'os.path.isdir', 'os.path.isdir', (['dst_folder_path'], {}), '(dst_folder_path)\n', (1964, 1981), False, 'import os\n'), ((4544, 4590), 'os.path.join', 'os.path.join', (['labels_path', "(study_name + '.nii')"], {}), "(labels_path, study_name + '.nii')\n", (4556, 4590), False, 'import os\n'), ((5677, 5702), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['y'], {}), '(y)\n', (5699, 5702), True, 'import SimpleITK as sitk\n'), ((5732, 5783), 'mp.utils.load_restore.join_path', 'join_path', (["[target_path, study_name + '_gt.nii.gz']"], {}), "([target_path, study_name + '_gt.nii.gz'])\n", (5741, 5783), False, 'from mp.utils.load_restore import join_path\n'), ((5806, 5840), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['x_cropped', 'affine'], {}), '(x_cropped, affine)\n', (5821, 5840), True, 'import nibabel as nib\n'), ((5863, 5911), 'mp.utils.load_restore.join_path', 'join_path', (["[target_path, study_name + '.nii.gz']"], {}), "([target_path, study_name + '.nii.gz'])\n", (5872, 5911), False, 'from mp.utils.load_restore import join_path\n'), ((4044, 4068), 'numpy.asarray', 'np.asarray', (['minc.dataobj'], {}), '(minc.dataobj)\n', (4054, 4068), True, 'import numpy as np\n'), ((4847, 4861), 'numpy.min', 
'np.min', (['y.flat'], {}), '(y.flat)\n', (4853, 4861), True, 'import numpy as np\n'), ((2241, 2279), 'os.path.join', 'os.path.join', (['dataset_path', 'dst_folder'], {}), '(dataset_path, dst_folder)\n', (2253, 2279), False, 'import os\n'), ((2408, 2470), 'os.path.join', 'os.path.join', (['dataset_path', 'dst_folder', "(study_name + '.nii.gz')"], {}), "(dataset_path, dst_folder, study_name + '.nii.gz')\n", (2420, 2470), False, 'import os\n'), ((2499, 2564), 'os.path.join', 'os.path.join', (['dataset_path', 'dst_folder', "(study_name + '_gt.nii.gz')"], {}), "(dataset_path, dst_folder, study_name + '_gt.nii.gz')\n", (2511, 2564), False, 'import os\n')]
|
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
# code changed to Python3
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import tarfile
from IPython.display import display, Image
from scipy import ndimage
from sklearn.linear_model import LogisticRegression
from sklearn.metrics.pairwise import cosine_similarity
from urllib.request import urlretrieve
import pickle
import IPython
# Configure the matplotlib backend to plot inline in IPython
# %matplotlib inline
url = 'http://commondatastorage.googleapis.com/books1000/'
last_percent_reported = None
def download_progress_hook(count, blockSize, totalSize):
"""A hook to report the progress of a download. This is mostly intended for users with
slow internet connections. Reports every 1% change in download progress.
"""
global last_percent_reported
percent = int(count * blockSize * 100 / totalSize)
if last_percent_reported != percent:
if percent % 5 == 0:
sys.stdout.write("%s%%" % percent)
sys.stdout.flush()
else:
sys.stdout.write(".")
sys.stdout.flush()
last_percent_reported = percent
def maybe_download(filename, expected_bytes, force=False):
"""Download a file if not present, and make sure it's the right size."""
if force or not os.path.exists(filename):
print('Attempting to download:', filename)
filename, _ = urlretrieve(url + filename, filename, reporthook=download_progress_hook)
print('\nDownload Complete!')
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)
test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)
num_classes = 10
np.random.seed(133)
def maybe_extract(filename, force=False):
root = os.path.splitext(os.path.splitext(filename)[0])[0] # remove .tar.gz
if os.path.isdir(root) and not force:
# You may override by setting force=True.
print('%s already present - Skipping extraction of %s.' % (root, filename))
else:
print('Extracting data for %s. This may take a while. Please wait.' % root)
tar = tarfile.open(filename)
sys.stdout.flush()
tar.extractall()
tar.close()
data_folders = [
os.path.join(root, d) for d in sorted(os.listdir(root))
if os.path.isdir(os.path.join(root, d))]
if len(data_folders) != num_classes:
raise Exception(
'Expected %d folders, one per class. Found %d instead.' % (
num_classes, len(data_folders)))
print(data_folders)
return data_folders
train_folders = maybe_extract(train_filename)
test_folders = maybe_extract(test_filename)
#IPython.display.display_png('notMNIST_large/B/MDEtMDEtMDAudHRm.png')
#IPython.display.display_png('notMNIST_large/J/Nng3b2N0IEFsdGVybmF0ZSBSZWd1bGFyLnR0Zg==.png')
image_size = 28 # Pixel width and height.
pixel_depth = 255.0 # Number of levels per pixel.
def load_letter(folder, min_num_images):
"""Load the data for a single letter label."""
image_files = os.listdir(folder)
dataset = np.ndarray(shape=(len(image_files), image_size, image_size),
dtype=np.float32)
print(folder)
num_images = 0
for image in image_files:
image_file = os.path.join(folder, image)
try:
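      # Normalize pixel values to a zero-centred range of roughly [-0.5, 0.5].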
image_data = (ndimage.imread(image_file).astype(float) -
pixel_depth / 2) / pixel_depth
if image_data.shape != (image_size, image_size):
raise Exception('Unexpected image shape: %s' % str(image_data.shape))
dataset[num_images, :, :] = image_data
num_images = num_images + 1
except IOError as e:
print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')
dataset = dataset[0:num_images, :, :]
if num_images < min_num_images:
raise Exception('Many fewer images than expected: %d < %d' %
(num_images, min_num_images))
print('Full dataset tensor:', dataset.shape)
print('Mean:', np.mean(dataset))
print('Standard deviation:', np.std(dataset))
return dataset
def maybe_pickle(data_folders, min_num_images_per_class, force=False):
dataset_names = []
for folder in data_folders:
set_filename = folder + '.pickle'
dataset_names.append(set_filename)
if os.path.exists(set_filename) and not force:
# You may override by setting force=True.
print('%s already present - Skipping pickling.' % set_filename)
else:
print('Pickling %s.' % set_filename)
dataset = load_letter(folder, min_num_images_per_class)
try:
with open(set_filename, 'wb') as f:
pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', set_filename, ':', e)
return dataset_names
train_datasets = maybe_pickle(train_folders, 45000)
test_datasets = maybe_pickle(test_folders, 1800)
def load_dataset(filename):
with open(filename, 'rb') as f:
return pickle.load(f)
# Display a random matrix with a specified figure number and a grayscale colormap
# largeNameA = train_datasets[0]
# print(largeNameA)
# largeDataA = load_dataset(largeNameA)
# img1 = largeDataA[0, :, :]
# plt.matshow(img1, cmap=plt.cm.gray)
# plt.show()
#
# smallNameJ = test_datasets[9]
# print(smallNameJ)
# smallDataJ = load_dataset(smallNameJ)
# img2 = smallDataJ[0, :, :]
# plt.matshow(img2, cmap=plt.cm.gray)
# plt.show()
# Check whether the data is balanced between classes
# for name in train_datasets:
# dataset = load_dataset(name)
# print(name, ' size:', dataset.shape)
#
# for name in test_datasets:
# dataset = load_dataset(name)
# print(name, ' size:', dataset.shape)
def make_arrays(nb_rows, img_size):
if nb_rows:
dataset = np.ndarray((nb_rows, img_size, img_size), dtype=np.float32)
labels = np.ndarray(nb_rows, dtype=np.int32)
else:
dataset, labels = None, None
return dataset, labels
def merge_datasets(pickle_files, train_size, valid_size=0):
num_classes = len(pickle_files)
valid_dataset, valid_labels = make_arrays(valid_size, image_size)
train_dataset, train_labels = make_arrays(train_size, image_size)
vsize_per_class = valid_size // num_classes
tsize_per_class = train_size // num_classes
start_v, start_t = 0, 0
end_v, end_t = vsize_per_class, tsize_per_class
end_l = vsize_per_class + tsize_per_class
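  # Each class pickle contributes vsize_per_class validation rows and tsize_per_class training rows.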
for label, pickle_file in enumerate(pickle_files):
try:
with open(pickle_file, 'rb') as f:
letter_set = pickle.load(f)
# let's shuffle the letters to have random validation and training set
np.random.shuffle(letter_set)
if valid_dataset is not None:
valid_letter = letter_set[:vsize_per_class, :, :]
valid_dataset[start_v:end_v, :, :] = valid_letter
valid_labels[start_v:end_v] = label
start_v += vsize_per_class
end_v += vsize_per_class
train_letter = letter_set[vsize_per_class:end_l, :, :]
train_dataset[start_t:end_t, :, :] = train_letter
train_labels[start_t:end_t] = label
start_t += tsize_per_class
end_t += tsize_per_class
except Exception as e:
print('Unable to process data from', pickle_file, ':', e)
raise
return valid_dataset, valid_labels, train_dataset, train_labels
# def show_images(dataset, labels, count):
# for i in range(0,count):
# print(labels[i])
# plt.matshow(dataset[i,:,:], cmap=plt.cm.gray)
# plt.show()
# show_images(train_dataset, train_labels, 3)
# show_images(test_dataset, test_labels, 3)
# show_images(valid_dataset, valid_labels, 3)
pickle_file = 'notMNIST.pickle'
if not os.path.exists(pickle_file):
train_size = 200000
valid_size = 10000
test_size = 10000
valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets(
train_datasets, train_size, valid_size)
_, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size)
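  # Shuffle the training data with a single permutation so images and labels stay aligned.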
indices = np.arange(train_dataset.shape[0])
np.random.shuffle(indices)
train_dataset = train_dataset[indices]
train_labels = train_labels[indices]
try:
f = open(pickle_file, 'wb')
save = {
'train_dataset': train_dataset,
'train_labels': train_labels,
'valid_dataset': valid_dataset,
'valid_labels': valid_labels,
'test_dataset': test_dataset,
'test_labels': test_labels,
}
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
def load_datasets(pickle_file):
statinfo = os.stat(pickle_file)
print('Compressed pickle size:', statinfo.st_size)
f = open(pickle_file, 'rb')
save = pickle.load(f)
f.close()
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
return train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels
train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels = load_datasets(pickle_file)
print('Training:', train_dataset.shape, train_labels.shape)
print('Validation:', valid_dataset.shape, valid_labels.shape)
print('Testing:', test_dataset.shape, test_labels.shape)
def sanitize_dataset(dataset, labels, filter_dataset, similarity_epsilon):
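  # Flatten the images and compare them against filter_dataset with cosine similarity:
  # exact duplicates (similarity == 1) are dropped, near-duplicates are only counted.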
similarity = cosine_similarity(np.reshape(dataset, (dataset.shape[0],-1)), np.reshape(filter_dataset, (filter_dataset.shape[0],-1)))
same_filter = np.sum(similarity == 1, axis=1) > 0
similar_filter = np.sum(similarity > 1-similarity_epsilon, axis=1) > 0
same_count = np.sum(same_filter)
similar_count = np.sum(similar_filter)
filtered_dataset = dataset[same_filter==False]
filtered_labels = labels[same_filter==False]
return filtered_dataset, filtered_labels, same_count, similar_count
sanit_pickle_file = 'notMNIST_sanit.pickle'
if not os.path.exists(sanit_pickle_file):
filtered_valid_dataset, filtered_valid_labels, train_valid_same, train_valid_similar = \
sanitize_dataset(valid_dataset, valid_labels, train_dataset, 0.001)
print("training-validation: same=", train_valid_same, "similar=", train_valid_similar)
filtered_test_dataset, filtered_test_labels, train_test_same, train_test_similar = \
sanitize_dataset(test_dataset, test_labels, train_dataset, 0.001)
print("training-testing: same=", train_test_same, "similar=", train_test_similar)
filtered_test_dataset, filtered_test_labels, valid_test_same, valid_test_similar = \
sanitize_dataset(filtered_test_dataset, filtered_test_labels, filtered_valid_dataset, 0.001)
print("validation-testing: same=", valid_test_same, "similar=", valid_test_similar)
try:
f = open(sanit_pickle_file, 'wb')
save = {
'train_dataset': train_dataset,
'train_labels': train_labels,
'valid_dataset': filtered_valid_dataset,
'valid_labels': filtered_valid_labels,
'test_dataset': filtered_test_dataset,
'test_labels': filtered_test_labels,
}
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
train_dataset, train_labels, filtered_valid_dataset, filtered_valid_labels, filtered_test_dataset, filtered_test_labels = load_datasets(sanit_pickle_file)
print('Training (sanitized):', train_dataset.shape, train_labels.shape)
print('Validation (sanitized):', filtered_valid_dataset.shape, filtered_valid_labels.shape)
print('Testing (sanitized):', filtered_test_dataset.shape, filtered_test_labels.shape)
def train_model(dataset, labels, size=None):
maxSize = dataset.shape[0]
if size is None:
size = maxSize
elif size > maxSize:
size = maxSize
else:
dataset = dataset[0:size]
labels = labels[0:size]
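  # Flatten each 28x28 image into a 784-dimensional feature vector for logistic regression.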
X = np.reshape(dataset, (size,-1))
y = labels
lr = LogisticRegression(n_jobs=4)
lr.fit(X, y)
return lr
def model_score(model, dataset, labels):
X = np.reshape(dataset, (dataset.shape[0],-1))
y = labels
return model.score(X, y)
def train(size=None):
if size is None:
print("Training with all examples:")
else:
print("Training with ", size, " examples:")
model = train_model(train_dataset, train_labels, size)
print(" validation score: ", model_score(model, valid_dataset, valid_labels))
print(" test score: ", model_score(model, test_dataset, test_labels))
print(" validation score (sanitized): ", model_score(model, filtered_valid_dataset, filtered_valid_labels))
print(" test score (sanitized): ", model_score(model, filtered_test_dataset, filtered_test_labels))
for size in [50, 100, 1000, 5000]:
train(size)
# training on all examples:
#train()
|
[
"sys.stdout.write",
"pickle.dump",
"numpy.random.seed",
"numpy.sum",
"pickle.load",
"numpy.arange",
"sys.stdout.flush",
"numpy.mean",
"numpy.ndarray",
"os.path.join",
"numpy.std",
"os.path.exists",
"numpy.reshape",
"tarfile.open",
"numpy.random.shuffle",
"os.stat",
"urllib.request.urlretrieve",
"sklearn.linear_model.LogisticRegression",
"os.listdir",
"scipy.ndimage.imread",
"os.path.isdir",
"os.path.splitext"
] |
[((2095, 2114), 'numpy.random.seed', 'np.random.seed', (['(133)'], {}), '(133)\n', (2109, 2114), True, 'import numpy as np\n'), ((1683, 1700), 'os.stat', 'os.stat', (['filename'], {}), '(filename)\n', (1690, 1700), False, 'import os\n'), ((3488, 3506), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (3498, 3506), False, 'import os\n'), ((8594, 8621), 'os.path.exists', 'os.path.exists', (['pickle_file'], {}), '(pickle_file)\n', (8608, 8621), False, 'import os\n'), ((8923, 8956), 'numpy.arange', 'np.arange', (['train_dataset.shape[0]'], {}), '(train_dataset.shape[0])\n', (8932, 8956), True, 'import numpy as np\n'), ((8962, 8988), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (8979, 8988), True, 'import numpy as np\n'), ((9607, 9627), 'os.stat', 'os.stat', (['pickle_file'], {}), '(pickle_file)\n', (9614, 9627), False, 'import os\n'), ((9731, 9745), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (9742, 9745), False, 'import pickle\n'), ((10772, 10791), 'numpy.sum', 'np.sum', (['same_filter'], {}), '(same_filter)\n', (10778, 10791), True, 'import numpy as np\n'), ((10813, 10835), 'numpy.sum', 'np.sum', (['similar_filter'], {}), '(similar_filter)\n', (10819, 10835), True, 'import numpy as np\n'), ((11068, 11101), 'os.path.exists', 'os.path.exists', (['sanit_pickle_file'], {}), '(sanit_pickle_file)\n', (11082, 11101), False, 'import os\n'), ((13102, 13133), 'numpy.reshape', 'np.reshape', (['dataset', '(size, -1)'], {}), '(dataset, (size, -1))\n', (13112, 13133), True, 'import numpy as np\n'), ((13159, 13187), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'n_jobs': '(4)'}), '(n_jobs=4)\n', (13177, 13187), False, 'from sklearn.linear_model import LogisticRegression\n'), ((13276, 13319), 'numpy.reshape', 'np.reshape', (['dataset', '(dataset.shape[0], -1)'], {}), '(dataset, (dataset.shape[0], -1))\n', (13286, 13319), True, 'import numpy as np\n'), ((1555, 1627), 'urllib.request.urlretrieve', 'urlretrieve', (['(url + filename)', 'filename'], {'reporthook': 'download_progress_hook'}), '(url + filename, filename, reporthook=download_progress_hook)\n', (1566, 1627), False, 'from urllib.request import urlretrieve\n'), ((2251, 2270), 'os.path.isdir', 'os.path.isdir', (['root'], {}), '(root)\n', (2264, 2270), False, 'import os\n'), ((2533, 2555), 'tarfile.open', 'tarfile.open', (['filename'], {}), '(filename)\n', (2545, 2555), False, 'import tarfile\n'), ((2565, 2583), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2581, 2583), False, 'import sys\n'), ((2662, 2683), 'os.path.join', 'os.path.join', (['root', 'd'], {}), '(root, d)\n', (2674, 2683), False, 'import os\n'), ((3719, 3746), 'os.path.join', 'os.path.join', (['folder', 'image'], {}), '(folder, image)\n', (3731, 3746), False, 'import os\n'), ((4523, 4539), 'numpy.mean', 'np.mean', (['dataset'], {}), '(dataset)\n', (4530, 4539), True, 'import numpy as np\n'), ((4575, 4590), 'numpy.std', 'np.std', (['dataset'], {}), '(dataset)\n', (4581, 4590), True, 'import numpy as np\n'), ((5623, 5637), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5634, 5637), False, 'import pickle\n'), ((6444, 6503), 'numpy.ndarray', 'np.ndarray', (['(nb_rows, img_size, img_size)'], {'dtype': 'np.float32'}), '((nb_rows, img_size, img_size), dtype=np.float32)\n', (6454, 6503), True, 'import numpy as np\n'), ((6522, 6557), 'numpy.ndarray', 'np.ndarray', (['nb_rows'], {'dtype': 'np.int32'}), '(nb_rows, dtype=np.int32)\n', (6532, 6557), True, 'import numpy as np\n'), ((9392, 9437), 
'pickle.dump', 'pickle.dump', (['save', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(save, f, pickle.HIGHEST_PROTOCOL)\n', (9403, 9437), False, 'import pickle\n'), ((10521, 10564), 'numpy.reshape', 'np.reshape', (['dataset', '(dataset.shape[0], -1)'], {}), '(dataset, (dataset.shape[0], -1))\n', (10531, 10564), True, 'import numpy as np\n'), ((10565, 10622), 'numpy.reshape', 'np.reshape', (['filter_dataset', '(filter_dataset.shape[0], -1)'], {}), '(filter_dataset, (filter_dataset.shape[0], -1))\n', (10575, 10622), True, 'import numpy as np\n'), ((10642, 10673), 'numpy.sum', 'np.sum', (['(similarity == 1)'], {'axis': '(1)'}), '(similarity == 1, axis=1)\n', (10648, 10673), True, 'import numpy as np\n'), ((10700, 10751), 'numpy.sum', 'np.sum', (['(similarity > 1 - similarity_epsilon)'], {'axis': '(1)'}), '(similarity > 1 - similarity_epsilon, axis=1)\n', (10706, 10751), True, 'import numpy as np\n'), ((12258, 12303), 'pickle.dump', 'pickle.dump', (['save', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(save, f, pickle.HIGHEST_PROTOCOL)\n', (12269, 12303), False, 'import pickle\n'), ((1099, 1133), 'sys.stdout.write', 'sys.stdout.write', (["('%s%%' % percent)"], {}), "('%s%%' % percent)\n", (1115, 1133), False, 'import sys\n'), ((1147, 1165), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1163, 1165), False, 'import sys\n'), ((1194, 1215), 'sys.stdout.write', 'sys.stdout.write', (['"""."""'], {}), "('.')\n", (1210, 1215), False, 'import sys\n'), ((1229, 1247), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1245, 1247), False, 'import sys\n'), ((1454, 1478), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (1468, 1478), False, 'import os\n'), ((4844, 4872), 'os.path.exists', 'os.path.exists', (['set_filename'], {}), '(set_filename)\n', (4858, 4872), False, 'import os\n'), ((2191, 2217), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (2207, 2217), False, 'import os\n'), ((2700, 2716), 'os.listdir', 'os.listdir', (['root'], {}), '(root)\n', (2710, 2716), False, 'import os\n'), ((2744, 2765), 'os.path.join', 'os.path.join', (['root', 'd'], {}), '(root, d)\n', (2756, 2765), False, 'import os\n'), ((7256, 7270), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7267, 7270), False, 'import pickle\n'), ((7376, 7405), 'numpy.random.shuffle', 'np.random.shuffle', (['letter_set'], {}), '(letter_set)\n', (7393, 7405), True, 'import numpy as np\n'), ((5246, 5294), 'pickle.dump', 'pickle.dump', (['dataset', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(dataset, f, pickle.HIGHEST_PROTOCOL)\n', (5257, 5294), False, 'import pickle\n'), ((3788, 3814), 'scipy.ndimage.imread', 'ndimage.imread', (['image_file'], {}), '(image_file)\n', (3802, 3814), False, 'from scipy import ndimage\n')]
|
import logging
import numpy as np
import math
from .drawing import Camouflage, NoPattern, SolidColor, MultiGradient, ImagePattern, Gradient, Image, Symbol
from .fonts import LANGUAGE_MAP
from .generate import (
dataset_generator,
basic_attribute_sampler,
flatten_mask,
flatten_mask_except_first,
add_occlusion,
rand_seed,
)
def generate_i(n_samples, alphabet = None, language="english", font = 'calibri', set = "plain", seed=None, **kwargs):
"""[summary]
Args:
n_samples ([type]): [description]
language (str, optional): [description]. Defaults to "english".
seed ([type], optional): [description]. Defaults to None.
"""
if alphabet is None:
alphabet = LANGUAGE_MAP[language].get_alphabet(support_bold=False)
print(alphabet)
fg = SolidColor((1, 1, 1))
bg = SolidColor((0, 0, 0))
rotation = 0
translation = (0.0,0.0)
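    # The 'set' argument enables at most one factor of variation; everything else stays fixed.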
if set == 'rotation':
rotation = (lambda rng: rng.uniform(low=0, high=1)*math.pi)
elif set == 'translation':
translation= (lambda rng: tuple(rng.uniform(low=-1, high=1, size=2)))
elif set == 'gradient':
fg = None
bg = None
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
font = font,
is_slant=False,
is_bold=False,
background=bg,
foreground=fg,
rotation=rotation,
scale=0.7,
translation=translation,
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_plain_dataset_alphabet_onlygrad(n_samples, chars, seed=None, **kwargs):
"""
"""
alphabet = LANGUAGE_MAP['english'].get_alphabet(support_bold=False)
#print(alphabet.fonts[:10])
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
char=lambda rng: rng.choice(chars),
font=lambda rng: rng.choice(alphabet.fonts[50:55]),
is_slant=False,
is_bold=False,
rotation=0,
scale=0.7,
translation=(0.0, 0.0),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_plain_dataset_alphabet(n_samples, chars, seed=None, **kwargs):
"""
"""
alphabet = LANGUAGE_MAP['english'].get_alphabet(support_bold=False)
#print(alphabet.fonts[:10])
fg = [SolidColor((1, 1, 1)), ImagePattern(seed=123)]
bg = [SolidColor((0, 0, 0)), ImagePattern(seed=123)]
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
char=lambda rng: rng.choice(chars),
font=lambda rng: rng.choice(alphabet.fonts[50:55]),
is_slant=False,
is_bold=False,
background= lambda rng:rng.choice(bg),
foreground= lambda rng:rng.choice(fg),
rotation=0,
scale=0.7,
translation=(0.0, 0.0),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_plain_dataset(n_samples, language="english", seed=None, **kwargs):
"""
"""
alphabet = LANGUAGE_MAP[language].get_alphabet(support_bold=False)
fg = SolidColor((1, 1, 1))
bg = SolidColor((0, 0, 0))
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
is_slant=False,
is_bold=False,
background=bg,
foreground=fg,
rotation=0,
scale=0.7,
translation=(0.0, 0.0),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_plain_rotated_dataset(n_samples, language="english", seed=None, **kwargs):
"""
"""
alphabet = LANGUAGE_MAP[language].get_alphabet(support_bold=False)
fg = SolidColor((1, 1, 1))
bg = SolidColor((0, 0, 0))
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
is_slant=False,
is_bold=False,
background=bg,
foreground=fg,
rotation=lambda rng: rng.uniform(low=0, high=1)*math.pi,
scale=1.0,
translation=(0.0, 0.0),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_plain_translated_dataset(n_samples, language="english", seed=None, **kwargs):
"""Generate default with translation uniformly b/w (-1,1)
"""
alphabet = LANGUAGE_MAP[language].get_alphabet(support_bold=False)
fg = SolidColor((1, 1, 1))
bg = SolidColor((0, 0, 0))
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
is_slant=False,
is_bold=False,
background=bg,
foreground=fg,
rotation=0,
scale=1.0,
translation=lambda rng: tuple(rng.uniform(low=-1, high=1, size=2)),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_plain_scaled_dataset(n_samples, language="english", seed=None, **kwargs):
"""
"""
alphabet = LANGUAGE_MAP[language].get_alphabet(support_bold=False)
fg = SolidColor((1, 1, 1))
bg = SolidColor((0, 0, 0))
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
is_slant=False,
is_bold=False,
background=bg,
foreground=fg,
rotation=0,
scale=None,
translation=(0.0, 0.0),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_plain_bold_dataset(n_samples, language="english", seed=None, **kwargs):
"""
"""
alphabet = LANGUAGE_MAP[language].get_alphabet(support_bold=True)
fg = SolidColor((1, 1, 1))
bg = SolidColor((0, 0, 0))
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
is_slant=False,
is_bold=True,
background=bg,
foreground=fg,
rotation=0,
scale=1.0,
translation=(0.0, 0.0),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_plain_italic_dataset(n_samples, language="english", seed=None, **kwargs):
"""Generate white on black, centered symbols.
The only factors of variations are font and char.
"""
alphabet = LANGUAGE_MAP[language].get_alphabet(support_bold=False)
fg = SolidColor((1, 1, 1))
bg = SolidColor((0, 0, 0))
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
is_slant=True,
is_bold=False,
background=bg,
foreground=fg,
rotation=0,
scale=1.0,
translation=(0.0, 0.0),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_plain_gradient_dataset(n_samples, language="english", seed=None, **kwargs):
"""Generate white on black, centered symbols.
The only factors of variations are font and char.
"""
alphabet = LANGUAGE_MAP[language].get_alphabet(support_bold=False)
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
is_slant=False,
is_bold=False,
rotation=0,
scale=1.0,
translation=(0.0, 0.0),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_plain_natural_dataset(n_samples, language="english", seed=None, **kwargs):
"""Generate white on black, centered symbols.
The only factors of variations are font and char.
"""
alphabet = LANGUAGE_MAP[language].get_alphabet(support_bold=False)
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
background=lambda rng: ImagePattern(seed=rand_seed(rng)), #lambda rng: Gradient(seed=rand_seed(_rng))
foreground=lambda rng: ImagePattern(seed=rand_seed(rng)),
is_slant=False,
is_bold=False,
rotation=0,
scale=1.0,
translation=(0.0, 0.0),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_plain_camouflage_dataset(n_samples, language="english", seed=None, **kwargs):
"""
"""
alphabet = LANGUAGE_MAP[language].get_alphabet(support_bold=False)
angle = 0
fg = Camouflage(stroke_angle=angle, stroke_width=0.1, stroke_length=0.6, stroke_noise=0)
bg = Camouflage(stroke_angle=angle + np.pi / 2, stroke_width=0.1, stroke_length=0.6, stroke_noise=0)
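    # Foreground and background strokes are perpendicular, so only texture orientation separates the symbol from the background.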
scale = 0.7 * np.exp(np.random.randn() * 0.1)
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
is_slant=False,
is_bold=False,
background=bg,
foreground=fg,
rotation=0,
scale=scale,
translation=(0.0, 0.0),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_tiny_dataset(n_samples, language="english", seed=None, **kwarg):
"""Generate a dataset of 8x8 resolution in gray scale
with scale of 1 and minimal variations.
"""
fg = SolidColor((1, 1, 1))
bg = SolidColor((0, 0, 0))
attr_sampler = basic_attribute_sampler(
alphabet=LANGUAGE_MAP[language].get_alphabet(support_bold=False),
background=bg,
foreground=fg,
is_bold=False,
is_slant=False,
scale=1,
resolution=(8, 8),
is_gray=True,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_default_dataset(n_samples, language="english", seed=None, **kwarg):
"""Generate the default dataset,
using gradiant as foreground and background.
"""
attr_sampler = basic_attribute_sampler(alphabet=LANGUAGE_MAP[language].get_alphabet())
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_solid_bg_dataset(n_samples, language="english", seed=None, **kwarg):
"""Same as default datasets, but uses white on black."""
fg = SolidColor((1, 1, 1))
bg = SolidColor((0, 0, 0))
attr_sampler = basic_attribute_sampler(alphabet=LANGUAGE_MAP[language].get_alphabet(), background=bg, foreground=fg)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_natural_images_dataset(n_samples, language="english", seed=None, **kwargs):
"""Same as default dataset, but uses natural images as foreground and background."""
attr_sampler = basic_attribute_sampler(
alphabet=LANGUAGE_MAP[language].get_alphabet(),
background=lambda rng: ImagePattern(seed=rand_seed(rng)),
foreground=lambda rng: ImagePattern(seed=rand_seed(rng)),
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_korean_1k_dataset(n_samples, seed=None, **kwarg):
"""Uses the first 1000 korean symbols"""
alphabet = LANGUAGE_MAP["korean"].get_alphabet(support_bold=True)
chars = alphabet.symbols[:1000]
fonts = alphabet.fonts
attr_sampler = basic_attribute_sampler(char=lambda rng: rng.choice(chars), font=lambda rng: rng.choice(fonts))
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_camouflage_dataset(n_samples, language="english", texture="camouflage", seed=None, **kwarg):
"""Generate a dataset where the pixel distribution
is the same for the foreground and background.
"""
def attr_sampler(seed=None):
if texture == "camouflage":
angle = 0
fg = Camouflage(stroke_angle=angle, stroke_width=0.1, stroke_length=0.6, stroke_noise=0)
bg = Camouflage(stroke_angle=angle + np.pi / 2, stroke_width=0.1, stroke_length=0.6, stroke_noise=0)
elif texture == "shade":
fg, bg = None, None
elif texture == "bw":
fg = SolidColor((1, 1, 1))
bg = SolidColor((0, 0, 0))
else:
raise ValueError("Unknown texture %s." % texture)
scale = 0.7 * np.exp(np.random.randn() * 0.1)
return basic_attribute_sampler(
alphabet=LANGUAGE_MAP[language].get_alphabet(support_bold=True),
background=bg,
foreground=fg,
is_bold=True,
is_slant=False,
scale=scale,
)(seed)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_non_camou_bw_dataset(n_samples, language="english", seed=None, **kwargs):
"""Generate a black and white dataset with
the same attribute distribution as the camouflage dataset.
"""
return generate_camouflage_dataset(n_samples, language=language, texture="bw", seed=seed, **kwargs)
def generate_non_camou_shade_dataset(n_samples, language="english", seed=None, **kwargs):
"""Generate a gradient foreground and background dataset
with same attribute distribution as the camouflage dataset.
"""
return generate_camouflage_dataset(n_samples, language=language, texture="shade", seed=seed, **kwargs)
# for segmentation, detection, counting
# -------------------------------------
def generate_segmentation_dataset(n_samples, language="english", resolution=(128, 128), seed=None, **kwarg):
"""Generate 3-10 symbols of various scale
and rotation and translation (no bold).
"""
def scale(rng):
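        # Log-normal scale distribution centred around 0.1.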
return 0.1 * np.exp(rng.randn() * 0.4)
def n_symbols(rng):
return rng.choice(list(range(3, 10)))
attr_generator = basic_attribute_sampler(
alphabet=LANGUAGE_MAP[language].get_alphabet(support_bold=False),
resolution=resolution,
scale=scale,
is_bold=False,
n_symbols=n_symbols,
)
return dataset_generator(attr_generator, n_samples, flatten_mask, dataset_seed=seed)
def generate_counting_dataset(
n_samples, language="english", resolution=(128, 128), n_symbols=None, scale_variation=0.5, seed=None, **kwarg
):
"""Generate 3-10 symbols at various scale.
Samples 'a' with prob 70% or a latin lowercase otherwise.
"""
if n_symbols is None:
def n_symbols(rng):
return rng.choice(list(range(3, 10)))
def scale(rng):
return 0.1 * np.exp(rng.randn() * scale_variation)
def char_sampler(rng):
if rng.rand() < 0.3:
return rng.choice(LANGUAGE_MAP[language].get_alphabet(support_bold=False).symbols)
else:
return "a"
attr_generator = basic_attribute_sampler(
char=char_sampler, resolution=resolution, scale=scale, is_bold=False, n_symbols=n_symbols
)
return dataset_generator(attr_generator, n_samples, flatten_mask, dataset_seed=seed)
def generate_counting_dataset_scale_fix(n_samples, seed=None, **kwargs):
"""Generate 3-10 symbols at fixed scale.
Samples 'a' with prob 70% or a latin lowercase otherwise.
"""
return generate_counting_dataset(n_samples, scale_variation=0, seed=seed, **kwargs)
def generate_counting_dataset_crowded(n_samples, seed=None, **kwargs):
"""Generate 30-50 symbols at fixed scale.
Samples 'a' with prob 70% or a latin lowercase otherwise.
"""
def n_symbols(rng):
return rng.choice(list(range(30, 50)))
return generate_counting_dataset(n_samples, scale_variation=0.1, n_symbols=n_symbols, seed=seed, **kwargs)
# for few-shot learning
# ---------------------
def all_chars(n_samples, seed=None, **kwarg):
"""Combines the symbols of all languages (up to 200 per languages).
Note: some fonts may appear rarely.
"""
symbols_list = []
for language in LANGUAGE_MAP.values():
alphabet = language.get_alphabet()
symbols = alphabet.symbols[:200]
logging.info("Using %d/%d symbols from alphabet %s", len(symbols), len(alphabet.symbols), alphabet.name)
symbols_list.extend(zip(symbols, [alphabet] * len(symbols)))
def attr_sampler(seed=None):
char, alphabet = symbols_list[np.random.choice(len(symbols_list))]
return basic_attribute_sampler(alphabet=alphabet, char=char)(seed)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_balanced_font_chars_dataset(n_samples, seed=None, **kwarg):
"""Samples uniformly from all fonts (max 200 per alphabet)
or uniformly from all symbols (max 200 per alphabet)
with probability 50%.
"""
font_list = []
symbols_list = []
for language in LANGUAGE_MAP.values():
alphabet = language.get_alphabet()
fonts = alphabet.fonts[:200]
symbols = alphabet.symbols[:200]
logging.info("Using %d/%d fonts from alphabet %s", len(fonts), len(alphabet.fonts), alphabet.name)
font_list.extend(zip(fonts, [alphabet] * len(fonts)))
logging.info("Using %d/%d symbols from alphabet %s", len(symbols), len(alphabet.symbols), alphabet.name)
symbols_list.extend(zip(symbols, [alphabet] * len(symbols)))
logging.info("Total n_fonts: %d, n_symbols: %d.", len(font_list), len(symbols_list))
def attr_sampler(seed=None):
if np.random.rand() > 0.5:
font, alphabet = font_list[np.random.choice(len(font_list))]
symbol = np.random.choice(alphabet.symbols[:200])
else:
symbol, alphabet = symbols_list[np.random.choice(len(symbols_list))]
font = np.random.choice(alphabet.fonts[:200])
return basic_attribute_sampler(char=symbol, font=font, is_bold=False, is_slant=False)(seed)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
# for active learning
# -------------------
def generate_large_translation(n_samples, language="english", seed=None, **kwarg):
"""Synbols are translated beyond the border of the image
to create a cropping effect. Scale is fixed to 0.5.
"""
attr_sampler = basic_attribute_sampler(
alphabet=LANGUAGE_MAP[language].get_alphabet(), scale=0.5, translation=lambda rng: tuple(rng.rand(2) * 4 - 2)
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def missing_symbol_dataset(n_samples, language="english", seed=None, **kwarg):
"""With 10% probability, no symbols are drawn"""
def background(rng):
return MultiGradient(alpha=0.5, n_gradients=2, types=("linear", "radial"), seed=rand_seed(rng))
def tr(rng):
if rng.rand() > 0.1:
return tuple(rng.rand(2) * 2 - 1)
else:
return 10
attr_generator = basic_attribute_sampler(
alphabet=LANGUAGE_MAP[language].get_alphabet(), translation=tr, background=background
)
return dataset_generator(attr_generator, n_samples, dataset_seed=seed)
def generate_some_large_occlusions(n_samples, language="english", seed=None, **kwarg):
"""With probability 20%, add a large occlusion
over the existing symbol.
"""
def n_occlusion(rng):
if rng.rand() < 0.2:
return 1
else:
return 0
attr_sampler = add_occlusion(
basic_attribute_sampler(alphabet=LANGUAGE_MAP[language].get_alphabet()),
n_occlusion=n_occlusion,
scale=lambda rng: 0.6 * np.exp(rng.randn() * 0.1),
translation=lambda rng: tuple(rng.rand(2) * 6 - 3),
)
return dataset_generator(attr_sampler, n_samples, flatten_mask_except_first, dataset_seed=seed)
def generate_many_small_occlusions(n_samples, language="english", seed=None, **kwarg):
"""Add small occlusions on all images.
Number of occlusions are sampled uniformly in [0,5).
"""
attr_sampler = add_occlusion(
basic_attribute_sampler(alphabet=LANGUAGE_MAP[language].get_alphabet()),
n_occlusion=lambda rng: rng.randint(0, 5),
)
return dataset_generator(attr_sampler, n_samples, flatten_mask_except_first, dataset_seed=seed)
def generate_pixel_noise(n_samples, language="english", seed=None, **kwarg):
"""Add large pixel noise with probability 0.5."""
def pixel_noise(rng):
if rng.rand() > 0.1:
return 0
else:
return 0.3
attr_sampler = basic_attribute_sampler(
alphabet=LANGUAGE_MAP[language].get_alphabet(), pixel_noise_scale=pixel_noise
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
# for font classification
# -----------------------
def less_variations(n_samples, language="english", seed=None, **kwarg):
"""Less variations in scale and rotations.
Also, no bold and no italic. This makes a more accessible font
classification task.
"""
attr_generator = basic_attribute_sampler(
alphabet=LANGUAGE_MAP[language].get_alphabet(),
is_bold=False,
is_slant=False,
scale=lambda rng: 0.5 * np.exp(rng.randn() * 0.1),
rotation=lambda rng: rng.randn() * 0.1,
)
return dataset_generator(attr_generator, n_samples, dataset_seed=seed)
DATASET_GENERATOR_MAP = {
"plain": generate_plain_dataset,
"default": generate_default_dataset,
"default-bw": generate_solid_bg_dataset,
"korean-1k": generate_korean_1k_dataset,
"camouflage": generate_camouflage_dataset,
"non-camou-bw": generate_non_camou_bw_dataset,
"non-camou-shade": generate_non_camou_shade_dataset,
"segmentation": generate_segmentation_dataset,
"counting": generate_counting_dataset,
"counting-fix-scale": generate_counting_dataset_scale_fix,
"counting-crowded": generate_counting_dataset_crowded,
"missing-symbol": missing_symbol_dataset,
"some-large-occlusion": generate_some_large_occlusions,
"many-small-occlusion": generate_many_small_occlusions,
"large-translation": generate_large_translation,
"tiny": generate_tiny_dataset,
"balanced-font-chars": generate_balanced_font_chars_dataset,
"all-chars": all_chars,
"less-variations": less_variations,
"pixel-noise": generate_pixel_noise,
"natural-patterns": generate_natural_images_dataset,
}
|
[
"numpy.random.rand",
"numpy.random.randn",
"numpy.random.choice"
] |
[((17287, 17303), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (17301, 17303), True, 'import numpy as np\n'), ((17405, 17445), 'numpy.random.choice', 'np.random.choice', (['alphabet.symbols[:200]'], {}), '(alphabet.symbols[:200])\n', (17421, 17445), True, 'import numpy as np\n'), ((17560, 17598), 'numpy.random.choice', 'np.random.choice', (['alphabet.fonts[:200]'], {}), '(alphabet.fonts[:200])\n', (17576, 17598), True, 'import numpy as np\n'), ((8775, 8792), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (8790, 8792), True, 'import numpy as np\n'), ((12265, 12282), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (12280, 12282), True, 'import numpy as np\n')]
|
import extract_features as ef
import numpy as np
with open('./input.csv','r',encoding='utf-8') as input_file:
with open('./dataset.csv','w',encoding='utf-8') as dataset:
for line in input_file:
r = line.split(',')
x = r[0].strip()
y = r[1].strip()
example = ef.extractFeatures(x)
result = '{0},{1}\n'.format(
np.array2string(example, separator=','),
y
)
result = result.replace('[','')
result = result.replace(']','')
result = result.replace(' ','')
dataset.write(result)
|
[
"numpy.array2string",
"extract_features.extractFeatures"
] |
[((294, 315), 'extract_features.extractFeatures', 'ef.extractFeatures', (['x'], {}), '(x)\n', (312, 315), True, 'import extract_features as ef\n'), ((353, 392), 'numpy.array2string', 'np.array2string', (['example'], {'separator': '""","""'}), "(example, separator=',')\n", (368, 392), True, 'import numpy as np\n')]
|
# June 5, 2018
# May 31, 2018
import numpy as np
import matplotlib.pyplot as plt
from time import sleep
import cv2
class Recurrent_Photo:
'''
Recurrent Photo only for testing
'''
def __init__(self, iterations=100, resize=(1280, 720)):
self.camera = cv2.VideoCapture(0)
self.video = np.zeros([iterations, resize[1], resize[0], 3])
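        # Frames are stored normalized to [0, 1], with shape (iterations, height, width, 3).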
for iteration in range(iterations):
self.video[iteration, :, :] = cv2.resize(
(self.camera.read()[1]/255),
                # FIXME: we now work with all three channels
resize
)
cv2.imshow('Prueba', self.video[iteration, :, :])
cv2.waitKey(1)
self.camera.release()
self.resize = resize
def get_recurrence(self, alpha=(0.75555, 0.25555)):
'''
        Alpha is a pair of floats giving the amount of
        superposition you want in the current image.
        Example:
            alpha = (0.5, 0.5) is a neutral change, where the last
            image has the same intensity as the first image.
'''
first = np.array(self.video[0:self.video.shape[0]-1, :, :,])
second = np.array(self.video[1:self.video.shape[0], :, :])
diferences = self.get_diference(
second,
first
)
for image in range(len(diferences)):
diferences[image] = diferences[image-1]* alpha[0] + diferences[image]* alpha[1]
            # See the equation in the notebook.
return diferences
def get_diference(self, A, B):
'''
Get diference from two items
'''
return np.abs(A - B)
def resize_images(X, dimensions=(100, 75)):
    if len(X.shape) == 3:
        X = cv2.resize(X, dimensions)
    else:
        # Rebuild the batch: reassigning the loop variable would not modify X in place.
        X = np.array([cv2.resize(image, dimensions) for image in X])
    return X
def show_image(X):
if len(X.shape) == 3:
cv2.imshow('image', X)
else:
for image in X:
cv2.imshow('X', image)
cv2.waitKey(1)
sleep(0.05)
non_movement = Recurrent_Photo(50)
print('Prepare next movement...')
sleep(2)
movement = Recurrent_Photo(50)
non_movement_recurrence = non_movement.get_recurrence()
movement_recurrence = movement.get_recurrence()
X = resize_images(non_movement_recurrence)
Y = resize_images(movement_recurrence)
|
[
"numpy.abs",
"cv2.waitKey",
"numpy.zeros",
"time.sleep",
"cv2.VideoCapture",
"numpy.array",
"cv2.imshow",
"cv2.resize"
] |
[((2183, 2191), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (2188, 2191), False, 'from time import sleep\n'), ((285, 304), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (301, 304), False, 'import cv2\n'), ((326, 373), 'numpy.zeros', 'np.zeros', (['[iterations, resize[1], resize[0], 3]'], {}), '([iterations, resize[1], resize[0], 3])\n', (334, 373), True, 'import numpy as np\n'), ((1137, 1190), 'numpy.array', 'np.array', (['self.video[0:self.video.shape[0] - 1, :, :]'], {}), '(self.video[0:self.video.shape[0] - 1, :, :])\n', (1145, 1190), True, 'import numpy as np\n'), ((1207, 1256), 'numpy.array', 'np.array', (['self.video[1:self.video.shape[0], :, :]'], {}), '(self.video[1:self.video.shape[0], :, :])\n', (1215, 1256), True, 'import numpy as np\n'), ((1687, 1700), 'numpy.abs', 'np.abs', (['(A - B)'], {}), '(A - B)\n', (1693, 1700), True, 'import numpy as np\n'), ((1784, 1809), 'cv2.resize', 'cv2.resize', (['X', 'dimensions'], {}), '(X, dimensions)\n', (1794, 1809), False, 'import cv2\n'), ((1965, 1987), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'X'], {}), "('image', X)\n", (1975, 1987), False, 'import cv2\n'), ((630, 679), 'cv2.imshow', 'cv2.imshow', (['"""Prueba"""', 'self.video[iteration, :, :]'], {}), "('Prueba', self.video[iteration, :, :])\n", (640, 679), False, 'import cv2\n'), ((692, 706), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (703, 706), False, 'import cv2\n'), ((1868, 1897), 'cv2.resize', 'cv2.resize', (['image', 'dimensions'], {}), '(image, dimensions)\n', (1878, 1897), False, 'import cv2\n'), ((2038, 2060), 'cv2.imshow', 'cv2.imshow', (['"""X"""', 'image'], {}), "('X', image)\n", (2048, 2060), False, 'import cv2\n'), ((2073, 2087), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2084, 2087), False, 'import cv2\n'), ((2100, 2111), 'time.sleep', 'sleep', (['(0.05)'], {}), '(0.05)\n', (2105, 2111), False, 'from time import sleep\n')]
|
#! /usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import numpy as np
from scipy.stats import rankdata
from ._base import CategoricalStats
class ANOSIM(CategoricalStats):
"""ANOSIM statistical method executor.
Analysis of Similarities (ANOSIM) is a non-parametric method that tests
whether two or more groups of objects are significantly different based on
a categorical factor. The ranks of the distances in the distance matrix are
used to calculate an R statistic, which ranges between -1 (anti-grouping)
to +1 (strong grouping), with an R value of 0 indicating random grouping.
Notes
-----
See [1]_ for the original ANOSIM reference. The general algorithm and
interface are similar to ``vegan::anosim``, available in R's vegan package
[2]_.
References
----------
.. [1] <NAME>. "Non-parametric multivariate analyses of changes in
community structure." Australian journal of ecology 18.1 (1993):
117-143.
.. [2] http://cran.r-project.org/web/packages/vegan/index.html
"""
short_method_name = 'ANOSIM'
long_method_name = 'Analysis of Similarities'
test_statistic_name = 'R statistic'
def __init__(self, distance_matrix, grouping, column=None):
super(ANOSIM, self).__init__(distance_matrix, grouping, column=column)
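        # Denominator of the R statistic: n * (n - 1) / 4, where n is the number of objects.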
self._divisor = self._dm.shape[0] * ((self._dm.shape[0] - 1) / 4)
self._ranked_dists = rankdata(self._dm.condensed_form(),
method='average')
def _run(self, grouping):
"""Compute ANOSIM R statistic (between -1 and +1)."""
# Create a matrix where True means that the two objects are in the same
# group. This ufunc requires that grouping is a numeric vector (e.g.,
# it won't work with a grouping vector of strings).
grouping_matrix = np.equal.outer(grouping, grouping)
# Extract upper triangle from the grouping matrix. It is important to
# extract the values in the same order that the distances are extracted
# from the distance matrix (see self._ranked_dists). Extracting the
# upper triangle (excluding the diagonal) preserves this order.
grouping_tri = grouping_matrix[self._tri_idxs]
return self._compute_r_stat(grouping_tri)
def _compute_r_stat(self, grouping_tri):
# within
r_W = np.mean(self._ranked_dists[grouping_tri])
# between
r_B = np.mean(self._ranked_dists[np.invert(grouping_tri)])
return (r_B - r_W) / self._divisor
|
[
"numpy.mean",
"numpy.equal.outer",
"numpy.invert"
] |
[((2254, 2288), 'numpy.equal.outer', 'np.equal.outer', (['grouping', 'grouping'], {}), '(grouping, grouping)\n', (2268, 2288), True, 'import numpy as np\n'), ((2779, 2820), 'numpy.mean', 'np.mean', (['self._ranked_dists[grouping_tri]'], {}), '(self._ranked_dists[grouping_tri])\n', (2786, 2820), True, 'import numpy as np\n'), ((2880, 2903), 'numpy.invert', 'np.invert', (['grouping_tri'], {}), '(grouping_tri)\n', (2889, 2903), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
import speaksee.data as data
import numpy as np
import torch
'''class TestImageField(object):
def test_preprocessing(self):
field = data.ImageField()
image = ''
expected_image = ''
assert field.preprocess(image) == expected_image
'''
class TestTextField(object):
def test_pad(self):
# Default case.
field = data.TextField()
minibatch = [["a", "sentence", "of", "data", "."],
["yet", "another"],
["one", "last", "sent"]]
expected_padded_minibatch = [["a", "sentence", "of", "data", "."],
["yet", "another", "<pad>", "<pad>", "<pad>"],
["one", "last", "sent", "<pad>", "<pad>"]]
expected_lengths = [5, 2, 3]
assert field.pad(minibatch) == expected_padded_minibatch
field = data.TextField(include_lengths=True)
assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths)
# Test fix_length properly truncates and pads.
field = data.TextField(fix_length=3)
minibatch = [["a", "sentence", "of", "data", "."],
["yet", "another"],
["one", "last", "sent"]]
expected_padded_minibatch = [["a", "sentence", "of"],
["yet", "another", "<pad>"],
["one", "last", "sent"]]
expected_lengths = [3, 2, 3]
assert field.pad(minibatch) == expected_padded_minibatch
field = data.TextField(fix_length=3, include_lengths=True)
assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths)
field = data.TextField(fix_length=3, truncate_first=True)
expected_padded_minibatch = [["of", "data", "."],
["yet", "another", "<pad>"],
["one", "last", "sent"]]
assert field.pad(minibatch) == expected_padded_minibatch
# Test init_token is properly handled.
field = data.TextField(fix_length=4, init_token="<bos>")
minibatch = [["a", "sentence", "of", "data", "."],
["yet", "another"],
["one", "last", "sent"]]
expected_padded_minibatch = [["<bos>", "a", "sentence", "of"],
["<bos>", "yet", "another", "<pad>"],
["<bos>", "one", "last", "sent"]]
expected_lengths = [4, 3, 4]
assert field.pad(minibatch) == expected_padded_minibatch
field = data.TextField(fix_length=4, init_token="<bos>", include_lengths=True)
assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths)
# Test init_token and eos_token are properly handled.
field = data.TextField(init_token="<bos>", eos_token="<eos>")
minibatch = [["a", "sentence", "of", "data", "."],
["yet", "another"],
["one", "last", "sent"]]
expected_padded_minibatch = [
["<bos>", "a", "sentence", "of", "data", ".", "<eos>"],
["<bos>", "yet", "another", "<eos>", "<pad>", "<pad>", "<pad>"],
["<bos>", "one", "last", "sent", "<eos>", "<pad>", "<pad>"]]
expected_lengths = [7, 4, 5]
assert field.pad(minibatch) == expected_padded_minibatch
field = data.TextField(init_token="<bos>", eos_token="<eos>", include_lengths=True)
assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths)
def test_decode(self):
def test_all_dtypes(word_idxs, expected_output):
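            # decode must give the same result for Python lists, numpy arrays and torch tensors.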
assert field.decode(word_idxs) == expected_output
assert field.decode(np.asarray(word_idxs)) == expected_output
assert field.decode(torch.from_numpy(np.asarray(word_idxs))) == expected_output
class MyVocab(object):
def __init__(self, eos_token):
self.itos = {0: 'a',
1: 'b',
2: eos_token,
3: 'c'}
field = data.TextField()
field.vocab = MyVocab(field.eos_token)
# Empty captions (not tested for PyTorch tensors)
word_idxs = []
expected_output = ''
assert field.decode(word_idxs) == expected_output
assert field.decode(np.asarray(word_idxs)) == expected_output
word_idxs = [[]]
expected_output = ['', ]
assert field.decode(word_idxs) == expected_output
assert field.decode(np.asarray(word_idxs)) == expected_output
# Single caption
word_idxs = [0, 3, 2, 1]
expected_output = 'a c'
test_all_dtypes(word_idxs, expected_output)
# Batch of captions
word_idxs = [[0, 3, 2, 1],
[3, 3, 2, 1],
[2, 1, 1, 1]]
expected_output = ['a c', 'c c', '']
test_all_dtypes(word_idxs, expected_output)
|
[
"numpy.asarray",
"speaksee.data.TextField"
] |
[((446, 462), 'speaksee.data.TextField', 'data.TextField', ([], {}), '()\n', (460, 462), True, 'import speaksee.data as data\n'), ((966, 1002), 'speaksee.data.TextField', 'data.TextField', ([], {'include_lengths': '(True)'}), '(include_lengths=True)\n', (980, 1002), True, 'import speaksee.data as data\n'), ((1160, 1188), 'speaksee.data.TextField', 'data.TextField', ([], {'fix_length': '(3)'}), '(fix_length=3)\n', (1174, 1188), True, 'import speaksee.data as data\n'), ((1643, 1693), 'speaksee.data.TextField', 'data.TextField', ([], {'fix_length': '(3)', 'include_lengths': '(True)'}), '(fix_length=3, include_lengths=True)\n', (1657, 1693), True, 'import speaksee.data as data\n'), ((1795, 1844), 'speaksee.data.TextField', 'data.TextField', ([], {'fix_length': '(3)', 'truncate_first': '(True)'}), '(fix_length=3, truncate_first=True)\n', (1809, 1844), True, 'import speaksee.data as data\n'), ((2160, 2208), 'speaksee.data.TextField', 'data.TextField', ([], {'fix_length': '(4)', 'init_token': '"""<bos>"""'}), "(fix_length=4, init_token='<bos>')\n", (2174, 2208), True, 'import speaksee.data as data\n'), ((2690, 2760), 'speaksee.data.TextField', 'data.TextField', ([], {'fix_length': '(4)', 'init_token': '"""<bos>"""', 'include_lengths': '(True)'}), "(fix_length=4, init_token='<bos>', include_lengths=True)\n", (2704, 2760), True, 'import speaksee.data as data\n'), ((2925, 2978), 'speaksee.data.TextField', 'data.TextField', ([], {'init_token': '"""<bos>"""', 'eos_token': '"""<eos>"""'}), "(init_token='<bos>', eos_token='<eos>')\n", (2939, 2978), True, 'import speaksee.data as data\n'), ((3499, 3574), 'speaksee.data.TextField', 'data.TextField', ([], {'init_token': '"""<bos>"""', 'eos_token': '"""<eos>"""', 'include_lengths': '(True)'}), "(init_token='<bos>', eos_token='<eos>', include_lengths=True)\n", (3513, 3574), True, 'import speaksee.data as data\n'), ((4204, 4220), 'speaksee.data.TextField', 'data.TextField', ([], {}), '()\n', (4218, 4220), True, 'import speaksee.data as data\n'), ((4465, 4486), 'numpy.asarray', 'np.asarray', (['word_idxs'], {}), '(word_idxs)\n', (4475, 4486), True, 'import numpy as np\n'), ((4652, 4673), 'numpy.asarray', 'np.asarray', (['word_idxs'], {}), '(word_idxs)\n', (4662, 4673), True, 'import numpy as np\n'), ((3839, 3860), 'numpy.asarray', 'np.asarray', (['word_idxs'], {}), '(word_idxs)\n', (3849, 3860), True, 'import numpy as np\n'), ((3930, 3951), 'numpy.asarray', 'np.asarray', (['word_idxs'], {}), '(word_idxs)\n', (3940, 3951), True, 'import numpy as np\n')]
|
import numpy as np
"""
:param MLLD_functions: this class has several functions that are usually used by myself.
"""
class MLLD_functions:
def standardization(self, variable):
"""
:param variable: the array with the variables you wish to standardize
:return: standardized array
"""
var_average = np.average(variable)
var_std = np.std(variable)
new_variable = []
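        # z-score each element: subtract the mean and divide by the standard deviation.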
for i in range(variable.size):
new_variable_i = (variable[i] - var_average)/var_std
new_variable.append(new_variable_i)
self.new_variable = np.array(new_variable)
return self.new_variable
|
[
"numpy.std",
"numpy.average",
"numpy.array"
] |
[((342, 362), 'numpy.average', 'np.average', (['variable'], {}), '(variable)\n', (352, 362), True, 'import numpy as np\n'), ((385, 401), 'numpy.std', 'np.std', (['variable'], {}), '(variable)\n', (391, 401), True, 'import numpy as np\n'), ((608, 630), 'numpy.array', 'np.array', (['new_variable'], {}), '(new_variable)\n', (616, 630), True, 'import numpy as np\n')]
|
"""User defined module for simulation."""
import numpy
def get_analytical(grid, asol, user_bc):
"""Compute and set the analytical solution.
Arguments
---------
grid : flowx.Grid object
Grid containing data.
asol : string
Name of the variable on the grid.
"""
X, Y = numpy.meshgrid(grid.x, grid.y)
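    # The Dirichlet solution is a product of sines; any other boundary type falls back to a product of cosines.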
if(user_bc == 'dirichlet'):
values = numpy.sin(2 * numpy.pi * X) * numpy.sin(2 * numpy.pi * Y)
else:
values = numpy.cos(2 * numpy.pi * X) * numpy.cos(2 * numpy.pi * Y)
grid.set_values(asol, values.transpose())
def get_rhs(grid, rvar, user_bc):
"""Compute and set the right-hand side of the Poisson system.
Arguments
---------
grid : flowx.Grid object
Grid containing data.
rvar : string
Name of the variable on the grid.
"""
X, Y = numpy.meshgrid(grid.x, grid.y)
if(user_bc == 'dirichlet'):
values = (-8 * numpy.pi**2 *
numpy.sin(2 * numpy.pi * X) * numpy.sin(2 * numpy.pi * Y))
else:
values = (-8 * numpy.pi**2 *
numpy.cos(2 * numpy.pi * X) * numpy.cos(2 * numpy.pi * Y))
grid.set_values(rvar, values.transpose())
|
[
"numpy.sin",
"numpy.meshgrid",
"numpy.cos"
] |
[((315, 345), 'numpy.meshgrid', 'numpy.meshgrid', (['grid.x', 'grid.y'], {}), '(grid.x, grid.y)\n', (329, 345), False, 'import numpy\n'), ((856, 886), 'numpy.meshgrid', 'numpy.meshgrid', (['grid.x', 'grid.y'], {}), '(grid.x, grid.y)\n', (870, 886), False, 'import numpy\n'), ((396, 423), 'numpy.sin', 'numpy.sin', (['(2 * numpy.pi * X)'], {}), '(2 * numpy.pi * X)\n', (405, 423), False, 'import numpy\n'), ((426, 453), 'numpy.sin', 'numpy.sin', (['(2 * numpy.pi * Y)'], {}), '(2 * numpy.pi * Y)\n', (435, 453), False, 'import numpy\n'), ((481, 508), 'numpy.cos', 'numpy.cos', (['(2 * numpy.pi * X)'], {}), '(2 * numpy.pi * X)\n', (490, 508), False, 'import numpy\n'), ((511, 538), 'numpy.cos', 'numpy.cos', (['(2 * numpy.pi * Y)'], {}), '(2 * numpy.pi * Y)\n', (520, 538), False, 'import numpy\n'), ((1005, 1032), 'numpy.sin', 'numpy.sin', (['(2 * numpy.pi * Y)'], {}), '(2 * numpy.pi * Y)\n', (1014, 1032), False, 'import numpy\n'), ((1129, 1156), 'numpy.cos', 'numpy.cos', (['(2 * numpy.pi * Y)'], {}), '(2 * numpy.pi * Y)\n', (1138, 1156), False, 'import numpy\n'), ((975, 1002), 'numpy.sin', 'numpy.sin', (['(2 * numpy.pi * X)'], {}), '(2 * numpy.pi * X)\n', (984, 1002), False, 'import numpy\n'), ((1099, 1126), 'numpy.cos', 'numpy.cos', (['(2 * numpy.pi * X)'], {}), '(2 * numpy.pi * X)\n', (1108, 1126), False, 'import numpy\n')]
|
from pipeline.feature_engineering.preprocessing.abstract_preprocessor import Preprocessor
from pipeline.feature_engineering.preprocessing.replacement_strategies.mean_replacement_strategy import MeanReplacementStrategy
from pipeline.feature_engineering.preprocessing.replacement_strategies.del_row_replacement_strategy import DelRowReplacementStrategy
from pipeline.feature_engineering.preprocessing.replacement_strategies.replacement_val_replacement_strategy import ReplacementValReplacementStrategy
from overrides import overrides
import traceback
import os
import pandas
from sklearn.decomposition import PCA
import numpy
class SussexHuaweiPreprocessor(Preprocessor):
def __init__(self):
super().__init__()
@overrides
def segment_data(self, data, mode, label_column=None, args=None):
"""
        Segments a time series either by label values, by semantic segmentation, or into fixed-length intervals.
:param data:
:param mode:
:param label_column:
:param args:
:return:
"""
try:
if data is None or mode is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not isinstance(data, pandas.DataFrame):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
if mode == 'semantic':
raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value)
if mode == 'labels':
# 1. Select all data with desired label value
data_segments = []
for target_label in args:
selected_data = data[data[label_column] == target_label]
# 2. Split by non-subsequent indices
# Source for next 3 lines after comment:
# https://stackoverflow.com/questions/56257329/how-to-split-a-dataframe-based-on-consecutive-index
non_sequence = pandas.Series(selected_data.index).diff() != 1
grouper = non_sequence.cumsum().values
selected_data_segments = [group for _, group in selected_data.groupby(grouper)]
for segment in selected_data_segments:
data_segments.append(segment)
return data_segments
if mode == 'fixed_interval':
segment_length = args[0]
aggregate = args[1]
exact_length = args[2]
segments_aggregated = []
split = lambda df, chunk_size : numpy.array_split(df, len(df) // chunk_size + 1, axis=0)
# 1. Ensure index is datetime index and standardize type
data.index = pandas.DatetimeIndex(data.index.astype('datetime64[1s]'))
#2. Segment data
segments = split(data, segment_length)
if not exact_length:
for segment in segments:
segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]'))
return segments
#3. Remove segments that are too long or too short after splitting
min_length_subsegements = []
for segment in segments:
if segment.shape[0] == segment_length:
min_length_subsegements.append(segment)
if not aggregate:
for segment in min_length_subsegements:
segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]'))
return min_length_subsegements
                #4. Resample and aggregate data
segments_combined = None
for segment in min_length_subsegements:
segment = segment.reset_index()
segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]'))
segment = self.resample_quantitative_data(segment,
freq="{}s".format(segment_length),
mode = 'mean')
if segments_combined is None:
segments_combined = segment
else:
segments_combined = pandas.concat([segments_combined, segment], axis=0)
if segments_combined is not None:
segments_combined = segments_combined.reset_index()
segments_combined.index = pandas.DatetimeIndex(
segments_combined.index.astype('datetime64[1s]'))
segments_aggregated.append(segments_combined)
return segments_aggregated
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def de_segment_data(self, data_segments, selected_columns=None, axis = 0):
"""
        De-segments a time series by concatenating its segments back together.
:param data_segments:
:param selected_columns:
:param axis:
:return:
"""
try:
data = None
for ind in range(len(data_segments)):
if data is None:
data = data_segments[ind][selected_columns]
else:
data = pandas.concat([data, data_segments[ind][selected_columns]], axis=axis)
data = data.reset_index(drop=True)
return data
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def remove_nans(self, data, replacement_mode, replacement_value=None):
"""
Remove NaNs
:param data:
        :param replacement_mode: string, one of 'mean', 'replacement_val', 'del_row'
        :param replacement_value: any type, used as value if replacement_mode is 'replacement_val'
:return: pandas.DataFrame
"""
try:
if data is None or replacement_mode is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not isinstance(data, pandas.DataFrame):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
if replacement_mode == 'mean':
return MeanReplacementStrategy().replace(data, 'NaN')
if replacement_mode == 'del_row':
return DelRowReplacementStrategy().replace(data, 'NaN')
if replacement_mode == 'replacement_val':
return ReplacementValReplacementStrategy().replace(data, 'NaN', replacement_vals=replacement_value)
raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value)
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def remove_outliers_from_quantitative_data(self, data, replacement_mode, columns, quantile = None, threshold = None):
"""
        Removes outliers based either on a quantile or on a threshold value.
:param data:
:param replacement_mode:
:param columns:
:param quantile:
:param threshold:
:return:
"""
try:
if data is None or replacement_mode is None or columns is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not isinstance(data, pandas.DataFrame) or not isinstance(columns, list) or not isinstance(replacement_mode, str):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
if len(columns) < 1:
raise ValueError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value)
if replacement_mode == 'quantile':
# Source for next 7 lines of code after comment:
# https://nextjournal.com/schmudde/how-to-remove-outliers-in-data
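                # Keep only values between the (1 - quantile) and quantile quantiles; rows outside this band are dropped below.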
for column in columns:
not_outliers = data[column].between(
data[column].quantile(1.0 - quantile),
data[column].quantile(quantile)
)
data[column] = data[column][not_outliers]
index_names = data[~not_outliers].index
data.drop(index_names, inplace=True)
old_index = data.index
data = data.reset_index(drop=False)
data = data.set_index(old_index)
return data
if replacement_mode == 'threshold':
raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value)
raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value)
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def resample_quantitative_data(self, data, freq, mode = None):
"""
Resamples quantitative data.
:param data:
:param freq:
:param mode:
:return:
"""
# Source:
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.resample.html
# https://jakevdp.github.io/PythonDataScienceHandbook/03.11-working-with-time-series.html
try:
if data is None or freq is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not isinstance(data, pandas.DataFrame) or not isinstance(freq, str):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
if mode == 'mean' or mode is None:
return data.resample(freq).mean()
if mode == 'sum':
return data.resample(freq).sum()
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def convert_unix_to_datetime(self, data, column, unit):
"""
Converts unix time stamps to date time.
:param data:
:param column:
:param unit:
:return:
"""
# Source:
# https://stackoverflow.com/questions/19231871/convert-unix-time-to-readable-date-in-pandas-dataframe
# https://stackoverflow.com/questions/42698421/pandas-to-datetime-from-milliseconds-produces-incorrect-datetime
try:
if data is None or column is None or unit is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not isinstance(data, pandas.DataFrame) or not isinstance(column, str) or not isinstance(unit, str):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
data[column] = pandas.to_datetime(data[column], unit=unit)
return data
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def remove_unwanted_labels(self, data, unwanted_labels, replacement_mode):
"""
Remove rows that have an unwanted label.
:param data:
:param unwanted_labels:
:param replacement_mode:
:return:
"""
try:
if data is None or replacement_mode is None or unwanted_labels is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not isinstance(data, pandas.DataFrame) or not isinstance(unwanted_labels, list) or not isinstance(replacement_mode, str):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
if replacement_mode == 'del_row':
return DelRowReplacementStrategy().replace(data, 'unwanted_labels', unwanted_labels)
raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value)
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def project_accelerometer_to_global_coordinates(self, data, target_columns, mode, args=None):
"""
Project accelerometer data from local vehicle coordinates to a global coordinate system.
:param data:
:param target_columns:
:param mode:
:param args:
:return:
"""
try:
if data is None or target_columns is None or mode is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not isinstance(data, pandas.DataFrame) or not isinstance(mode, str) or not isinstance(target_columns, list):
                raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
if mode == 'mean_estimate_gravity':
raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value)
if mode == 'gyroscope':
raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value)
if mode == 'gravity':
if len(target_columns) != len(args):
raise TypeError(self.messages.PROVIDED_ARRAYS_DONT_MATCH_LENGTH.value)
for ind, column in enumerate(target_columns):
data[column] = data[column] - data[args[ind]]
return data
if mode == 'orientation':
if len(target_columns)+1 != len(args):
raise TypeError(self.messages.PROVIDED_ARRAYS_DONT_MATCH_LENGTH.value)
# Source for theory behind below calculation
# https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation
# https://en.wikipedia.org/wiki/Homogeneous_coordinates
# #https://stackoverflow.com/questions/2422750/in-opengl-vertex-shaders-what-is-w-and-why-do-i-divide-by-it
for ind, column in enumerate(target_columns):
data[column] = data[column] * (data[args[ind]] / data[args[3]])
return data
raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value)
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def label_data(self, labels, data):
"""
Combines labels vector and data matrix.
:param labels:
:param data:
:return:
"""
try:
if data is None or labels is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not (isinstance(data, pandas.DataFrame) and isinstance(labels, pandas.DataFrame)):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
if (len(labels) != len(data)):
raise TypeError(self.messages.PROVIDED_FRAME_DOESNT_MATCH_DATA.value)
return pandas.concat((labels, data), axis=1)
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def znormalize_quantitative_data(self, data, columns = None, mean = None, std = None):
"""
Apply z-normalization to a data set.
:param data:
:param columns:
:param mean:
:param std:
:return:
"""
try:
if data is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not isinstance(data, pandas.DataFrame):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
if not all(column in data.keys() for column in columns):
raise TypeError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value)
if mean is None and std is None:
if columns is not None:
mean = data[columns].mean()
std = data[columns].std()
data[columns] = (data[columns] - data[columns].mean()) / data[columns].std()
else:
mean = data.mean()
std = data.std()
data = (data - data.mean()) / data.std()
elif mean is not None and std is not None:
if columns is not None:
data[columns] = (data[columns] - mean) / std
else:
data = (data - mean) / std
else:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
return data, mean, std
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def min_max_normalize_quantitative_data(self, data, columns=None):
"""
Apply min-max-normalization to a data set.
:param data:
:param columns:
:return:
"""
try:
if data is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not isinstance(data, pandas.DataFrame):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
if not all(column in data.keys() for column in columns):
raise TypeError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value)
if columns is not None:
                data[columns]=(data[columns]-data[columns].min())/(data[columns].max()-data[columns].min()) # to center around 0.0 subtract 0.5
            else:
                data = (data - data.min()) / (data.max() - data.min()) # to center around 0.0 subtract 0.5
return data
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def re_represent_data(self, current_representation, target_representation, data):
"""
Change representation of a data set.
:param current_representation:
:param target_representation:
:param data:
:return:
"""
raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value)
@overrides
def reduce_quantitativ_data_dimensionality(self, data, mode, reduced_column_name = 'reduced', columns = None):
"""
Apply a dimensionality reduction technique to a data set.
:param data:
:param mode:
:param reduced_column_name:
:param columns:
:return:
"""
try:
if data is None or mode is None or reduced_column_name is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not isinstance(data, pandas.DataFrame) or not isinstance(mode, str) or not isinstance(reduced_column_name, str):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
if mode == 'euclidean':
# Source:
# https://thispointer.com/pandas-apply-apply-a-function-to-each-row-column-in-dataframe/
# https://www.google.com/search?client=ubuntu&channel=fs&q=euclidean+norm&ie=utf-8&oe=utf-8
# https://stackoverflow.com/questions/54260920/combine-merge-dataframes-with-different-indexes-and-different-column-names
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html
reduced = data[columns].apply(numpy.square, axis=1)[columns].sum(axis=1).apply(numpy.sqrt) #**(1/2) alternative
old_index = data.index
data = pandas.concat([data, reduced], axis=1)
data = data.rename(columns={0: reduced_column_name})
data = data.reset_index(drop=True)
data = data.set_index(old_index)
return data
if mode == 'manhatten':
reduced = data[columns].sum(axis=1).apply(numpy.abs, axis=1)
old_index = data.index
data = pandas.concat([data, reduced], axis=1)
data = data.rename(columns={0: reduced_column_name})
data = data.reset_index(drop=True)
data = data.set_index(old_index)
return data
if mode == 'pca':
# Source:
# https://stackoverflow.com/questions/23282130/principal-components-analysis-using-pandas-dataframe
# https://stackoverflow.com/questions/54260920/combine-merge-dataframes-with-different-indexes-and-different-column-names
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html
# https://en.wikipedia.org/wiki/Principal_component_analysis
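                # Project the selected columns onto their first principal component to obtain a single reduced feature column.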
pca = PCA(n_components=1)
pca.fit(data[columns])
reduced = pandas.DataFrame((numpy.dot(pca.components_, data[columns].T).T))
reduced = reduced.rename(columns={0:reduced_column_name})
reduced = reduced.reset_index(drop=True)
old_index = data.index
data = data.reset_index(drop=True)
data = pandas.concat([data, reduced], axis=1)
data = data.set_index(old_index)
return data
raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value)
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def encode_categorical_features(self, data, mode, columns, encoding_function):
"""
Encode categorical features using an encoding function.
:param data:
:param mode:
:param columns:
:param encoding_function:
:return:
"""
try:
if data is None or mode is None or columns is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not isinstance(data, pandas.DataFrame) or not isinstance(mode, str) or not isinstance(
columns, list):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
if mode == 'custom_function':
if encoding_function is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
for column in columns:
data[column] = encoding_function(data[column])
return data
raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value)
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def inference_split_process(self, data, config, meta_data):
"""
Apply all preprocessing steps necessary for inference.
        :param data: pandas.DataFrame
        :param config: configuration mapping with the preprocessing parameters
        :param meta_data: dict holding the training statistics 'mean_train' and 'std_train'
        :return: pandas.DataFrame
"""
print('Fetch params')
acelerometer_columns = [config['data_set_column_names'][1:][0], config['data_set_column_names'][1:][1], config['data_set_column_names'][1:][2]]
freq = config['pre_proc_resample_freq'] # '1000ms'
mean_train = meta_data['mean_train']
std_train = meta_data['std_train']
print('Convert time unit, remove nans')
data = self.convert_unix_to_datetime(data, column='time', unit='ms')
data = self.remove_nans(data, replacement_mode='del_row')
data.set_index(data['time'], drop=True, inplace=True)
print('Resample')
data = self.resample_quantitative_data(data,
freq=freq) # 8000 1.25 Hz
print('Dimensionality reduction')
data = self.reduce_quantitativ_data_dimensionality(
data=data,
mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for motif
columns=acelerometer_columns,
reduced_column_name='acceleration_abs'
)
print('Normalizing, outlier removal')
selected_columns = ['acceleration_abs']
data, mean, std = self.znormalize_quantitative_data(data, selected_columns, mean_train, std_train)
data = self.remove_outliers_from_quantitative_data(
data,
replacement_mode='quantile',
columns=selected_columns,
quantile=0.99 # current run @0.95 for classical approach via TS Fresh
)[:-1]
return data
@overrides
def training_split_process(self, data, config, labels):
"""
Apply all preprocessing steps necessary for training.
:param data: pandas.DataFrame
        :param config: configuration mapping with the preprocessing parameters
        :param labels: pandas.DataFrame with the label columns
        :return: data_train, mean_train, std_train, data_test, data_valid
"""
print('Fetch params')
#print(params)
labels = labels
test_sz = config['pre_proc_test_sz']
train_sz = config['pre_proc_training_sz']
valid_sz = config['pre_proc_validation_sz']
#acelerometer_columns = ['acceleration_x', 'acceleration_y', 'acceleration_z']
acelerometer_columns = [config['data_set_column_names'][1:][0], config['data_set_column_names'][1:][1], config['data_set_column_names'][1:][2]]
selected_coarse_labels = config['pre_proc_movement_type_label'] #[5]
selected_road_labels = config['pre_proc_road_type_label'] #[1, 3]
freq = config['pre_proc_resample_freq'] #'1000ms'
print('Convert time unit, label data, remove nans')
data = self.convert_unix_to_datetime(data, column = 'time', unit = 'ms')
data = self.label_data(data, labels)
data = self.remove_nans(data, replacement_mode='del_row')
print('Train, Test, Validation split')
data_len = data.shape[0]
test_len = int(data_len * test_sz)
train_len = int(data_len * train_sz)
valid_len = int(data_len * valid_sz)
data_train, data_test_valid = data.head(train_len), data.tail(test_len+valid_len)
data_test = data_test_valid.head(test_len)
data_valid = data_test_valid.tail(valid_len)
print('Segment by labels')
#Segment Train
car_train_segments = self.segment_data(data_train, mode='labels',
label_column='coarse_label',
args=selected_coarse_labels)
data_train_segments = []
for car_segment in car_train_segments:
road_segments = self.segment_data(car_segment, mode='labels',
label_column='road_label',
args=selected_road_labels
)
for road_segment in road_segments:
data_train_segments.append(road_segment)
#Segment Test
car_test_segments = self.segment_data(data_test, mode='labels',
label_column='coarse_label',
args=selected_coarse_labels)
data_test_segments = []
for car_segment in car_test_segments:
road_segments = self.segment_data(car_segment, mode='labels',
label_column='road_label',
args=selected_road_labels
)
for road_segment in road_segments:
data_test_segments.append(road_segment)
#Segment Valid
car_valid_segments = self.segment_data(data_valid, mode='labels',
label_column='coarse_label',
args=selected_coarse_labels)
data_valid_segments = []
for car_segment in car_valid_segments:
road_segments = self.segment_data(car_segment, mode='labels',
label_column='road_label',
args=selected_road_labels
)
for road_segment in road_segments:
data_valid_segments.append(road_segment)
print('Resample')
#Train
for ind in range(len(data_train_segments)):
data_train_segments[ind] = data_train_segments[ind].set_index('time')
data_train_segments[ind] = self.resample_quantitative_data(data_train_segments[ind],
freq=freq) # 8000 1.25 Hz
#Test
for ind in range(len(data_test_segments)):
data_test_segments[ind] = data_test_segments[ind].set_index('time')
data_test_segments[ind] = self.resample_quantitative_data(data_test_segments[ind],
freq=freq)
#Valid
for ind in range(len(data_valid_segments)):
data_valid_segments[ind] = data_valid_segments[ind].set_index('time')
data_valid_segments[ind] = self.resample_quantitative_data(data_valid_segments[ind],
freq=freq)
print('Dimensionality reduction')
#Train
for ind in range(len(data_train_segments)):
data_train_segments[ind] = self.reduce_quantitativ_data_dimensionality(
data=data_train_segments[ind],
mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for motif
columns=acelerometer_columns,
reduced_column_name='acceleration_abs'
)
#Test
for ind in range(len(data_test_segments)):
data_test_segments[ind] = self.reduce_quantitativ_data_dimensionality(
data=data_test_segments[ind],
mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for motif
columns=acelerometer_columns,
reduced_column_name='acceleration_abs'
)
#Valid
for ind in range(len(data_valid_segments)):
data_valid_segments[ind] = self.reduce_quantitativ_data_dimensionality(
data=data_valid_segments[ind],
mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for motif
columns=acelerometer_columns,
reduced_column_name='acceleration_abs'
)
print('Normalizing, outlier removal')
#Train
selected_columns = ['acceleration_abs',
'road_label', 'id'] # 'acceleration_abs'
data_train = self.de_segment_data(data_train_segments, selected_columns)
data_train, mean_train, std_train = self.znormalize_quantitative_data(data_train, selected_columns[:-2])
data_train = self.remove_outliers_from_quantitative_data(
data_train,
replacement_mode='quantile',
columns=selected_columns[:-2],
quantile=0.99 # current run @0.95 for classical approach via TS Fresh
)[:-2]
#Test
data_test = self.de_segment_data(data_test_segments, selected_columns)
data_test, mean_test, std_test = self.znormalize_quantitative_data(data_test,
selected_columns[:-2],
mean_train, std_train)
data_test = self.remove_outliers_from_quantitative_data(
data_test,
replacement_mode='quantile',
columns=selected_columns[:-2],
quantile=0.99 # current run @0.95 for classical approach via TS Fresh
)[:-2]
#Valid
data_valid = self.de_segment_data(data_valid_segments, selected_columns)
data_valid, mean_valid, std_valid = self.znormalize_quantitative_data(data_valid,
selected_columns[:-2],
mean_train, std_train)
data_valid = self.remove_outliers_from_quantitative_data(
data_valid,
replacement_mode='quantile',
columns=selected_columns[:-2],
quantile=0.99 # current run @0.95 for classical approach via TS Fresh
)[:-2]
data_train = data_train.loc[:, ~data_train.columns.duplicated()]
data_test = data_test.loc[:, ~data_test.columns.duplicated()]
data_valid = data_valid.loc[:, ~data_valid.columns.duplicated()]
#print('Rolling mean smoothing')
#data_train['acceleration_abs'] = data_train['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3) #TODO make configureable
#data_test['acceleration_abs'] = data_test['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3)
#data_valid['acceleration_abs'] = data_valid['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3)
#data_train = self.remove_nans(data_train, replacement_mode='del_row')
#data_test = self.remove_nans(data_test, replacement_mode='del_row')
#data_valid = self.remove_nans(data_valid, replacement_mode='del_row')
#print(data_train.head(100))
return data_train, mean_train, std_train, data_test, data_valid
|
[
"pipeline.feature_engineering.preprocessing.replacement_strategies.del_row_replacement_strategy.DelRowReplacementStrategy",
"pipeline.feature_engineering.preprocessing.replacement_strategies.replacement_val_replacement_strategy.ReplacementValReplacementStrategy",
"pipeline.feature_engineering.preprocessing.replacement_strategies.mean_replacement_strategy.MeanReplacementStrategy",
"os._exit",
"pandas.to_datetime",
"traceback.format_exc",
"sklearn.decomposition.PCA",
"pandas.Series",
"numpy.dot",
"pandas.concat"
] |
[((11397, 11440), 'pandas.to_datetime', 'pandas.to_datetime', (['data[column]'], {'unit': 'unit'}), '(data[column], unit=unit)\n', (11415, 11440), False, 'import pandas\n'), ((15773, 15810), 'pandas.concat', 'pandas.concat', (['(labels, data)'], {'axis': '(1)'}), '((labels, data), axis=1)\n', (15786, 15810), False, 'import pandas\n'), ((4889, 4900), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (4897, 4900), False, 'import os\n'), ((4995, 5006), 'os._exit', 'os._exit', (['(2)'], {}), '(2)\n', (5003, 5006), False, 'import os\n'), ((5775, 5786), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (5783, 5786), False, 'import os\n'), ((5881, 5892), 'os._exit', 'os._exit', (['(2)'], {}), '(2)\n', (5889, 5892), False, 'import os\n'), ((7146, 7157), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (7154, 7157), False, 'import os\n'), ((7252, 7263), 'os._exit', 'os._exit', (['(2)'], {}), '(2)\n', (7260, 7263), False, 'import os\n'), ((9267, 9278), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (9275, 9278), False, 'import os\n'), ((9373, 9384), 'os._exit', 'os._exit', (['(2)'], {}), '(2)\n', (9381, 9384), False, 'import os\n'), ((10428, 10439), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (10436, 10439), False, 'import os\n'), ((10533, 10544), 'os._exit', 'os._exit', (['(2)'], {}), '(2)\n', (10541, 10544), False, 'import os\n'), ((11593, 11604), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (11601, 11604), False, 'import os\n'), ((11698, 11709), 'os._exit', 'os._exit', (['(2)'], {}), '(2)\n', (11706, 11709), False, 'import os\n'), ((12724, 12735), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (12732, 12735), False, 'import os\n'), ((12829, 12840), 'os._exit', 'os._exit', (['(2)'], {}), '(2)\n', (12837, 12840), False, 'import os\n'), ((15005, 15016), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (15013, 15016), False, 'import os\n'), ((15110, 15121), 'os._exit', 'os._exit', (['(2)'], {}), '(2)\n', (15118, 15121), False, 'import os\n'), ((15939, 15950), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (15947, 15950), False, 'import os\n'), ((16044, 16055), 'os._exit', 'os._exit', (['(2)'], {}), '(2)\n', (16052, 16055), False, 'import os\n'), ((17668, 17679), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (17676, 17679), False, 'import os\n'), ((17774, 17785), 'os._exit', 'os._exit', (['(2)'], {}), '(2)\n', (17782, 17785), False, 'import os\n'), ((18877, 18888), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (18885, 18888), False, 'import os\n'), ((18982, 18993), 'os._exit', 'os._exit', (['(2)'], {}), '(2)\n', (18990, 18993), False, 'import os\n'), ((20773, 20811), 'pandas.concat', 'pandas.concat', (['[data, reduced]'], {'axis': '(1)'}), '([data, reduced], axis=1)\n', (20786, 20811), False, 'import pandas\n'), ((21185, 21223), 'pandas.concat', 'pandas.concat', (['[data, reduced]'], {'axis': '(1)'}), '([data, reduced], axis=1)\n', (21198, 21223), False, 'import pandas\n'), ((21937, 21956), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(1)'}), '(n_components=1)\n', (21940, 21956), False, 'from sklearn.decomposition import PCA\n'), ((22332, 22370), 'pandas.concat', 'pandas.concat', (['[data, reduced]'], {'axis': '(1)'}), '([data, reduced], axis=1)\n', (22345, 22370), False, 'import pandas\n'), ((22654, 22665), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (22662, 22665), False, 'import os\n'), ((22759, 22770), 'os._exit', 'os._exit', (['(2)'], {}), '(2)\n', (22767, 22770), False, 'import os\n'), ((23960, 23971), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (23968, 23971), 
False, 'import os\n'), ((24065, 24076), 'os._exit', 'os._exit', (['(2)'], {}), '(2)\n', (24073, 24076), False, 'import os\n'), ((4853, 4875), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (4873, 4875), False, 'import traceback\n'), ((4959, 4981), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (4979, 4981), False, 'import traceback\n'), ((5496, 5566), 'pandas.concat', 'pandas.concat', (['[data, data_segments[ind][selected_columns]]'], {'axis': 'axis'}), '([data, data_segments[ind][selected_columns]], axis=axis)\n', (5509, 5566), False, 'import pandas\n'), ((5739, 5761), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (5759, 5761), False, 'import traceback\n'), ((5845, 5867), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (5865, 5867), False, 'import traceback\n'), ((7110, 7132), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (7130, 7132), False, 'import traceback\n'), ((7216, 7238), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (7236, 7238), False, 'import traceback\n'), ((9231, 9253), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (9251, 9253), False, 'import traceback\n'), ((9337, 9359), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (9357, 9359), False, 'import traceback\n'), ((10392, 10414), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (10412, 10414), False, 'import traceback\n'), ((10497, 10519), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (10517, 10519), False, 'import traceback\n'), ((11557, 11579), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (11577, 11579), False, 'import traceback\n'), ((11662, 11684), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (11682, 11684), False, 'import traceback\n'), ((12688, 12710), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (12708, 12710), False, 'import traceback\n'), ((12793, 12815), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (12813, 12815), False, 'import traceback\n'), ((14969, 14991), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (14989, 14991), False, 'import traceback\n'), ((15074, 15096), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (15094, 15096), False, 'import traceback\n'), ((15903, 15925), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (15923, 15925), False, 'import traceback\n'), ((16008, 16030), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (16028, 16030), False, 'import traceback\n'), ((17632, 17654), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (17652, 17654), False, 'import traceback\n'), ((17738, 17760), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (17758, 17760), False, 'import traceback\n'), ((18841, 18863), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (18861, 18863), False, 'import traceback\n'), ((18946, 18968), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (18966, 18968), False, 'import traceback\n'), ((22618, 22640), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (22638, 22640), False, 'import traceback\n'), ((22723, 22745), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (22743, 22745), False, 'import traceback\n'), ((23924, 23946), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (23944, 23946), False, 'import traceback\n'), ((24029, 
24051), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (24049, 24051), False, 'import traceback\n'), ((4334, 4385), 'pandas.concat', 'pandas.concat', (['[segments_combined, segment]'], {'axis': '(0)'}), '([segments_combined, segment], axis=0)\n', (4347, 4385), False, 'import pandas\n'), ((6605, 6630), 'pipeline.feature_engineering.preprocessing.replacement_strategies.mean_replacement_strategy.MeanReplacementStrategy', 'MeanReplacementStrategy', ([], {}), '()\n', (6628, 6630), False, 'from pipeline.feature_engineering.preprocessing.replacement_strategies.mean_replacement_strategy import MeanReplacementStrategy\n'), ((6721, 6748), 'pipeline.feature_engineering.preprocessing.replacement_strategies.del_row_replacement_strategy.DelRowReplacementStrategy', 'DelRowReplacementStrategy', ([], {}), '()\n', (6746, 6748), False, 'from pipeline.feature_engineering.preprocessing.replacement_strategies.del_row_replacement_strategy import DelRowReplacementStrategy\n'), ((6847, 6882), 'pipeline.feature_engineering.preprocessing.replacement_strategies.replacement_val_replacement_strategy.ReplacementValReplacementStrategy', 'ReplacementValReplacementStrategy', ([], {}), '()\n', (6880, 6882), False, 'from pipeline.feature_engineering.preprocessing.replacement_strategies.replacement_val_replacement_strategy import ReplacementValReplacementStrategy\n'), ((12440, 12467), 'pipeline.feature_engineering.preprocessing.replacement_strategies.del_row_replacement_strategy.DelRowReplacementStrategy', 'DelRowReplacementStrategy', ([], {}), '()\n', (12465, 12467), False, 'from pipeline.feature_engineering.preprocessing.replacement_strategies.del_row_replacement_strategy import DelRowReplacementStrategy\n'), ((22040, 22083), 'numpy.dot', 'numpy.dot', (['pca.components_', 'data[columns].T'], {}), '(pca.components_, data[columns].T)\n', (22049, 22083), False, 'import numpy\n'), ((1950, 1984), 'pandas.Series', 'pandas.Series', (['selected_data.index'], {}), '(selected_data.index)\n', (1963, 1984), False, 'import pandas\n')]
|
"""
Sphere function (2 random inputs, scalar output)
================================================
In this example, PCE is used to generate a surrogate model for a given set of 2D data.
.. math:: f(x) = x_1^2 + x_2^2
**Description:** Dimensions: 2
**Input Domain:** This function is evaluated on the hypercube :math:`x_i \in [-5.12, 5.12]` for all :math:`i = 1,2`.
**Global minimum:** :math:`f(x^*)=0,` at :math:`x^* = (0,0)`.
**Reference:** <NAME>., & <NAME>. (1978). The global optimization problem: an introduction. Towards global optimization, 2, 1-15.
"""
# %% md
#
# Import necessary libraries.
# %%
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from UQpy.surrogates import *
from UQpy.distributions import Uniform, JointIndependent
# %% md
#
# Define the function.
# %%
def function(x,y):
return x**2 + y**2
# %% md
#
# Create a distribution object, generate samples and evaluate the function at the samples.
# %%
np.random.seed(1)
dist_1 = Uniform(loc=-5.12, scale=10.24)
dist_2 = Uniform(loc=-5.12, scale=10.24)
marg = [dist_1, dist_2]
joint = JointIndependent(marginals=marg)
n_samples = 100
x = joint.rvs(n_samples)
y = function(x[:,0], x[:,1])
# %% md
#
# Visualize the 2D function.
# %%
xmin, xmax = -6,6
ymin, ymax = -6,6
X1 = np.linspace(xmin, xmax, 50)
X2 = np.linspace(ymin, ymax, 50)
X1_, X2_ = np.meshgrid(X1, X2) # grid of points
f = function(X1_, X2_)
fig = plt.figure(figsize=(10,6))
ax = fig.gca(projection='3d')
surf = ax.plot_surface(X1_, X2_, f, rstride=1, cstride=1, cmap='gnuplot2', linewidth=0, antialiased=False)
ax.set_title('True function')
ax.set_xlabel('$x_1$', fontsize=15)
ax.set_ylabel('$x_2$', fontsize=15)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.view_init(20, 140)
fig.colorbar(surf, shrink=0.5, aspect=7)
plt.show()
# %% md
#
# Visualize training data.
# %%
fig = plt.figure(figsize=(10,6))
ax = fig.gca(projection='3d')
ax.scatter(x[:,0], x[:,1], y, s=20, c='r')
ax.set_title('Training data')
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.view_init(20,140)
ax.set_xlabel('$x_1$', fontsize=15)
ax.set_ylabel('$x_2$', fontsize=15)
plt.show()
# %% md
#
# Create an object from the PCE class. Compute PCE coefficients using least squares regression.
# %%
max_degree = 3
polynomial_basis = TotalDegreeBasis(joint, max_degree)
least_squares = LeastSquareRegression()
pce = PolynomialChaosExpansion(polynomial_basis=polynomial_basis, regression_method=least_squares)
pce.fit(x,y)
# %% md
#
# Compute PCE coefficients using LASSO.
# %%
polynomial_basis = TotalDegreeBasis(joint, max_degree)
lasso = LassoRegression()
pce2 = PolynomialChaosExpansion(polynomial_basis=polynomial_basis, regression_method=lasso)
pce2.fit(x,y)
# %% md
#
# Compute PCE coefficients with Ridge regression.
# %%
polynomial_basis = TotalDegreeBasis(joint, max_degree)
ridge = RidgeRegression()
pce3 = PolynomialChaosExpansion(polynomial_basis=polynomial_basis, regression_method=ridge)
pce3.fit(x,y)
# %% md
#
# PCE surrogate is used to predict the behavior of the function at new samples.
# %%
n_test_samples = 10000
x_test = joint.rvs(n_test_samples)
y_test = pce.predict(x_test)
# %% md
#
# Plot PCE prediction.
# %%
fig = plt.figure(figsize=(10,6))
ax = fig.gca(projection='3d')
ax.scatter(x_test[:,0], x_test[:,1], y_test, s=1)
ax.set_title('PCE predictor')
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.view_init(20,140)
ax.set_xlim(-6,6)
ax.set_ylim(-6,6)
ax.set_xlabel('$x_1$', fontsize=15)
ax.set_ylabel('$x_2$', fontsize=15)
plt.show()
# %% md
# Error Estimation
# -----------------
# Construct a validation dataset and get the validation error.
# %%
# validation sample
n_samples = 150
x_val = joint.rvs(n_samples)
y_val = function(x_val[:,0], x_val[:,1])
# PCE predictions
y_pce = pce.predict(x_val).flatten()
y_pce2 = pce2.predict(x_val).flatten()
y_pce3 = pce3.predict(x_val).flatten()
# mean relative validation errors
error = np.sum(np.abs((y_val - y_pce)/y_val))/n_samples
error2 = np.sum(np.abs((y_val - y_pce2)/y_val))/n_samples
error3 = np.sum(np.abs((y_val - y_pce3)/y_val))/n_samples
print('Mean rel. error, LSTSQ:', error)
print('Mean rel. error, LASSO:', error2)
print('Mean rel. error, Ridge:', error3)
# %% md
# Moment Estimation
# -----------------
# Returns mean and variance of the PCE surrogate.
# %%
n_mc = 1000000
x_mc = joint.rvs(n_mc)
y_mc = function(x_mc[:,0], x_mc[:,1])
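# Monte Carlo estimates of the true model's mean and variance, used as a reference for the PCE moments below.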
mean_mc = np.mean(y_mc)
var_mc = np.var(y_mc)
print('Moments from least squares regression :', pce.get_moments())
print('Moments from LASSO regression :', pce2.get_moments())
print('Moments from Ridge regression :', pce3.get_moments())
print('Moments from Monte Carlo integration: ', mean_mc, var_mc)
|
[
"numpy.meshgrid",
"numpy.random.seed",
"matplotlib.pyplot.show",
"numpy.abs",
"UQpy.distributions.JointIndependent",
"matplotlib.pyplot.figure",
"numpy.mean",
"matplotlib.ticker.LinearLocator",
"matplotlib.ticker.FormatStrFormatter",
"numpy.linspace",
"UQpy.distributions.Uniform",
"numpy.var"
] |
[((1125, 1142), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (1139, 1142), True, 'import numpy as np\n'), ((1153, 1184), 'UQpy.distributions.Uniform', 'Uniform', ([], {'loc': '(-5.12)', 'scale': '(10.24)'}), '(loc=-5.12, scale=10.24)\n', (1160, 1184), False, 'from UQpy.distributions import Uniform, JointIndependent\n'), ((1194, 1225), 'UQpy.distributions.Uniform', 'Uniform', ([], {'loc': '(-5.12)', 'scale': '(10.24)'}), '(loc=-5.12, scale=10.24)\n', (1201, 1225), False, 'from UQpy.distributions import Uniform, JointIndependent\n'), ((1259, 1291), 'UQpy.distributions.JointIndependent', 'JointIndependent', ([], {'marginals': 'marg'}), '(marginals=marg)\n', (1275, 1291), False, 'from UQpy.distributions import Uniform, JointIndependent\n'), ((1451, 1478), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', '(50)'], {}), '(xmin, xmax, 50)\n', (1462, 1478), True, 'import numpy as np\n'), ((1484, 1511), 'numpy.linspace', 'np.linspace', (['ymin', 'ymax', '(50)'], {}), '(ymin, ymax, 50)\n', (1495, 1511), True, 'import numpy as np\n'), ((1523, 1542), 'numpy.meshgrid', 'np.meshgrid', (['X1', 'X2'], {}), '(X1, X2)\n', (1534, 1542), True, 'import numpy as np\n'), ((1590, 1617), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (1600, 1617), True, 'import matplotlib.pyplot as plt\n'), ((2024, 2034), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2032, 2034), True, 'import matplotlib.pyplot as plt\n'), ((2086, 2113), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (2096, 2113), True, 'import matplotlib.pyplot as plt\n'), ((2414, 2424), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2422, 2424), True, 'import matplotlib.pyplot as plt\n'), ((3497, 3524), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (3507, 3524), True, 'import matplotlib.pyplot as plt\n'), ((3868, 3878), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3876, 3878), True, 'import matplotlib.pyplot as plt\n'), ((4760, 4773), 'numpy.mean', 'np.mean', (['y_mc'], {}), '(y_mc)\n', (4767, 4773), True, 'import numpy as np\n'), ((4783, 4795), 'numpy.var', 'np.var', (['y_mc'], {}), '(y_mc)\n', (4789, 4795), True, 'import numpy as np\n'), ((1883, 1900), 'matplotlib.ticker.LinearLocator', 'LinearLocator', (['(10)'], {}), '(10)\n', (1896, 1900), False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((1931, 1958), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.02f"""'], {}), "('%.02f')\n", (1949, 1958), False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((2244, 2261), 'matplotlib.ticker.LinearLocator', 'LinearLocator', (['(10)'], {}), '(10)\n', (2257, 2261), False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((2292, 2319), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.02f"""'], {}), "('%.02f')\n", (2310, 2319), False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((3662, 3679), 'matplotlib.ticker.LinearLocator', 'LinearLocator', (['(10)'], {}), '(10)\n', (3675, 3679), False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((3710, 3737), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.02f"""'], {}), "('%.02f')\n", (3728, 3737), False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((4288, 4319), 'numpy.abs', 'np.abs', (['((y_val - y_pce) / y_val)'], {}), '((y_val 
- y_pce) / y_val)\n', (4294, 4319), True, 'import numpy as np\n'), ((4345, 4377), 'numpy.abs', 'np.abs', (['((y_val - y_pce2) / y_val)'], {}), '((y_val - y_pce2) / y_val)\n', (4351, 4377), True, 'import numpy as np\n'), ((4403, 4435), 'numpy.abs', 'np.abs', (['((y_val - y_pce3) / y_val)'], {}), '((y_val - y_pce3) / y_val)\n', (4409, 4435), True, 'import numpy as np\n')]
|
import numpy as np
import pytest
from fast_carpenter.selection.filters import Counter
@pytest.fixture
def weight_names():
return [
"EventWeight",
# "MuonWeight", "ElectronWeight", "JetWeight",
]
@pytest.fixture
def counter(weight_names):
return Counter(weight_names)
def test_init(weight_names, full_wrapped_tree):
c = Counter(weight_names)
assert c._weight_names == weight_names
assert c.counts == (0, 0.0)
assert c._w_counts == (0.0)
def test_increment_mc(counter, full_wrapped_tree):
counter.increment(full_wrapped_tree, is_mc=True)
n_events = len(full_wrapped_tree)
expected_weighted_sum = 229.94895935058594
# expected value is taken from numpy sum, but awkward sum is used
# the difference is small and due to optimization
# see https://github.com/scikit-hep/awkward-1.0/issues/1241
assert counter._w_counts == pytest.approx(np.array([expected_weighted_sum]), 1e-4)
assert counter.counts == (n_events, pytest.approx(expected_weighted_sum, 1e-4))
def test_increment_data(counter, full_wrapped_tree):
counter.increment(full_wrapped_tree, is_mc=False)
n_events = len(full_wrapped_tree)
assert counter._w_counts == (n_events)
assert counter.counts == (n_events, n_events)
def test_add(counter, full_wrapped_tree):
counter.increment(full_wrapped_tree, is_mc=True)
counter.add(counter)
n_events = len(full_wrapped_tree)
expected_weighted_sum = 229.94895935058594
# expected value is taken from numpy sum, but awkward sum is used
# the difference is small and due to optimization
# see https://github.com/scikit-hep/awkward-1.0/issues/1241
assert counter._w_counts == pytest.approx((expected_weighted_sum * 2,), 2e-4)
assert counter.counts == (n_events * 2, pytest.approx(expected_weighted_sum * 2, 2e-4))
def test_increment_without_weights(full_wrapped_tree):
counter = Counter([])
counter.increment(full_wrapped_tree, is_mc=True)
n_events = len(full_wrapped_tree)
with pytest.raises(IndexError):
assert counter._w_counts[0] == n_events
assert counter.counts == (n_events, )
|
[
"pytest.raises",
"pytest.approx",
"numpy.array",
"fast_carpenter.selection.filters.Counter"
] |
[((277, 298), 'fast_carpenter.selection.filters.Counter', 'Counter', (['weight_names'], {}), '(weight_names)\n', (284, 298), False, 'from fast_carpenter.selection.filters import Counter\n'), ((357, 378), 'fast_carpenter.selection.filters.Counter', 'Counter', (['weight_names'], {}), '(weight_names)\n', (364, 378), False, 'from fast_carpenter.selection.filters import Counter\n'), ((1917, 1928), 'fast_carpenter.selection.filters.Counter', 'Counter', (['[]'], {}), '([])\n', (1924, 1928), False, 'from fast_carpenter.selection.filters import Counter\n'), ((1704, 1755), 'pytest.approx', 'pytest.approx', (['(expected_weighted_sum * 2,)', '(0.0002)'], {}), '((expected_weighted_sum * 2,), 0.0002)\n', (1717, 1755), False, 'import pytest\n'), ((2030, 2055), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (2043, 2055), False, 'import pytest\n'), ((911, 944), 'numpy.array', 'np.array', (['[expected_weighted_sum]'], {}), '([expected_weighted_sum])\n', (919, 944), True, 'import numpy as np\n'), ((992, 1036), 'pytest.approx', 'pytest.approx', (['expected_weighted_sum', '(0.0001)'], {}), '(expected_weighted_sum, 0.0001)\n', (1005, 1036), False, 'import pytest\n'), ((1798, 1846), 'pytest.approx', 'pytest.approx', (['(expected_weighted_sum * 2)', '(0.0002)'], {}), '(expected_weighted_sum * 2, 0.0002)\n', (1811, 1846), False, 'import pytest\n')]
|
import torch as pt
import numpy as np
from model.PFSeg import PFSeg3D
from medpy.metric.binary import jc,hd95
from dataset.GuidedBraTSDataset3D import GuidedBraTSDataset3D
# from loss.FALoss3D import FALoss3D
import cv2
from loss.TaskFusionLoss import TaskFusionLoss
from loss.DiceLoss import BinaryDiceLoss
from config import config
import argparse
from tqdm import tqdm
# from tensorboardX import SummaryWriter
crop_size=config.crop_size
size=crop_size[2]
img_size=config.input_img_size
parser = argparse.ArgumentParser(description='Patch-free 3D Medical Image Segmentation.')
parser.add_argument('-dataset_path',type=str,default='/newdata/why/BraTS20',help='path to dataset')
parser.add_argument('-model_save_to',type=str,default='.',help='path to output')
parser.add_argument('-bs', type=int, default=1, help='input batch size')
parser.add_argument('-epoch', type=int, default=100, help='number of epochs')
parser.add_argument('-lr', type=float, default=0.0001, help='learning rate')
parser.add_argument('-w_sr', type=float, default=0.5, help='w_sr of the lossfunc')
parser.add_argument('-w_tf', type=float, default=0.5, help='w_tf of the lossfunc')
parser.add_argument('-load_pretrained',type=str,default='',help='load a pretrained model')
parser.add_argument('-v', help="increase output verbosity", action="store_true")
args = parser.parse_args()
dataset_path=args.dataset_path
lr=args.lr
epoch=args.epoch
batch_size=args.bs
model_path=args.model_save_to
w_sr=args.w_sr
w_tf=args.w_tf
pretrained_model=args.load_pretrained
print(args)
model=PFSeg3D(in_channels=1,out_channels=1).cuda()
if pt.cuda.device_count()>1:
if batch_size<pt.cuda.device_count():
batch_size=pt.cuda.device_count()
        print('Batch size has to be at least the number of GPUs. Set to {:d} instead.'.format(batch_size))
model=pt.nn.DataParallel(model)
if not pretrained_model=='':
model.load_state_dict(pt.load(pretrained_model,map_location = 'cpu'))
trainset=GuidedBraTSDataset3D(dataset_path,mode='train')
valset=GuidedBraTSDataset3D(dataset_path,mode='val')
testset=GuidedBraTSDataset3D(dataset_path,mode='test')
train_dataset=pt.utils.data.DataLoader(trainset,batch_size=batch_size,shuffle=True,drop_last=True)
val_dataset=pt.utils.data.DataLoader(valset,batch_size=1,shuffle=True,drop_last=True)
test_dataset=pt.utils.data.DataLoader(testset,batch_size=1,shuffle=True,drop_last=True)
lossfunc_sr=pt.nn.MSELoss()
lossfunc_seg=pt.nn.BCELoss()
lossfunc_dice=BinaryDiceLoss()
lossfunc_pf=TaskFusionLoss()
optimizer = pt.optim.Adam(model.parameters(), lr=lr)
# # scheduler = pt.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.99)
scheduler=pt.optim.lr_scheduler.ReduceLROnPlateau(optimizer,mode='max',patience=20)
def ValModel():
model.eval()
dice_sum=0
hd_sum=0
jc_sum=0
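    # Sliding-window inference: patches of size crop_size are evaluated with 50% overlap and
    # accumulated into a full-resolution (2x upsampled) output volume; weight_map counts how often
    # each voxel is covered so the accumulated predictions can be averaged.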
weight_map=np.zeros((1,1,2*img_size[0],2*img_size[1],2*img_size[2]))
for a in range(0,img_size[0]-crop_size[0]+1,crop_size[0]//2): # overlap0.5
for b in range(0,img_size[1]-crop_size[1]+1,crop_size[1]//2):
for c in range(0,img_size[2]-crop_size[2]+1,crop_size[2]//2):
weight_map[:,:,(2*a):(2*(a+crop_size[0])),(2*b):(2*(b+crop_size[1])),(2*c):(2*(c+crop_size[2]))]+=1
weight_map=1./weight_map
for i,data in enumerate(val_dataset):
output_list=np.zeros((1,1,2*img_size[0],2*img_size[1],2*img_size[2]))
label_list=np.zeros((1,1,2*img_size[0],2*img_size[1],2*img_size[2]))
(inputs,labels,_,guidance,mask)=data
labels3D = pt.autograd.Variable(labels).type(pt.FloatTensor).cuda().unsqueeze(1)
guidance = pt.autograd.Variable(guidance).type(pt.FloatTensor).cuda().unsqueeze(1)
mask = pt.autograd.Variable(mask).type(pt.FloatTensor).cuda().unsqueeze(1)
for a in range(0,img_size[0]-crop_size[0]+1,crop_size[0]//2): # overlap0.5
for b in range(0,img_size[1]-crop_size[1]+1,crop_size[1]//2):
for c in range(0,img_size[2]-crop_size[2]+1,crop_size[2]//2):
inputs3D = pt.autograd.Variable(inputs[:,a:(a+crop_size[0]),b:(b+crop_size[1]),c:(c+crop_size[2])]).type(pt.FloatTensor).cuda().unsqueeze(1)
with pt.no_grad():
outputs3D,_ = model(inputs3D,guidance)
outputs3D=np.array(outputs3D.cpu().data.numpy())
output_list[:,:,(2*a):(2*(a+crop_size[0])),(2*b):(2*(b+crop_size[1])),(2*c):(2*(c+crop_size[2]))]+=outputs3D
label_list=np.array(labels3D.cpu().data.numpy())
output_list=np.array(output_list)*weight_map
output_list[output_list<0.5]=0
output_list[output_list>=0.5]=1
pr_sum = output_list.sum()
gt_sum = label_list.sum()
pr_gt_sum = np.sum(output_list[label_list == 1])
dice = 2 * pr_gt_sum / (pr_sum + gt_sum)
dice_sum += dice
if args.v:
final_img=np.zeros(shape=(2*img_size[1],2*2*img_size[2]))
final_img[:,:2*img_size[2]]=output_list[0,0,64,:,:]*255
final_img[:,2*img_size[2]:]=label_list[0,0,64,:,:]*255
cv2.imwrite('ValPhase_BraTS.png',final_img)
print("dice:",dice)
hausdorff=hd95(output_list.squeeze(0).squeeze(0),label_list.squeeze(0).squeeze(0))
jaccard=jc(output_list.squeeze(0).squeeze(0),label_list.squeeze(0).squeeze(0))
hd_sum+=hausdorff
jc_sum+=jaccard
print("Finished. Total dice: ",dice_sum/len(val_dataset),'\n')
print("Finished. Avg Jaccard: ",jc_sum/len(val_dataset))
print("Finished. Avg hausdorff: ",hd_sum/len(val_dataset))
return dice_sum/len(val_dataset)
def TestModel():
model.eval()
dice_sum=0
hd_sum=0
jc_sum=0
weight_map=np.zeros((1,1,2*img_size[0],2*img_size[1],2*img_size[2]))
for a in range(0,img_size[0]-crop_size[0]+1,crop_size[0]//2): # overlap0.5
for b in range(0,img_size[1]-crop_size[1]+1,crop_size[1]//2):
for c in range(0,img_size[2]-crop_size[2]+1,crop_size[2]//2):
weight_map[:,:,(2*a):(2*(a+crop_size[0])),(2*b):(2*(b+crop_size[1])),(2*c):(2*(c+crop_size[2]))]+=1
weight_map=1./weight_map
for i,data in enumerate(test_dataset):
output_list=np.zeros((1,1,2*img_size[0],2*img_size[1],2*img_size[2]))
label_list=np.zeros((1,1,2*img_size[0],2*img_size[1],2*img_size[2]))
(inputs,labels,_,guidance,mask)=data
labels3D = pt.autograd.Variable(labels).type(pt.FloatTensor).cuda().unsqueeze(1)
guidance = pt.autograd.Variable(guidance).type(pt.FloatTensor).cuda().unsqueeze(1)
mask = pt.autograd.Variable(mask).type(pt.FloatTensor).cuda().unsqueeze(1)
for a in range(0,img_size[0]-crop_size[0]+1,crop_size[0]//2): # overlap0.5
for b in range(0,img_size[1]-crop_size[1]+1,crop_size[1]//2):
for c in range(0,img_size[2]-crop_size[2]+1,crop_size[2]//2):
inputs3D = pt.autograd.Variable(inputs[:,a:(a+crop_size[0]),b:(b+crop_size[1]),c:(c+crop_size[2])]).type(pt.FloatTensor).cuda().unsqueeze(1)
with pt.no_grad():
outputs3D,_ = model(inputs3D,guidance)
outputs3D=np.array(outputs3D.cpu().data.numpy())
output_list[:,:,(2*a):(2*(a+crop_size[0])),(2*b):(2*(b+crop_size[1])),(2*c):(2*(c+crop_size[2]))]+=outputs3D
label_list=np.array(labels3D.cpu().data.numpy())
output_list=np.array(output_list)*weight_map
output_list[output_list<0.5]=0
output_list[output_list>=0.5]=1
final_img=np.zeros(shape=(2*img_size[1],2*2*img_size[2]))
final_img[:,:2*img_size[2]]=output_list[0,0,64,:,:]*255
final_img[:,2*img_size[2]:]=label_list[0,0,64,:,:]*255
cv2.imwrite('TestPhase_BraTS.png',final_img)
pr_sum = output_list.sum()
gt_sum = label_list.sum()
pr_gt_sum = np.sum(output_list[label_list == 1])
dice = 2 * pr_gt_sum / (pr_sum + gt_sum)
dice_sum += dice
hausdorff=hd95(output_list.squeeze(0).squeeze(0),label_list.squeeze(0).squeeze(0))
jaccard=jc(output_list.squeeze(0).squeeze(0),label_list.squeeze(0).squeeze(0))
hd_sum+=hausdorff
jc_sum+=jaccard
print("Finished. Test Total dice: ",dice_sum/len(test_dataset),'\n')
print("Finished. Test Avg Jaccard: ",jc_sum/len(test_dataset))
print("Finished. Test Avg hausdorff: ",hd_sum/len(test_dataset))
return dice_sum/len(test_dataset)
best_dice=0
iterator=tqdm(train_dataset, ncols=100)
for x in range(epoch):
model.train()
loss_sum=0
print('\n==>Epoch',x,': lr=',optimizer.param_groups[0]['lr'],'==>\n')
for data in iterator:
(inputs,labels_seg,labels_sr,guidance,mask)=data
optimizer.zero_grad()
inputs = pt.autograd.Variable(inputs).type(pt.FloatTensor).cuda().unsqueeze(1)
guidance = pt.autograd.Variable(guidance).type(pt.FloatTensor).cuda().unsqueeze(1)
mask = pt.autograd.Variable(mask).type(pt.FloatTensor).cuda().unsqueeze(1)
labels_seg = pt.autograd.Variable(labels_seg).type(pt.FloatTensor).cuda().unsqueeze(1)
labels_sr = pt.autograd.Variable(labels_sr).type(pt.FloatTensor).cuda().unsqueeze(1)
outputs_seg,outputs_sr = model(inputs,guidance)
loss_seg = lossfunc_seg(outputs_seg, labels_seg)
loss_sr = lossfunc_sr(outputs_sr, labels_sr)
loss_pf = lossfunc_pf(outputs_seg,outputs_sr,labels_seg*labels_sr)
loss_guide=lossfunc_sr(mask*outputs_sr,mask*labels_sr)
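        # Combined objective: BCE + Dice on the segmentation output, MSE on the super-resolution output
        # plus a guidance-masked MSE (weighted by w_sr), and the task-fusion term (weighted by w_tf).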
loss=lossfunc_dice(outputs_seg,labels_seg)+loss_seg+w_sr*(loss_sr+loss_guide)+w_tf*loss_pf
loss.backward()
optimizer.step()
loss_sum+=loss.item()
if args.v:
final_img=np.zeros(shape=(2*size,2*size*5))
iterator.set_postfix(loss=loss.item(),loss_seg=loss_seg.item(),loss_sr=loss_sr.item())
final_img[:,0:(2*size)]=outputs_seg.cpu().data.numpy()[0,0,size//2,:,:]*255
final_img[:,(2*size):(4*size)]=outputs_sr.cpu().data.numpy()[0,0,size//2,:,:]*255
final_img[:,(4*size):(6*size)]=labels_seg.cpu().data.numpy()[0,0,size//2,:,:]*255
final_img[:,(6*size):(8*size)]=labels_sr.cpu().data.numpy()[0,0,size//2,:,:]*255
final_img[:,(8*size):]=cv2.resize(inputs.cpu().data.numpy()[0,0,size//4,:,:],((2*size),(2*size)))*255
cv2.imwrite('combine.png',final_img)
print('==>End of epoch',x,'==>\n')
print('===VAL===>')
dice=ValModel()
scheduler.step(dice)
if dice>best_dice:
best_dice=dice
print('New best dice! Model saved to',model_path+'/PFSeg_3D_BraTS_patch-free_bs'+str(batch_size)+'_best.pt')
pt.save(model.state_dict(), model_path+'/PFSeg_3D_BraTS_patch-free_bs'+str(batch_size)+'_best.pt')
print('===TEST===>')
TestModel()
print('\nBest Dice:',best_dice)
|
[
"numpy.sum",
"argparse.ArgumentParser",
"torch.cuda.device_count",
"torch.no_grad",
"torch.nn.MSELoss",
"torch.nn.BCELoss",
"torch.utils.data.DataLoader",
"loss.TaskFusionLoss.TaskFusionLoss",
"cv2.imwrite",
"torch.load",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"tqdm.tqdm",
"model.PFSeg.PFSeg3D",
"torch.autograd.Variable",
"loss.DiceLoss.BinaryDiceLoss",
"dataset.GuidedBraTSDataset3D.GuidedBraTSDataset3D",
"numpy.zeros",
"numpy.array",
"torch.nn.DataParallel"
] |
[((500, 585), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Patch-free 3D Medical Image Segmentation."""'}), "(description='Patch-free 3D Medical Image Segmentation.'\n )\n", (523, 585), False, 'import argparse\n'), ((1956, 2004), 'dataset.GuidedBraTSDataset3D.GuidedBraTSDataset3D', 'GuidedBraTSDataset3D', (['dataset_path'], {'mode': '"""train"""'}), "(dataset_path, mode='train')\n", (1976, 2004), False, 'from dataset.GuidedBraTSDataset3D import GuidedBraTSDataset3D\n'), ((2011, 2057), 'dataset.GuidedBraTSDataset3D.GuidedBraTSDataset3D', 'GuidedBraTSDataset3D', (['dataset_path'], {'mode': '"""val"""'}), "(dataset_path, mode='val')\n", (2031, 2057), False, 'from dataset.GuidedBraTSDataset3D import GuidedBraTSDataset3D\n'), ((2065, 2112), 'dataset.GuidedBraTSDataset3D.GuidedBraTSDataset3D', 'GuidedBraTSDataset3D', (['dataset_path'], {'mode': '"""test"""'}), "(dataset_path, mode='test')\n", (2085, 2112), False, 'from dataset.GuidedBraTSDataset3D import GuidedBraTSDataset3D\n'), ((2127, 2218), 'torch.utils.data.DataLoader', 'pt.utils.data.DataLoader', (['trainset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'drop_last': '(True)'}), '(trainset, batch_size=batch_size, shuffle=True,\n drop_last=True)\n', (2151, 2218), True, 'import torch as pt\n'), ((2224, 2300), 'torch.utils.data.DataLoader', 'pt.utils.data.DataLoader', (['valset'], {'batch_size': '(1)', 'shuffle': '(True)', 'drop_last': '(True)'}), '(valset, batch_size=1, shuffle=True, drop_last=True)\n', (2248, 2300), True, 'import torch as pt\n'), ((2311, 2388), 'torch.utils.data.DataLoader', 'pt.utils.data.DataLoader', (['testset'], {'batch_size': '(1)', 'shuffle': '(True)', 'drop_last': '(True)'}), '(testset, batch_size=1, shuffle=True, drop_last=True)\n', (2335, 2388), True, 'import torch as pt\n'), ((2399, 2414), 'torch.nn.MSELoss', 'pt.nn.MSELoss', ([], {}), '()\n', (2412, 2414), True, 'import torch as pt\n'), ((2428, 2443), 'torch.nn.BCELoss', 'pt.nn.BCELoss', ([], {}), '()\n', (2441, 2443), True, 'import torch as pt\n'), ((2458, 2474), 'loss.DiceLoss.BinaryDiceLoss', 'BinaryDiceLoss', ([], {}), '()\n', (2472, 2474), False, 'from loss.DiceLoss import BinaryDiceLoss\n'), ((2487, 2503), 'loss.TaskFusionLoss.TaskFusionLoss', 'TaskFusionLoss', ([], {}), '()\n', (2501, 2503), False, 'from loss.TaskFusionLoss import TaskFusionLoss\n'), ((2642, 2717), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'pt.optim.lr_scheduler.ReduceLROnPlateau', (['optimizer'], {'mode': '"""max"""', 'patience': '(20)'}), "(optimizer, mode='max', patience=20)\n", (2681, 2717), True, 'import torch as pt\n'), ((8485, 8515), 'tqdm.tqdm', 'tqdm', (['train_dataset'], {'ncols': '(100)'}), '(train_dataset, ncols=100)\n', (8489, 8515), False, 'from tqdm import tqdm\n'), ((1601, 1623), 'torch.cuda.device_count', 'pt.cuda.device_count', ([], {}), '()\n', (1621, 1623), True, 'import torch as pt\n'), ((1817, 1842), 'torch.nn.DataParallel', 'pt.nn.DataParallel', (['model'], {}), '(model)\n', (1835, 1842), True, 'import torch as pt\n'), ((2806, 2873), 'numpy.zeros', 'np.zeros', (['(1, 1, 2 * img_size[0], 2 * img_size[1], 2 * img_size[2])'], {}), '((1, 1, 2 * img_size[0], 2 * img_size[1], 2 * img_size[2]))\n', (2814, 2873), True, 'import numpy as np\n'), ((5703, 5770), 'numpy.zeros', 'np.zeros', (['(1, 1, 2 * img_size[0], 2 * img_size[1], 2 * img_size[2])'], {}), '((1, 1, 2 * img_size[0], 2 * img_size[1], 2 * img_size[2]))\n', (5711, 5770), True, 'import numpy as np\n'), ((1553, 1591), 'model.PFSeg.PFSeg3D', 'PFSeg3D', ([], {'in_channels': 
'(1)', 'out_channels': '(1)'}), '(in_channels=1, out_channels=1)\n', (1560, 1591), False, 'from model.PFSeg import PFSeg3D\n'), ((1645, 1667), 'torch.cuda.device_count', 'pt.cuda.device_count', ([], {}), '()\n', (1665, 1667), True, 'import torch as pt\n'), ((1688, 1710), 'torch.cuda.device_count', 'pt.cuda.device_count', ([], {}), '()\n', (1708, 1710), True, 'import torch as pt\n'), ((1898, 1943), 'torch.load', 'pt.load', (['pretrained_model'], {'map_location': '"""cpu"""'}), "(pretrained_model, map_location='cpu')\n", (1905, 1943), True, 'import torch as pt\n'), ((3297, 3364), 'numpy.zeros', 'np.zeros', (['(1, 1, 2 * img_size[0], 2 * img_size[1], 2 * img_size[2])'], {}), '((1, 1, 2 * img_size[0], 2 * img_size[1], 2 * img_size[2]))\n', (3305, 3364), True, 'import numpy as np\n'), ((3374, 3441), 'numpy.zeros', 'np.zeros', (['(1, 1, 2 * img_size[0], 2 * img_size[1], 2 * img_size[2])'], {}), '((1, 1, 2 * img_size[0], 2 * img_size[1], 2 * img_size[2]))\n', (3382, 3441), True, 'import numpy as np\n'), ((4728, 4764), 'numpy.sum', 'np.sum', (['output_list[label_list == 1]'], {}), '(output_list[label_list == 1])\n', (4734, 4764), True, 'import numpy as np\n'), ((6195, 6262), 'numpy.zeros', 'np.zeros', (['(1, 1, 2 * img_size[0], 2 * img_size[1], 2 * img_size[2])'], {}), '((1, 1, 2 * img_size[0], 2 * img_size[1], 2 * img_size[2]))\n', (6203, 6262), True, 'import numpy as np\n'), ((6272, 6339), 'numpy.zeros', 'np.zeros', (['(1, 1, 2 * img_size[0], 2 * img_size[1], 2 * img_size[2])'], {}), '((1, 1, 2 * img_size[0], 2 * img_size[1], 2 * img_size[2]))\n', (6280, 6339), True, 'import numpy as np\n'), ((7548, 7602), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2 * img_size[1], 2 * 2 * img_size[2])'}), '(shape=(2 * img_size[1], 2 * 2 * img_size[2]))\n', (7556, 7602), True, 'import numpy as np\n'), ((7731, 7776), 'cv2.imwrite', 'cv2.imwrite', (['"""TestPhase_BraTS.png"""', 'final_img'], {}), "('TestPhase_BraTS.png', final_img)\n", (7742, 7776), False, 'import cv2\n'), ((7874, 7910), 'numpy.sum', 'np.sum', (['output_list[label_list == 1]'], {}), '(output_list[label_list == 1])\n', (7880, 7910), True, 'import numpy as np\n'), ((4517, 4538), 'numpy.array', 'np.array', (['output_list'], {}), '(output_list)\n', (4525, 4538), True, 'import numpy as np\n'), ((4881, 4935), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2 * img_size[1], 2 * 2 * img_size[2])'}), '(shape=(2 * img_size[1], 2 * 2 * img_size[2]))\n', (4889, 4935), True, 'import numpy as np\n'), ((5076, 5120), 'cv2.imwrite', 'cv2.imwrite', (['"""ValPhase_BraTS.png"""', 'final_img'], {}), "('ValPhase_BraTS.png', final_img)\n", (5087, 5120), False, 'import cv2\n'), ((7416, 7437), 'numpy.array', 'np.array', (['output_list'], {}), '(output_list)\n', (7424, 7437), True, 'import numpy as np\n'), ((9737, 9777), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2 * size, 2 * size * 5)'}), '(shape=(2 * size, 2 * size * 5))\n', (9745, 9777), True, 'import numpy as np\n'), ((10365, 10402), 'cv2.imwrite', 'cv2.imwrite', (['"""combine.png"""', 'final_img'], {}), "('combine.png', final_img)\n", (10376, 10402), False, 'import cv2\n'), ((4164, 4176), 'torch.no_grad', 'pt.no_grad', ([], {}), '()\n', (4174, 4176), True, 'import torch as pt\n'), ((7062, 7074), 'torch.no_grad', 'pt.no_grad', ([], {}), '()\n', (7072, 7074), True, 'import torch as pt\n'), ((3497, 3525), 'torch.autograd.Variable', 'pt.autograd.Variable', (['labels'], {}), '(labels)\n', (3517, 3525), True, 'import torch as pt\n'), ((3586, 3616), 'torch.autograd.Variable', 'pt.autograd.Variable', (['guidance'], {}), 
'(guidance)\n', (3606, 3616), True, 'import torch as pt\n'), ((3673, 3699), 'torch.autograd.Variable', 'pt.autograd.Variable', (['mask'], {}), '(mask)\n', (3693, 3699), True, 'import torch as pt\n'), ((6395, 6423), 'torch.autograd.Variable', 'pt.autograd.Variable', (['labels'], {}), '(labels)\n', (6415, 6423), True, 'import torch as pt\n'), ((6484, 6514), 'torch.autograd.Variable', 'pt.autograd.Variable', (['guidance'], {}), '(guidance)\n', (6504, 6514), True, 'import torch as pt\n'), ((6571, 6597), 'torch.autograd.Variable', 'pt.autograd.Variable', (['mask'], {}), '(mask)\n', (6591, 6597), True, 'import torch as pt\n'), ((8778, 8806), 'torch.autograd.Variable', 'pt.autograd.Variable', (['inputs'], {}), '(inputs)\n', (8798, 8806), True, 'import torch as pt\n'), ((8867, 8897), 'torch.autograd.Variable', 'pt.autograd.Variable', (['guidance'], {}), '(guidance)\n', (8887, 8897), True, 'import torch as pt\n'), ((8954, 8980), 'torch.autograd.Variable', 'pt.autograd.Variable', (['mask'], {}), '(mask)\n', (8974, 8980), True, 'import torch as pt\n'), ((9043, 9075), 'torch.autograd.Variable', 'pt.autograd.Variable', (['labels_seg'], {}), '(labels_seg)\n', (9063, 9075), True, 'import torch as pt\n'), ((9137, 9168), 'torch.autograd.Variable', 'pt.autograd.Variable', (['labels_sr'], {}), '(labels_sr)\n', (9157, 9168), True, 'import torch as pt\n'), ((4009, 4104), 'torch.autograd.Variable', 'pt.autograd.Variable', (['inputs[:, a:a + crop_size[0], b:b + crop_size[1], c:c + crop_size[2]]'], {}), '(inputs[:, a:a + crop_size[0], b:b + crop_size[1], c:c +\n crop_size[2]])\n', (4029, 4104), True, 'import torch as pt\n'), ((6907, 7002), 'torch.autograd.Variable', 'pt.autograd.Variable', (['inputs[:, a:a + crop_size[0], b:b + crop_size[1], c:c + crop_size[2]]'], {}), '(inputs[:, a:a + crop_size[0], b:b + crop_size[1], c:c +\n crop_size[2]])\n', (6927, 7002), True, 'import torch as pt\n')]
|
import numpy as np
import pandas as pd
import torch
import torch.utils.data
import torch.optim as optim
from torch.optim import Adam
from torch.nn import functional as F
from torch.nn import (Dropout, LeakyReLU, Linear, Module, ReLU, Sequential,
Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss,SmoothL1Loss)
from model.synthesizer.transformer import ImageTransformer,DataTransformer
from tqdm import tqdm
class Classifier(Module):
def __init__(self,input_dim, dis_dims,st_ed):
super(Classifier,self).__init__()
dim = input_dim-(st_ed[1]-st_ed[0])
seq = []
self.str_end = st_ed
for item in list(dis_dims):
seq += [
Linear(dim, item),
LeakyReLU(0.2),
Dropout(0.5)
]
dim = item
if (st_ed[1]-st_ed[0])==1:
seq += [Linear(dim, 1)]
elif (st_ed[1]-st_ed[0])==2:
seq += [Linear(dim, 1),Sigmoid()]
else:
seq += [Linear(dim,(st_ed[1]-st_ed[0]))]
self.seq = Sequential(*seq)
def forward(self, input):
label=None
if (self.str_end[1]-self.str_end[0])==1:
label = input[:, self.str_end[0]:self.str_end[1]]
else:
label = torch.argmax(input[:, self.str_end[0]:self.str_end[1]], axis=-1)
new_imp = torch.cat((input[:,:self.str_end[0]],input[:,self.str_end[1]:]),1)
if ((self.str_end[1]-self.str_end[0])==2) | ((self.str_end[1]-self.str_end[0])==1):
return self.seq(new_imp).view(-1), label
else:
return self.seq(new_imp), label
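# Map raw generator outputs to data space: tanh for continuous ('tanh') columns and
# Gumbel-softmax (tau=0.2) for one-hot categorical ('softmax') columns, following the
# column layout recorded in output_info.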
def apply_activate(data, output_info):
data_t = []
st = 0
for item in output_info:
if item[1] == 'tanh':
ed = st + item[0]
data_t.append(torch.tanh(data[:, st:ed]))
st = ed
elif item[1] == 'softmax':
ed = st + item[0]
data_t.append(F.gumbel_softmax(data[:, st:ed], tau=0.2))
st = ed
return torch.cat(data_t, dim=1)
def get_st_ed(target_col_index,output_info):
st = 0
c= 0
tc= 0
for item in output_info:
if c==target_col_index:
break
if item[1]=='tanh':
st += item[0]
elif item[1] == 'softmax':
st += item[0]
c+=1
tc+=1
ed= st+output_info[tc][0]
return (st,ed)
def random_choice_prob_index_sampling(probs,col_idx):
option_list = []
for i in col_idx:
pp = probs[i]
option_list.append(np.random.choice(np.arange(len(probs[i])), p=pp))
return np.array(option_list).reshape(col_idx.shape)
def random_choice_prob_index(a, axis=1):
r = np.expand_dims(np.random.rand(a.shape[1 - axis]), axis=axis)
return (a.cumsum(axis=axis) > r).argmax(axis=axis)
def maximum_interval(output_info):
max_interval = 0
for item in output_info:
max_interval = max(max_interval, item[0])
return max_interval
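# Conditional-vector generator (training-by-sampling, in the style of CTGAN-like
# tabular GANs): for each sample it picks a discrete column, draws one of its
# categories using log-frequency-smoothed probabilities, and emits the one-hot
# condition vector plus a column mask.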
class Cond(object):
def __init__(self, data, output_info):
self.model = []
st = 0
counter = 0
for item in output_info:
if item[1] == 'tanh':
st += item[0]
continue
elif item[1] == 'softmax':
ed = st + item[0]
counter += 1
self.model.append(np.argmax(data[:, st:ed], axis=-1))
st = ed
self.interval = []
self.n_col = 0
self.n_opt = 0
st = 0
self.p = np.zeros((counter, maximum_interval(output_info)))
self.p_sampling = []
for item in output_info:
if item[1] == 'tanh':
st += item[0]
continue
elif item[1] == 'softmax':
ed = st + item[0]
tmp = np.sum(data[:, st:ed], axis=0)
tmp_sampling = np.sum(data[:, st:ed], axis=0)
tmp = np.log(tmp + 1)
tmp = tmp / np.sum(tmp)
tmp_sampling = tmp_sampling / np.sum(tmp_sampling)
self.p_sampling.append(tmp_sampling)
self.p[self.n_col, :item[0]] = tmp
self.interval.append((self.n_opt, item[0]))
self.n_opt += item[0]
self.n_col += 1
st = ed
self.interval = np.asarray(self.interval)
def sample_train(self, batch):
if self.n_col == 0:
return None
batch = batch
idx = np.random.choice(np.arange(self.n_col), batch)
vec = np.zeros((batch, self.n_opt), dtype='float32')
mask = np.zeros((batch, self.n_col), dtype='float32')
mask[np.arange(batch), idx] = 1
opt1prime = random_choice_prob_index(self.p[idx])
for i in np.arange(batch):
vec[i, self.interval[idx[i], 0] + opt1prime[i]] = 1
return vec, mask, idx, opt1prime
def sample(self, batch):
if self.n_col == 0:
return None
batch = batch
idx = np.random.choice(np.arange(self.n_col), batch)
vec = np.zeros((batch, self.n_opt), dtype='float32')
opt1prime = random_choice_prob_index_sampling(self.p_sampling,idx)
for i in np.arange(batch):
vec[i, self.interval[idx[i], 0] + opt1prime[i]] = 1
return vec
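# Conditioning loss for the generator: cross-entropy between the generated categorical
# logits and the category requested by the condition vector c, counted only for the
# column selected by the mask m.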
def cond_loss(data, output_info, c, m):
loss = []
st = 0
st_c = 0
for item in output_info:
if item[1] == 'tanh':
st += item[0]
continue
elif item[1] == 'softmax':
ed = st + item[0]
ed_c = st_c + item[0]
tmp = F.cross_entropy(
data[:, st:ed],
torch.argmax(c[:, st_c:ed_c], dim=1),
reduction='none')
loss.append(tmp)
st = ed
st_c = ed_c
loss = torch.stack(loss, dim=1)
return (loss * m).sum() / data.size()[0]
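# Sampler indexes the real training rows by (discrete column, category) so that a real
# batch matching the sampled condition vector can be drawn for the discriminator.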
class Sampler(object):
def __init__(self, data, output_info):
super(Sampler, self).__init__()
self.data = data
self.model = []
self.n = len(data)
st = 0
for item in output_info:
if item[1] == 'tanh':
st += item[0]
continue
elif item[1] == 'softmax':
ed = st + item[0]
tmp = []
for j in range(item[0]):
tmp.append(np.nonzero(data[:, st + j])[0])
self.model.append(tmp)
st = ed
def sample(self, n, col, opt):
if col is None:
idx = np.random.choice(np.arange(self.n), n)
return self.data[idx]
idx = []
for c, o in zip(col, opt):
idx.append(np.random.choice(self.model[c][o]))
return self.data[idx]
class Discriminator(Module):
def __init__(self, side, layers):
super(Discriminator, self).__init__()
self.side = side
info = len(layers)-2
self.seq = Sequential(*layers)
self.seq_info = Sequential(*layers[:info])
def forward(self, input):
return (self.seq(input)), self.seq_info(input)
class Generator(Module):
def __init__(self, side, layers):
super(Generator, self).__init__()
self.side = side
self.seq = Sequential(*layers)
def forward(self, input_):
return self.seq(input_)
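# DCGAN-style layer builders: the (condition-augmented) record is treated as a
# 1-channel square image of side `side`; the discriminator halves and the generator
# doubles the spatial resolution at every block.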
def determine_layers_disc(side, num_channels):
assert side >= 4 and side <= 32
layer_dims = [(1, side), (num_channels, side // 2)]
while layer_dims[-1][1] > 3 and len(layer_dims) < 4:
layer_dims.append((layer_dims[-1][0] * 2, layer_dims[-1][1] // 2))
layers_D = []
for prev, curr in zip(layer_dims, layer_dims[1:]):
layers_D += [
Conv2d(prev[0], curr[0], 4, 2, 1, bias=False),
BatchNorm2d(curr[0]),
LeakyReLU(0.2, inplace=True)
]
layers_D += [
Conv2d(layer_dims[-1][0], 1, layer_dims[-1][1], 1, 0),
Sigmoid()
]
return layers_D
def determine_layers_gen(side, random_dim, num_channels):
assert side >= 4 and side <= 32
layer_dims = [(1, side), (num_channels, side // 2)]
while layer_dims[-1][1] > 3 and len(layer_dims) < 4:
layer_dims.append((layer_dims[-1][0] * 2, layer_dims[-1][1] // 2))
layers_G = [
ConvTranspose2d(
random_dim, layer_dims[-1][0], layer_dims[-1][1], 1, 0, output_padding=0, bias=False)
]
for prev, curr in zip(reversed(layer_dims), reversed(layer_dims[:-1])):
layers_G += [
BatchNorm2d(prev[0]),
ReLU(True),
ConvTranspose2d(prev[0], curr[0], 4, 2, 1, output_padding=0, bias=True)
]
return layers_G
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm') != -1:
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0)
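# CTAB-GAN style synthesizer: a conditional DCGAN over image-encoded tabular records,
# with an optional auxiliary classifier branch when a prediction target column is given.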
class CTABGANSynthesizer:
def __init__(self,
class_dim=(256, 256, 256, 256),
random_dim=100,
num_channels=64,
l2scale=1e-5,
batch_size=500,
epochs=1):
self.random_dim = random_dim
self.class_dim = class_dim
self.num_channels = num_channels
self.dside = None
self.gside = None
self.l2scale = l2scale
self.batch_size = batch_size
self.epochs = epochs
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def fit(self, train_data=pd.DataFrame, categorical=[], mixed={}, type={}):
problem_type = None
target_index=None
if type:
problem_type = list(type.keys())[0]
if problem_type:
target_index = train_data.columns.get_loc(type[problem_type])
self.transformer = DataTransformer(train_data=train_data, categorical_list=categorical, mixed_dict=mixed)
self.transformer.fit()
train_data = self.transformer.transform(train_data.values)
data_sampler = Sampler(train_data, self.transformer.output_info)
data_dim = self.transformer.output_dim
self.cond_generator = Cond(train_data, self.transformer.output_info)
sides = [4, 8, 16, 24, 32]
col_size_d = data_dim + self.cond_generator.n_opt
for i in sides:
if i * i >= col_size_d:
self.dside = i
break
sides = [4, 8, 16, 24, 32]
col_size_g = data_dim
for i in sides:
if i * i >= col_size_g:
self.gside = i
break
layers_G = determine_layers_gen(self.gside, self.random_dim+self.cond_generator.n_opt, self.num_channels)
layers_D = determine_layers_disc(self.dside, self.num_channels)
self.generator = Generator(self.gside, layers_G).to(self.device)
discriminator = Discriminator(self.dside, layers_D).to(self.device)
optimizer_params = dict(lr=2e-4, betas=(0.5, 0.9), eps=1e-3, weight_decay=self.l2scale)
optimizerG = Adam(self.generator.parameters(), **optimizer_params)
optimizerD = Adam(discriminator.parameters(), **optimizer_params)
st_ed = None
classifier=None
optimizerC= None
if target_index != None:
st_ed= get_st_ed(target_index,self.transformer.output_info)
classifier = Classifier(data_dim,self.class_dim,st_ed).to(self.device)
optimizerC = optim.Adam(classifier.parameters(),**optimizer_params)
self.generator.apply(weights_init)
discriminator.apply(weights_init)
self.Gtransformer = ImageTransformer(self.gside)
self.Dtransformer = ImageTransformer(self.dside)
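        # Alternating optimization: per step, update the discriminator on a
        # condition-matched real batch vs. a generated batch, then update the generator,
        # and finally the auxiliary classifier when a target column is defined.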
steps_per_epoch = max(1, len(train_data) // self.batch_size)
for i in tqdm(range(self.epochs)):
for _ in range(steps_per_epoch):
noisez = torch.randn(self.batch_size, self.random_dim, device=self.device)
condvec = self.cond_generator.sample_train(self.batch_size)
c, m, col, opt = condvec
c = torch.from_numpy(c).to(self.device)
m = torch.from_numpy(m).to(self.device)
noisez = torch.cat([noisez, c], dim=1)
noisez = noisez.view(self.batch_size,self.random_dim+self.cond_generator.n_opt,1,1)
perm = np.arange(self.batch_size)
np.random.shuffle(perm)
real = data_sampler.sample(self.batch_size, col[perm], opt[perm])
c_perm = c[perm]
real = torch.from_numpy(real.astype('float32')).to(self.device)
fake = self.generator(noisez)
faket = self.Gtransformer.inverse_transform(fake)
fakeact = apply_activate(faket, self.transformer.output_info)
fake_cat = torch.cat([fakeact, c], dim=1)
real_cat = torch.cat([real, c_perm], dim=1)
real_cat_d = self.Dtransformer.transform(real_cat)
fake_cat_d = self.Dtransformer.transform(fake_cat)
optimizerD.zero_grad()
y_real,_ = discriminator(real_cat_d)
y_fake,_ = discriminator(fake_cat_d)
loss_d = (-(torch.log(y_real + 1e-4).mean()) - (torch.log(1. - y_fake + 1e-4).mean()))
loss_d.backward()
optimizerD.step()
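                # Generator update: adversarial loss plus the conditional cross-entropy,
                # followed by an information loss that matches the mean and std of the
                # discriminator's intermediate features on fake vs. real batches.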
noisez = torch.randn(self.batch_size, self.random_dim, device=self.device)
condvec = self.cond_generator.sample_train(self.batch_size)
c, m, col, opt = condvec
c = torch.from_numpy(c).to(self.device)
m = torch.from_numpy(m).to(self.device)
noisez = torch.cat([noisez, c], dim=1)
noisez = noisez.view(self.batch_size,self.random_dim+self.cond_generator.n_opt,1,1)
optimizerG.zero_grad()
fake = self.generator(noisez)
faket = self.Gtransformer.inverse_transform(fake)
fakeact = apply_activate(faket, self.transformer.output_info)
fake_cat = torch.cat([fakeact, c], dim=1)
fake_cat = self.Dtransformer.transform(fake_cat)
y_fake,info_fake = discriminator(fake_cat)
cross_entropy = cond_loss(faket, self.transformer.output_info, c, m)
_,info_real = discriminator(real_cat_d)
g = -(torch.log(y_fake + 1e-4).mean()) + cross_entropy
g.backward(retain_graph=True)
loss_mean = torch.norm(torch.mean(info_fake.view(self.batch_size,-1), dim=0) - torch.mean(info_real.view(self.batch_size,-1), dim=0), 1)
loss_std = torch.norm(torch.std(info_fake.view(self.batch_size,-1), dim=0) - torch.std(info_real.view(self.batch_size,-1), dim=0), 1)
loss_info = loss_mean + loss_std
loss_info.backward()
optimizerG.step()
if problem_type:
fake = self.generator(noisez)
faket = self.Gtransformer.inverse_transform(fake)
fakeact = apply_activate(faket, self.transformer.output_info)
real_pre, real_label = classifier(real)
fake_pre, fake_label = classifier(fakeact)
c_loss = CrossEntropyLoss()
if (st_ed[1] - st_ed[0])==1:
c_loss= SmoothL1Loss()
real_label = real_label.type_as(real_pre)
fake_label = fake_label.type_as(fake_pre)
real_label = torch.reshape(real_label,real_pre.size())
fake_label = torch.reshape(fake_label,fake_pre.size())
elif (st_ed[1] - st_ed[0])==2:
c_loss = BCELoss()
real_label = real_label.type_as(real_pre)
fake_label = fake_label.type_as(fake_pre)
loss_cc = c_loss(real_pre, real_label)
loss_cg = c_loss(fake_pre, fake_label)
optimizerG.zero_grad()
loss_cg.backward()
optimizerG.step()
optimizerC.zero_grad()
loss_cc.backward()
optimizerC.step()
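    # Generate n synthetic rows: sample condition vectors, run the generator, apply the
    # output activations, and inverse-transform back to the original table schema.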
def sample(self, n):
self.generator.eval()
output_info = self.transformer.output_info
steps = n // self.batch_size + 1
data = []
for i in range(steps):
noisez = torch.randn(self.batch_size, self.random_dim, device=self.device)
condvec = self.cond_generator.sample(self.batch_size)
c = condvec
c = torch.from_numpy(c).to(self.device)
noisez = torch.cat([noisez, c], dim=1)
noisez = noisez.view(self.batch_size,self.random_dim+self.cond_generator.n_opt,1,1)
fake = self.generator(noisez)
faket = self.Gtransformer.inverse_transform(fake)
fakeact = apply_activate(faket,output_info)
data.append(fakeact.detach().cpu().numpy())
data = np.concatenate(data, axis=0)
result = self.transformer.inverse_transform(data)
return result[0:n]
|
[
"torch.nn.Dropout",
"numpy.sum",
"numpy.argmax",
"torch.argmax",
"torch.cat",
"torch.randn",
"torch.nn.init.constant_",
"numpy.arange",
"torch.nn.BCELoss",
"model.synthesizer.transformer.ImageTransformer",
"torch.nn.Linear",
"numpy.random.choice",
"torch.log",
"numpy.random.shuffle",
"numpy.asarray",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"torch.cuda.is_available",
"torch.nn.LeakyReLU",
"numpy.concatenate",
"torch.nn.Sigmoid",
"torch.tanh",
"torch.from_numpy",
"torch.nn.ReLU",
"torch.stack",
"torch.nn.ConvTranspose2d",
"torch.nn.Sequential",
"numpy.log",
"numpy.zeros",
"torch.nn.CrossEntropyLoss",
"numpy.nonzero",
"model.synthesizer.transformer.DataTransformer",
"torch.nn.init.normal_",
"numpy.array",
"numpy.random.rand",
"torch.nn.SmoothL1Loss",
"torch.nn.functional.gumbel_softmax"
] |
[((2100, 2124), 'torch.cat', 'torch.cat', (['data_t'], {'dim': '(1)'}), '(data_t, dim=1)\n', (2109, 2124), False, 'import torch\n'), ((6050, 6074), 'torch.stack', 'torch.stack', (['loss'], {'dim': '(1)'}), '(loss, dim=1)\n', (6061, 6074), False, 'import torch\n'), ((1104, 1120), 'torch.nn.Sequential', 'Sequential', (['*seq'], {}), '(*seq)\n', (1114, 1120), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((1426, 1496), 'torch.cat', 'torch.cat', (['(input[:, :self.str_end[0]], input[:, self.str_end[1]:])', '(1)'], {}), '((input[:, :self.str_end[0]], input[:, self.str_end[1]:]), 1)\n', (1435, 1496), False, 'import torch\n'), ((2803, 2836), 'numpy.random.rand', 'np.random.rand', (['a.shape[1 - axis]'], {}), '(a.shape[1 - axis])\n', (2817, 2836), True, 'import numpy as np\n'), ((4504, 4529), 'numpy.asarray', 'np.asarray', (['self.interval'], {}), '(self.interval)\n', (4514, 4529), True, 'import numpy as np\n'), ((4725, 4771), 'numpy.zeros', 'np.zeros', (['(batch, self.n_opt)'], {'dtype': '"""float32"""'}), "((batch, self.n_opt), dtype='float32')\n", (4733, 4771), True, 'import numpy as np\n'), ((4787, 4833), 'numpy.zeros', 'np.zeros', (['(batch, self.n_col)'], {'dtype': '"""float32"""'}), "((batch, self.n_col), dtype='float32')\n", (4795, 4833), True, 'import numpy as np\n'), ((4952, 4968), 'numpy.arange', 'np.arange', (['batch'], {}), '(batch)\n', (4961, 4968), True, 'import numpy as np\n'), ((5275, 5321), 'numpy.zeros', 'np.zeros', (['(batch, self.n_opt)'], {'dtype': '"""float32"""'}), "((batch, self.n_opt), dtype='float32')\n", (5283, 5321), True, 'import numpy as np\n'), ((5423, 5439), 'numpy.arange', 'np.arange', (['batch'], {}), '(batch)\n', (5432, 5439), True, 'import numpy as np\n'), ((7200, 7219), 'torch.nn.Sequential', 'Sequential', (['*layers'], {}), '(*layers)\n', (7210, 7219), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((7244, 7270), 'torch.nn.Sequential', 'Sequential', (['*layers[:info]'], {}), '(*layers[:info])\n', (7254, 7270), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((7507, 7526), 'torch.nn.Sequential', 'Sequential', (['*layers'], {}), '(*layers)\n', (7517, 7526), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((8144, 8197), 'torch.nn.Conv2d', 'Conv2d', (['layer_dims[-1][0]', '(1)', 'layer_dims[-1][1]', '(1)', '(0)'], {}), '(layer_dims[-1][0], 1, layer_dims[-1][1], 1, 0)\n', (8150, 8197), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((8208, 8217), 'torch.nn.Sigmoid', 'Sigmoid', ([], {}), '()\n', (8215, 8217), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((8561, 8666), 'torch.nn.ConvTranspose2d', 'ConvTranspose2d', (['random_dim', 'layer_dims[-1][0]', 'layer_dims[-1][1]', '(1)', '(0)'], {'output_padding': '(0)', 'bias': '(False)'}), '(random_dim, layer_dims[-1][0], 
layer_dims[-1][1], 1, 0,\n output_padding=0, bias=False)\n', (8576, 8666), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((9063, 9101), 'torch.nn.init.normal_', 'init.normal_', (['m.weight.data', '(0.0)', '(0.02)'], {}), '(m.weight.data, 0.0, 0.02)\n', (9075, 9101), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((10196, 10286), 'model.synthesizer.transformer.DataTransformer', 'DataTransformer', ([], {'train_data': 'train_data', 'categorical_list': 'categorical', 'mixed_dict': 'mixed'}), '(train_data=train_data, categorical_list=categorical,\n mixed_dict=mixed)\n', (10211, 10286), False, 'from model.synthesizer.transformer import ImageTransformer, DataTransformer\n'), ((12066, 12094), 'model.synthesizer.transformer.ImageTransformer', 'ImageTransformer', (['self.gside'], {}), '(self.gside)\n', (12082, 12094), False, 'from model.synthesizer.transformer import ImageTransformer, DataTransformer\n'), ((12130, 12158), 'model.synthesizer.transformer.ImageTransformer', 'ImageTransformer', (['self.dside'], {}), '(self.dside)\n', (12146, 12158), False, 'from model.synthesizer.transformer import ImageTransformer, DataTransformer\n'), ((18112, 18140), 'numpy.concatenate', 'np.concatenate', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (18126, 18140), True, 'import numpy as np\n'), ((1334, 1398), 'torch.argmax', 'torch.argmax', (['input[:, self.str_end[0]:self.str_end[1]]'], {'axis': '(-1)'}), '(input[:, self.str_end[0]:self.str_end[1]], axis=-1)\n', (1346, 1398), False, 'import torch\n'), ((2693, 2714), 'numpy.array', 'np.array', (['option_list'], {}), '(option_list)\n', (2701, 2714), True, 'import numpy as np\n'), ((4680, 4701), 'numpy.arange', 'np.arange', (['self.n_col'], {}), '(self.n_col)\n', (4689, 4701), True, 'import numpy as np\n'), ((5230, 5251), 'numpy.arange', 'np.arange', (['self.n_col'], {}), '(self.n_col)\n', (5239, 5251), True, 'import numpy as np\n'), ((7973, 8018), 'torch.nn.Conv2d', 'Conv2d', (['prev[0]', 'curr[0]', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(prev[0], curr[0], 4, 2, 1, bias=False)\n', (7979, 8018), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((8032, 8052), 'torch.nn.BatchNorm2d', 'BatchNorm2d', (['curr[0]'], {}), '(curr[0])\n', (8043, 8052), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((8066, 8094), 'torch.nn.LeakyReLU', 'LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (8075, 8094), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((8793, 8813), 'torch.nn.BatchNorm2d', 'BatchNorm2d', (['prev[0]'], {}), '(prev[0])\n', (8804, 8813), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((8827, 8837), 'torch.nn.ReLU', 'ReLU', (['(True)'], {}), '(True)\n', (8831, 8837), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, 
Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((8851, 8922), 'torch.nn.ConvTranspose2d', 'ConvTranspose2d', (['prev[0]', 'curr[0]', '(4)', '(2)', '(1)'], {'output_padding': '(0)', 'bias': '(True)'}), '(prev[0], curr[0], 4, 2, 1, output_padding=0, bias=True)\n', (8866, 8922), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((9155, 9193), 'torch.nn.init.normal_', 'init.normal_', (['m.weight.data', '(1.0)', '(0.02)'], {}), '(m.weight.data, 1.0, 0.02)\n', (9167, 9193), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((9202, 9232), 'torch.nn.init.constant_', 'init.constant_', (['m.bias.data', '(0)'], {}), '(m.bias.data, 0)\n', (9216, 9232), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((17507, 17572), 'torch.randn', 'torch.randn', (['self.batch_size', 'self.random_dim'], {'device': 'self.device'}), '(self.batch_size, self.random_dim, device=self.device)\n', (17518, 17572), False, 'import torch\n'), ((17736, 17765), 'torch.cat', 'torch.cat', (['[noisez, c]'], {'dim': '(1)'}), '([noisez, c], dim=1)\n', (17745, 17765), False, 'import torch\n'), ((719, 736), 'torch.nn.Linear', 'Linear', (['dim', 'item'], {}), '(dim, item)\n', (725, 736), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((754, 768), 'torch.nn.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (763, 768), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((786, 798), 'torch.nn.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (793, 798), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((900, 914), 'torch.nn.Linear', 'Linear', (['dim', '(1)'], {}), '(dim, 1)\n', (906, 914), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((1887, 1913), 'torch.tanh', 'torch.tanh', (['data[:, st:ed]'], {}), '(data[:, st:ed])\n', (1897, 1913), False, 'import torch\n'), ((4847, 4863), 'numpy.arange', 'np.arange', (['batch'], {}), '(batch)\n', (4856, 4863), True, 'import numpy as np\n'), ((6816, 6833), 'numpy.arange', 'np.arange', (['self.n'], {}), '(self.n)\n', (6825, 6833), True, 'import numpy as np\n'), ((6947, 6981), 'numpy.random.choice', 'np.random.choice', (['self.model[c][o]'], {}), '(self.model[c][o])\n', (6963, 6981), True, 'import numpy as np\n'), ((9819, 9844), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9842, 9844), False, 'import torch\n'), ((12376, 12441), 'torch.randn', 'torch.randn', (['self.batch_size', 'self.random_dim'], {'device': 'self.device'}), '(self.batch_size, self.random_dim, device=self.device)\n', (12387, 12441), False, 'import torch\n'), ((12697, 12726), 'torch.cat', 'torch.cat', (['[noisez, c]'], {'dim': '(1)'}), '([noisez, c], 
dim=1)\n', (12706, 12726), False, 'import torch\n'), ((12871, 12897), 'numpy.arange', 'np.arange', (['self.batch_size'], {}), '(self.batch_size)\n', (12880, 12897), True, 'import numpy as np\n'), ((12914, 12937), 'numpy.random.shuffle', 'np.random.shuffle', (['perm'], {}), '(perm)\n', (12931, 12937), True, 'import numpy as np\n'), ((13412, 13442), 'torch.cat', 'torch.cat', (['[fakeact, c]'], {'dim': '(1)'}), '([fakeact, c], dim=1)\n', (13421, 13442), False, 'import torch\n'), ((13470, 13502), 'torch.cat', 'torch.cat', (['[real, c_perm]'], {'dim': '(1)'}), '([real, c_perm], dim=1)\n', (13479, 13502), False, 'import torch\n'), ((14041, 14106), 'torch.randn', 'torch.randn', (['self.batch_size', 'self.random_dim'], {'device': 'self.device'}), '(self.batch_size, self.random_dim, device=self.device)\n', (14052, 14106), False, 'import torch\n'), ((14379, 14408), 'torch.cat', 'torch.cat', (['[noisez, c]'], {'dim': '(1)'}), '([noisez, c], dim=1)\n', (14388, 14408), False, 'import torch\n'), ((14769, 14799), 'torch.cat', 'torch.cat', (['[fakeact, c]'], {'dim': '(1)'}), '([fakeact, c], dim=1)\n', (14778, 14799), False, 'import torch\n'), ((982, 996), 'torch.nn.Linear', 'Linear', (['dim', '(1)'], {}), '(dim, 1)\n', (988, 996), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((997, 1006), 'torch.nn.Sigmoid', 'Sigmoid', ([], {}), '()\n', (1004, 1006), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((1042, 1074), 'torch.nn.Linear', 'Linear', (['dim', '(st_ed[1] - st_ed[0])'], {}), '(dim, st_ed[1] - st_ed[0])\n', (1048, 1074), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((2026, 2067), 'torch.nn.functional.gumbel_softmax', 'F.gumbel_softmax', (['data[:, st:ed]'], {'tau': '(0.2)'}), '(data[:, st:ed], tau=0.2)\n', (2042, 2067), True, 'from torch.nn import functional as F\n'), ((3958, 3988), 'numpy.sum', 'np.sum', (['data[:, st:ed]'], {'axis': '(0)'}), '(data[:, st:ed], axis=0)\n', (3964, 3988), True, 'import numpy as np\n'), ((4022, 4052), 'numpy.sum', 'np.sum', (['data[:, st:ed]'], {'axis': '(0)'}), '(data[:, st:ed], axis=0)\n', (4028, 4052), True, 'import numpy as np\n'), ((4080, 4095), 'numpy.log', 'np.log', (['(tmp + 1)'], {}), '(tmp + 1)\n', (4086, 4095), True, 'import numpy as np\n'), ((5897, 5933), 'torch.argmax', 'torch.argmax', (['c[:, st_c:ed_c]'], {'dim': '(1)'}), '(c[:, st_c:ed_c], dim=1)\n', (5909, 5933), False, 'import torch\n'), ((16165, 16183), 'torch.nn.CrossEntropyLoss', 'CrossEntropyLoss', ([], {}), '()\n', (16181, 16183), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((17679, 17698), 'torch.from_numpy', 'torch.from_numpy', (['c'], {}), '(c)\n', (17695, 17698), False, 'import torch\n'), ((3465, 3499), 'numpy.argmax', 'np.argmax', (['data[:, st:ed]'], {'axis': '(-1)'}), '(data[:, st:ed], axis=-1)\n', (3474, 3499), True, 'import numpy as np\n'), ((4126, 4137), 'numpy.sum', 'np.sum', (['tmp'], {}), '(tmp)\n', (4132, 4137), True, 'import numpy as np\n'), ((4184, 4204), 'numpy.sum', 'np.sum', (['tmp_sampling'], {}), '(tmp_sampling)\n', (4190, 4204), True, 'import 
numpy as np\n'), ((12580, 12599), 'torch.from_numpy', 'torch.from_numpy', (['c'], {}), '(c)\n', (12596, 12599), False, 'import torch\n'), ((12636, 12655), 'torch.from_numpy', 'torch.from_numpy', (['m'], {}), '(m)\n', (12652, 12655), False, 'import torch\n'), ((14262, 14281), 'torch.from_numpy', 'torch.from_numpy', (['c'], {}), '(c)\n', (14278, 14281), False, 'import torch\n'), ((14318, 14337), 'torch.from_numpy', 'torch.from_numpy', (['m'], {}), '(m)\n', (14334, 14337), False, 'import torch\n'), ((16287, 16301), 'torch.nn.SmoothL1Loss', 'SmoothL1Loss', ([], {}), '()\n', (16299, 16301), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((13888, 13920), 'torch.log', 'torch.log', (['(1.0 - y_fake + 0.0001)'], {}), '(1.0 - y_fake + 0.0001)\n', (13897, 13920), False, 'import torch\n'), ((16722, 16731), 'torch.nn.BCELoss', 'BCELoss', ([], {}), '()\n', (16729, 16731), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((6610, 6637), 'numpy.nonzero', 'np.nonzero', (['data[:, st + j]'], {}), '(data[:, st + j])\n', (6620, 6637), True, 'import numpy as np\n'), ((13852, 13878), 'torch.log', 'torch.log', (['(y_real + 0.0001)'], {}), '(y_real + 0.0001)\n', (13861, 13878), False, 'import torch\n'), ((15144, 15170), 'torch.log', 'torch.log', (['(y_fake + 0.0001)'], {}), '(y_fake + 0.0001)\n', (15153, 15170), False, 'import torch\n')]
|
"""
util.py
Some utility functions
"""
import os
import numpy as np
from sklearn.neighbors import BallTree, radius_neighbors_graph
import networkx as nx
__all__ = ["ORCA_PATH", "pbc", "orbits", "weights", "compute_graph"]
ORCA_PATH = os.path.abspath(os.path.abspath(__file__) + "../../../orca/orca.exe")
def pbc(x0, x1, dims):
delta = np.abs(x0 - x1)
delta = np.where(delta > 0.5 * dims, delta - dims, delta)
return np.sqrt((delta**2).sum(axis=-1))
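# Orbit weights: each of the 73 graphlet orbits is down-weighted by its redundancy,
# w_i = 1 - log(o_i) / log(73); the array below appears to hold the standard orbit
# dependency counts used in graphlet-degree-distribution (GDD) style comparisons.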
orbits = np.array([
1, 2, 2, 2, 3, 4, 3, 3, 4, 3, 4, 4, 4, 4, 3, 4, 6, 5, 4, 5, 6, 6, 4, 4, 4, 5, 7, 4, 6, 6, 7, 4, 6, 6, 6, 5, 6, 7,
7, 5, 7, 6, 7, 6, 5, 5, 6, 8, 7, 6, 6, 8, 6, 9, 5, 6, 4, 6, 6, 7, 8, 6, 6, 8, 7, 6, 7, 7, 8, 5, 6, 6, 4
],
dtype=np.float)
weights = 1. - np.log(orbits) / np.log(73.)
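# Build an undirected contact graph: points within r_cut of each other (under the given
# metric, e.g. the periodic-boundary distance above) become edges of a NetworkX graph.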
def compute_graph(X, r_cut, **kwargs):
if kwargs["dims"] is not None:
BT = BallTree(X, metric=kwargs["metric"], dims=kwargs["dims"])
else:
BT = BallTree(X, metric=kwargs["metric"])
rng_con = radius_neighbors_graph(BT, r_cut, n_jobs=1, mode='connectivity')
A = np.matrix(rng_con.toarray())
G = nx.from_numpy_matrix(A)
return G
|
[
"os.path.abspath",
"numpy.abs",
"networkx.from_numpy_matrix",
"sklearn.neighbors.radius_neighbors_graph",
"numpy.log",
"numpy.where",
"numpy.array",
"sklearn.neighbors.BallTree"
] |
[((477, 734), 'numpy.array', 'np.array', (['[1, 2, 2, 2, 3, 4, 3, 3, 4, 3, 4, 4, 4, 4, 3, 4, 6, 5, 4, 5, 6, 6, 4, 4, 4,\n 5, 7, 4, 6, 6, 7, 4, 6, 6, 6, 5, 6, 7, 7, 5, 7, 6, 7, 6, 5, 5, 6, 8, 7,\n 6, 6, 8, 6, 9, 5, 6, 4, 6, 6, 7, 8, 6, 6, 8, 7, 6, 7, 7, 8, 5, 6, 6, 4]'], {'dtype': 'np.float'}), '([1, 2, 2, 2, 3, 4, 3, 3, 4, 3, 4, 4, 4, 4, 3, 4, 6, 5, 4, 5, 6, 6,\n 4, 4, 4, 5, 7, 4, 6, 6, 7, 4, 6, 6, 6, 5, 6, 7, 7, 5, 7, 6, 7, 6, 5, 5,\n 6, 8, 7, 6, 6, 8, 6, 9, 5, 6, 4, 6, 6, 7, 8, 6, 6, 8, 7, 6, 7, 7, 8, 5,\n 6, 6, 4], dtype=np.float)\n', (485, 734), True, 'import numpy as np\n'), ((344, 359), 'numpy.abs', 'np.abs', (['(x0 - x1)'], {}), '(x0 - x1)\n', (350, 359), True, 'import numpy as np\n'), ((372, 421), 'numpy.where', 'np.where', (['(delta > 0.5 * dims)', '(delta - dims)', 'delta'], {}), '(delta > 0.5 * dims, delta - dims, delta)\n', (380, 421), True, 'import numpy as np\n'), ((1016, 1080), 'sklearn.neighbors.radius_neighbors_graph', 'radius_neighbors_graph', (['BT', 'r_cut'], {'n_jobs': '(1)', 'mode': '"""connectivity"""'}), "(BT, r_cut, n_jobs=1, mode='connectivity')\n", (1038, 1080), False, 'from sklearn.neighbors import BallTree, radius_neighbors_graph\n'), ((1126, 1149), 'networkx.from_numpy_matrix', 'nx.from_numpy_matrix', (['A'], {}), '(A)\n', (1146, 1149), True, 'import networkx as nx\n'), ((253, 278), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (268, 278), False, 'import os\n'), ((766, 780), 'numpy.log', 'np.log', (['orbits'], {}), '(orbits)\n', (772, 780), True, 'import numpy as np\n'), ((783, 795), 'numpy.log', 'np.log', (['(73.0)'], {}), '(73.0)\n', (789, 795), True, 'import numpy as np\n'), ((884, 941), 'sklearn.neighbors.BallTree', 'BallTree', (['X'], {'metric': "kwargs['metric']", 'dims': "kwargs['dims']"}), "(X, metric=kwargs['metric'], dims=kwargs['dims'])\n", (892, 941), False, 'from sklearn.neighbors import BallTree, radius_neighbors_graph\n'), ((965, 1001), 'sklearn.neighbors.BallTree', 'BallTree', (['X'], {'metric': "kwargs['metric']"}), "(X, metric=kwargs['metric'])\n", (973, 1001), False, 'from sklearn.neighbors import BallTree, radius_neighbors_graph\n')]
|
"""
@author: <NAME>
@contact: <EMAIL>
"""
import logging
import numpy as np # type: ignore
import sys
from typing import Callable
def ert_type(x, stype, label):
if not isinstance(x, stype):
raise AssertionError(f"{label} should be {stype}, {type(x)} instead")
def ert_multiTypes(x, types, label):
cond = any(isinstance(x, t) for t in types)
if not cond:
raise AssertionError(f"{label} should be one of {types}, {type(x)} instead")
def ert_nonNeg(x, label, include_zero=False):
if not include_zero:
if x <= 0:
raise AssertionError(f"{label} should be greater than 0")
elif x < 0:
raise AssertionError(f"{label} should be greater than or equal to 0")
def ert_inInterv(x, vmin, vmax, label, leftClose=False, rightClose=True):
if leftClose:
if rightClose:
if x < vmin or x > vmax:
raise AssertionError(f"expected {vmin}<={label}<={vmax}")
elif x < vmin or x >= vmax:
raise AssertionError(f"expected {vmin}<={label}<{vmax}")
elif rightClose:
if x <= vmin or x > vmax:
raise AssertionError(f"expected {vmin}<{label}<={vmax}")
elif x <= vmin or x >= vmax:
raise AssertionError(f"expected {vmin}<{label}<{vmax}")
def ert_in_dtype(x, dtype):
if dtype.startswith("f"):
if x > np.finfo(dtype).max:
raise AssertionError(
" ".join(
[
"expected to be lower than {dtype} max:",
f"{x} < {np.finfo(dtype).max}",
]
)
)
elif dtype.startswith("u") or dtype.startswith("i"):
if x > np.iinfo(dtype).max:
raise AssertionError(
" ".join(
[
"expected to be lower than {dtype} max:",
f"{x} < {np.iinfo(dtype).max}",
]
)
)
else:
logging.warning(f"assert not implemented for dtype '{dtype}'")
def enable_rich_assert(fun: Callable) -> Callable:
def wrapper(*args, **kwargs):
try:
return fun(*args, **kwargs)
except AssertionError as e:
logging.exception(e)
sys.exit()
return wrapper
|
[
"logging.exception",
"logging.warning",
"numpy.iinfo",
"numpy.finfo",
"sys.exit"
] |
[((1998, 2060), 'logging.warning', 'logging.warning', (['f"""assert not implemented for dtype \'{dtype}\'"""'], {}), '(f"assert not implemented for dtype \'{dtype}\'")\n', (2013, 2060), False, 'import logging\n'), ((1350, 1365), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (1358, 1365), True, 'import numpy as np\n'), ((2249, 2269), 'logging.exception', 'logging.exception', (['e'], {}), '(e)\n', (2266, 2269), False, 'import logging\n'), ((2282, 2292), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2290, 2292), False, 'import sys\n'), ((1701, 1716), 'numpy.iinfo', 'np.iinfo', (['dtype'], {}), '(dtype)\n', (1709, 1716), True, 'import numpy as np\n'), ((1552, 1567), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (1560, 1567), True, 'import numpy as np\n'), ((1903, 1918), 'numpy.iinfo', 'np.iinfo', (['dtype'], {}), '(dtype)\n', (1911, 1918), True, 'import numpy as np\n')]
|
import ast
import os
import cv2
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from keras.applications.densenet import preprocess_input
from keras.metrics import (categorical_accuracy, top_k_categorical_accuracy)
from keras.models import Model, load_model
DP_DIR = './input/shuffle-csvs/'
INPUT_DIR = './input/quickdraw-doodle-recognition/'
BASE_SIZE = 256
NCSVS = 200
NCATS = 340
np.random.seed(seed=2018)
tf.set_random_seed(seed=2018)
def f2cat(filename: str) -> str:
return filename.split('.')[0]
def list_all_categories():
files = os.listdir(os.path.join(INPUT_DIR, 'train_simplified'))
return sorted([f2cat(f) for f in files], key=str.lower)
def apk(actual, predicted, k=3):
"""
Source: https://github.com/benhamner/Metrics/blob/master/Python/ml_metrics/average_precision.py
"""
if len(predicted) > k:
predicted = predicted[:k]
score = 0.0
num_hits = 0.0
for i, p in enumerate(predicted):
if p in actual and p not in predicted[:i]:
num_hits += 1.0
score += num_hits / (i + 1.0)
if not actual:
return 0.0
return score / min(len(actual), k)
def mapk(actual, predicted, k=3):
"""
Source: https://github.com/benhamner/Metrics/blob/master/Python/ml_metrics/average_precision.py
"""
return np.mean([apk(a, p, k) for a, p in zip(actual, predicted)])
def preds2catids(predictions):
return pd.DataFrame(np.argsort(-predictions, axis=1)[:, :3], columns=['a', 'b', 'c'])
def top_3_accuracy(y_true, y_pred):
return top_k_categorical_accuracy(y_true, y_pred, k=3)
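# Rasterize a QuickDraw stroke list onto a BASE_SIZE x BASE_SIZE canvas with cv2.line;
# when time_color is set, later strokes are drawn in darker grey so stroke order is
# encoded in intensity, and the result is resized if a different size is requested.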
def draw_cv2(raw_strokes, size=256, lw=6, time_color=True):
img = np.zeros((BASE_SIZE, BASE_SIZE), np.uint8)
for t, stroke in enumerate(raw_strokes):
for i in range(len(stroke[0]) - 1):
color = 255 - min(t, 10) * 13 if time_color else 255
_ = cv2.line(img, (stroke[0][i], stroke[1][i]),
(stroke[0][i + 1], stroke[1][i + 1]), color, lw)
img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
if size != BASE_SIZE:
return cv2.resize(img, (size, size))
else:
return img
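# Endless training generator: stream the pre-shuffled CSV shards in chunks of
# `batchsize`, rasterize each drawing, apply DenseNet preprocessing, one-hot encode the
# labels, and yield (x, y) batches indefinitely.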
def image_generator_xd(size, batchsize, ks, lw=6, time_color=True):
while True:
for k in np.random.permutation(ks):
filename = os.path.join(DP_DIR, 'train_k{}.csv.gz'.format(k))
for df in pd.read_csv(filename, chunksize=batchsize):
df['drawing'] = df['drawing'].apply(ast.literal_eval)
x = np.zeros((len(df), size, size, 3))
for i, raw_strokes in enumerate(df.drawing.values):
x[i, :, :, :] = draw_cv2(raw_strokes, size=size, lw=lw,
time_color=time_color)
x = preprocess_input(x).astype(np.float32)
y = keras.utils.to_categorical(df.y, num_classes=NCATS)
yield x, y
def df_to_image_array_xd(df, size, lw=6, time_color=True):
df['drawing'] = df['drawing'].apply(ast.literal_eval)
x = np.zeros((len(df), size, size, 3))
for i, raw_strokes in enumerate(df.drawing.values):
x[i, :, :, :] = draw_cv2(
raw_strokes, size=size, lw=lw, time_color=time_color)
x = preprocess_input(x).astype(np.float32)
return x
class TTA_ModelWrapper():
"""A simple TTA wrapper for keras computer vision models.
Args:
model (keras model): A fitted keras model with a predict method.
"""
def __init__(self, model):
self.model = model
def predict(self, X):
"""Wraps the predict method of the provided model.
        Augments the test data with a flipped copy and averages the results.
Args:
X (numpy array of dim 4): The data to get predictions for.
"""
p0 = self.model.predict(X, batch_size=128, verbose=1)
p1 = self.model.predict(np.flipud(X), batch_size=128, verbose=1)
p = (p0 + p1) / 2
return np.array(p)
|
[
"cv2.line",
"keras.applications.densenet.preprocess_input",
"numpy.random.seed",
"tensorflow.keras.utils.to_categorical",
"cv2.cvtColor",
"pandas.read_csv",
"numpy.zeros",
"numpy.flipud",
"tensorflow.set_random_seed",
"numpy.argsort",
"numpy.array",
"numpy.random.permutation",
"keras.metrics.top_k_categorical_accuracy",
"os.path.join",
"cv2.resize"
] |
[((429, 454), 'numpy.random.seed', 'np.random.seed', ([], {'seed': '(2018)'}), '(seed=2018)\n', (443, 454), True, 'import numpy as np\n'), ((455, 484), 'tensorflow.set_random_seed', 'tf.set_random_seed', ([], {'seed': '(2018)'}), '(seed=2018)\n', (473, 484), True, 'import tensorflow as tf\n'), ((1588, 1635), 'keras.metrics.top_k_categorical_accuracy', 'top_k_categorical_accuracy', (['y_true', 'y_pred'], {'k': '(3)'}), '(y_true, y_pred, k=3)\n', (1614, 1635), False, 'from keras.metrics import categorical_accuracy, top_k_categorical_accuracy\n'), ((1708, 1750), 'numpy.zeros', 'np.zeros', (['(BASE_SIZE, BASE_SIZE)', 'np.uint8'], {}), '((BASE_SIZE, BASE_SIZE), np.uint8)\n', (1716, 1750), True, 'import numpy as np\n'), ((2049, 2086), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_GRAY2RGB'], {}), '(img, cv2.COLOR_GRAY2RGB)\n', (2061, 2086), False, 'import cv2\n'), ((606, 649), 'os.path.join', 'os.path.join', (['INPUT_DIR', '"""train_simplified"""'], {}), "(INPUT_DIR, 'train_simplified')\n", (618, 649), False, 'import os\n'), ((2128, 2157), 'cv2.resize', 'cv2.resize', (['img', '(size, size)'], {}), '(img, (size, size))\n', (2138, 2157), False, 'import cv2\n'), ((2290, 2315), 'numpy.random.permutation', 'np.random.permutation', (['ks'], {}), '(ks)\n', (2311, 2315), True, 'import numpy as np\n'), ((4029, 4040), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (4037, 4040), True, 'import numpy as np\n'), ((1473, 1505), 'numpy.argsort', 'np.argsort', (['(-predictions)'], {'axis': '(1)'}), '(-predictions, axis=1)\n', (1483, 1505), True, 'import numpy as np\n'), ((1921, 2017), 'cv2.line', 'cv2.line', (['img', '(stroke[0][i], stroke[1][i])', '(stroke[0][i + 1], stroke[1][i + 1])', 'color', 'lw'], {}), '(img, (stroke[0][i], stroke[1][i]), (stroke[0][i + 1], stroke[1][i +\n 1]), color, lw)\n', (1929, 2017), False, 'import cv2\n'), ((2413, 2455), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'chunksize': 'batchsize'}), '(filename, chunksize=batchsize)\n', (2424, 2455), True, 'import pandas as pd\n'), ((3278, 3297), 'keras.applications.densenet.preprocess_input', 'preprocess_input', (['x'], {}), '(x)\n', (3294, 3297), False, 'from keras.applications.densenet import preprocess_input\n'), ((3947, 3959), 'numpy.flipud', 'np.flipud', (['X'], {}), '(X)\n', (3956, 3959), True, 'import numpy as np\n'), ((2873, 2924), 'tensorflow.keras.utils.to_categorical', 'keras.utils.to_categorical', (['df.y'], {'num_classes': 'NCATS'}), '(df.y, num_classes=NCATS)\n', (2899, 2924), False, 'from tensorflow import keras\n'), ((2814, 2833), 'keras.applications.densenet.preprocess_input', 'preprocess_input', (['x'], {}), '(x)\n', (2830, 2833), False, 'from keras.applications.densenet import preprocess_input\n')]
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# Adapted from https://github.com/microsoft/CodeXGLUE/blob/main/Text-Code/NL-code-search-Adv/evaluator/evaluator.py
import logging
import sys, json
import numpy as np
def read_answers(filename):
answers = {}
with open(filename) as f:
for idx, line in enumerate(f):
line = line.strip()
js = json.loads(line)
            answers[str(idx)] = str(idx)  # the gold answer for each query is its own line index
return answers
def read_predictions(filename):
predictions = {}
with open(filename) as f:
for idx, line in enumerate(f):
line = line.strip()
js = json.loads(line)
predictions[str(idx)] = js['answers']
return predictions
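# Mean Reciprocal Rank: for each query, score 1/(rank+1) of the gold index within the
# ranked predictions (0 if absent), then average over all queries and round to 4 digits.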
def calculate_scores(answers, predictions):
scores = []
for key in answers:
if key not in predictions:
logging.error("Missing prediction for url {}.".format(key))
sys.exit()
flag = False
for rank, idx in enumerate(predictions[key]):
if idx == answers[key]:
scores.append(1 / (rank + 1))
flag = True
break
if flag is False:
scores.append(0)
result = {}
result['MRR'] = round(np.mean(scores), 4)
return result
def main():
import argparse
parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for POJ-104 dataset.')
parser.add_argument('--answers', '-a', help="filename of the labels, in txt format.")
parser.add_argument('--predictions', '-p', help="filename of the leaderboard predictions, in txt format.")
args = parser.parse_args()
print("reading gold answers")
answers = read_answers(args.answers)
print("reading predcited answers")
predictions = read_predictions(args.predictions)
print("computing scores")
scores = calculate_scores(answers, predictions)
print(scores)
if __name__ == '__main__':
main()
# python mrr.py -a /home/wasiahmad/workspace/projects/NeuralKpGen/data/scikp/kp20k_separated/KP20k.test.jsonl -p /home/rizwan/DPR/predictions_KP20k.jsonl
|
[
"numpy.mean",
"argparse.ArgumentParser",
"sys.exit",
"json.loads"
] |
[((1399, 1496), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Evaluate leaderboard predictions for POJ-104 dataset."""'}), "(description=\n 'Evaluate leaderboard predictions for POJ-104 dataset.')\n", (1422, 1496), False, 'import argparse\n'), ((1314, 1329), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (1321, 1329), True, 'import numpy as np\n'), ((405, 421), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (415, 421), False, 'import sys, json\n'), ((655, 671), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (665, 671), False, 'import sys, json\n'), ((999, 1009), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1007, 1009), False, 'import sys, json\n')]
|
from __future__ import print_function
import argparse
import numpy as np
import os, csv
from dataset import CIFAR10IndexPseudoLabelEnsemble
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
import torch.utils.data as Data
import torch.backends.cudnn as cudnn
from utils import progress_bar, TwoCropTransformAdv
from losses import SupConLoss
import tensorboard_logger as tb_logger
from models.resnet_cifar_multibn_ensembleFC import resnet18 as ResNet18
import random
from fr_util import generate_high
from utils import adjust_learning_rate, warmup_learning_rate
import apex
# ================================================================== #
# Inputs and Pre-definition #
# ================================================================== #
# Arguments
parser = argparse.ArgumentParser()
parser.add_argument('--name', type=str, default='advcl_cifar10',
help='name of the run')
parser.add_argument('--cname', type=str, default='imagenet_clPretrain',
                    help='prefix of the pseudo-label pickle file under data/')
parser.add_argument('--batch-size', type=int, default=512,
help='batch size')
parser.add_argument('--epoch', type=int, default=1000,
help='total epochs')
parser.add_argument('--save-epoch', type=int, default=100,
help='save epochs')
parser.add_argument('--epsilon', type=float, default=8,
help='The upper bound change of L-inf norm on input pixels')
parser.add_argument('--iter', type=int, default=5,
help='The number of iterations for iterative attacks')
parser.add_argument('--radius', type=int, default=8,
help='radius of low freq images')
parser.add_argument('--ce_weight', type=float, default=0.2,
                    help='cross-entropy weight')
# contrastive related
parser.add_argument('-t', '--nce_t', default=0.5, type=float,
help='temperature')
parser.add_argument('--seed', default=0, type=float,
help='random seed')
parser.add_argument('--dataset', type=str, default='cifar10', help='dataset')
parser.add_argument('--cosine', action='store_true',
help='using cosine annealing')
parser.add_argument('--warm', action='store_true',
help='warm-up for large batch training')
parser.add_argument('--learning_rate', type=float, default=0.5,
help='learning rate')
parser.add_argument('--lr_decay_epochs', type=str, default='700,800,900',
help='where to decay lr, can be a list')
parser.add_argument('--lr_decay_rate', type=float, default=0.1,
help='decay rate for learning rate')
parser.add_argument('--weight_decay', type=float, default=1e-4,
help='weight decay')
parser.add_argument('--momentum', type=float, default=0.9,
help='momentum')
args = parser.parse_args()
args.epochs = args.epoch
args.decay = args.weight_decay
args.cosine = True
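# cosine annealing is always enabled for this run, overriding the --cosine flag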
import math
if args.batch_size > 256:
args.warm = True
if args.warm:
args.warmup_from = 0.01
args.warm_epochs = 10
if args.cosine:
eta_min = args.learning_rate * (args.lr_decay_rate ** 3)
args.warmup_to = eta_min + (args.learning_rate - eta_min) * (
1 + math.cos(math.pi * args.warm_epochs / args.epochs)) / 2
else:
args.warmup_to = args.learning_rate
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
args.name = 'AdvCL_Cifar10'
config = {
'epsilon': args.epsilon / 255.,
'num_steps': args.iter,
'step_size': 2.0 / 255,
'random_start': True,
'loss_func': 'xent',
}
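# PGD attack configuration: epsilon is converted from --epsilon (given in /255 units) to the
# [0, 1] pixel scale; the attack runs --iter steps of size 2/255 from a random start inside
# the epsilon ball.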
# ================================================================== #
# Data and Pre-processing #
# ================================================================== #
print('=====> Preparing data...')
# Multi-cuda
if torch.cuda.is_available():
n_gpu = torch.cuda.device_count()
batch_size = args.batch_size
transform_train = transforms.Compose([
transforms.RandomResizedCrop(size=32, scale=(0.2, 1.)),
transforms.RandomHorizontalFlip(),
transforms.RandomApply([
transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)
], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.ToTensor(),
])
train_transform_org = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
])
transform_train = TwoCropTransformAdv(transform_train, train_transform_org)
transform_test = transforms.Compose([
transforms.ToTensor(),
])
label_pseudo_train_list = []
num_classes_list = [2, 10, 50, 100, 500]
dict_name = 'data/{}_pseudo_labels.pkl'.format(args.cname)
f = open(dict_name, 'rb')          # pre-computed pseudo-label dictionary
feat_label_dict = pickle.load(f)  # load pseudo-labels for the 2/10/50/100/500-class clusterings
f.close()
for i in range(5):
class_num = num_classes_list[i]
key_train = 'pseudo_train_{}'.format(class_num)
label_pseudo_train = feat_label_dict[key_train]
label_pseudo_train_list.append(label_pseudo_train)
train_dataset = CIFAR10IndexPseudoLabelEnsemble(root='data',
transform=transform_train,
pseudoLabel_002=label_pseudo_train_list[0],
pseudoLabel_010=label_pseudo_train_list[1],
pseudoLabel_050=label_pseudo_train_list[2],
pseudoLabel_100=label_pseudo_train_list[3],
pseudoLabel_500=label_pseudo_train_list[4],
download=True)
# Data Loader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=n_gpu*4)
# ================================================================== #
# Model, Loss and Optimizer #
# ================================================================== #
# PGD attack model
class AttackPGD(nn.Module):
def __init__(self, model, config):
super(AttackPGD, self).__init__()
self.model = model
self.rand = config['random_start']
self.step_size = config['step_size']
self.epsilon = config['epsilon']
self.num_steps = config['num_steps']
        assert config['loss_func'] == 'xent', 'Please use xent for the loss function.'
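    # forward() crafts two adversarial views of the original image (x_cl, driven by the
    # contrastive term, and x_ce, driven by the ensemble cross-entropy term) and also
    # returns the two augmented clean views plus the frequency-filtered view x_HFC.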
def forward(self, images_t1, images_t2, images_org, targets, criterion):
x1 = images_t1.clone().detach()
x2 = images_t2.clone().detach()
x_cl = images_org.clone().detach()
x_ce = images_org.clone().detach()
images_org_high = generate_high(x_cl.clone(), r=args.radius)
x_HFC = images_org_high.clone().detach()
if self.rand:
x_cl = x_cl + torch.zeros_like(x1).uniform_(-self.epsilon, self.epsilon)
x_ce = x_ce + torch.zeros_like(x1).uniform_(-self.epsilon, self.epsilon)
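        # Iterative PGD: each step ascends the combined (contrastive + weighted CE) loss,
        # then projects both adversarial views back into the epsilon ball and clamps to [0, 1].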
for i in range(self.num_steps):
x_cl.requires_grad_()
x_ce.requires_grad_()
with torch.enable_grad():
f_proj, f_pred = self.model(x_cl, bn_name='pgd', contrast=True)
fce_proj, fce_pred, logits_ce = self.model(x_ce, bn_name='pgd_ce', contrast=True, CF=True, return_logits=True, nonlinear=False)
f1_proj, f1_pred = self.model(x1, bn_name='normal', contrast=True)
f2_proj, f2_pred = self.model(x2, bn_name='normal', contrast=True)
f_high_proj, f_high_pred = self.model(x_HFC, bn_name='normal', contrast=True)
features = torch.cat([f_proj.unsqueeze(1), f1_proj.unsqueeze(1), f2_proj.unsqueeze(1), f_high_proj.unsqueeze(1)], dim=1)
loss_contrast = criterion(features)
loss_ce = 0
for label_idx in range(5):
tgt = targets[label_idx].long()
lgt = logits_ce[label_idx]
loss_ce += F.cross_entropy(lgt, tgt, size_average=False, ignore_index=-1) / 5.
loss = loss_contrast + loss_ce * args.ce_weight
grad_x_cl, grad_x_ce = torch.autograd.grad(loss, [x_cl, x_ce])
x_cl = x_cl.detach() + self.step_size * torch.sign(grad_x_cl.detach())
x_cl = torch.min(torch.max(x_cl, images_org - self.epsilon), images_org + self.epsilon)
x_cl = torch.clamp(x_cl, 0, 1)
x_ce = x_ce.detach() + self.step_size * torch.sign(grad_x_ce.detach())
x_ce = torch.min(torch.max(x_ce, images_org - self.epsilon), images_org + self.epsilon)
x_ce = torch.clamp(x_ce, 0, 1)
return x1, x2, x_cl, x_ce, x_HFC
print('=====> Building model...')
bn_names = ['normal', 'pgd', 'pgd_ce']
model = ResNet18(bn_names=bn_names)
model = model.cuda()
# tb_logger
if not os.path.exists('logger'):
os.makedirs('logger')
logname = ('logger/pretrain_{}'.format(args.name))
logger = tb_logger.Logger(logdir=logname, flush_secs=2)
if torch.cuda.device_count() > 1:
print("=====> Let's use", torch.cuda.device_count(), "GPUs!")
model = apex.parallel.convert_syncbn_model(model)
model = nn.DataParallel(model)
model = model.cuda()
cudnn.benchmark = True
else:
print('single gpu version is not supported, please use multiple GPUs!')
raise NotImplementedError
net = AttackPGD(model, config)
# Loss and optimizer
ce_criterion = nn.CrossEntropyLoss(ignore_index=-1)
contrast_criterion = SupConLoss(temperature=args.nce_t)
optimizer = torch.optim.SGD(net.parameters(), lr=args.learning_rate, momentum=0.9, weight_decay=args.decay)
# optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)
# ================================================================== #
# Train and Test #
# ================================================================== #
def train(epoch):
print('\nEpoch: %d' % epoch)
net.train()
train_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, _, targets, ind) in enumerate(train_loader):
tt = []
for tt_ in targets:
tt.append(tt_.to(device).long())
targets = tt
image_t1, image_t2, image_org = inputs
image_t1 = image_t1.cuda(non_blocking=True)
image_t2 = image_t2.cuda(non_blocking=True)
image_org = image_org.cuda(non_blocking=True)
warmup_learning_rate(args, epoch+1, batch_idx, len(train_loader), optimizer)
# attack contrast
optimizer.zero_grad()
x1, x2, x_cl, x_ce, x_HFC = net(image_t1, image_t2, image_org, targets, contrast_criterion)
f_proj, f_pred = model(x_cl, bn_name='pgd', contrast=True)
fce_proj, fce_pred, logits_ce = model(x_ce, bn_name='pgd_ce', contrast=True, CF=True, return_logits=True, nonlinear=False)
f1_proj, f1_pred = model(x1, bn_name='normal', contrast=True)
f2_proj, f2_pred = model(x2, bn_name='normal', contrast=True)
f_high_proj, f_high_pred = model(x_HFC, bn_name='normal', contrast=True)
features = torch.cat(
[f_proj.unsqueeze(1), f1_proj.unsqueeze(1), f2_proj.unsqueeze(1), f_high_proj.unsqueeze(1)], dim=1)
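        # Four views per sample enter the supervised contrastive loss: the adversarial view
        # x_cl, the two augmented clean views, and the frequency-filtered view x_HFC.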
contrast_loss = contrast_criterion(features)
ce_loss = 0
for label_idx in range(5):
tgt = targets[label_idx].long()
lgt = logits_ce[label_idx]
ce_loss += ce_criterion(lgt, tgt) / 5.
loss = contrast_loss + ce_loss * args.ce_weight
loss.backward()
optimizer.step()
train_loss += loss.item()
total += targets[0].size(0)
progress_bar(batch_idx, len(train_loader),
'Loss: %.3f (%d/%d)'
% (train_loss/(batch_idx+1), correct, total))
    return train_loss / (batch_idx + 1), 0.
# ================================================================== #
# Checkpoint #
# ================================================================== #
# Save checkpoint
def checkpoint(epoch):
print('=====> Saving checkpoint...')
state = {
'model': model.state_dict(),
'epoch': epoch,
'rng_state': torch.get_rng_state()
}
save_dir = './checkpoint/{}'.format(args.name)
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
torch.save(state, '{}/epoch_{}.ckpt'.format(save_dir, epoch))
# ================================================================== #
# Run the model #
# ================================================================== #
np.random.seed(args.seed)
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
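# Main loop: step the learning-rate schedule, run one training epoch, log loss and learning
# rate to TensorBoard, and write a checkpoint every --save-epoch epochs.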
for epoch in range(start_epoch, args.epoch+2):
adjust_learning_rate(args, optimizer, epoch+1)
train_loss, train_acc = train(epoch)
logger.log_value('train_loss', train_loss, epoch)
logger.log_value('learning_rate', optimizer.param_groups[0]['lr'], epoch)
if epoch % args.save_epoch == 0:
checkpoint(epoch)
|
[
"dataset.CIFAR10IndexPseudoLabelEnsemble",
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.autograd.grad",
"utils.adjust_learning_rate",
"losses.SupConLoss",
"torch.get_rng_state",
"torch.cuda.device_count",
"models.resnet_cifar_multibn_ensembleFC.resnet18",
"pickle.load",
"tensorboard_logger.Logger",
"torch.utils.data.DataLoader",
"os.path.exists",
"random.seed",
"math.cos",
"utils.TwoCropTransformAdv",
"torchvision.transforms.RandomHorizontalFlip",
"torch.zeros_like",
"torch.manual_seed",
"torch.nn.functional.cross_entropy",
"torch.clamp",
"torch.cuda.is_available",
"torch.max",
"torch.enable_grad",
"torchvision.transforms.RandomCrop",
"torchvision.transforms.ColorJitter",
"os.makedirs",
"os.path.isdir",
"torch.nn.CrossEntropyLoss",
"torchvision.transforms.RandomResizedCrop",
"torchvision.transforms.RandomGrayscale",
"torch.cuda.manual_seed_all",
"apex.parallel.convert_syncbn_model",
"torch.nn.DataParallel",
"torchvision.transforms.ToTensor"
] |
[((886, 911), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (909, 911), False, 'import argparse\n'), ((4093, 4118), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4116, 4118), False, 'import torch\n'), ((4706, 4763), 'utils.TwoCropTransformAdv', 'TwoCropTransformAdv', (['transform_train', 'train_transform_org'], {}), '(transform_train, train_transform_org)\n', (4725, 4763), False, 'from utils import progress_bar, TwoCropTransformAdv\n'), ((5057, 5071), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5068, 5071), False, 'import pickle\n'), ((5331, 5655), 'dataset.CIFAR10IndexPseudoLabelEnsemble', 'CIFAR10IndexPseudoLabelEnsemble', ([], {'root': '"""data"""', 'transform': 'transform_train', 'pseudoLabel_002': 'label_pseudo_train_list[0]', 'pseudoLabel_010': 'label_pseudo_train_list[1]', 'pseudoLabel_050': 'label_pseudo_train_list[2]', 'pseudoLabel_100': 'label_pseudo_train_list[3]', 'pseudoLabel_500': 'label_pseudo_train_list[4]', 'download': '(True)'}), "(root='data', transform=transform_train,\n pseudoLabel_002=label_pseudo_train_list[0], pseudoLabel_010=\n label_pseudo_train_list[1], pseudoLabel_050=label_pseudo_train_list[2],\n pseudoLabel_100=label_pseudo_train_list[3], pseudoLabel_500=\n label_pseudo_train_list[4], download=True)\n", (5362, 5655), False, 'from dataset import CIFAR10IndexPseudoLabelEnsemble\n'), ((6003, 6117), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(n_gpu * 4)'}), '(dataset=train_dataset, batch_size=batch_size,\n shuffle=True, num_workers=n_gpu * 4)\n', (6030, 6117), False, 'import torch\n'), ((9225, 9252), 'models.resnet_cifar_multibn_ensembleFC.resnet18', 'ResNet18', ([], {'bn_names': 'bn_names'}), '(bn_names=bn_names)\n', (9233, 9252), True, 'from models.resnet_cifar_multibn_ensembleFC import resnet18 as ResNet18\n'), ((9405, 9451), 'tensorboard_logger.Logger', 'tb_logger.Logger', ([], {'logdir': 'logname', 'flush_secs': '(2)'}), '(logdir=logname, flush_secs=2)\n', (9421, 9451), True, 'import tensorboard_logger as tb_logger\n'), ((9872, 9908), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'ignore_index': '(-1)'}), '(ignore_index=-1)\n', (9891, 9908), True, 'import torch.nn as nn\n'), ((9930, 9964), 'losses.SupConLoss', 'SupConLoss', ([], {'temperature': 'args.nce_t'}), '(temperature=args.nce_t)\n', (9940, 9964), False, 'from losses import SupConLoss\n'), ((13097, 13122), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (13111, 13122), True, 'import numpy as np\n'), ((13123, 13145), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (13134, 13145), False, 'import random\n'), ((13146, 13174), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (13163, 13174), False, 'import torch\n'), ((13175, 13212), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (13201, 13212), False, 'import torch\n'), ((4132, 4157), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (4155, 4157), False, 'import torch\n'), ((9293, 9317), 'os.path.exists', 'os.path.exists', (['"""logger"""'], {}), "('logger')\n", (9307, 9317), False, 'import os, csv\n'), ((9323, 9344), 'os.makedirs', 'os.makedirs', (['"""logger"""'], {}), "('logger')\n", (9334, 9344), False, 'import os, csv\n'), ((9455, 9480), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (9478, 
9480), False, 'import torch\n'), ((9564, 9605), 'apex.parallel.convert_syncbn_model', 'apex.parallel.convert_syncbn_model', (['model'], {}), '(model)\n', (9598, 9605), False, 'import apex\n'), ((9618, 9640), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (9633, 9640), True, 'import torch.nn as nn\n'), ((13265, 13313), 'utils.adjust_learning_rate', 'adjust_learning_rate', (['args', 'optimizer', '(epoch + 1)'], {}), '(args, optimizer, epoch + 1)\n', (13285, 13313), False, 'from utils import adjust_learning_rate, warmup_learning_rate\n'), ((3608, 3633), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3631, 3633), False, 'import torch\n'), ((4235, 4290), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', ([], {'size': '(32)', 'scale': '(0.2, 1.0)'}), '(size=32, scale=(0.2, 1.0))\n', (4263, 4290), True, 'import torchvision.transforms as transforms\n'), ((4299, 4332), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (4330, 4332), True, 'import torchvision.transforms as transforms\n'), ((4449, 4482), 'torchvision.transforms.RandomGrayscale', 'transforms.RandomGrayscale', ([], {'p': '(0.2)'}), '(p=0.2)\n', (4475, 4482), True, 'import torchvision.transforms as transforms\n'), ((4492, 4513), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4511, 4513), True, 'import torchvision.transforms as transforms\n'), ((4569, 4605), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (4590, 4605), True, 'import torchvision.transforms as transforms\n'), ((4615, 4648), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (4646, 4648), True, 'import torchvision.transforms as transforms\n'), ((4658, 4679), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4677, 4679), True, 'import torchvision.transforms as transforms\n'), ((4807, 4828), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4826, 4828), True, 'import torchvision.transforms as transforms\n'), ((9516, 9541), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (9539, 9541), False, 'import torch\n'), ((12671, 12692), 'torch.get_rng_state', 'torch.get_rng_state', ([], {}), '()\n', (12690, 12692), False, 'import torch\n'), ((12761, 12784), 'os.path.isdir', 'os.path.isdir', (['save_dir'], {}), '(save_dir)\n', (12774, 12784), False, 'import os, csv\n'), ((12794, 12815), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (12805, 12815), False, 'import os, csv\n'), ((8610, 8649), 'torch.autograd.grad', 'torch.autograd.grad', (['loss', '[x_cl, x_ce]'], {}), '(loss, [x_cl, x_ce])\n', (8629, 8649), False, 'import torch\n'), ((8852, 8875), 'torch.clamp', 'torch.clamp', (['x_cl', '(0)', '(1)'], {}), '(x_cl, 0, 1)\n', (8863, 8875), False, 'import torch\n'), ((9078, 9101), 'torch.clamp', 'torch.clamp', (['x_ce', '(0)', '(1)'], {}), '(x_ce, 0, 1)\n', (9089, 9101), False, 'import torch\n'), ((4379, 4421), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', (['(0.4)', '(0.4)', '(0.4)', '(0.1)'], {}), '(0.4, 0.4, 0.4, 0.1)\n', (4401, 4421), True, 'import torchvision.transforms as transforms\n'), ((7548, 7567), 'torch.enable_grad', 'torch.enable_grad', ([], {}), '()\n', (7565, 7567), False, 'import torch\n'), ((8762, 8804), 'torch.max', 'torch.max', (['x_cl', '(images_org - self.epsilon)'], {}), '(x_cl, 
images_org - self.epsilon)\n', (8771, 8804), False, 'import torch\n'), ((8988, 9030), 'torch.max', 'torch.max', (['x_ce', '(images_org - self.epsilon)'], {}), '(x_ce, images_org - self.epsilon)\n', (8997, 9030), False, 'import torch\n'), ((3380, 3430), 'math.cos', 'math.cos', (['(math.pi * args.warm_epochs / args.epochs)'], {}), '(math.pi * args.warm_epochs / args.epochs)\n', (3388, 3430), False, 'import math\n'), ((7278, 7298), 'torch.zeros_like', 'torch.zeros_like', (['x1'], {}), '(x1)\n', (7294, 7298), False, 'import torch\n'), ((7363, 7383), 'torch.zeros_like', 'torch.zeros_like', (['x1'], {}), '(x1)\n', (7379, 7383), False, 'import torch\n'), ((8443, 8505), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['lgt', 'tgt'], {'size_average': '(False)', 'ignore_index': '(-1)'}), '(lgt, tgt, size_average=False, ignore_index=-1)\n', (8458, 8505), True, 'import torch.nn.functional as F\n')]
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 19 21:04:18 2017
@author: pd
"""
#from IPython import get_ipython
#get_ipython().magic('reset -sf')
import numpy as np
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
n_features=200
X, y = datasets.make_classification(750, n_features=n_features, n_informative=5, random_state=29)
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.25, random_state=0)
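# Sweep the maximum tree depth in steps of 5 and record held-out accuracy at each setting.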
accuracies = []
for x in np.arange(1, n_features+1,5):
dt = DecisionTreeClassifier(max_depth=x)
dt.fit(X_train, y_train)
preds = dt.predict(X_test)
accuracies.append((preds == y_test).mean())
f, ax = plt.subplots(figsize=(7, 5))
ax.plot(range(1, n_features+1,5), accuracies, 'ko')
#ax.plot(range(1, n_features+1)[:12], accuracies[:12], color='k')
ax.set_title("Decision Tree Accuracy")
ax.set_ylabel("% Correct")
ax.set_xlabel("Max Depth")
plt.show()
|
[
"sklearn.cross_validation.train_test_split",
"matplotlib.pyplot.show",
"sklearn.datasets.make_classification",
"sklearn.tree.DecisionTreeClassifier",
"numpy.arange",
"matplotlib.pyplot.subplots"
] |
[((378, 472), 'sklearn.datasets.make_classification', 'datasets.make_classification', (['(750)'], {'n_features': 'n_features', 'n_informative': '(5)', 'random_state': '(29)'}), '(750, n_features=n_features, n_informative=5,\n random_state=29)\n', (406, 472), False, 'from sklearn import datasets\n'), ((507, 561), 'sklearn.cross_validation.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.25)', 'random_state': '(0)'}), '(X, y, test_size=0.25, random_state=0)\n', (523, 561), False, 'from sklearn.cross_validation import train_test_split\n'), ((588, 619), 'numpy.arange', 'np.arange', (['(1)', '(n_features + 1)', '(5)'], {}), '(1, n_features + 1, 5)\n', (597, 619), True, 'import numpy as np\n'), ((800, 828), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(7, 5)'}), '(figsize=(7, 5))\n', (812, 828), True, 'import matplotlib.pyplot as plt\n'), ((1042, 1052), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1050, 1052), True, 'import matplotlib.pyplot as plt\n'), ((627, 662), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'max_depth': 'x'}), '(max_depth=x)\n', (649, 662), False, 'from sklearn.tree import DecisionTreeClassifier\n')]
|
#!/usr/bin/python
# coding: utf-8
import sys
import Levenshtein
import numpy as np
assert len(sys.argv) > 1
with open(sys.argv[1], 'r', encoding='utf-8') as file:
lines = file.readlines()
n_lines = len(lines)
distances = np.zeros((n_lines, n_lines), dtype=int)
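# Compare every pair of input lines; pairs with edit distance below 5 are reported as near-duplicates.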
messages = []
for x in range(n_lines):
for y in range(x + 1, n_lines):
if x != y:
value = Levenshtein.distance(lines[x], lines[y])
distances[x,y] = value
if value < 5:
message = "lines {} and {} look similar\n{}{}\n".format(x, y, lines[x], lines[y])
messages.append(message)
for message in messages:
print(message)
|
[
"Levenshtein.distance",
"numpy.zeros"
] |
[((229, 268), 'numpy.zeros', 'np.zeros', (['(n_lines, n_lines)'], {'dtype': 'int'}), '((n_lines, n_lines), dtype=int)\n', (237, 268), True, 'import numpy as np\n'), ((384, 424), 'Levenshtein.distance', 'Levenshtein.distance', (['lines[x]', 'lines[y]'], {}), '(lines[x], lines[y])\n', (404, 424), False, 'import Levenshtein\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import edward as ed
import numpy as np
import tensorflow as tf
from collections import namedtuple
from edward.models import (
Beta, Dirichlet, DirichletProcess, Gamma, MultivariateNormalDiag,
Normal, Poisson, TransformedDistribution)
from tensorflow.contrib.distributions import bijectors
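# These tests check that ed.transform maps constrained random variables (Beta, Gamma,
# Dirichlet, ...) to unconstrained TransformedDistributions whose samples take both signs,
# and that unsupported or unknown supports raise the expected errors.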
class test_transform_class(tf.test.TestCase):
def assertSamplePosNeg(self, sample):
num_pos = np.sum((sample > 0.0), axis=0, keepdims=True)
num_neg = np.sum((sample < 0.0), axis=0, keepdims=True)
self.assertTrue((num_pos > 0).all())
self.assertTrue((num_neg > 0).all())
def test_args(self):
with self.test_session():
x = Normal(-100.0, 1.0)
y = ed.transform(x, bijectors.Softplus())
sample = y.sample(10).eval()
self.assertTrue((sample >= 0.0).all())
def test_kwargs(self):
with self.test_session():
x = Normal(-100.0, 1.0)
y = ed.transform(x, bijector=bijectors.Softplus())
sample = y.sample(10).eval()
self.assertTrue((sample >= 0.0).all())
def test_01(self):
with self.test_session():
x = Beta(1.0, 1.0)
y = ed.transform(x)
self.assertIsInstance(y, TransformedDistribution)
sample = y.sample(10, seed=1).eval()
self.assertSamplePosNeg(sample)
def test_nonnegative(self):
with self.test_session():
x = Gamma(1.0, 1.0)
y = ed.transform(x)
self.assertIsInstance(y, TransformedDistribution)
sample = y.sample(10, seed=1).eval()
self.assertSamplePosNeg(sample)
def test_simplex(self):
with self.test_session():
x = Dirichlet([1.1, 1.2, 1.3, 1.4])
y = ed.transform(x)
self.assertIsInstance(y, TransformedDistribution)
sample = y.sample(10, seed=1).eval()
self.assertSamplePosNeg(sample)
def test_real(self):
with self.test_session():
x = Normal(0.0, 1.0)
y = ed.transform(x)
self.assertIsInstance(y, Normal)
sample = y.sample(10, seed=1).eval()
self.assertSamplePosNeg(sample)
def test_multivariate_real(self):
with self.test_session():
x = MultivariateNormalDiag(tf.zeros(2), tf.ones(2))
y = ed.transform(x)
sample = y.sample(10, seed=1).eval()
self.assertSamplePosNeg(sample)
def test_no_support(self):
with self.test_session():
x = DirichletProcess(1.0, Normal(0.0, 1.0))
with self.assertRaises(AttributeError):
y = ed.transform(x)
def test_unhandled_support(self):
with self.test_session():
FakeRV = namedtuple('FakeRV', ['support'])
x = FakeRV(support='rational')
with self.assertRaises(ValueError):
y = ed.transform(x)
if __name__ == '__main__':
tf.test.main()
|
[
"tensorflow.test.main",
"tensorflow.ones",
"edward.transform",
"numpy.sum",
"edward.models.Dirichlet",
"tensorflow.zeros",
"collections.namedtuple",
"edward.models.Normal",
"edward.models.Gamma",
"edward.models.Beta",
"tensorflow.contrib.distributions.bijectors.Softplus"
] |
[((2782, 2796), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (2794, 2796), True, 'import tensorflow as tf\n'), ((511, 554), 'numpy.sum', 'np.sum', (['(sample > 0.0)'], {'axis': '(0)', 'keepdims': '(True)'}), '(sample > 0.0, axis=0, keepdims=True)\n', (517, 554), True, 'import numpy as np\n'), ((571, 614), 'numpy.sum', 'np.sum', (['(sample < 0.0)'], {'axis': '(0)', 'keepdims': '(True)'}), '(sample < 0.0, axis=0, keepdims=True)\n', (577, 614), True, 'import numpy as np\n'), ((763, 782), 'edward.models.Normal', 'Normal', (['(-100.0)', '(1.0)'], {}), '(-100.0, 1.0)\n', (769, 782), False, 'from edward.models import Beta, Dirichlet, DirichletProcess, Gamma, MultivariateNormalDiag, Normal, Poisson, TransformedDistribution\n'), ((977, 996), 'edward.models.Normal', 'Normal', (['(-100.0)', '(1.0)'], {}), '(-100.0, 1.0)\n', (983, 996), False, 'from edward.models import Beta, Dirichlet, DirichletProcess, Gamma, MultivariateNormalDiag, Normal, Poisson, TransformedDistribution\n'), ((1196, 1210), 'edward.models.Beta', 'Beta', (['(1.0)', '(1.0)'], {}), '(1.0, 1.0)\n', (1200, 1210), False, 'from edward.models import Beta, Dirichlet, DirichletProcess, Gamma, MultivariateNormalDiag, Normal, Poisson, TransformedDistribution\n'), ((1221, 1236), 'edward.transform', 'ed.transform', (['x'], {}), '(x)\n', (1233, 1236), True, 'import edward as ed\n'), ((1445, 1460), 'edward.models.Gamma', 'Gamma', (['(1.0)', '(1.0)'], {}), '(1.0, 1.0)\n', (1450, 1460), False, 'from edward.models import Beta, Dirichlet, DirichletProcess, Gamma, MultivariateNormalDiag, Normal, Poisson, TransformedDistribution\n'), ((1471, 1486), 'edward.transform', 'ed.transform', (['x'], {}), '(x)\n', (1483, 1486), True, 'import edward as ed\n'), ((1691, 1722), 'edward.models.Dirichlet', 'Dirichlet', (['[1.1, 1.2, 1.3, 1.4]'], {}), '([1.1, 1.2, 1.3, 1.4])\n', (1700, 1722), False, 'from edward.models import Beta, Dirichlet, DirichletProcess, Gamma, MultivariateNormalDiag, Normal, Poisson, TransformedDistribution\n'), ((1733, 1748), 'edward.transform', 'ed.transform', (['x'], {}), '(x)\n', (1745, 1748), True, 'import edward as ed\n'), ((1950, 1966), 'edward.models.Normal', 'Normal', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (1956, 1966), False, 'from edward.models import Beta, Dirichlet, DirichletProcess, Gamma, MultivariateNormalDiag, Normal, Poisson, TransformedDistribution\n'), ((1977, 1992), 'edward.transform', 'ed.transform', (['x'], {}), '(x)\n', (1989, 1992), True, 'import edward as ed\n'), ((2248, 2263), 'edward.transform', 'ed.transform', (['x'], {}), '(x)\n', (2260, 2263), True, 'import edward as ed\n'), ((2611, 2644), 'collections.namedtuple', 'namedtuple', (['"""FakeRV"""', "['support']"], {}), "('FakeRV', ['support'])\n", (2621, 2644), False, 'from collections import namedtuple\n'), ((809, 829), 'tensorflow.contrib.distributions.bijectors.Softplus', 'bijectors.Softplus', ([], {}), '()\n', (827, 829), False, 'from tensorflow.contrib.distributions import bijectors\n'), ((2213, 2224), 'tensorflow.zeros', 'tf.zeros', (['(2)'], {}), '(2)\n', (2221, 2224), True, 'import tensorflow as tf\n'), ((2226, 2236), 'tensorflow.ones', 'tf.ones', (['(2)'], {}), '(2)\n', (2233, 2236), True, 'import tensorflow as tf\n'), ((2437, 2453), 'edward.models.Normal', 'Normal', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (2443, 2453), False, 'from edward.models import Beta, Dirichlet, DirichletProcess, Gamma, MultivariateNormalDiag, Normal, Poisson, TransformedDistribution\n'), ((2513, 2528), 'edward.transform', 'ed.transform', (['x'], {}), '(x)\n', 
(2525, 2528), True, 'import edward as ed\n'), ((2736, 2751), 'edward.transform', 'ed.transform', (['x'], {}), '(x)\n', (2748, 2751), True, 'import edward as ed\n'), ((1032, 1052), 'tensorflow.contrib.distributions.bijectors.Softplus', 'bijectors.Softplus', ([], {}), '()\n', (1050, 1052), False, 'from tensorflow.contrib.distributions import bijectors\n')]
|
# -*- coding: utf-8 -*-
"""
For testing neuromaps.stats functionality
"""
import numpy as np
import pytest
from neuromaps import stats
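# compare_images is not covered yet; the test below is a placeholder marked xfail.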
@pytest.mark.xfail
def test_compare_images():
assert False
def test_permtest_metric():
rs = np.random.default_rng(12345678)
x, y = rs.random(size=(2, 100))
r, p = stats.permtest_metric(x, y)
assert np.allclose([r, p], [0.0345815411043023, 0.7192807192807192])
r, p = stats.permtest_metric(np.c_[x, x[::-1]], np.c_[y, y])
assert np.allclose(r, [0.0345815411043023, 0.03338608427980476])
assert np.allclose(p, [0.7192807192807192, 0.7472527472527473])
@pytest.mark.parametrize('x, y, expected', [
# basic one-dimensional input
(range(5), range(5), (1.0, 0.0)),
# broadcasting occurs regardless of input order
(np.stack([range(5), range(5, 0, -1)], 1), range(5),
([1.0, -1.0], [0.0, 0.0])),
(range(5), np.stack([range(5), range(5, 0, -1)], 1),
([1.0, -1.0], [0.0, 0.0])),
# correlation between matching columns
(np.stack([range(5), range(5, 0, -1)], 1),
np.stack([range(5), range(5, 0, -1)], 1),
([1.0, 1.0], [0.0, 0.0]))
])
def test_efficient_pearsonr(x, y, expected):
assert np.allclose(stats.efficient_pearsonr(x, y), expected)
def test_efficient_pearsonr_errors():
with pytest.raises(ValueError):
stats.efficient_pearsonr(range(4), range(5))
assert all(np.isnan(a) for a in stats.efficient_pearsonr([], []))
|
[
"neuromaps.stats.efficient_pearsonr",
"numpy.allclose",
"numpy.isnan",
"numpy.random.default_rng",
"pytest.raises",
"neuromaps.stats.permtest_metric"
] |
[((241, 272), 'numpy.random.default_rng', 'np.random.default_rng', (['(12345678)'], {}), '(12345678)\n', (262, 272), True, 'import numpy as np\n'), ((320, 347), 'neuromaps.stats.permtest_metric', 'stats.permtest_metric', (['x', 'y'], {}), '(x, y)\n', (341, 347), False, 'from neuromaps import stats\n'), ((359, 420), 'numpy.allclose', 'np.allclose', (['[r, p]', '[0.0345815411043023, 0.7192807192807192]'], {}), '([r, p], [0.0345815411043023, 0.7192807192807192])\n', (370, 420), True, 'import numpy as np\n'), ((433, 486), 'neuromaps.stats.permtest_metric', 'stats.permtest_metric', (['np.c_[x, x[::-1]]', 'np.c_[y, y]'], {}), '(np.c_[x, x[::-1]], np.c_[y, y])\n', (454, 486), False, 'from neuromaps import stats\n'), ((498, 555), 'numpy.allclose', 'np.allclose', (['r', '[0.0345815411043023, 0.03338608427980476]'], {}), '(r, [0.0345815411043023, 0.03338608427980476])\n', (509, 555), True, 'import numpy as np\n'), ((567, 623), 'numpy.allclose', 'np.allclose', (['p', '[0.7192807192807192, 0.7472527472527473]'], {}), '(p, [0.7192807192807192, 0.7472527472527473])\n', (578, 623), True, 'import numpy as np\n'), ((1214, 1244), 'neuromaps.stats.efficient_pearsonr', 'stats.efficient_pearsonr', (['x', 'y'], {}), '(x, y)\n', (1238, 1244), False, 'from neuromaps import stats\n'), ((1305, 1330), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1318, 1330), False, 'import pytest\n'), ((1401, 1412), 'numpy.isnan', 'np.isnan', (['a'], {}), '(a)\n', (1409, 1412), True, 'import numpy as np\n'), ((1422, 1454), 'neuromaps.stats.efficient_pearsonr', 'stats.efficient_pearsonr', (['[]', '[]'], {}), '([], [])\n', (1446, 1454), False, 'from neuromaps import stats\n')]
|
import numpy as np
import pytest
import snc.environments.job_generators.discrete_review_job_generator \
as drjg
import snc.environments.controlled_random_walk as crw
import snc.environments.state_initialiser as si
import snc.agents.general_heuristics.random_nonidling_agent \
as random_nonidling_agent
import snc.agents.general_heuristics.longest_buffer_priority_agent \
as longest_priority_agent
import snc.agents.general_heuristics.custom_activity_priority_agent \
as custom_priority_agent
def get_null_env_params(state, num_resources=None, buffer_processing_matrix=None,
constituency_matrix=None):
num_buffers = state.shape[0]
arrival_rate = np.ones_like(state)
if num_resources is None:
num_resources = num_buffers
if buffer_processing_matrix is None:
buffer_processing_matrix = -np.triu(np.ones((num_buffers, num_resources)))
if constituency_matrix is None:
constituency_matrix = np.zeros((num_resources, num_resources))
time_interval = 1
return {
"cost_per_buffer": np.zeros_like(state),
"capacity": np.zeros_like(state),
"constituency_matrix": constituency_matrix,
"job_generator": drjg.DeterministicDiscreteReviewJobGenerator(
arrival_rate, buffer_processing_matrix, sim_time_interval=time_interval
),
"state_initialiser": si.DeterministicCRWStateInitialiser(state),
"job_conservation_flag": True,
"list_boundary_constraint_matrices": None,
}
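# Each test below builds a minimal ControlledRandomWalk from these null parameters, overriding
# the constituency and boundary-constraint matrices to match the stations and buffers under test.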
def test_random_heuristic_agent_starving():
# Single server queue
safety_stock = 10.0
state = 5 * np.ones((1, 1))
env_params = get_null_env_params(state)
env_params["constituency_matrix"] = np.ones((1, 1))
env_params["list_boundary_constraint_matrices"] = [np.ones((1, 1))]
env = crw.ControlledRandomWalk(**env_params)
agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert action == np.zeros((1, 1))
def test_random_heuristic_agent():
# Single server queue
safety_stock = 1.0
state = 1.1 * np.ones((1, 1))
env_params = get_null_env_params(state)
env_params["constituency_matrix"] = np.ones((1, 1))
env_params["list_boundary_constraint_matrices"] = [np.ones((1, 1))]
env = crw.ControlledRandomWalk(**env_params)
agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert action == np.ones((1, 1))
def test_random_heuristic_agent_multiple_buffers_eye_condition_starving():
# Station scheduling three buffers, each of them having to be above safety stock
safety_stock = 10.0
state = 5 * np.ones((3, 1))
env_params = get_null_env_params(state)
env_params["constituency_matrix"] = np.ones((1, 3))
env_params["list_boundary_constraint_matrices"] = [np.eye(3)]
env = crw.ControlledRandomWalk(**env_params)
agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.all(action == np.zeros((3, 1)))
def test_random_heuristic_agent_multiple_buffers_eye_condition():
# Station scheduling three buffers, each of them having to be above safety stock
safety_stock = 1.0
state = 1.1 * np.ones((3, 1))
env_params = get_null_env_params(state)
env_params["constituency_matrix"] = np.ones((1, 3))
env_params["list_boundary_constraint_matrices"] = [np.eye(3)]
env = crw.ControlledRandomWalk(**env_params)
agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.sum(action) == 1
def test_random_heuristic_agent_multiple_buffers_sum_condition_starving():
# Station scheduling three buffers, the sum of their size having to be above safety stock
safety_stock = 10.0
state = 3 * np.ones((3, 1))
env_params = get_null_env_params(state)
env_params["constituency_matrix"] = np.ones((1, 3))
env_params["list_boundary_constraint_matrices"] = [np.ones((1, 3))]
env = crw.ControlledRandomWalk(**env_params)
agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.all(action == np.zeros((3, 1)))
def test_random_heuristic_agent_multiple_buffers_sum_condition():
# Station scheduling three buffers, the sum of their size having to be above safety stock
safety_stock = 10.0
state = 5 * np.ones((3, 1))
env_params = get_null_env_params(state)
env_params["constituency_matrix"] = np.ones((1, 3))
env_params["list_boundary_constraint_matrices"] = [np.ones((1, 3))]
env = crw.ControlledRandomWalk(**env_params)
agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.sum(action) == 1
def test_random_heuristic_agent_multiple_buffers_multiple_resources_eye_cond_starving():
# Two stations, each one scheduling two buffers, each of them having to be above safety stock.
safety_stock = 10.0
state = 5 * np.ones((4, 1))
env_params = get_null_env_params(state)
env_params["constituency_matrix"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]])
env_params["list_boundary_constraint_matrices"] = [np.hstack((np.eye(2), np.zeros((2, 2)))),
np.hstack((np.zeros((2, 2)), np.eye(2)))]
env = crw.ControlledRandomWalk(**env_params)
agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.all(action == np.zeros((4, 1)))
def test_random_heuristic_agent_multiple_buffers_multiple_resources_eye_cond():
# Two stations, each one scheduling two buffers, each of them having to be above safety stock.
safety_stock = 9.9
state = 10 * np.ones((4, 1))
env_params = get_null_env_params(state)
env_params["constituency_matrix"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]])
env_params["list_boundary_constraint_matrices"] = [np.hstack((np.eye(2), np.zeros((2, 2)))),
np.hstack((np.zeros((2, 2)), np.eye(2)))]
env = crw.ControlledRandomWalk(**env_params)
agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.sum(action) == 2
def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_starving():
# Two stations, each one scheduling two buffers, the sum of their size having to be above
# safety stock.
safety_stock = 10
state = 4 * np.ones((4, 1))
env_params = get_null_env_params(state)
env_params["constituency_matrix"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]])
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 1, 0, 0]]),
np.array([[0, 0, 1, 1]])]
env = crw.ControlledRandomWalk(**env_params)
agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.all(action == np.zeros((4, 1)))
def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond():
# Two stations, each one scheduling two buffers, the sum of their size having to be above safety
# stock.
safety_stock = 9.9
state = 5 * np.ones((4, 1))
env_params = get_null_env_params(state)
env_params["constituency_matrix"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]])
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 1, 0, 0]]),
np.array([[0, 0, 1, 1]])]
env = crw.ControlledRandomWalk(**env_params)
agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.sum(action) == 2
def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_1_starve():
# Two stations, each one scheduling two buffers, the sum of their size having to be above safety
# stock.
safety_stock = 9.9
state = np.array([4, 5, 5, 5])[:, None]
env_params = get_null_env_params(state)
env_params["constituency_matrix"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]])
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 1, 0, 0]]),
np.array([[0, 0, 1, 1]])]
env = crw.ControlledRandomWalk(**env_params)
agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.sum(action[2:4]) == 1 and np.all(action[0:2] == np.zeros((2, 1)))
def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_2_starve():
# Two stations, each one scheduling two buffers, the sum of their size having to be above safety
# stock.
safety_stock = 9.9
state = np.array([5, 5, 5, 4])[:, None]
env_params = get_null_env_params(state)
env_params["constituency_matrix"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]])
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 1, 0, 0]]),
np.array([[0, 0, 1, 1]])]
env = crw.ControlledRandomWalk(**env_params)
agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.sum(action[0:2]) == 1 and np.all(action[2:4] == np.zeros((2, 1)))
def test_priority_nonidling_heuristic_agent_starving():
# Single server queue
buffer_processing_matrix = - np.ones((1, 1))
safety_stock = 10.0
state = 5 * np.ones((1, 1))
env_params = get_null_env_params(
state, buffer_processing_matrix=buffer_processing_matrix)
env_params["constituency_matrix"] = np.ones((1, 1))
env_params["list_boundary_constraint_matrices"] = [np.ones((1, 1))]
env = crw.ControlledRandomWalk(**env_params)
agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert action == np.zeros((1, 1))
def test_priority_nonidling_heuristic_agent():
# Single server queue
buffer_processing_matrix = - np.ones((1, 1))
safety_stock = 4.0
state = 5 * np.ones((1, 1))
env_params = get_null_env_params(
state, buffer_processing_matrix=buffer_processing_matrix)
env_params["constituency_matrix"] = np.ones((1, 1))
env_params["list_boundary_constraint_matrices"] = [np.ones((1, 1))]
env = crw.ControlledRandomWalk(**env_params)
agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock, name="LPAAgent")
action = agent.map_state_to_actions(state)
assert action == np.ones((1, 1))
def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_starving():
# One station scheduling two buffers, one larger than the other, but both below safety stock.
buffer_processing_matrix = - np.eye(2)
safety_stock = 10.0
state = np.array([9, 5])[:, None]
env_params = get_null_env_params(
state, buffer_processing_matrix=buffer_processing_matrix)
env_params["constituency_matrix"] = np.ones((1, 2))
env_params["list_boundary_constraint_matrices"] = [np.eye(2)]
env = crw.ControlledRandomWalk(**env_params)
agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.all(action == np.zeros((2, 1)))
def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_small_one_starve():
# One station scheduling two buffers, one larger than the other. Only the large one is above
# safety stock.
buffer_processing_matrix = - np.eye(2)
safety_stock = 10.0
state = np.array([9, 11])[:, None]
env_params = get_null_env_params(
state, buffer_processing_matrix=buffer_processing_matrix)
env_params["constituency_matrix"] = np.ones((1, 2))
env_params["list_boundary_constraint_matrices"] = [np.eye(2)]
env = crw.ControlledRandomWalk(**env_params)
agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.all(action == np.array([0, 1])[:, None])
def test_priority_nonidling_heuristic_agent_multi_buffers_eye_cond_small_one_starve_reverse_ord():
# One station scheduling two buffers, one larger than the other. Only the large one is above
# safety stock, swap order with respect to previous test.
buffer_processing_matrix = - np.eye(2)
safety_stock = 10.0
state = np.array([11, 10])[:, None]
env_params = get_null_env_params(
state, buffer_processing_matrix=buffer_processing_matrix)
env_params["constituency_matrix"] = np.ones((1, 2))
env_params["list_boundary_constraint_matrices"] = [np.eye(2)]
env = crw.ControlledRandomWalk(**env_params)
agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.all(action == np.array([1, 0])[:, None])
def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition():
# One station scheduling two buffers, one larger than the other, both above safety stock.
buffer_processing_matrix = - np.eye(2)
safety_stock = 10.0
state = np.array([30, 20])[:, None]
env_params = get_null_env_params(
state, buffer_processing_matrix=buffer_processing_matrix)
env_params["constituency_matrix"] = np.ones((1, 2))
env_params["list_boundary_constraint_matrices"] = [np.eye(2)]
env = crw.ControlledRandomWalk(**env_params)
agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.all(action == np.array([1, 0])[:, None])
def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_reverse_order():
# One station scheduling two buffers, one larger than the other, both above safety stock (swap
# order with previous test).
buffer_processing_matrix = - np.eye(2)
safety_stock = 10.0
state = np.array([20, 30])[:, None]
env_params = get_null_env_params(
state, buffer_processing_matrix=buffer_processing_matrix)
env_params["constituency_matrix"] = np.ones((1, 2))
env_params["list_boundary_constraint_matrices"] = [np.eye(2)]
env = crw.ControlledRandomWalk(**env_params)
agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.all(action == np.array([0, 1])[:, None])
def test_priority_nonidling_heuristic_agent_multiple_largest_buffers_eye_condition():
# One station scheduling two buffers, both equal and above safety stock.
buffer_processing_matrix = - np.eye(2)
safety_stock = 10.0
state = np.array([11, 11])[:, None]
env_params = get_null_env_params(
state, buffer_processing_matrix=buffer_processing_matrix)
env_params["constituency_matrix"] = np.ones((1, 2))
env_params["list_boundary_constraint_matrices"] = [np.eye(2)]
env = crw.ControlledRandomWalk(**env_params)
agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.sum(action) == 1
def test_priority_nonidling_heuristic_agent_multiple_buffers_multiple_resources_sum_cond():
# Two stations, each one scheduling two buffers. The stations are connected in serial, such that
# buffer 1 is connected with buffer 3, and 2 with 4.
# Kind of condition doesn't matter since the largest buffer has to be above safety stock in this
# agent.
buffer_processing_matrix = np.array([[-1, 0, 0, 0],
[0, -1, 0, 0],
[1, 0, -1, 0],
[0, 1, 0, -1]])
safety_stock = 10
state = np.array([30, 20, 20, 30])[:, None]
env_params = get_null_env_params(
state, buffer_processing_matrix=buffer_processing_matrix)
env_params["constituency_matrix"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]])
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 1, 0, 0]]),
np.array([[0, 0, 1, 1]])]
env = crw.ControlledRandomWalk(**env_params)
agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.all(action == np.array([1, 0, 0, 1])[:, None])
def test_priority_nonidling_heuristic_agent_multi_buffers_and_resources_sum_cond_reverse_order():
# Two stations, each one scheduling two buffers. The stations are connected in serial, such that
# buffer 1 is connected with buffer 3, and 2 with 4.
# Kind of condition doesn't matter since the largest buffer has to be above safety stock in this
# agent.
buffer_processing_matrix = np.array([[-1, 0, 0, 0],
[0, -1, 0, 0],
[1, 0, -1, 0],
[0, 1, 0, -1]])
safety_stock = 10
state = np.array([20, 30, 30, 20])[:, None]
env_params = get_null_env_params(
state, buffer_processing_matrix=buffer_processing_matrix)
env_params["constituency_matrix"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]])
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 1, 0, 0]]),
np.array([[0, 0, 1, 1]])]
env = crw.ControlledRandomWalk(**env_params)
agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.all(action == np.array([0, 1, 1, 0])[:, None])
def test_priority_nonidling_heuristic_agent_multiple_buffers_and_resources_sum_cond_2_starve():
# Two stations, each one scheduling two buffers. The stations are connected in serial, such that
# buffer 1 is connected with buffer 3, and 2 with 4.
# Kind of condition doesn't matter since the largest buffer has to be above safety stock in this
# agent.
buffer_processing_matrix = np.array([[-1, 0, 0, 0],
[0, -1, 0, 0],
[1, 0, -1, 0],
[0, 1, 0, -1]])
safety_stock = 10
state = np.array([30, 20, 9, 5])[:, None]
env_params = get_null_env_params(
state, buffer_processing_matrix=buffer_processing_matrix)
env_params["constituency_matrix"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]])
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 1, 0, 0]]),
np.array([[0, 0, 1, 1]])]
env = crw.ControlledRandomWalk(**env_params)
agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.all(action == np.array([1, 0, 0, 0])[:, None])
def test_priority_nonidling_heuristic_agent_multiple_largest_buffers_multiple_resources_sum_cond():
# Two stations, each one scheduling two buffers. The stations are connected in serial, such that
# buffer 1 is connected with buffer 3, and 2 with 4.
# Kind of condition doesn't matter since the largest buffer has to be above safety stock in this
# agent.
buffer_processing_matrix = np.array([[-1, 0, 0, 0],
[0, -1, 0, 0],
[1, 0, -1, 0],
[0, 1, 0, -1]])
safety_stock = 10
state = np.array([30, 30, 9, 5])[:, None]
env_params = get_null_env_params(
state, buffer_processing_matrix=buffer_processing_matrix)
env_params["constituency_matrix"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]])
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 1, 0, 0]]),
np.array([[0, 0, 1, 1]])]
env = crw.ControlledRandomWalk(**env_params)
agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.sum(action[0:2]) == 1 and np.all(action[2:4] == np.array([0, 0])[:, None])
def test_priority_nonidling_heuristic_agent_multiple_activities_buffers_and_resources():
# Two stations, each one scheduling two buffers. The stations are connected in serial, such that
# buffer 1 is connected with buffer 3, and 2 with 4.
# Kind of condition doesn't matter since the largest buffer has to be above safety stock in this
# agent.
buffer_processing_matrix = np.array([[-1, 0, -1, 0],
[0, -1, 0, 0],
[1, 0, -1, 0],
[0, 1, 0, -1]])
safety_stock = 10
state = np.array([30, 20, 5, 20])[:, None]
env_params = get_null_env_params(
state, buffer_processing_matrix=buffer_processing_matrix)
env_params["constituency_matrix"] = np.array([[1, 1, 1, 0], [0, 0, 1, 1]])
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 1, 0, 0], [0, 0, 1, 0]]),
np.array([[0, 0, 1, 1]])]
env = crw.ControlledRandomWalk(**env_params)
agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert (action[0] + action[2] == 1) and (action[1]
== 0) and (action[3] == 1)
def test_priority_heuristic_agent_init_all_resources_given():
priorities = {0: 0, 1: 2, 2: 5}
state = np.array([[10.], [10.], [10.]])
buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0., 0., 0.],
[0., -1., -1., 0., 0., 0., 0.],
[0., 0., 0., -1., -1., -1., -1.]])
constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.],
[0., 1., 1., 0., 0., 0., 0.],
[0., 0., 0., 1., 1., 1., 1.]])
env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix,
constituency_matrix=constituency_matrix)
env = crw.ControlledRandomWalk(**env_params)
agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities)
assert agent.priorities == priorities
def test_priority_heuristic_agent_init_not_all_resources_given():
priorities = {0: 0, 2: 5}
expected_priorities = {0: 0, 1: None, 2: 5}
state = np.array([[10.], [10.], [10.]])
buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0., 0., 0.],
[0., -1., -1., 0., 0., 0., 0.],
[0., 0., 0., -1., -1., -1., -1.]])
constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.],
[0., 1., 1., 0., 0., 0., 0.],
[0., 0., 0., 1., 1., 1., 1.]])
env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix,
constituency_matrix=constituency_matrix)
env = crw.ControlledRandomWalk(**env_params)
agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities)
assert agent.priorities == expected_priorities
def test_priority_heuristic_agent_init_wrong_activity_given():
priorities = {0: 0, 2: 1}
state = np.array([[10.], [10.], [10.]])
buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0., 0., 0.],
[0., -1., -1., 0., 0., 0., 0.],
[0., 0., 0., -1., -1., -1., -1.]])
constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.],
[0., 1., 1., 0., 0., 0., 0.],
[0., 0., 0., 1., 1., 1., 1.]])
env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix,
constituency_matrix=constituency_matrix)
env = crw.ControlledRandomWalk(**env_params)
with pytest.raises(AssertionError):
_ = custom_priority_agent.CustomActivityPriorityAgent(env, priorities)
def test_priority_heuristic_agent_sample_random_action_empty_possible_actions():
priorities = {0: 0, 1: 2, 2: 5}
state = np.array([[10.], [10.], [0.]])
buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0., 0., 0.],
[0., -1., -1., 0., 0., 0., 0.],
[0., 0., 0., -1., -1., -1., -1.]])
constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.],
[0., 1., 1., 0., 0., 0., 0.],
[0., 0., 0., 1., 1., 1., 1.]])
env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix,
constituency_matrix=constituency_matrix)
env = crw.ControlledRandomWalk(**env_params)
agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities)
action = np.array([[1], [0], [1], [0], [0], [0], [0]])
activities = np.array([3, 4, 5, 6])
updated_action = agent.sample_random_actions(state=state, action=action, activities=activities)
assert np.all(action == updated_action)
def test_priority_heuristic_agent_sample_random_action_one_possible_action():
priorities = {0: 0, 1: 2, 2: 5}
state = np.array([[10.], [0.], [10.]])
buffer_processing_matrix = np.array([[-1., 0., -1., 0., 0., 0., 0.],
[0., -1., 0., 0., 0., 0., 0.],
[0., 0., 0., -1., -1., -1., -1.]])
constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.],
[0., 1., 1., 0., 0., 0., 0.],
[0., 0., 0., 1., 1., 1., 1.]])
env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix,
constituency_matrix=constituency_matrix)
env = crw.ControlledRandomWalk(**env_params)
agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities)
action = np.array([[1], [0], [0], [0], [1], [0], [0]])
expected_action = np.array([[1], [0], [1], [0], [1], [0], [0]])
activities = np.array([1, 2])
updated_action = agent.sample_random_actions(state=state, action=action, activities=activities)
assert np.all(expected_action == updated_action)
def test_priority_heuristic_agent_sample_random_action_multiple_possible_actions():
np.random.seed(42)
priorities = {0: 0, 1: 2, 2: 5}
state = np.array([[10.], [10.], [10.]])
buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0., 0., 0.],
[0., -1., -1., 0., 0., 0., 0.],
[0., 0., 0., -1., -1., -1., -1.]])
constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.],
[0., 1., 1., 0., 0., 0., 0.],
[0., 0., 0., 1., 1., 1., 1.]])
env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix,
constituency_matrix=constituency_matrix)
env = crw.ControlledRandomWalk(**env_params)
agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities)
action = np.array([[1], [0], [1], [0], [0], [0], [0]])
expected_action = np.array([[1], [0], [1], [0.25], [0.25], [0.25], [0.25]])
activities = np.array([3, 4, 5, 6])
num_sim = int(1e4)
updated_action = np.zeros((buffer_processing_matrix.shape[1], num_sim))
for i in np.arange(num_sim):
updated_action[:, [i]] = agent.sample_random_actions(state=state, action=action,
activities=activities)
average_updated_action = np.sum(updated_action, axis=1) / float(num_sim)
np.testing.assert_array_almost_equal(average_updated_action.reshape(-1, 1), expected_action,
decimal=2)
def test_priority_heuristic_agent_map_state_to_actions_no_priorities():
np.random.seed(42)
priorities = {}
state = np.array([[10.], [10.], [10.]])
buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0., 0., 0.],
[0., -1., -1., 0., 0., 0., 0.],
[0., 0., 0., -1., -1., -1., -1.]])
constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.],
[0., 1., 1., 0., 0., 0., 0.],
[0., 0., 0., 1., 1., 1., 1.]])
env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix,
constituency_matrix=constituency_matrix)
env = crw.ControlledRandomWalk(**env_params)
agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities)
expected_action = np.array([[1], [0.5], [0.5], [0.25], [0.25], [0.25], [0.25]])
num_sim = int(1e4)
actions = np.zeros((buffer_processing_matrix.shape[1], num_sim))
for i in np.arange(num_sim):
actions[:, [i]] = agent.map_state_to_actions(state=state)
average_action = np.sum(actions, axis=1) / float(num_sim)
np.testing.assert_array_almost_equal(average_action.reshape(-1, 1), expected_action,
decimal=2)
def test_priority_heuristic_agent_map_state_to_actions_full_priorities_empty_buffer():
np.random.seed(41)
priorities = {0: 0, 1: 2, 2: 5}
state = np.array([[10.], [10.], [0.]])
buffer_processing_matrix = np.array([[-1., 0., 0., -1., -1., 0., -1.],
[0., -1., -1., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., -1., 0.]])
constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.],
[0., 1., 1., 0., 0., 0., 0.],
[0., 0., 0., 1., 1., 1., 1.]])
constituency_matrix_original = constituency_matrix.copy()
env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix,
constituency_matrix=constituency_matrix)
env = crw.ControlledRandomWalk(**env_params)
agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities)
expected_average_action = np.array([[1], [0.], [1.], [0.33], [0.33], [0.], [0.33]])
num_sim = 5e4
actions = np.zeros((buffer_processing_matrix.shape[1], int(num_sim)))
for i in np.arange(int(num_sim)):
actions[:, [i]] = agent.map_state_to_actions(state=state)
average_action = np.sum(actions, axis=1) / num_sim
np.testing.assert_array_almost_equal(average_action.reshape(-1, 1), expected_average_action,
decimal=2)
assert np.all(constituency_matrix_original == constituency_matrix)
assert np.all(constituency_matrix_original == env.constituency_matrix)
def test_priority_heuristic_agent_map_state_to_actions_full_priorities_full_buffer():
priorities = {0: 0, 1: 2, 2: 5}
state = np.array([[10.], [10.], [10.]])
buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0., 0., 0.],
[0., -1., -1., 0., 0., 0., 0.],
[0., 0., 0., -1., -1., -1., -1.]])
constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.],
[0., 1., 1., 0., 0., 0., 0.],
[0., 0., 0., 1., 1., 1., 1.]])
env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix,
constituency_matrix=constituency_matrix)
env = crw.ControlledRandomWalk(**env_params)
agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities)
expected_action = np.array([[1], [0], [1], [0], [0], [1], [0]])
action = agent.map_state_to_actions(state=state)
assert np.all(action == expected_action)
|
[
"snc.environments.job_generators.discrete_review_job_generator.DeterministicDiscreteReviewJobGenerator",
"numpy.zeros_like",
"numpy.ones_like",
"numpy.random.seed",
"numpy.sum",
"snc.agents.general_heuristics.longest_buffer_priority_agent.LongestBufferPriorityAgent",
"numpy.zeros",
"numpy.ones",
"snc.agents.general_heuristics.random_nonidling_agent.RandomNonIdlingAgent",
"pytest.raises",
"snc.environments.state_initialiser.DeterministicCRWStateInitialiser",
"numpy.array",
"numpy.arange",
"numpy.eye",
"snc.environments.controlled_random_walk.ControlledRandomWalk",
"snc.agents.general_heuristics.custom_activity_priority_agent.CustomActivityPriorityAgent",
"numpy.all"
] |
[((696, 715), 'numpy.ones_like', 'np.ones_like', (['state'], {}), '(state)\n', (708, 715), True, 'import numpy as np\n'), ((1739, 1754), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (1746, 1754), True, 'import numpy as np\n'), ((1838, 1876), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (1862, 1876), True, 'import snc.environments.controlled_random_walk as crw\n'), ((1889, 1951), 'snc.agents.general_heuristics.random_nonidling_agent.RandomNonIdlingAgent', 'random_nonidling_agent.RandomNonIdlingAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (1932, 1951), True, 'import snc.agents.general_heuristics.random_nonidling_agent as random_nonidling_agent\n'), ((2241, 2256), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (2248, 2256), True, 'import numpy as np\n'), ((2340, 2378), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (2364, 2378), True, 'import snc.environments.controlled_random_walk as crw\n'), ((2391, 2453), 'snc.agents.general_heuristics.random_nonidling_agent.RandomNonIdlingAgent', 'random_nonidling_agent.RandomNonIdlingAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (2434, 2453), True, 'import snc.agents.general_heuristics.random_nonidling_agent as random_nonidling_agent\n'), ((2840, 2855), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (2847, 2855), True, 'import numpy as np\n'), ((2933, 2971), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (2957, 2971), True, 'import snc.environments.controlled_random_walk as crw\n'), ((2984, 3046), 'snc.agents.general_heuristics.random_nonidling_agent.RandomNonIdlingAgent', 'random_nonidling_agent.RandomNonIdlingAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (3027, 3046), True, 'import snc.agents.general_heuristics.random_nonidling_agent as random_nonidling_agent\n'), ((3434, 3449), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (3441, 3449), True, 'import numpy as np\n'), ((3527, 3565), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (3551, 3565), True, 'import snc.environments.controlled_random_walk as crw\n'), ((3578, 3640), 'snc.agents.general_heuristics.random_nonidling_agent.RandomNonIdlingAgent', 'random_nonidling_agent.RandomNonIdlingAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (3621, 3640), True, 'import snc.agents.general_heuristics.random_nonidling_agent as random_nonidling_agent\n'), ((4030, 4045), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (4037, 4045), True, 'import numpy as np\n'), ((4129, 4167), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (4153, 4167), True, 'import snc.environments.controlled_random_walk as crw\n'), ((4180, 4242), 'snc.agents.general_heuristics.random_nonidling_agent.RandomNonIdlingAgent', 'random_nonidling_agent.RandomNonIdlingAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (4223, 4242), True, 'import snc.agents.general_heuristics.random_nonidling_agent as random_nonidling_agent\n'), ((4638, 4653), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (4645, 4653), True, 'import numpy as np\n'), ((4737, 4775), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', 
([], {}), '(**env_params)\n', (4761, 4775), True, 'import snc.environments.controlled_random_walk as crw\n'), ((4788, 4850), 'snc.agents.general_heuristics.random_nonidling_agent.RandomNonIdlingAgent', 'random_nonidling_agent.RandomNonIdlingAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (4831, 4850), True, 'import snc.agents.general_heuristics.random_nonidling_agent as random_nonidling_agent\n'), ((5259, 5297), 'numpy.array', 'np.array', (['[[1, 1, 0, 0], [0, 0, 1, 1]]'], {}), '([[1, 1, 0, 0], [0, 0, 1, 1]])\n', (5267, 5297), True, 'import numpy as np\n'), ((5503, 5541), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (5527, 5541), True, 'import snc.environments.controlled_random_walk as crw\n'), ((5554, 5616), 'snc.agents.general_heuristics.random_nonidling_agent.RandomNonIdlingAgent', 'random_nonidling_agent.RandomNonIdlingAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (5597, 5616), True, 'import snc.agents.general_heuristics.random_nonidling_agent as random_nonidling_agent\n'), ((6031, 6069), 'numpy.array', 'np.array', (['[[1, 1, 0, 0], [0, 0, 1, 1]]'], {}), '([[1, 1, 0, 0], [0, 0, 1, 1]])\n', (6039, 6069), True, 'import numpy as np\n'), ((6275, 6313), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (6299, 6313), True, 'import snc.environments.controlled_random_walk as crw\n'), ((6326, 6388), 'snc.agents.general_heuristics.random_nonidling_agent.RandomNonIdlingAgent', 'random_nonidling_agent.RandomNonIdlingAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (6369, 6388), True, 'import snc.agents.general_heuristics.random_nonidling_agent as random_nonidling_agent\n'), ((6810, 6848), 'numpy.array', 'np.array', (['[[1, 1, 0, 0], [0, 0, 1, 1]]'], {}), '([[1, 1, 0, 0], [0, 0, 1, 1]])\n', (6818, 6848), True, 'import numpy as np\n'), ((7022, 7060), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (7046, 7060), True, 'import snc.environments.controlled_random_walk as crw\n'), ((7073, 7135), 'snc.agents.general_heuristics.random_nonidling_agent.RandomNonIdlingAgent', 'random_nonidling_agent.RandomNonIdlingAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (7116, 7135), True, 'import snc.agents.general_heuristics.random_nonidling_agent as random_nonidling_agent\n'), ((7564, 7602), 'numpy.array', 'np.array', (['[[1, 1, 0, 0], [0, 0, 1, 1]]'], {}), '([[1, 1, 0, 0], [0, 0, 1, 1]])\n', (7572, 7602), True, 'import numpy as np\n'), ((7776, 7814), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (7800, 7814), True, 'import snc.environments.controlled_random_walk as crw\n'), ((7827, 7889), 'snc.agents.general_heuristics.random_nonidling_agent.RandomNonIdlingAgent', 'random_nonidling_agent.RandomNonIdlingAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (7870, 7889), True, 'import snc.agents.general_heuristics.random_nonidling_agent as random_nonidling_agent\n'), ((8324, 8362), 'numpy.array', 'np.array', (['[[1, 1, 0, 0], [0, 0, 1, 1]]'], {}), '([[1, 1, 0, 0], [0, 0, 1, 1]])\n', (8332, 8362), True, 'import numpy as np\n'), ((8536, 8574), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (8560, 8574), True, 'import snc.environments.controlled_random_walk as crw\n'), ((8587, 
8649), 'snc.agents.general_heuristics.random_nonidling_agent.RandomNonIdlingAgent', 'random_nonidling_agent.RandomNonIdlingAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (8630, 8649), True, 'import snc.agents.general_heuristics.random_nonidling_agent as random_nonidling_agent\n'), ((9133, 9171), 'numpy.array', 'np.array', (['[[1, 1, 0, 0], [0, 0, 1, 1]]'], {}), '([[1, 1, 0, 0], [0, 0, 1, 1]])\n', (9141, 9171), True, 'import numpy as np\n'), ((9345, 9383), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (9369, 9383), True, 'import snc.environments.controlled_random_walk as crw\n'), ((9396, 9458), 'snc.agents.general_heuristics.random_nonidling_agent.RandomNonIdlingAgent', 'random_nonidling_agent.RandomNonIdlingAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (9439, 9458), True, 'import snc.agents.general_heuristics.random_nonidling_agent as random_nonidling_agent\n'), ((9919, 9934), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (9926, 9934), True, 'import numpy as np\n'), ((10018, 10056), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (10042, 10056), True, 'import snc.environments.controlled_random_walk as crw\n'), ((10069, 10137), 'snc.agents.general_heuristics.longest_buffer_priority_agent.LongestBufferPriorityAgent', 'longest_priority_agent.LongestBufferPriorityAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (10118, 10137), True, 'import snc.agents.general_heuristics.longest_buffer_priority_agent as longest_priority_agent\n'), ((10546, 10561), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (10553, 10561), True, 'import numpy as np\n'), ((10645, 10683), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (10669, 10683), True, 'import snc.environments.controlled_random_walk as crw\n'), ((10696, 10786), 'snc.agents.general_heuristics.longest_buffer_priority_agent.LongestBufferPriorityAgent', 'longest_priority_agent.LongestBufferPriorityAgent', (['env', 'safety_stock'], {'name': '"""LPAAgent"""'}), "(env, safety_stock, name=\n 'LPAAgent')\n", (10745, 10786), True, 'import snc.agents.general_heuristics.longest_buffer_priority_agent as longest_priority_agent\n'), ((11302, 11317), 'numpy.ones', 'np.ones', (['(1, 2)'], {}), '((1, 2))\n', (11309, 11317), True, 'import numpy as np\n'), ((11395, 11433), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (11419, 11433), True, 'import snc.environments.controlled_random_walk as crw\n'), ((11446, 11514), 'snc.agents.general_heuristics.longest_buffer_priority_agent.LongestBufferPriorityAgent', 'longest_priority_agent.LongestBufferPriorityAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (11495, 11514), True, 'import snc.agents.general_heuristics.longest_buffer_priority_agent as longest_priority_agent\n'), ((12072, 12087), 'numpy.ones', 'np.ones', (['(1, 2)'], {}), '((1, 2))\n', (12079, 12087), True, 'import numpy as np\n'), ((12165, 12203), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (12189, 12203), True, 'import snc.environments.controlled_random_walk as crw\n'), ((12216, 12284), 'snc.agents.general_heuristics.longest_buffer_priority_agent.LongestBufferPriorityAgent', 
'longest_priority_agent.LongestBufferPriorityAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (12265, 12284), True, 'import snc.agents.general_heuristics.longest_buffer_priority_agent as longest_priority_agent\n'), ((12898, 12913), 'numpy.ones', 'np.ones', (['(1, 2)'], {}), '((1, 2))\n', (12905, 12913), True, 'import numpy as np\n'), ((12991, 13029), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (13015, 13029), True, 'import snc.environments.controlled_random_walk as crw\n'), ((13042, 13110), 'snc.agents.general_heuristics.longest_buffer_priority_agent.LongestBufferPriorityAgent', 'longest_priority_agent.LongestBufferPriorityAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (13091, 13110), True, 'import snc.agents.general_heuristics.longest_buffer_priority_agent as longest_priority_agent\n'), ((13638, 13653), 'numpy.ones', 'np.ones', (['(1, 2)'], {}), '((1, 2))\n', (13645, 13653), True, 'import numpy as np\n'), ((13731, 13769), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (13755, 13769), True, 'import snc.environments.controlled_random_walk as crw\n'), ((13782, 13850), 'snc.agents.general_heuristics.longest_buffer_priority_agent.LongestBufferPriorityAgent', 'longest_priority_agent.LongestBufferPriorityAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (13831, 13850), True, 'import snc.agents.general_heuristics.longest_buffer_priority_agent as longest_priority_agent\n'), ((14430, 14445), 'numpy.ones', 'np.ones', (['(1, 2)'], {}), '((1, 2))\n', (14437, 14445), True, 'import numpy as np\n'), ((14523, 14561), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (14547, 14561), True, 'import snc.environments.controlled_random_walk as crw\n'), ((14574, 14642), 'snc.agents.general_heuristics.longest_buffer_priority_agent.LongestBufferPriorityAgent', 'longest_priority_agent.LongestBufferPriorityAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (14623, 14642), True, 'import snc.agents.general_heuristics.longest_buffer_priority_agent as longest_priority_agent\n'), ((15161, 15176), 'numpy.ones', 'np.ones', (['(1, 2)'], {}), '((1, 2))\n', (15168, 15176), True, 'import numpy as np\n'), ((15254, 15292), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (15278, 15292), True, 'import snc.environments.controlled_random_walk as crw\n'), ((15305, 15373), 'snc.agents.general_heuristics.longest_buffer_priority_agent.LongestBufferPriorityAgent', 'longest_priority_agent.LongestBufferPriorityAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (15354, 15373), True, 'import snc.agents.general_heuristics.longest_buffer_priority_agent as longest_priority_agent\n'), ((15849, 15919), 'numpy.array', 'np.array', (['[[-1, 0, 0, 0], [0, -1, 0, 0], [1, 0, -1, 0], [0, 1, 0, -1]]'], {}), '([[-1, 0, 0, 0], [0, -1, 0, 0], [1, 0, -1, 0], [0, 1, 0, -1]])\n', (15857, 15919), True, 'import numpy as np\n'), ((16257, 16295), 'numpy.array', 'np.array', (['[[1, 1, 0, 0], [0, 0, 1, 1]]'], {}), '([[1, 1, 0, 0], [0, 0, 1, 1]])\n', (16265, 16295), True, 'import numpy as np\n'), ((16469, 16507), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (16493, 16507), True, 'import snc.environments.controlled_random_walk 
as crw\n'), ((16520, 16588), 'snc.agents.general_heuristics.longest_buffer_priority_agent.LongestBufferPriorityAgent', 'longest_priority_agent.LongestBufferPriorityAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (16569, 16588), True, 'import snc.agents.general_heuristics.longest_buffer_priority_agent as longest_priority_agent\n'), ((17100, 17170), 'numpy.array', 'np.array', (['[[-1, 0, 0, 0], [0, -1, 0, 0], [1, 0, -1, 0], [0, 1, 0, -1]]'], {}), '([[-1, 0, 0, 0], [0, -1, 0, 0], [1, 0, -1, 0], [0, 1, 0, -1]])\n', (17108, 17170), True, 'import numpy as np\n'), ((17508, 17546), 'numpy.array', 'np.array', (['[[1, 1, 0, 0], [0, 0, 1, 1]]'], {}), '([[1, 1, 0, 0], [0, 0, 1, 1]])\n', (17516, 17546), True, 'import numpy as np\n'), ((17720, 17758), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (17744, 17758), True, 'import snc.environments.controlled_random_walk as crw\n'), ((17771, 17839), 'snc.agents.general_heuristics.longest_buffer_priority_agent.LongestBufferPriorityAgent', 'longest_priority_agent.LongestBufferPriorityAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (17820, 17839), True, 'import snc.agents.general_heuristics.longest_buffer_priority_agent as longest_priority_agent\n'), ((18349, 18419), 'numpy.array', 'np.array', (['[[-1, 0, 0, 0], [0, -1, 0, 0], [1, 0, -1, 0], [0, 1, 0, -1]]'], {}), '([[-1, 0, 0, 0], [0, -1, 0, 0], [1, 0, -1, 0], [0, 1, 0, -1]])\n', (18357, 18419), True, 'import numpy as np\n'), ((18755, 18793), 'numpy.array', 'np.array', (['[[1, 1, 0, 0], [0, 0, 1, 1]]'], {}), '([[1, 1, 0, 0], [0, 0, 1, 1]])\n', (18763, 18793), True, 'import numpy as np\n'), ((18967, 19005), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (18991, 19005), True, 'import snc.environments.controlled_random_walk as crw\n'), ((19018, 19086), 'snc.agents.general_heuristics.longest_buffer_priority_agent.LongestBufferPriorityAgent', 'longest_priority_agent.LongestBufferPriorityAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (19067, 19086), True, 'import snc.agents.general_heuristics.longest_buffer_priority_agent as longest_priority_agent\n'), ((19600, 19670), 'numpy.array', 'np.array', (['[[-1, 0, 0, 0], [0, -1, 0, 0], [1, 0, -1, 0], [0, 1, 0, -1]]'], {}), '([[-1, 0, 0, 0], [0, -1, 0, 0], [1, 0, -1, 0], [0, 1, 0, -1]])\n', (19608, 19670), True, 'import numpy as np\n'), ((20006, 20044), 'numpy.array', 'np.array', (['[[1, 1, 0, 0], [0, 0, 1, 1]]'], {}), '([[1, 1, 0, 0], [0, 0, 1, 1]])\n', (20014, 20044), True, 'import numpy as np\n'), ((20218, 20256), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (20242, 20256), True, 'import snc.environments.controlled_random_walk as crw\n'), ((20269, 20337), 'snc.agents.general_heuristics.longest_buffer_priority_agent.LongestBufferPriorityAgent', 'longest_priority_agent.LongestBufferPriorityAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (20318, 20337), True, 'import snc.agents.general_heuristics.longest_buffer_priority_agent as longest_priority_agent\n'), ((20868, 20939), 'numpy.array', 'np.array', (['[[-1, 0, -1, 0], [0, -1, 0, 0], [1, 0, -1, 0], [0, 1, 0, -1]]'], {}), '([[-1, 0, -1, 0], [0, -1, 0, 0], [1, 0, -1, 0], [0, 1, 0, -1]])\n', (20876, 20939), True, 'import numpy as np\n'), ((21276, 21314), 'numpy.array', 'np.array', (['[[1, 1, 1, 0], [0, 0, 1, 1]]'], {}), '([[1, 1, 1, 0], [0, 
0, 1, 1]])\n', (21284, 21314), True, 'import numpy as np\n'), ((21502, 21540), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (21526, 21540), True, 'import snc.environments.controlled_random_walk as crw\n'), ((21553, 21621), 'snc.agents.general_heuristics.longest_buffer_priority_agent.LongestBufferPriorityAgent', 'longest_priority_agent.LongestBufferPriorityAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (21602, 21621), True, 'import snc.agents.general_heuristics.longest_buffer_priority_agent as longest_priority_agent\n'), ((21908, 21942), 'numpy.array', 'np.array', (['[[10.0], [10.0], [10.0]]'], {}), '([[10.0], [10.0], [10.0]])\n', (21916, 21942), True, 'import numpy as np\n'), ((21971, 22103), 'numpy.array', 'np.array', (['[[-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -1.0, -1.0, 0.0, 0.0, 0.0, 0.0\n ], [0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]]'], {}), '([[-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -1.0, -1.0, 0.0, 0.0,\n 0.0, 0.0], [0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]])\n', (21979, 22103), True, 'import numpy as np\n'), ((22187, 22313), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, \n 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])\n', (22195, 22313), True, 'import numpy as np\n'), ((22550, 22588), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (22574, 22588), True, 'import snc.environments.controlled_random_walk as crw\n'), ((22601, 22667), 'snc.agents.general_heuristics.custom_activity_priority_agent.CustomActivityPriorityAgent', 'custom_priority_agent.CustomActivityPriorityAgent', (['env', 'priorities'], {}), '(env, priorities)\n', (22650, 22667), True, 'import snc.agents.general_heuristics.custom_activity_priority_agent as custom_priority_agent\n'), ((22869, 22903), 'numpy.array', 'np.array', (['[[10.0], [10.0], [10.0]]'], {}), '([[10.0], [10.0], [10.0]])\n', (22877, 22903), True, 'import numpy as np\n'), ((22932, 23064), 'numpy.array', 'np.array', (['[[-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -1.0, -1.0, 0.0, 0.0, 0.0, 0.0\n ], [0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]]'], {}), '([[-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -1.0, -1.0, 0.0, 0.0,\n 0.0, 0.0], [0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]])\n', (22940, 23064), True, 'import numpy as np\n'), ((23148, 23274), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, \n 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])\n', (23156, 23274), True, 'import numpy as np\n'), ((23511, 23549), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (23535, 23549), True, 'import snc.environments.controlled_random_walk as crw\n'), ((23562, 23628), 'snc.agents.general_heuristics.custom_activity_priority_agent.CustomActivityPriorityAgent', 'custom_priority_agent.CustomActivityPriorityAgent', (['env', 'priorities'], {}), '(env, priorities)\n', (23611, 23628), True, 'import snc.agents.general_heuristics.custom_activity_priority_agent as custom_priority_agent\n'), ((23788, 23822), 'numpy.array', 'np.array', (['[[10.0], [10.0], [10.0]]'], {}), '([[10.0], [10.0], [10.0]])\n', (23796, 
23822), True, 'import numpy as np\n'), ((23851, 23983), 'numpy.array', 'np.array', (['[[-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -1.0, -1.0, 0.0, 0.0, 0.0, 0.0\n ], [0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]]'], {}), '([[-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -1.0, -1.0, 0.0, 0.0,\n 0.0, 0.0], [0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]])\n', (23859, 23983), True, 'import numpy as np\n'), ((24067, 24193), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, \n 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])\n', (24075, 24193), True, 'import numpy as np\n'), ((24430, 24468), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (24454, 24468), True, 'import snc.environments.controlled_random_walk as crw\n'), ((24719, 24752), 'numpy.array', 'np.array', (['[[10.0], [10.0], [0.0]]'], {}), '([[10.0], [10.0], [0.0]])\n', (24727, 24752), True, 'import numpy as np\n'), ((24781, 24913), 'numpy.array', 'np.array', (['[[-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -1.0, -1.0, 0.0, 0.0, 0.0, 0.0\n ], [0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]]'], {}), '([[-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -1.0, -1.0, 0.0, 0.0,\n 0.0, 0.0], [0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]])\n', (24789, 24913), True, 'import numpy as np\n'), ((24997, 25123), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, \n 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])\n', (25005, 25123), True, 'import numpy as np\n'), ((25360, 25398), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (25384, 25398), True, 'import snc.environments.controlled_random_walk as crw\n'), ((25411, 25477), 'snc.agents.general_heuristics.custom_activity_priority_agent.CustomActivityPriorityAgent', 'custom_priority_agent.CustomActivityPriorityAgent', (['env', 'priorities'], {}), '(env, priorities)\n', (25460, 25477), True, 'import snc.agents.general_heuristics.custom_activity_priority_agent as custom_priority_agent\n'), ((25492, 25537), 'numpy.array', 'np.array', (['[[1], [0], [1], [0], [0], [0], [0]]'], {}), '([[1], [0], [1], [0], [0], [0], [0]])\n', (25500, 25537), True, 'import numpy as np\n'), ((25555, 25577), 'numpy.array', 'np.array', (['[3, 4, 5, 6]'], {}), '([3, 4, 5, 6])\n', (25563, 25577), True, 'import numpy as np\n'), ((25689, 25721), 'numpy.all', 'np.all', (['(action == updated_action)'], {}), '(action == updated_action)\n', (25695, 25721), True, 'import numpy as np\n'), ((25850, 25883), 'numpy.array', 'np.array', (['[[10.0], [0.0], [10.0]]'], {}), '([[10.0], [0.0], [10.0]])\n', (25858, 25883), True, 'import numpy as np\n'), ((25912, 26044), 'numpy.array', 'np.array', (['[[-1.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0], [0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0\n ], [0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]]'], {}), '([[-1.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0], [0.0, -1.0, 0.0, 0.0, 0.0,\n 0.0, 0.0], [0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]])\n', (25920, 26044), True, 'import numpy as np\n'), ((26128, 26254), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 
1.0, 0.0, 0.0, \n 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])\n', (26136, 26254), True, 'import numpy as np\n'), ((26491, 26529), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (26515, 26529), True, 'import snc.environments.controlled_random_walk as crw\n'), ((26542, 26608), 'snc.agents.general_heuristics.custom_activity_priority_agent.CustomActivityPriorityAgent', 'custom_priority_agent.CustomActivityPriorityAgent', (['env', 'priorities'], {}), '(env, priorities)\n', (26591, 26608), True, 'import snc.agents.general_heuristics.custom_activity_priority_agent as custom_priority_agent\n'), ((26623, 26668), 'numpy.array', 'np.array', (['[[1], [0], [0], [0], [1], [0], [0]]'], {}), '([[1], [0], [0], [0], [1], [0], [0]])\n', (26631, 26668), True, 'import numpy as np\n'), ((26691, 26736), 'numpy.array', 'np.array', (['[[1], [0], [1], [0], [1], [0], [0]]'], {}), '([[1], [0], [1], [0], [1], [0], [0]])\n', (26699, 26736), True, 'import numpy as np\n'), ((26754, 26770), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (26762, 26770), True, 'import numpy as np\n'), ((26882, 26923), 'numpy.all', 'np.all', (['(expected_action == updated_action)'], {}), '(expected_action == updated_action)\n', (26888, 26923), True, 'import numpy as np\n'), ((27014, 27032), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (27028, 27032), True, 'import numpy as np\n'), ((27081, 27115), 'numpy.array', 'np.array', (['[[10.0], [10.0], [10.0]]'], {}), '([[10.0], [10.0], [10.0]])\n', (27089, 27115), True, 'import numpy as np\n'), ((27144, 27276), 'numpy.array', 'np.array', (['[[-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -1.0, -1.0, 0.0, 0.0, 0.0, 0.0\n ], [0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]]'], {}), '([[-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -1.0, -1.0, 0.0, 0.0,\n 0.0, 0.0], [0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]])\n', (27152, 27276), True, 'import numpy as np\n'), ((27360, 27486), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, \n 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])\n', (27368, 27486), True, 'import numpy as np\n'), ((27723, 27761), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (27747, 27761), True, 'import snc.environments.controlled_random_walk as crw\n'), ((27774, 27840), 'snc.agents.general_heuristics.custom_activity_priority_agent.CustomActivityPriorityAgent', 'custom_priority_agent.CustomActivityPriorityAgent', (['env', 'priorities'], {}), '(env, priorities)\n', (27823, 27840), True, 'import snc.agents.general_heuristics.custom_activity_priority_agent as custom_priority_agent\n'), ((27855, 27900), 'numpy.array', 'np.array', (['[[1], [0], [1], [0], [0], [0], [0]]'], {}), '([[1], [0], [1], [0], [0], [0], [0]])\n', (27863, 27900), True, 'import numpy as np\n'), ((27923, 27980), 'numpy.array', 'np.array', (['[[1], [0], [1], [0.25], [0.25], [0.25], [0.25]]'], {}), '([[1], [0], [1], [0.25], [0.25], [0.25], [0.25]])\n', (27931, 27980), True, 'import numpy as np\n'), ((27998, 28020), 'numpy.array', 'np.array', (['[3, 4, 5, 6]'], {}), '([3, 4, 5, 6])\n', (28006, 28020), True, 'import numpy as np\n'), ((28065, 28119), 'numpy.zeros', 'np.zeros', (['(buffer_processing_matrix.shape[1], num_sim)'], {}), '((buffer_processing_matrix.shape[1], num_sim))\n', (28073, 
28119), True, 'import numpy as np\n'), ((28133, 28151), 'numpy.arange', 'np.arange', (['num_sim'], {}), '(num_sim)\n', (28142, 28151), True, 'import numpy as np\n'), ((28630, 28648), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (28644, 28648), True, 'import numpy as np\n'), ((28681, 28715), 'numpy.array', 'np.array', (['[[10.0], [10.0], [10.0]]'], {}), '([[10.0], [10.0], [10.0]])\n', (28689, 28715), True, 'import numpy as np\n'), ((28744, 28876), 'numpy.array', 'np.array', (['[[-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -1.0, -1.0, 0.0, 0.0, 0.0, 0.0\n ], [0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]]'], {}), '([[-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -1.0, -1.0, 0.0, 0.0,\n 0.0, 0.0], [0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]])\n', (28752, 28876), True, 'import numpy as np\n'), ((28960, 29086), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, \n 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])\n', (28968, 29086), True, 'import numpy as np\n'), ((29323, 29361), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (29347, 29361), True, 'import snc.environments.controlled_random_walk as crw\n'), ((29374, 29440), 'snc.agents.general_heuristics.custom_activity_priority_agent.CustomActivityPriorityAgent', 'custom_priority_agent.CustomActivityPriorityAgent', (['env', 'priorities'], {}), '(env, priorities)\n', (29423, 29440), True, 'import snc.agents.general_heuristics.custom_activity_priority_agent as custom_priority_agent\n'), ((29464, 29525), 'numpy.array', 'np.array', (['[[1], [0.5], [0.5], [0.25], [0.25], [0.25], [0.25]]'], {}), '([[1], [0.5], [0.5], [0.25], [0.25], [0.25], [0.25]])\n', (29472, 29525), True, 'import numpy as np\n'), ((29563, 29617), 'numpy.zeros', 'np.zeros', (['(buffer_processing_matrix.shape[1], num_sim)'], {}), '((buffer_processing_matrix.shape[1], num_sim))\n', (29571, 29617), True, 'import numpy as np\n'), ((29631, 29649), 'numpy.arange', 'np.arange', (['num_sim'], {}), '(num_sim)\n', (29640, 29649), True, 'import numpy as np\n'), ((30013, 30031), 'numpy.random.seed', 'np.random.seed', (['(41)'], {}), '(41)\n', (30027, 30031), True, 'import numpy as np\n'), ((30080, 30113), 'numpy.array', 'np.array', (['[[10.0], [10.0], [0.0]]'], {}), '([[10.0], [10.0], [0.0]])\n', (30088, 30113), True, 'import numpy as np\n'), ((30142, 30275), 'numpy.array', 'np.array', (['[[-1.0, 0.0, 0.0, -1.0, -1.0, 0.0, -1.0], [0.0, -1.0, -1.0, 0.0, 0.0, 0.0, \n 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0]]'], {}), '([[-1.0, 0.0, 0.0, -1.0, -1.0, 0.0, -1.0], [0.0, -1.0, -1.0, 0.0, \n 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0]])\n', (30150, 30275), True, 'import numpy as np\n'), ((30358, 30484), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, \n 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])\n', (30366, 30484), True, 'import numpy as np\n'), ((30783, 30821), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (30807, 30821), True, 'import snc.environments.controlled_random_walk as crw\n'), ((30834, 30900), 'snc.agents.general_heuristics.custom_activity_priority_agent.CustomActivityPriorityAgent', 
'custom_priority_agent.CustomActivityPriorityAgent', (['env', 'priorities'], {}), '(env, priorities)\n', (30883, 30900), True, 'import snc.agents.general_heuristics.custom_activity_priority_agent as custom_priority_agent\n'), ((30932, 30992), 'numpy.array', 'np.array', (['[[1], [0.0], [1.0], [0.33], [0.33], [0.0], [0.33]]'], {}), '([[1], [0.0], [1.0], [0.33], [0.33], [0.0], [0.33]])\n', (30940, 30992), True, 'import numpy as np\n'), ((31401, 31460), 'numpy.all', 'np.all', (['(constituency_matrix_original == constituency_matrix)'], {}), '(constituency_matrix_original == constituency_matrix)\n', (31407, 31460), True, 'import numpy as np\n'), ((31472, 31535), 'numpy.all', 'np.all', (['(constituency_matrix_original == env.constituency_matrix)'], {}), '(constituency_matrix_original == env.constituency_matrix)\n', (31478, 31535), True, 'import numpy as np\n'), ((31672, 31706), 'numpy.array', 'np.array', (['[[10.0], [10.0], [10.0]]'], {}), '([[10.0], [10.0], [10.0]])\n', (31680, 31706), True, 'import numpy as np\n'), ((31735, 31867), 'numpy.array', 'np.array', (['[[-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -1.0, -1.0, 0.0, 0.0, 0.0, 0.0\n ], [0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]]'], {}), '([[-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -1.0, -1.0, 0.0, 0.0,\n 0.0, 0.0], [0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]])\n', (31743, 31867), True, 'import numpy as np\n'), ((31951, 32077), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, \n 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])\n', (31959, 32077), True, 'import numpy as np\n'), ((32314, 32352), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (32338, 32352), True, 'import snc.environments.controlled_random_walk as crw\n'), ((32365, 32431), 'snc.agents.general_heuristics.custom_activity_priority_agent.CustomActivityPriorityAgent', 'custom_priority_agent.CustomActivityPriorityAgent', (['env', 'priorities'], {}), '(env, priorities)\n', (32414, 32431), True, 'import snc.agents.general_heuristics.custom_activity_priority_agent as custom_priority_agent\n'), ((32455, 32500), 'numpy.array', 'np.array', (['[[1], [0], [1], [0], [0], [1], [0]]'], {}), '([[1], [0], [1], [0], [0], [1], [0]])\n', (32463, 32500), True, 'import numpy as np\n'), ((32565, 32598), 'numpy.all', 'np.all', (['(action == expected_action)'], {}), '(action == expected_action)\n', (32571, 32598), True, 'import numpy as np\n'), ((972, 1012), 'numpy.zeros', 'np.zeros', (['(num_resources, num_resources)'], {}), '((num_resources, num_resources))\n', (980, 1012), True, 'import numpy as np\n'), ((1076, 1096), 'numpy.zeros_like', 'np.zeros_like', (['state'], {}), '(state)\n', (1089, 1096), True, 'import numpy as np\n'), ((1118, 1138), 'numpy.zeros_like', 'np.zeros_like', (['state'], {}), '(state)\n', (1131, 1138), True, 'import numpy as np\n'), ((1217, 1338), 'snc.environments.job_generators.discrete_review_job_generator.DeterministicDiscreteReviewJobGenerator', 'drjg.DeterministicDiscreteReviewJobGenerator', (['arrival_rate', 'buffer_processing_matrix'], {'sim_time_interval': 'time_interval'}), '(arrival_rate,\n buffer_processing_matrix, sim_time_interval=time_interval)\n', (1261, 1338), True, 'import snc.environments.job_generators.discrete_review_job_generator as drjg\n'), ((1387, 1429), 
'snc.environments.state_initialiser.DeterministicCRWStateInitialiser', 'si.DeterministicCRWStateInitialiser', (['state'], {}), '(state)\n', (1422, 1429), True, 'import snc.environments.state_initialiser as si\n'), ((1639, 1654), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (1646, 1654), True, 'import numpy as np\n'), ((1810, 1825), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (1817, 1825), True, 'import numpy as np\n'), ((2020, 2036), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (2028, 2036), True, 'import numpy as np\n'), ((2141, 2156), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (2148, 2156), True, 'import numpy as np\n'), ((2312, 2327), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (2319, 2327), True, 'import numpy as np\n'), ((2522, 2537), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (2529, 2537), True, 'import numpy as np\n'), ((2740, 2755), 'numpy.ones', 'np.ones', (['(3, 1)'], {}), '((3, 1))\n', (2747, 2755), True, 'import numpy as np\n'), ((2911, 2920), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (2917, 2920), True, 'import numpy as np\n'), ((3334, 3349), 'numpy.ones', 'np.ones', (['(3, 1)'], {}), '((3, 1))\n', (3341, 3349), True, 'import numpy as np\n'), ((3505, 3514), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (3511, 3514), True, 'import numpy as np\n'), ((3699, 3713), 'numpy.sum', 'np.sum', (['action'], {}), '(action)\n', (3705, 3713), True, 'import numpy as np\n'), ((3930, 3945), 'numpy.ones', 'np.ones', (['(3, 1)'], {}), '((3, 1))\n', (3937, 3945), True, 'import numpy as np\n'), ((4101, 4116), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (4108, 4116), True, 'import numpy as np\n'), ((4538, 4553), 'numpy.ones', 'np.ones', (['(3, 1)'], {}), '((3, 1))\n', (4545, 4553), True, 'import numpy as np\n'), ((4709, 4724), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (4716, 4724), True, 'import numpy as np\n'), ((4909, 4923), 'numpy.sum', 'np.sum', (['action'], {}), '(action)\n', (4915, 4923), True, 'import numpy as np\n'), ((5159, 5174), 'numpy.ones', 'np.ones', (['(4, 1)'], {}), '((4, 1))\n', (5166, 5174), True, 'import numpy as np\n'), ((5931, 5946), 'numpy.ones', 'np.ones', (['(4, 1)'], {}), '((4, 1))\n', (5938, 5946), True, 'import numpy as np\n'), ((6447, 6461), 'numpy.sum', 'np.sum', (['action'], {}), '(action)\n', (6453, 6461), True, 'import numpy as np\n'), ((6710, 6725), 'numpy.ones', 'np.ones', (['(4, 1)'], {}), '((4, 1))\n', (6717, 6725), True, 'import numpy as np\n'), ((6904, 6928), 'numpy.array', 'np.array', (['[[1, 1, 0, 0]]'], {}), '([[1, 1, 0, 0]])\n', (6912, 6928), True, 'import numpy as np\n'), ((6985, 7009), 'numpy.array', 'np.array', (['[[0, 0, 1, 1]]'], {}), '([[0, 0, 1, 1]])\n', (6993, 7009), True, 'import numpy as np\n'), ((7464, 7479), 'numpy.ones', 'np.ones', (['(4, 1)'], {}), '((4, 1))\n', (7471, 7479), True, 'import numpy as np\n'), ((7658, 7682), 'numpy.array', 'np.array', (['[[1, 1, 0, 0]]'], {}), '([[1, 1, 0, 0]])\n', (7666, 7682), True, 'import numpy as np\n'), ((7739, 7763), 'numpy.array', 'np.array', (['[[0, 0, 1, 1]]'], {}), '([[0, 0, 1, 1]])\n', (7747, 7763), True, 'import numpy as np\n'), ((7948, 7962), 'numpy.sum', 'np.sum', (['action'], {}), '(action)\n', (7954, 7962), True, 'import numpy as np\n'), ((8208, 8230), 'numpy.array', 'np.array', (['[4, 5, 5, 5]'], {}), '([4, 5, 5, 5])\n', (8216, 8230), True, 'import numpy as np\n'), ((8418, 8442), 'numpy.array', 'np.array', (['[[1, 1, 0, 0]]'], {}), '([[1, 1, 0, 0]])\n', (8426, 8442), True, 'import 
numpy as np\n'), ((8499, 8523), 'numpy.array', 'np.array', (['[[0, 0, 1, 1]]'], {}), '([[0, 0, 1, 1]])\n', (8507, 8523), True, 'import numpy as np\n'), ((9017, 9039), 'numpy.array', 'np.array', (['[5, 5, 5, 4]'], {}), '([5, 5, 5, 4])\n', (9025, 9039), True, 'import numpy as np\n'), ((9227, 9251), 'numpy.array', 'np.array', (['[[1, 1, 0, 0]]'], {}), '([[1, 1, 0, 0]])\n', (9235, 9251), True, 'import numpy as np\n'), ((9308, 9332), 'numpy.array', 'np.array', (['[[0, 0, 1, 1]]'], {}), '([[0, 0, 1, 1]])\n', (9316, 9332), True, 'import numpy as np\n'), ((9703, 9718), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (9710, 9718), True, 'import numpy as np\n'), ((9759, 9774), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (9766, 9774), True, 'import numpy as np\n'), ((9990, 10005), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (9997, 10005), True, 'import numpy as np\n'), ((10206, 10222), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (10214, 10222), True, 'import numpy as np\n'), ((10331, 10346), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (10338, 10346), True, 'import numpy as np\n'), ((10386, 10401), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (10393, 10401), True, 'import numpy as np\n'), ((10617, 10632), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (10624, 10632), True, 'import numpy as np\n'), ((10850, 10865), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (10857, 10865), True, 'import numpy as np\n'), ((11086, 11095), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (11092, 11095), True, 'import numpy as np\n'), ((11132, 11148), 'numpy.array', 'np.array', (['[9, 5]'], {}), '([9, 5])\n', (11140, 11148), True, 'import numpy as np\n'), ((11373, 11382), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (11379, 11382), True, 'import numpy as np\n'), ((11855, 11864), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (11861, 11864), True, 'import numpy as np\n'), ((11901, 11918), 'numpy.array', 'np.array', (['[9, 11]'], {}), '([9, 11])\n', (11909, 11918), True, 'import numpy as np\n'), ((12143, 12152), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (12149, 12152), True, 'import numpy as np\n'), ((12680, 12689), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (12686, 12689), True, 'import numpy as np\n'), ((12726, 12744), 'numpy.array', 'np.array', (['[11, 10]'], {}), '([11, 10])\n', (12734, 12744), True, 'import numpy as np\n'), ((12969, 12978), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (12975, 12978), True, 'import numpy as np\n'), ((13420, 13429), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (13426, 13429), True, 'import numpy as np\n'), ((13466, 13484), 'numpy.array', 'np.array', (['[30, 20]'], {}), '([30, 20])\n', (13474, 13484), True, 'import numpy as np\n'), ((13709, 13718), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (13715, 13718), True, 'import numpy as np\n'), ((14212, 14221), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (14218, 14221), True, 'import numpy as np\n'), ((14258, 14276), 'numpy.array', 'np.array', (['[20, 30]'], {}), '([20, 30])\n', (14266, 14276), True, 'import numpy as np\n'), ((14501, 14510), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (14507, 14510), True, 'import numpy as np\n'), ((14943, 14952), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (14949, 14952), True, 'import numpy as np\n'), ((14989, 15007), 'numpy.array', 'np.array', (['[11, 11]'], {}), '([11, 11])\n', (14997, 15007), True, 'import numpy as np\n'), ((15232, 15241), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', 
(15238, 15241), True, 'import numpy as np\n'), ((15432, 15446), 'numpy.sum', 'np.sum', (['action'], {}), '(action)\n', (15438, 15446), True, 'import numpy as np\n'), ((16077, 16103), 'numpy.array', 'np.array', (['[30, 20, 20, 30]'], {}), '([30, 20, 20, 30])\n', (16085, 16103), True, 'import numpy as np\n'), ((16351, 16375), 'numpy.array', 'np.array', (['[[1, 1, 0, 0]]'], {}), '([[1, 1, 0, 0]])\n', (16359, 16375), True, 'import numpy as np\n'), ((16432, 16456), 'numpy.array', 'np.array', (['[[0, 0, 1, 1]]'], {}), '([[0, 0, 1, 1]])\n', (16440, 16456), True, 'import numpy as np\n'), ((17328, 17354), 'numpy.array', 'np.array', (['[20, 30, 30, 20]'], {}), '([20, 30, 30, 20])\n', (17336, 17354), True, 'import numpy as np\n'), ((17602, 17626), 'numpy.array', 'np.array', (['[[1, 1, 0, 0]]'], {}), '([[1, 1, 0, 0]])\n', (17610, 17626), True, 'import numpy as np\n'), ((17683, 17707), 'numpy.array', 'np.array', (['[[0, 0, 1, 1]]'], {}), '([[0, 0, 1, 1]])\n', (17691, 17707), True, 'import numpy as np\n'), ((18577, 18601), 'numpy.array', 'np.array', (['[30, 20, 9, 5]'], {}), '([30, 20, 9, 5])\n', (18585, 18601), True, 'import numpy as np\n'), ((18849, 18873), 'numpy.array', 'np.array', (['[[1, 1, 0, 0]]'], {}), '([[1, 1, 0, 0]])\n', (18857, 18873), True, 'import numpy as np\n'), ((18930, 18954), 'numpy.array', 'np.array', (['[[0, 0, 1, 1]]'], {}), '([[0, 0, 1, 1]])\n', (18938, 18954), True, 'import numpy as np\n'), ((19828, 19852), 'numpy.array', 'np.array', (['[30, 30, 9, 5]'], {}), '([30, 30, 9, 5])\n', (19836, 19852), True, 'import numpy as np\n'), ((20100, 20124), 'numpy.array', 'np.array', (['[[1, 1, 0, 0]]'], {}), '([[1, 1, 0, 0]])\n', (20108, 20124), True, 'import numpy as np\n'), ((20181, 20205), 'numpy.array', 'np.array', (['[[0, 0, 1, 1]]'], {}), '([[0, 0, 1, 1]])\n', (20189, 20205), True, 'import numpy as np\n'), ((21097, 21122), 'numpy.array', 'np.array', (['[30, 20, 5, 20]'], {}), '([30, 20, 5, 20])\n', (21105, 21122), True, 'import numpy as np\n'), ((21370, 21408), 'numpy.array', 'np.array', (['[[1, 1, 0, 0], [0, 0, 1, 0]]'], {}), '([[1, 1, 0, 0], [0, 0, 1, 0]])\n', (21378, 21408), True, 'import numpy as np\n'), ((21465, 21489), 'numpy.array', 'np.array', (['[[0, 0, 1, 1]]'], {}), '([[0, 0, 1, 1]])\n', (21473, 21489), True, 'import numpy as np\n'), ((24478, 24507), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (24491, 24507), False, 'import pytest\n'), ((24521, 24587), 'snc.agents.general_heuristics.custom_activity_priority_agent.CustomActivityPriorityAgent', 'custom_priority_agent.CustomActivityPriorityAgent', (['env', 'priorities'], {}), '(env, priorities)\n', (24570, 24587), True, 'import snc.agents.general_heuristics.custom_activity_priority_agent as custom_priority_agent\n'), ((28355, 28385), 'numpy.sum', 'np.sum', (['updated_action'], {'axis': '(1)'}), '(updated_action, axis=1)\n', (28361, 28385), True, 'import numpy as np\n'), ((29738, 29761), 'numpy.sum', 'np.sum', (['actions'], {'axis': '(1)'}), '(actions, axis=1)\n', (29744, 29761), True, 'import numpy as np\n'), ((31207, 31230), 'numpy.sum', 'np.sum', (['actions'], {'axis': '(1)'}), '(actions, axis=1)\n', (31213, 31230), True, 'import numpy as np\n'), ((3122, 3138), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (3130, 3138), True, 'import numpy as np\n'), ((4318, 4334), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (4326, 4334), True, 'import numpy as np\n'), ((5692, 5708), 'numpy.zeros', 'np.zeros', (['(4, 1)'], {}), '((4, 1))\n', (5700, 5708), True, 'import numpy as 
np\n'), ((7211, 7227), 'numpy.zeros', 'np.zeros', (['(4, 1)'], {}), '((4, 1))\n', (7219, 7227), True, 'import numpy as np\n'), ((8708, 8727), 'numpy.sum', 'np.sum', (['action[2:4]'], {}), '(action[2:4])\n', (8714, 8727), True, 'import numpy as np\n'), ((9517, 9536), 'numpy.sum', 'np.sum', (['action[0:2]'], {}), '(action[0:2])\n', (9523, 9536), True, 'import numpy as np\n'), ((11590, 11606), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (11598, 11606), True, 'import numpy as np\n'), ((20396, 20415), 'numpy.sum', 'np.sum', (['action[0:2]'], {}), '(action[0:2])\n', (20402, 20415), True, 'import numpy as np\n'), ((867, 904), 'numpy.ones', 'np.ones', (['(num_buffers, num_resources)'], {}), '((num_buffers, num_resources))\n', (874, 904), True, 'import numpy as np\n'), ((5364, 5373), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (5370, 5373), True, 'import numpy as np\n'), ((5375, 5391), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (5383, 5391), True, 'import numpy as np\n'), ((5461, 5477), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (5469, 5477), True, 'import numpy as np\n'), ((5479, 5488), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (5485, 5488), True, 'import numpy as np\n'), ((6136, 6145), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (6142, 6145), True, 'import numpy as np\n'), ((6147, 6163), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (6155, 6163), True, 'import numpy as np\n'), ((6233, 6249), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (6241, 6249), True, 'import numpy as np\n'), ((6251, 6260), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (6257, 6260), True, 'import numpy as np\n'), ((8759, 8775), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (8767, 8775), True, 'import numpy as np\n'), ((9568, 9584), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (9576, 9584), True, 'import numpy as np\n'), ((12360, 12376), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (12368, 12376), True, 'import numpy as np\n'), ((13186, 13202), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (13194, 13202), True, 'import numpy as np\n'), ((13926, 13942), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (13934, 13942), True, 'import numpy as np\n'), ((14718, 14734), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (14726, 14734), True, 'import numpy as np\n'), ((16664, 16686), 'numpy.array', 'np.array', (['[1, 0, 0, 1]'], {}), '([1, 0, 0, 1])\n', (16672, 16686), True, 'import numpy as np\n'), ((17915, 17937), 'numpy.array', 'np.array', (['[0, 1, 1, 0]'], {}), '([0, 1, 1, 0])\n', (17923, 17937), True, 'import numpy as np\n'), ((19162, 19184), 'numpy.array', 'np.array', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (19170, 19184), True, 'import numpy as np\n'), ((20447, 20463), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (20455, 20463), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import pprint
def missingIsNan(s):
return np.nan if s == b'?' else float(s)
def makeStandardize(X):
means = X.mean(axis = 0)
stds = X.std(axis = 0)
def standardize(origX):
return (origX - means) / stds
def unstandardize(stdX):
return stds * stdX + means
return (standardize, unstandardize)
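# Illustrative example (not part of the original script): the two returned closures are inverses
# of each other, so standardizing and then unstandardizing recovers the original data up to
# floating-point error. The variable names below are hypothetical.
#   std_fn, unstd_fn = makeStandardize(Xtrain)
#   XtrainS = std_fn(Xtrain)                      # zero mean, unit std per column
#   assert np.allclose(unstd_fn(XtrainS), Xtrain)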
if __name__ == '__main__':
# 1. Load the data.
data = np.loadtxt("Data\\auto-mpg.data", usecols = range(8), converters = {3: missingIsNan})
# 2. 'Clean' the data.
Cdata = data[~np.isnan(data).any(axis = 1)]
# 3. Split it into input (X) and target (T)
# Target = mpg (first column)
# Input = remaining - columns 2 to 7
T = Cdata[:, 0:1]
X = Cdata[:, 1:]
    # (Appending the column of 1s to X is deferred until after standardization; see step 6.)
    # X1 = np.insert(X, 0, 1, 1)
    # 4. Split the data into training (80%) and testing (20%) sets
nRows = X.shape[0]
nTrain = int(round(0.8*nRows))
nTest = nRows - nTrain
# Shuffle row numbers
rows = np.arange(nRows)
np.random.shuffle(rows)
trainIndices = rows[:nTrain]
testIndices = rows[nTrain:]
# Check that training and testing sets are disjoint
# print(np.intersect1d(trainIndices, testIndices))
Xtrain = X[trainIndices, :]
Ttrain = T[trainIndices, :]
Xtest = X[testIndices, :]
Ttest = T[testIndices, :]
# 5. Standardize
(standardize, unstandardize) = makeStandardize(Xtrain)
XtrainS = standardize(Xtrain)
XtestS = standardize(Xtest)
    # 6. Tack on a column of 1s (bias/intercept term)
XtrainS1 = np.insert(XtrainS, 0, 1, 1)
XtestS1 = np.insert(XtestS, 0, 1, 1)
# 7. Find weights (solve for w)
w = np.linalg.lstsq(XtrainS1.T @ XtrainS1, XtrainS1.T @ Ttrain, rcond = None)[0]
# 8. Predict
predict = XtestS1 @ w
    # 9. Compute RMSE (root-mean-square error)
    rmse = np.sqrt(np.mean((predict - Ttest)**2))
    print(rmse)
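    # Illustrative addition (not in the original script): the fitted weights can also be applied
    # to the training inputs, so the training-set RMSE can be compared with the test-set RMSE
    # above as a quick over-/under-fitting check. predictTrain and rmseTrain are new names
    # introduced only for this example.
    predictTrain = XtrainS1 @ w
    rmseTrain = np.sqrt(np.mean((predictTrain - Ttrain)**2))
    print("train RMSE:", rmseTrain)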
|
[
"numpy.linalg.lstsq",
"numpy.isnan",
"numpy.insert",
"numpy.mean",
"numpy.arange",
"numpy.random.shuffle"
] |
[((1004, 1020), 'numpy.arange', 'np.arange', (['nRows'], {}), '(nRows)\n', (1013, 1020), True, 'import numpy as np\n'), ((1023, 1046), 'numpy.random.shuffle', 'np.random.shuffle', (['rows'], {}), '(rows)\n', (1040, 1046), True, 'import numpy as np\n'), ((1517, 1544), 'numpy.insert', 'np.insert', (['XtrainS', '(0)', '(1)', '(1)'], {}), '(XtrainS, 0, 1, 1)\n', (1526, 1544), True, 'import numpy as np\n'), ((1557, 1583), 'numpy.insert', 'np.insert', (['XtestS', '(0)', '(1)', '(1)'], {}), '(XtestS, 0, 1, 1)\n', (1566, 1583), True, 'import numpy as np\n'), ((1627, 1698), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['(XtrainS1.T @ XtrainS1)', '(XtrainS1.T @ Ttrain)'], {'rcond': 'None'}), '(XtrainS1.T @ XtrainS1, XtrainS1.T @ Ttrain, rcond=None)\n', (1642, 1698), True, 'import numpy as np\n'), ((1784, 1815), 'numpy.mean', 'np.mean', (['((predict - Ttest) ** 2)'], {}), '((predict - Ttest) ** 2)\n', (1791, 1815), True, 'import numpy as np\n'), ((563, 577), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (571, 577), True, 'import numpy as np\n')]
|
from typing import Tuple, Dict
import random
import numpy as np
import torch
from torchvision import datasets, transforms
from sklearn.metrics.pairwise import cosine_distances
from matplotlib import pyplot as plt
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
CIFAR10_ANNOTATION = {
0: 'airplane',
1: 'automobile',
2: 'bird',
3: 'cat',
4: 'deer',
5: 'dog',
6: 'frog',
7: 'horse',
8: 'ship',
9: 'truck'
}
def plot_cifar_image(image, label=""):
plt.title(label)
plt.imshow(image.permute(1, 2, 0).numpy())
plt.show()
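# Illustrative usage (a sketch; the dataset construction below is an assumption, not part of
# this module):
#   train_set = datasets.CIFAR10(root='./data', train=True, download=True,
#                                transform=transforms.ToTensor())
#   image, label = train_set[0]
#   plot_cifar_image(image, CIFAR10_ANNOTATION[label])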
class AccumulateStats:
def __enter__(self):
pass
    def __exit__(self, exc_type, exc_value, traceback):
pass
def __call__(self):
pass
class AverageMeter(object):
"""
Computes and stores the average and current value
"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class MeterLogger:
    def __init__(self, meters: Tuple[str, ...], writer: SummaryWriter):
self.average_meters: Dict[str, AverageMeter] = {k: AverageMeter() for k in meters}
self._writer = writer
def update(self, name: str, val, n=1):
self.average_meters[name].update(val, n)
def reset(self):
for meter in self.average_meters.values():
meter.reset()
def write(self, step, prefix):
for name, meter in self.average_meters.items():
tag = prefix + '/' + name
self._writer.add_scalar(tag, meter.avg, step)
class ImageLogger:
def __init__(self, writer: SummaryWriter, mean=None, std=None):
self._writer = writer
self.mean = mean
self.std = std
if self.mean is not None:
self.mean = torch.tensor(self.mean).reshape(1, 3, 1, 1)
if self.std is not None:
self.std = torch.tensor(self.std).reshape(1, 3, 1, 1)
def write(self, images, reconstruction, step, prefix):
images = images.cpu()
reconstruction = reconstruction.cpu()
if self.mean is not None and self.std is not None:
images = images * self.std + self.mean
reconstruction = reconstruction * self.std + self.mean
image_tag = prefix + '/' + 'image'
self._writer.add_images(image_tag, images, step)
reconstruction_tag = prefix + '/' + 'reconstruction'
self._writer.add_images(reconstruction_tag, reconstruction, step)
class VQEmbeddingLogger:
def __init__(self, writer: SummaryWriter):
self._writer = writer
def write(self, embeddings, step):
embeddings = embeddings.detach().cpu().numpy()
sim = cosine_distances(embeddings)
self._writer.add_image('cos_sim_vq_embeddings', sim, step, dataformats='HW')
def double_soft_orthogonality(weights: torch.Tensor):
a = torch.norm(weights @ weights.t() - torch.eye(weights.shape[0]).to(weights.device)) ** 2
b = torch.norm(weights.t() @ weights - torch.eye(weights.shape[1]).to(weights.device)) ** 2
return a + b
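# Illustrative usage (editorial assumption, not from the original file): the penalty
# above can be added to a training loss to push a layer's weight matrix towards
# orthogonality, e.g. for a hypothetical `layer = torch.nn.Linear(128, 64)`:
# loss = task_loss + 1e-4 * double_soft_orthogonality(layer.weight)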
def set_random_seed(seed: int, cuda: bool = False):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if cuda:
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.random.manual_seed(seed)
torch.backends.cudnn.deterministic = True
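# Minimal usage sketch (editorial addition, not part of the original module), assuming
# the default SummaryWriter log directory; it exercises MeterLogger the way a training
# loop typically would.
if __name__ == '__main__':
    set_random_seed(42)
    writer = SummaryWriter()
    logger = MeterLogger(meters=('loss', 'accuracy'), writer=writer)
    for step in range(3):
        # accumulate per-batch values, then flush averages to TensorBoard
        logger.update('loss', 0.5 / (step + 1), n=32)
        logger.update('accuracy', 0.8 + 0.05 * step, n=32)
        logger.write(step, prefix='train')
        logger.reset()
    writer.close()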
|
[
"matplotlib.pyplot.title",
"sklearn.metrics.pairwise.cosine_distances",
"numpy.random.seed",
"matplotlib.pyplot.show",
"torch.eye",
"torch.manual_seed",
"torch.cuda.random.manual_seed",
"torch.cuda.manual_seed",
"torch.cuda.manual_seed_all",
"random.seed",
"torch.tensor"
] |
[((569, 585), 'matplotlib.pyplot.title', 'plt.title', (['label'], {}), '(label)\n', (578, 585), True, 'from matplotlib import pyplot as plt\n'), ((637, 647), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (645, 647), True, 'from matplotlib import pyplot as plt\n'), ((3421, 3438), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (3432, 3438), False, 'import random\n'), ((3443, 3463), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3457, 3463), True, 'import numpy as np\n'), ((3468, 3491), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (3485, 3491), False, 'import torch\n'), ((2982, 3010), 'sklearn.metrics.pairwise.cosine_distances', 'cosine_distances', (['embeddings'], {}), '(embeddings)\n', (2998, 3010), False, 'from sklearn.metrics.pairwise import cosine_distances\n'), ((3514, 3542), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (3536, 3542), False, 'import torch\n'), ((3551, 3583), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (3577, 3583), False, 'import torch\n'), ((3592, 3627), 'torch.cuda.random.manual_seed', 'torch.cuda.random.manual_seed', (['seed'], {}), '(seed)\n', (3621, 3627), False, 'import torch\n'), ((2073, 2096), 'torch.tensor', 'torch.tensor', (['self.mean'], {}), '(self.mean)\n', (2085, 2096), False, 'import torch\n'), ((2174, 2196), 'torch.tensor', 'torch.tensor', (['self.std'], {}), '(self.std)\n', (2186, 2196), False, 'import torch\n'), ((3196, 3223), 'torch.eye', 'torch.eye', (['weights.shape[0]'], {}), '(weights.shape[0])\n', (3205, 3223), False, 'import torch\n'), ((3292, 3319), 'torch.eye', 'torch.eye', (['weights.shape[1]'], {}), '(weights.shape[1])\n', (3301, 3319), False, 'import torch\n')]
|
import os,sys,talib,numpy,math,logging,time,datetime,numbers
from collections import OrderedDict
from baseindicator import BaseIndicator
class EMA(BaseIndicator):
def __init__(self,csdata, config = {}):
config["period"] = config.get("period",30)
config["metric"] = config.get("metric","closed")
config["label"] = config.get("label","ema")
config["label"] = "{}{}".format(config["label"],config["period"])
BaseIndicator.__init__(self,csdata,config)
self.chartcolors = ["mediumslateblue"]
self.data = None
self.analysis = None
self.get_analysis()
def get_settings(self):
return "{}".format(self.config["period"])
def get_charts(self):
data = []
for i in range(0,len(self.csdata[ self.config["metric"] ])):
if isinstance(self.data[i],numbers.Number) and self.data[i] > 0:
ts = time.mktime(datetime.datetime.strptime(self.csdata["time"][i], "%Y-%m-%dT%H:%M:%SZ").timetuple())
data.append({
"x": ts,
"y": self.data[i],
})
return [{
"key": "{}:{}".format(self.label,self.config["period"]),
"type": "line",
"color": "#FFF5EE",
"yAxis": 1,
"values": data
}]
def get_ema(self):
if self.csdata is not None:
try:
smetric = self.scaleup( self.csdata[self.config["metric"]])
data = talib.EMA( numpy.array(smetric), self.config["period"])
self.data = self.scaledown(data)
# scaledown
except Exception as ex:
self.data = None
raise ex
return self.data
def get_analysis(self ):
if self.data is None:
self.get_ema()
ema = self.data[-1]
ema1 = self.data[-2]
slope = None
for k in range(-1,-10,-1):
            if slope is None:
slope = self.data[k-1] / self.data[k]
else:
slope = slope / ( self.data[k-1] / self.data[k] )
last_price = self.csdata["closed"][-1]
closing_time = self.csdata["time"][-1]
action = None
if last_price < ema:
action = "oversold"
res = {
"weight": 2,
"time": closing_time,
"indicator-data": {
"ema": ema
},
"analysis": OrderedDict()
}
res["analysis"]["name"] = "{}:{}".format(self.get_name(),self.get_settings())
res["analysis"]["signal"] = action
res["analysis"]["ema"] = ema
res["analysis"]["slope"] = slope
res["analysis"]["order"] = ["ema"]
self.analysis = res
return res
def format_view(self):
newres = dict(self.analysis["analysis"])
newres["slope"] = "{:.4f}".format(newres["slope"])
newres["ema"] = "{:.8f}".format(newres["ema"])
return newres
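# Illustrative sketch (editorial addition, not part of the original indicator): how an
# exponential moving average like talib.EMA can be computed directly, assuming a plain
# list of closing prices and TA-Lib's convention of seeding with the simple average of
# the first `period` values.
def _ema_sketch(closes, period):
    alpha = 2.0 / (period + 1)
    # seed with the simple moving average of the first `period` closes
    ema = sum(closes[:period]) / float(period)
    out = [None] * (period - 1) + [ema]
    for price in closes[period:]:
        ema = alpha * price + (1 - alpha) * ema
        out.append(ema)
    return out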
|
[
"collections.OrderedDict",
"datetime.datetime.strptime",
"numpy.array",
"baseindicator.BaseIndicator.__init__"
] |
[((455, 499), 'baseindicator.BaseIndicator.__init__', 'BaseIndicator.__init__', (['self', 'csdata', 'config'], {}), '(self, csdata, config)\n', (477, 499), False, 'from baseindicator import BaseIndicator\n'), ((2563, 2576), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2574, 2576), False, 'from collections import OrderedDict\n'), ((1567, 1587), 'numpy.array', 'numpy.array', (['smetric'], {}), '(smetric)\n', (1578, 1587), False, 'import os, sys, talib, numpy, math, logging, time, datetime, numbers\n'), ((934, 1006), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["self.csdata['time'][i]", '"""%Y-%m-%dT%H:%M:%SZ"""'], {}), "(self.csdata['time'][i], '%Y-%m-%dT%H:%M:%SZ')\n", (960, 1006), False, 'import os, sys, talib, numpy, math, logging, time, datetime, numbers\n')]
|
# %%
import os
import sys
# os.chdir("../../..")
os.environ['DJANGO_SETTINGS_MODULE'] = 'MAKDataHub.settings'
import django
django.setup()
# %%
import math
import pickle
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from MAKDataHub.services import Services
profile_service = Services.profile_service()
storage_service = Services.storage_service()
last_run = profile_service.get_last_profile_creation_run()
## New database
full_df: pd.DataFrame = pickle.load(last_run.unlock_data.open('rb'))
# full_df = full_df.loc[full_df.DeviceId != 3].reset_index(drop = True)
## Old database
# unlock_data_path = storage_service.download_file(last_run.unlock_data_uri)
# full_df: pd.DataFrame = pickle.load(open(unlock_data_path, 'rb'))
# full_df = full_df.loc[full_df.DeviceId != '1439cbc3ad71ac06'].reset_index(drop = True)
# %%
df = full_df.iloc[:, list(range(36)) + list(range(72, 108)) + list(range(108, 144)) + list(range(180, 216)) + [216]].reset_index(drop = True)
df = df.loc[(df.DeviceId != 4) & (df.DeviceId != 7), :].reset_index(drop = True)
X, y = df.iloc[:, 0:-1], df.iloc[:, -1]
#%%
full_df.shape
# %%
display(full_df.iloc[:, list(range(36)) + [216]].groupby('DeviceId').agg([np.min, np.max]))
display(full_df.iloc[:, list(range(36, 72)) + [216]].groupby('DeviceId').agg([np.min, np.max]))
display(full_df.iloc[:, list(range(72, 108)) + [216]].groupby('DeviceId').agg([np.min, np.max]))
display(full_df.iloc[:, list(range(108, 144)) + [216]].groupby('DeviceId').agg([np.min, np.max]))
display(full_df.iloc[:, list(range(144, 180)) + [216]].groupby('DeviceId').agg([np.min, np.max]))
display(full_df.iloc[:, list(range(180, 216)) + [216]].groupby('DeviceId').agg([np.min, np.max]))
# %%
display(full_df.iloc[:, list(range(36)) + [216]].groupby('DeviceId').agg([np.mean]))
display(full_df.iloc[:, list(range(36, 72)) + [216]].groupby('DeviceId').agg([np.mean]))
display(full_df.iloc[:, list(range(72, 108)) + [216]].groupby('DeviceId').agg([np.mean]))
display(full_df.iloc[:, list(range(108, 144)) + [216]].groupby('DeviceId').agg([np.mean]))
display(full_df.iloc[:, list(range(144, 180)) + [216]].groupby('DeviceId').agg([np.mean]))
display(full_df.iloc[:, list(range(180, 216)) + [216]].groupby('DeviceId').agg([np.mean]))
# %%
sns.boxplot(df.DeviceId, df.AccMgn_mean)
# %%
sns.boxplot(df.DeviceId, df.AccMgn_median)
# %%
sns.boxplot(df.DeviceId, df.GyrMgn_amax)
# %%
sns.pairplot(df.loc[df.DeviceId != 3, :], hue="DeviceId", vars=["AccMgn_mean", "AccMgn_median"], markers='.')
# %%
test = df.loc[df.DeviceId != 3, :]
sns.swarmplot(data = test, x="DeviceId", y="RotMgn_median")
# %%
test = full_df.loc[:, :]
sns.boxplot(data = test, x="DeviceId", y="GrvMgn_amax")
# %%
print('OneClassSVM')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = y[y == device_id]
X_device = X.loc[y == device_id, :]
X_non_device = X.loc[y != device_id, :]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_device, y_device, test_size=0.2)
from sklearn.svm import OneClassSVM
estimator = OneClassSVM(random_state = 12369)
estimator.fit_predict(X_train)
tp = np.mean(estimator.predict(X_test) == 1)
fn = np.mean(estimator.predict(X_test) == -1)
    tn = np.mean(estimator.predict(X_non_device) == -1)
fp = np.mean(estimator.predict(X_non_device) == 1)
accuracy = (tp + tn) / (tp + tn + fn + fp)
precision = tp / (tp + fp)
recall = tp / (tp + fn)
fscore = 2 * recall * precision / (recall + precision)
accuracies.append(accuracy if not np.isnan(accuracy) else 0)
precisions.append(precision if not np.isnan(precision) else 0)
recalls.append(recall if not np.isnan(recall) else 0)
fscores.append(fscore if not np.isnan(fscore) else 0)
print(f'{device_id} - accuracy: {round(accuracy, 2)}, precision: {round(precision, 2)}, recall: {round(recall, 2)}')
# print(f'{device_id} - Class acc: {round(np.mean(estimator.predict(X_test) == 1), 2)}, non-class acc: {round(np.mean(estimator.predict(X_non_device) == -1), 2)}')
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('IsolationForest')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = y[y == device_id]
X_device = X.loc[y == device_id, :]
X_non_device = X.loc[y != device_id, :]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_device, y_device, test_size=0.2)
from sklearn.ensemble import IsolationForest
estimator = IsolationForest(n_estimators = 10)
estimator.fit(X_train)
tp = np.mean(estimator.predict(X_test) == 1)
fn = np.mean(estimator.predict(X_test) == -1)
    tn = np.mean(estimator.predict(X_non_device) == -1)
fp = np.mean(estimator.predict(X_non_device) == 1)
accuracy = (tp + tn) / (tp + tn + fn + fp)
precision = tp / (tp + fp)
recall = tp / (tp + fn)
fscore = 2 * recall * precision / (recall + precision)
accuracies.append(accuracy if not np.isnan(accuracy) else 0)
precisions.append(precision if not np.isnan(precision) else 0)
recalls.append(recall if not np.isnan(recall) else 0)
fscores.append(fscore if not np.isnan(fscore) else 0)
print(f'{device_id} - accuracy: {round(accuracy, 2)}, precision: {round(precision, 2)}, recall: {round(recall, 2)}')
# print(f'{device_id} - Class acc: {round(np.mean(estimator.predict(X_device) == 1), 2)}, non-class acc: {round(np.mean(estimator.predict(X_non_device) == -1), 2)}')
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('LOF')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = y[y == device_id]
X_device = X.loc[y == device_id, :]
X_non_device = X.loc[y != device_id, :]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_device, y_device, test_size=0.2)
from sklearn.neighbors import LocalOutlierFactor
estimator = LocalOutlierFactor(n_neighbors = 10, novelty = True, contamination = 'auto')
estimator.fit(X_train)
tp = np.mean(estimator.predict(X_test) == 1)
fn = np.mean(estimator.predict(X_test) == -1)
    tn = np.mean(estimator.predict(X_non_device) == -1)
fp = np.mean(estimator.predict(X_non_device) == 1)
accuracy = (tp + tn) / (tp + tn + fn + fp)
precision = tp / (tp + fp)
recall = tp / (tp + fn)
fscore = 2 * recall * precision / (recall + precision)
accuracies.append(accuracy if not np.isnan(accuracy) else 0)
precisions.append(precision if not np.isnan(precision) else 0)
recalls.append(recall if not np.isnan(recall) else 0)
fscores.append(fscore if not np.isnan(fscore) else 0)
print(f'{device_id} - accuracy: {round(accuracy, 2)}, precision: {round(precision, 2)}, recall: {round(recall, 2)}')
# print(f'{device_id} - Class acc: {round(np.mean(estimator.predict(X_device) == 1), 2)}, non-class acc: {round(np.mean(estimator.predict(X_non_device) == -1), 2)}')
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('LinearSVC')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from sklearn.svm import LinearSVC
estimator = LinearSVC(random_state = 12369)
estimator.fit(X_train, y_train)
from sklearn.metrics import classification_report
print(classification_report(y_test, estimator.predict(X_test)))
report = classification_report(y_test, estimator.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('KNeighborsClassifier')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from sklearn.neighbors import KNeighborsClassifier
estimator = KNeighborsClassifier()
estimator.fit(X_train, y_train)
from sklearn.metrics import classification_report
print(classification_report(y_test, estimator.predict(X_test)))
report = classification_report(y_test, estimator.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('GaussianNB')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from sklearn.naive_bayes import GaussianNB
estimator = GaussianNB()
estimator.fit(X_train, y_train)
from sklearn.metrics import classification_report
print(classification_report(y_test, estimator.predict(X_test)))
report = classification_report(y_test, estimator.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('RandomForestClassifier')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(random_state = 12369)
estimator.fit(X_train, y_train)
from sklearn.metrics import classification_report
print(classification_report(y_test, estimator.predict(X_test)))
report = classification_report(y_test, estimator.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('RandomForestClassifier - global model')
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state = 12369)
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(random_state = 12369)
estimator.fit(X_train, y_train)
from sklearn.metrics import classification_report
print(classification_report(y_test, estimator.predict(X_test)))
# %%
print('RandomForestClassifier - standardized')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.preprocessing import StandardScaler
X_std = StandardScaler().fit_transform(X)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_std, y_device, test_size=0.2, random_state = 12369)
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(random_state = 12369)
estimator.fit(X_train, y_train)
from sklearn.metrics import classification_report
print(classification_report(y_test, estimator.predict(X_test)))
report = classification_report(y_test, estimator.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('RandomForestClassifier + RFECV')
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state = 12369)
from yellowbrick.model_selection import RFECV
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
selector = RFECV(estimator, cv = 5, scoring='f1_weighted', step = 0.05)
selector.fit(X_train, y_train)
selector.show()
from sklearn.metrics import classification_report
print(classification_report(y_test, selector.predict(X_test)))
# %%
print('RandomForestClassifier + RFE20')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from sklearn.feature_selection import RFE
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
selector = RFE(estimator, n_features_to_select = 20, step = 0.05)
selector.fit(X_train, y_train)
from sklearn.metrics import classification_report
print(f'Device {device_id}:')
print(classification_report(y_test, selector.predict(X_test)))
report = classification_report(y_test, selector.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('RandomForestClassifier + SelectFromModel')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
selector = SelectFromModel(estimator, max_features = 20)
selector.fit(X_train, y_train)
from sklearn.metrics import classification_report
print(f'Device {device_id}:')
print(classification_report(y_test, selector.estimator_.predict(X_test)))
report = classification_report(y_test, selector.estimator_.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('RandomForestClassifier + PCA')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from sklearn.decomposition import PCA
pca = PCA(n_components=20).fit(X_train)
X_train = pca.transform(X_train)
X_test = pca.transform(X_test)
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
estimator.fit(X_train, y_train)
from sklearn.metrics import classification_report
print(f'Device {device_id}:')
print(classification_report(y_test, estimator.predict(X_test)))
report = classification_report(y_test, estimator.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('RandomForestClassifier + SelectKBest (f_classif)')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from sklearn.feature_selection import SelectKBest, f_classif
selector = SelectKBest(score_func = f_classif, k=20).fit(X_train, y_train)
X_train = selector.transform(X_train)
X_test = selector.transform(X_test)
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
estimator.fit(X_train, y_train)
from sklearn.metrics import classification_report
print(f'Device {device_id}:')
print(classification_report(y_test, estimator.predict(X_test)))
report = classification_report(y_test, estimator.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('RandomForestClassifier + SelectKBest (mutual_info_classif)')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from sklearn.feature_selection import SelectKBest, mutual_info_classif
selector = SelectKBest(score_func = mutual_info_classif, k=20).fit(X_train, y_train)
X_train = selector.transform(X_train)
X_test = selector.transform(X_test)
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
estimator.fit(X_train, y_train)
from sklearn.metrics import classification_report
print(f'Device {device_id}:')
print(classification_report(y_test, estimator.predict(X_test)))
report = classification_report(y_test, estimator.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('RandomForestClassifier + RandomUnderSampler')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from imblearn.under_sampling import RandomUnderSampler
X_oversampled, y_oversampled = RandomUnderSampler().fit_resample(X_train, y_train)
from sklearn.feature_selection import RFE
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
selector = RFE(estimator, n_features_to_select = 20, step = 0.05)
selector.fit(X_oversampled, y_oversampled)
from sklearn.metrics import classification_report
print(f'Device {device_id}:')
print(classification_report(y_test, selector.predict(X_test)))
report = classification_report(y_test, selector.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('RandomForestClassifier + RandomOverSampler')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from imblearn.over_sampling import RandomOverSampler
X_oversampled, y_oversampled = RandomOverSampler().fit_resample(X_train, y_train)
from sklearn.feature_selection import RFE
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
selector = RFE(estimator, n_features_to_select = 20, step = 0.05)
selector.fit(X_oversampled, y_oversampled)
from sklearn.metrics import classification_report
print(f'Device {device_id}:')
print(classification_report(y_test, selector.predict(X_test)))
report = classification_report(y_test, selector.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('RandomForestClassifier + SMOTE')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from imblearn.over_sampling import SMOTE
X_oversampled, y_oversampled = SMOTE().fit_resample(X_train, y_train)
from sklearn.feature_selection import RFE
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
selector = RFE(estimator, n_features_to_select = 20, step = 0.05)
selector.fit(X_oversampled, y_oversampled)
from sklearn.metrics import classification_report
print(f'Device {device_id}:')
print(classification_report(y_test, selector.predict(X_test)))
report = classification_report(y_test, selector.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('RandomForestClassifier + SMOTEENN')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from imblearn.combine import SMOTEENN
X_oversampled, y_oversampled = SMOTEENN().fit_resample(X_train, y_train)
from sklearn.feature_selection import RFE
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
selector = RFE(estimator, n_features_to_select = 20, step = 0.05)
selector.fit(X_oversampled, y_oversampled)
from sklearn.metrics import classification_report
print(f'Device {device_id}:')
print(classification_report(y_test, selector.predict(X_test)))
report = classification_report(y_test, selector.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('RandomForestClassifier + SMOTETomek')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from imblearn.combine import SMOTETomek
X_oversampled, y_oversampled = SMOTETomek().fit_resample(X_train, y_train)
from sklearn.feature_selection import RFE
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
selector = RFE(estimator, n_features_to_select = 20, step = 0.05)
selector.fit(X_oversampled, y_oversampled)
from sklearn.metrics import classification_report
print(f'Device {device_id}:')
print(classification_report(y_test, selector.predict(X_test)))
report = classification_report(y_test, selector.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('BalancedRandomForestClassifier')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from sklearn.feature_selection import RFE
from imblearn.ensemble import BalancedRandomForestClassifier
estimator = BalancedRandomForestClassifier(n_estimators = 10, random_state = 12369)
selector = RFE(estimator, n_features_to_select = 20, step = 0.05)
selector.fit(X_train, y_train)
from sklearn.metrics import classification_report
print(f'Device {device_id}:')
print(classification_report(y_test, selector.predict(X_test)))
report = classification_report(y_test, selector.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('Hyperparameter tuning')
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from imblearn.combine import SMOTETomek
X_oversampled, y_oversampled = SMOTETomek().fit_resample(X_train, y_train)
from sklearn.feature_selection import RFE
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(random_state = 12369, \
n_estimators = 50,
min_samples_leaf = 1, \
min_samples_split = 2, \
bootstrap = False, \
max_features = 'sqrt', \
max_depth = 20)
selector = RFE(estimator, n_features_to_select = 20, step = 0.05)
# from sklearn.model_selection import GridSearchCV
# param_grid = {
# 'estimator__n_estimators': [10, 50, 100, 200, 500],
# 'estimator__max_features': ['auto', 'sqrt', 'log2'],
# 'estimator__max_depth': [4, 5, 6, 7, 8],
# 'estimator__criterion': ['gini', 'entropy']
# }
from sklearn.model_selection import RandomizedSearchCV
param_grid = {
'estimator__n_estimators': [10, 20, 50, 100],
'estimator__max_features': ['auto', 'sqrt', 'log2'],
'estimator__max_depth': [int(x) for x in np.linspace(2, 20, num = 2)] + [None],
'estimator__min_samples_split': [2, 3, 4, 5],
'estimator__min_samples_leaf': [1, 2, 3],
'estimator__bootstrap': [True, False]
}
grid = RandomizedSearchCV(estimator = selector, \
param_distributions = param_grid, \
n_iter = 100, \
cv = 3, \
verbose = 2, \
random_state = 42, \
n_jobs = -1)
grid.fit(X_oversampled, y_oversampled)
print(grid.best_params_)
# %%
print('RandomForestClassifier + SMOTETomek + parameters')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from imblearn.combine import SMOTETomek
X_oversampled, y_oversampled = SMOTETomek().fit_resample(X_train, y_train)
from sklearn.feature_selection import RFE
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(random_state = 12369, \
n_estimators = 50,
min_samples_leaf = 1, \
min_samples_split = 2, \
bootstrap = False, \
max_features = 'sqrt', \
max_depth = 20)
selector = RFE(estimator, n_features_to_select = 20, step = 0.05)
selector.fit(X_oversampled, y_oversampled)
from sklearn.metrics import classification_report
print(f'Device {device_id}:')
print(classification_report(y_test, selector.predict(X_test)))
report = classification_report(y_test, selector.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
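# Illustrative refactor (editorial addition, not part of the original notebook): the
# per-device one-vs-rest evaluation repeated in the cells above could be factored into
# a helper; `make_estimator` is a hypothetical factory returning an unfitted classifier.
def evaluate_per_device(X, y, make_estimator, test_size=0.2, random_state=12369):
    from sklearn.model_selection import train_test_split
    from sklearn.metrics import classification_report
    rows = []
    for device_id in y.unique():
        y_device = np.where(y == device_id, 1, 0)
        X_train, X_test, y_train, y_test = train_test_split(
            X, y_device, test_size=test_size, random_state=random_state)
        estimator = make_estimator()
        estimator.fit(X_train, y_train)
        report = classification_report(y_test, estimator.predict(X_test), output_dict=True)
        rows.append({'device': device_id,
                     'accuracy': report['accuracy'],
                     'precision': report['1']['precision'],
                     'recall': report['1']['recall'],
                     'f1': report['1']['f1-score']})
    return pd.DataFrame(rows)
# Example: evaluate_per_device(X, y, lambda: RandomForestClassifier(random_state=12369))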
|
[
"django.setup",
"sklearn.preprocessing.StandardScaler",
"sklearn.model_selection.train_test_split",
"sklearn.neighbors.LocalOutlierFactor",
"sklearn.feature_selection.RFE",
"numpy.isnan",
"sklearn.feature_selection.SelectFromModel",
"numpy.mean",
"seaborn.pairplot",
"sklearn.model_selection.RandomizedSearchCV",
"seaborn.swarmplot",
"MAKDataHub.services.Services.storage_service",
"MAKDataHub.services.Services.profile_service",
"numpy.linspace",
"sklearn.svm.LinearSVC",
"sklearn.svm.OneClassSVM",
"sklearn.ensemble.RandomForestClassifier",
"imblearn.under_sampling.RandomUnderSampler",
"imblearn.combine.SMOTETomek",
"imblearn.ensemble.BalancedRandomForestClassifier",
"seaborn.boxplot",
"imblearn.over_sampling.SMOTE",
"sklearn.naive_bayes.GaussianNB",
"sklearn.ensemble.IsolationForest",
"yellowbrick.model_selection.RFECV",
"imblearn.combine.SMOTEENN",
"imblearn.over_sampling.RandomOverSampler",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.where",
"sklearn.decomposition.PCA",
"sklearn.feature_selection.SelectKBest"
] |
[((124, 138), 'django.setup', 'django.setup', ([], {}), '()\n', (136, 138), False, 'import django\n'), ((325, 351), 'MAKDataHub.services.Services.profile_service', 'Services.profile_service', ([], {}), '()\n', (349, 351), False, 'from MAKDataHub.services import Services\n'), ((370, 396), 'MAKDataHub.services.Services.storage_service', 'Services.storage_service', ([], {}), '()\n', (394, 396), False, 'from MAKDataHub.services import Services\n'), ((2287, 2327), 'seaborn.boxplot', 'sns.boxplot', (['df.DeviceId', 'df.AccMgn_mean'], {}), '(df.DeviceId, df.AccMgn_mean)\n', (2298, 2327), True, 'import seaborn as sns\n'), ((2334, 2376), 'seaborn.boxplot', 'sns.boxplot', (['df.DeviceId', 'df.AccMgn_median'], {}), '(df.DeviceId, df.AccMgn_median)\n', (2345, 2376), True, 'import seaborn as sns\n'), ((2383, 2423), 'seaborn.boxplot', 'sns.boxplot', (['df.DeviceId', 'df.GyrMgn_amax'], {}), '(df.DeviceId, df.GyrMgn_amax)\n', (2394, 2423), True, 'import seaborn as sns\n'), ((2430, 2544), 'seaborn.pairplot', 'sns.pairplot', (['df.loc[df.DeviceId != 3, :]'], {'hue': '"""DeviceId"""', 'vars': "['AccMgn_mean', 'AccMgn_median']", 'markers': '"""."""'}), "(df.loc[df.DeviceId != 3, :], hue='DeviceId', vars=[\n 'AccMgn_mean', 'AccMgn_median'], markers='.')\n", (2442, 2544), True, 'import seaborn as sns\n'), ((2583, 2640), 'seaborn.swarmplot', 'sns.swarmplot', ([], {'data': 'test', 'x': '"""DeviceId"""', 'y': '"""RotMgn_median"""'}), "(data=test, x='DeviceId', y='RotMgn_median')\n", (2596, 2640), True, 'import seaborn as sns\n'), ((2674, 2727), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'test', 'x': '"""DeviceId"""', 'y': '"""GrvMgn_amax"""'}), "(data=test, x='DeviceId', y='GrvMgn_amax')\n", (2685, 2727), True, 'import seaborn as sns\n'), ((11807, 11864), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y, test_size=0.2, random_state=12369)\n', (11823, 11864), False, 'from sklearn.model_selection import train_test_split\n'), ((11933, 11975), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(12369)'}), '(random_state=12369)\n', (11955, 11975), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((13439, 13496), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y, test_size=0.2, random_state=12369)\n', (13455, 13496), False, 'from sklearn.model_selection import train_test_split\n'), ((13611, 13670), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)', 'random_state': '(12369)'}), '(n_estimators=10, random_state=12369)\n', (13633, 13670), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((13686, 13742), 'yellowbrick.model_selection.RFECV', 'RFECV', (['estimator'], {'cv': '(5)', 'scoring': '"""f1_weighted"""', 'step': '(0.05)'}), "(estimator, cv=5, scoring='f1_weighted', step=0.05)\n", (13691, 13742), False, 'from yellowbrick.model_selection import RFECV\n'), ((3058, 3109), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_device', 'y_device'], {'test_size': '(0.2)'}), '(X_device, y_device, test_size=0.2)\n', (3074, 3109), False, 'from sklearn.model_selection import train_test_split\n'), ((3168, 3199), 'sklearn.svm.OneClassSVM', 'OneClassSVM', ([], {'random_state': '(12369)'}), '(random_state=12369)\n', (3179, 3199), False, 'from sklearn.svm import OneClassSVM\n'), ((4658, 4709), 
'sklearn.model_selection.train_test_split', 'train_test_split', (['X_device', 'y_device'], {'test_size': '(0.2)'}), '(X_device, y_device, test_size=0.2)\n', (4674, 4709), False, 'from sklearn.model_selection import train_test_split\n'), ((4777, 4809), 'sklearn.ensemble.IsolationForest', 'IsolationForest', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (4792, 4809), False, 'from sklearn.ensemble import IsolationForest\n'), ((6250, 6301), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_device', 'y_device'], {'test_size': '(0.2)'}), '(X_device, y_device, test_size=0.2)\n', (6266, 6301), False, 'from sklearn.model_selection import train_test_split\n'), ((6373, 6443), 'sklearn.neighbors.LocalOutlierFactor', 'LocalOutlierFactor', ([], {'n_neighbors': '(10)', 'novelty': '(True)', 'contamination': '"""auto"""'}), "(n_neighbors=10, novelty=True, contamination='auto')\n", (6391, 6443), False, 'from sklearn.neighbors import LocalOutlierFactor\n'), ((7695, 7725), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (7703, 7725), True, 'import numpy as np\n'), ((7823, 7887), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (7839, 7887), False, 'from sklearn.model_selection import train_test_split\n'), ((7946, 7975), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(12369)'}), '(random_state=12369)\n', (7955, 7975), False, 'from sklearn.svm import LinearSVC\n'), ((8714, 8744), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (8722, 8744), True, 'import numpy as np\n'), ((8842, 8906), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (8858, 8906), False, 'from sklearn.model_selection import train_test_split\n'), ((8982, 9004), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (9002, 9004), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((9735, 9765), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (9743, 9765), True, 'import numpy as np\n'), ((9863, 9927), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (9879, 9927), False, 'from sklearn.model_selection import train_test_split\n'), ((9995, 10007), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (10005, 10007), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((10750, 10780), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (10758, 10780), True, 'import numpy as np\n'), ((10878, 10942), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (10894, 10942), False, 'from sklearn.model_selection import train_test_split\n'), ((11019, 11061), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(12369)'}), '(random_state=12369)\n', (11041, 11061), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((12281, 12311), 'numpy.where', 'np.where', (['(y == 
device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (12289, 12311), True, 'import numpy as np\n'), ((12513, 12581), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_std', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X_std, y_device, test_size=0.2, random_state=12369)\n', (12529, 12581), False, 'from sklearn.model_selection import train_test_split\n'), ((12658, 12700), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(12369)'}), '(random_state=12369)\n', (12680, 12700), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((14058, 14088), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (14066, 14088), True, 'import numpy as np\n'), ((14186, 14250), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (14202, 14250), False, 'from sklearn.model_selection import train_test_split\n'), ((14377, 14436), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)', 'random_state': '(12369)'}), '(n_estimators=10, random_state=12369)\n', (14399, 14436), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((14456, 14506), 'sklearn.feature_selection.RFE', 'RFE', (['estimator'], {'n_features_to_select': '(20)', 'step': '(0.05)'}), '(estimator, n_features_to_select=20, step=0.05)\n', (14459, 14506), False, 'from sklearn.feature_selection import RFE\n'), ((15302, 15332), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (15310, 15332), True, 'import numpy as np\n'), ((15430, 15494), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (15446, 15494), False, 'from sklearn.model_selection import train_test_split\n'), ((15629, 15688), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)', 'random_state': '(12369)'}), '(n_estimators=10, random_state=12369)\n', (15651, 15688), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((15708, 15751), 'sklearn.feature_selection.SelectFromModel', 'SelectFromModel', (['estimator'], {'max_features': '(20)'}), '(estimator, max_features=20)\n', (15723, 15751), False, 'from sklearn.feature_selection import SelectFromModel\n'), ((16555, 16585), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (16563, 16585), True, 'import numpy as np\n'), ((16683, 16747), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (16699, 16747), False, 'from sklearn.model_selection import train_test_split\n'), ((16987, 17046), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)', 'random_state': '(12369)'}), '(n_estimators=10, random_state=12369)\n', (17009, 17046), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((17853, 17883), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (17861, 17883), True, 'import numpy as np\n'), ((17981, 18045), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], 
{'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (17997, 18045), False, 'from sklearn.model_selection import train_test_split\n'), ((18353, 18412), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)', 'random_state': '(12369)'}), '(n_estimators=10, random_state=12369)\n', (18375, 18412), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((19225, 19255), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (19233, 19255), True, 'import numpy as np\n'), ((19353, 19417), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (19369, 19417), False, 'from sklearn.model_selection import train_test_split\n'), ((19745, 19804), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)', 'random_state': '(12369)'}), '(n_estimators=10, random_state=12369)\n', (19767, 19804), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((20602, 20632), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (20610, 20632), True, 'import numpy as np\n'), ((20730, 20794), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (20746, 20794), False, 'from sklearn.model_selection import train_test_split\n'), ((21068, 21127), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)', 'random_state': '(12369)'}), '(n_estimators=10, random_state=12369)\n', (21090, 21127), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((21147, 21197), 'sklearn.feature_selection.RFE', 'RFE', (['estimator'], {'n_features_to_select': '(20)', 'step': '(0.05)'}), '(estimator, n_features_to_select=20, step=0.05)\n', (21150, 21197), False, 'from sklearn.feature_selection import RFE\n'), ((22007, 22037), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (22015, 22037), True, 'import numpy as np\n'), ((22135, 22199), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (22151, 22199), False, 'from sklearn.model_selection import train_test_split\n'), ((22470, 22529), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)', 'random_state': '(12369)'}), '(n_estimators=10, random_state=12369)\n', (22492, 22529), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((22549, 22599), 'sklearn.feature_selection.RFE', 'RFE', (['estimator'], {'n_features_to_select': '(20)', 'step': '(0.05)'}), '(estimator, n_features_to_select=20, step=0.05)\n', (22552, 22599), False, 'from sklearn.feature_selection import RFE\n'), ((23397, 23427), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (23405, 23427), True, 'import numpy as np\n'), ((23525, 23589), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (23541, 23589), False, 'from sklearn.model_selection import 
train_test_split\n'), ((23836, 23895), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)', 'random_state': '(12369)'}), '(n_estimators=10, random_state=12369)\n', (23858, 23895), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((23915, 23965), 'sklearn.feature_selection.RFE', 'RFE', (['estimator'], {'n_features_to_select': '(20)', 'step': '(0.05)'}), '(estimator, n_features_to_select=20, step=0.05)\n', (23918, 23965), False, 'from sklearn.feature_selection import RFE\n'), ((24766, 24796), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (24774, 24796), True, 'import numpy as np\n'), ((24894, 24958), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (24910, 24958), False, 'from sklearn.model_selection import train_test_split\n'), ((25205, 25264), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)', 'random_state': '(12369)'}), '(n_estimators=10, random_state=12369)\n', (25227, 25264), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((25284, 25334), 'sklearn.feature_selection.RFE', 'RFE', (['estimator'], {'n_features_to_select': '(20)', 'step': '(0.05)'}), '(estimator, n_features_to_select=20, step=0.05)\n', (25287, 25334), False, 'from sklearn.feature_selection import RFE\n'), ((26136, 26166), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (26144, 26166), True, 'import numpy as np\n'), ((26264, 26328), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (26280, 26328), False, 'from sklearn.model_selection import train_test_split\n'), ((26579, 26638), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)', 'random_state': '(12369)'}), '(n_estimators=10, random_state=12369)\n', (26601, 26638), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((26658, 26708), 'sklearn.feature_selection.RFE', 'RFE', (['estimator'], {'n_features_to_select': '(20)', 'step': '(0.05)'}), '(estimator, n_features_to_select=20, step=0.05)\n', (26661, 26708), False, 'from sklearn.feature_selection import RFE\n'), ((27505, 27535), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (27513, 27535), True, 'import numpy as np\n'), ((27633, 27697), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (27649, 27697), False, 'from sklearn.model_selection import train_test_split\n'), ((27833, 27900), 'imblearn.ensemble.BalancedRandomForestClassifier', 'BalancedRandomForestClassifier', ([], {'n_estimators': '(10)', 'random_state': '(12369)'}), '(n_estimators=10, random_state=12369)\n', (27863, 27900), False, 'from imblearn.ensemble import BalancedRandomForestClassifier\n'), ((27920, 27970), 'sklearn.feature_selection.RFE', 'RFE', (['estimator'], {'n_features_to_select': '(20)', 'step': '(0.05)'}), '(estimator, n_features_to_select=20, step=0.05)\n', (27923, 27970), False, 'from sklearn.feature_selection import RFE\n'), ((28688, 28718), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], 
{}), '(y == device_id, 1, 0)\n', (28696, 28718), True, 'import numpy as np\n'), ((28816, 28880), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (28832, 28880), False, 'from sklearn.model_selection import train_test_split\n'), ((29131, 29292), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(12369)', 'n_estimators': '(50)', 'min_samples_leaf': '(1)', 'min_samples_split': '(2)', 'bootstrap': '(False)', 'max_features': '"""sqrt"""', 'max_depth': '(20)'}), "(random_state=12369, n_estimators=50,\n min_samples_leaf=1, min_samples_split=2, bootstrap=False, max_features=\n 'sqrt', max_depth=20)\n", (29153, 29292), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((29371, 29421), 'sklearn.feature_selection.RFE', 'RFE', (['estimator'], {'n_features_to_select': '(20)', 'step': '(0.05)'}), '(estimator, n_features_to_select=20, step=0.05)\n', (29374, 29421), False, 'from sklearn.feature_selection import RFE\n'), ((30190, 30321), 'sklearn.model_selection.RandomizedSearchCV', 'RandomizedSearchCV', ([], {'estimator': 'selector', 'param_distributions': 'param_grid', 'n_iter': '(100)', 'cv': '(3)', 'verbose': '(2)', 'random_state': '(42)', 'n_jobs': '(-1)'}), '(estimator=selector, param_distributions=param_grid,\n n_iter=100, cv=3, verbose=2, random_state=42, n_jobs=-1)\n', (30208, 30321), False, 'from sklearn.model_selection import RandomizedSearchCV\n'), ((30635, 30665), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (30643, 30665), True, 'import numpy as np\n'), ((30763, 30827), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (30779, 30827), False, 'from sklearn.model_selection import train_test_split\n'), ((31078, 31239), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(12369)', 'n_estimators': '(50)', 'min_samples_leaf': '(1)', 'min_samples_split': '(2)', 'bootstrap': '(False)', 'max_features': '"""sqrt"""', 'max_depth': '(20)'}), "(random_state=12369, n_estimators=50,\n min_samples_leaf=1, min_samples_split=2, bootstrap=False, max_features=\n 'sqrt', max_depth=20)\n", (31100, 31239), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((31318, 31368), 'sklearn.feature_selection.RFE', 'RFE', (['estimator'], {'n_features_to_select': '(20)', 'step': '(0.05)'}), '(estimator, n_features_to_select=20, step=0.05)\n', (31321, 31368), False, 'from sklearn.feature_selection import RFE\n'), ((12378, 12394), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (12392, 12394), False, 'from sklearn.preprocessing import StandardScaler\n'), ((16807, 16827), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(20)'}), '(n_components=20)\n', (16810, 16827), False, 'from sklearn.decomposition import PCA\n'), ((18133, 18172), 'sklearn.feature_selection.SelectKBest', 'SelectKBest', ([], {'score_func': 'f_classif', 'k': '(20)'}), '(score_func=f_classif, k=20)\n', (18144, 18172), False, 'from sklearn.feature_selection import SelectKBest, mutual_info_classif\n'), ((19515, 19564), 'sklearn.feature_selection.SelectKBest', 'SelectKBest', ([], {'score_func': 'mutual_info_classif', 'k': '(20)'}), '(score_func=mutual_info_classif, k=20)\n', (19526, 19564), 
False, 'from sklearn.feature_selection import SelectKBest, mutual_info_classif\n'), ((20892, 20912), 'imblearn.under_sampling.RandomUnderSampler', 'RandomUnderSampler', ([], {}), '()\n', (20910, 20912), False, 'from imblearn.under_sampling import RandomUnderSampler\n'), ((22295, 22314), 'imblearn.over_sampling.RandomOverSampler', 'RandomOverSampler', ([], {}), '()\n', (22312, 22314), False, 'from imblearn.over_sampling import RandomOverSampler\n'), ((23673, 23680), 'imblearn.over_sampling.SMOTE', 'SMOTE', ([], {}), '()\n', (23678, 23680), False, 'from imblearn.over_sampling import SMOTE\n'), ((25039, 25049), 'imblearn.combine.SMOTEENN', 'SMOTEENN', ([], {}), '()\n', (25047, 25049), False, 'from imblearn.combine import SMOTEENN\n'), ((26411, 26423), 'imblearn.combine.SMOTETomek', 'SMOTETomek', ([], {}), '()\n', (26421, 26423), False, 'from imblearn.combine import SMOTETomek\n'), ((28963, 28975), 'imblearn.combine.SMOTETomek', 'SMOTETomek', ([], {}), '()\n', (28973, 28975), False, 'from imblearn.combine import SMOTETomek\n'), ((30910, 30922), 'imblearn.combine.SMOTETomek', 'SMOTETomek', ([], {}), '()\n', (30920, 30922), False, 'from imblearn.combine import SMOTETomek\n'), ((3652, 3670), 'numpy.isnan', 'np.isnan', (['accuracy'], {}), '(accuracy)\n', (3660, 3670), True, 'import numpy as np\n'), ((3718, 3737), 'numpy.isnan', 'np.isnan', (['precision'], {}), '(precision)\n', (3726, 3737), True, 'import numpy as np\n'), ((3779, 3795), 'numpy.isnan', 'np.isnan', (['recall'], {}), '(recall)\n', (3787, 3795), True, 'import numpy as np\n'), ((3837, 3853), 'numpy.isnan', 'np.isnan', (['fscore'], {}), '(fscore)\n', (3845, 3853), True, 'import numpy as np\n'), ((4178, 4197), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (4185, 4197), True, 'import numpy as np\n'), ((4222, 4241), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (4229, 4241), True, 'import numpy as np\n'), ((4263, 4279), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (4270, 4279), True, 'import numpy as np\n'), ((4301, 4317), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (4308, 4317), True, 'import numpy as np\n'), ((5254, 5272), 'numpy.isnan', 'np.isnan', (['accuracy'], {}), '(accuracy)\n', (5262, 5272), True, 'import numpy as np\n'), ((5320, 5339), 'numpy.isnan', 'np.isnan', (['precision'], {}), '(precision)\n', (5328, 5339), True, 'import numpy as np\n'), ((5381, 5397), 'numpy.isnan', 'np.isnan', (['recall'], {}), '(recall)\n', (5389, 5397), True, 'import numpy as np\n'), ((5439, 5455), 'numpy.isnan', 'np.isnan', (['fscore'], {}), '(fscore)\n', (5447, 5455), True, 'import numpy as np\n'), ((5782, 5801), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (5789, 5801), True, 'import numpy as np\n'), ((5826, 5845), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (5833, 5845), True, 'import numpy as np\n'), ((5867, 5883), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (5874, 5883), True, 'import numpy as np\n'), ((5905, 5921), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (5912, 5921), True, 'import numpy as np\n'), ((6892, 6910), 'numpy.isnan', 'np.isnan', (['accuracy'], {}), '(accuracy)\n', (6900, 6910), True, 'import numpy as np\n'), ((6958, 6977), 'numpy.isnan', 'np.isnan', (['precision'], {}), '(precision)\n', (6966, 6977), True, 'import numpy as np\n'), ((7019, 7035), 'numpy.isnan', 'np.isnan', (['recall'], {}), '(recall)\n', (7027, 7035), True, 'import numpy as np\n'), ((7077, 7093), 'numpy.isnan', 'np.isnan', 
(['fscore'], {}), '(fscore)\n', (7085, 7093), True, 'import numpy as np\n'), ((7420, 7439), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (7427, 7439), True, 'import numpy as np\n'), ((7464, 7483), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (7471, 7483), True, 'import numpy as np\n'), ((7505, 7521), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (7512, 7521), True, 'import numpy as np\n'), ((7543, 7559), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (7550, 7559), True, 'import numpy as np\n'), ((8428, 8447), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (8435, 8447), True, 'import numpy as np\n'), ((8472, 8491), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (8479, 8491), True, 'import numpy as np\n'), ((8513, 8529), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (8520, 8529), True, 'import numpy as np\n'), ((8551, 8567), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (8558, 8567), True, 'import numpy as np\n'), ((9459, 9478), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (9466, 9478), True, 'import numpy as np\n'), ((9503, 9522), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (9510, 9522), True, 'import numpy as np\n'), ((9544, 9560), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (9551, 9560), True, 'import numpy as np\n'), ((9582, 9598), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (9589, 9598), True, 'import numpy as np\n'), ((10462, 10481), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (10469, 10481), True, 'import numpy as np\n'), ((10506, 10525), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (10513, 10525), True, 'import numpy as np\n'), ((10547, 10563), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (10554, 10563), True, 'import numpy as np\n'), ((10585, 10601), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (10592, 10601), True, 'import numpy as np\n'), ((11518, 11537), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (11525, 11537), True, 'import numpy as np\n'), ((11562, 11581), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (11569, 11581), True, 'import numpy as np\n'), ((11603, 11619), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (11610, 11619), True, 'import numpy as np\n'), ((11641, 11657), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (11648, 11657), True, 'import numpy as np\n'), ((13157, 13176), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (13164, 13176), True, 'import numpy as np\n'), ((13201, 13220), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (13208, 13220), True, 'import numpy as np\n'), ((13242, 13258), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (13249, 13258), True, 'import numpy as np\n'), ((13280, 13296), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (13287, 13296), True, 'import numpy as np\n'), ((14996, 15015), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (15003, 15015), True, 'import numpy as np\n'), ((15040, 15059), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (15047, 15059), True, 'import numpy as np\n'), ((15081, 15097), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (15088, 15097), True, 'import numpy as np\n'), ((15119, 15135), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (15126, 15135), True, 'import numpy as 
np\n'), ((16261, 16280), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (16268, 16280), True, 'import numpy as np\n'), ((16305, 16324), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (16312, 16324), True, 'import numpy as np\n'), ((16346, 16362), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (16353, 16362), True, 'import numpy as np\n'), ((16384, 16400), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (16391, 16400), True, 'import numpy as np\n'), ((17539, 17558), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (17546, 17558), True, 'import numpy as np\n'), ((17583, 17602), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (17590, 17602), True, 'import numpy as np\n'), ((17624, 17640), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (17631, 17640), True, 'import numpy as np\n'), ((17662, 17678), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (17669, 17678), True, 'import numpy as np\n'), ((18901, 18920), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (18908, 18920), True, 'import numpy as np\n'), ((18945, 18964), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (18952, 18964), True, 'import numpy as np\n'), ((18986, 19002), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (18993, 19002), True, 'import numpy as np\n'), ((19024, 19040), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (19031, 19040), True, 'import numpy as np\n'), ((20293, 20312), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (20300, 20312), True, 'import numpy as np\n'), ((20337, 20356), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (20344, 20356), True, 'import numpy as np\n'), ((20378, 20394), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (20385, 20394), True, 'import numpy as np\n'), ((20416, 20432), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (20423, 20432), True, 'import numpy as np\n'), ((21699, 21718), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (21706, 21718), True, 'import numpy as np\n'), ((21743, 21762), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (21750, 21762), True, 'import numpy as np\n'), ((21784, 21800), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (21791, 21800), True, 'import numpy as np\n'), ((21822, 21838), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (21829, 21838), True, 'import numpy as np\n'), ((23101, 23120), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (23108, 23120), True, 'import numpy as np\n'), ((23145, 23164), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (23152, 23164), True, 'import numpy as np\n'), ((23186, 23202), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (23193, 23202), True, 'import numpy as np\n'), ((23224, 23240), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (23231, 23240), True, 'import numpy as np\n'), ((24467, 24486), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (24474, 24486), True, 'import numpy as np\n'), ((24511, 24530), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (24518, 24530), True, 'import numpy as np\n'), ((24552, 24568), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (24559, 24568), True, 'import numpy as np\n'), ((24590, 24606), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (24597, 24606), True, 'import numpy as np\n'), ((25836, 25855), 
'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (25843, 25855), True, 'import numpy as np\n'), ((25880, 25899), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (25887, 25899), True, 'import numpy as np\n'), ((25921, 25937), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (25928, 25937), True, 'import numpy as np\n'), ((25959, 25975), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (25966, 25975), True, 'import numpy as np\n'), ((27210, 27229), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (27217, 27229), True, 'import numpy as np\n'), ((27254, 27273), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (27261, 27273), True, 'import numpy as np\n'), ((27295, 27311), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (27302, 27311), True, 'import numpy as np\n'), ((27333, 27349), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (27340, 27349), True, 'import numpy as np\n'), ((28460, 28479), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (28467, 28479), True, 'import numpy as np\n'), ((28504, 28523), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (28511, 28523), True, 'import numpy as np\n'), ((28545, 28561), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (28552, 28561), True, 'import numpy as np\n'), ((28583, 28599), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (28590, 28599), True, 'import numpy as np\n'), ((31870, 31889), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (31877, 31889), True, 'import numpy as np\n'), ((31914, 31933), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (31921, 31933), True, 'import numpy as np\n'), ((31955, 31971), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (31962, 31971), True, 'import numpy as np\n'), ((31993, 32009), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (32000, 32009), True, 'import numpy as np\n'), ((29984, 30009), 'numpy.linspace', 'np.linspace', (['(2)', '(20)'], {'num': '(2)'}), '(2, 20, num=2)\n', (29995, 30009), True, 'import numpy as np\n')]
|
import pylab as plt
import numpy as np
from math import *
N=100
t0 = 0.0
t1 = 2.0
t = np.linspace(t0,t1,N)
dt = (t1-t0)/N
one = np.ones((N))
xp = np.zeros((N))
yp = np.zeros((N))
th = np.zeros((N))
x = t*t
y = t
plt.figure()
plt.plot(x,y,'g-')
plt.legend(['Path'],loc='best')
plt.title('Quadratic Path')
plt.show()
doty=one
dotx=2*t
ddoty=0
ddotx=2*one
r = 1.0
L = 4.0
v = np.sqrt(dotx*dotx + doty*doty)
kappa = (dotx*ddoty - doty*ddotx)/(v*v*v)
dotphi1 = (v/r)*(kappa*L +1)
dotphi2 = (v/r)*(-kappa*L+1)
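# These wheel rates follow from the differential-drive model used in the integration loop below
# (a short derivation added for clarity):
#   v     = (r/2)*(dotphi1 + dotphi2)        forward speed
#   dotth = (r/(2*L))*(dotphi1 - dotphi2)    turn rate
# With curvature kappa = dotth/v, solving for the wheel rates gives
#   dotphi1 = (v/r)*(kappa*L + 1)   and   dotphi2 = (v/r)*(1 - kappa*L),
# which are exactly the expressions computed above.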
plt.plot(t,dotphi1,'b-', t,dotphi2,'g-')
plt.title('Wheel Speeds')
plt.legend(['Right', 'Left'],loc='best')
plt.show()
xp[0] = 0.0
yp[0] = 0.0
th[0] = 1.5707963267949
for i in range(N-1):
xp[i+1] = xp[i] + (r*dt/2.0) * (dotphi1[i]+dotphi2[i]) * cos(th[i])
yp[i+1] = yp[i] + (r*dt/2.0)*(dotphi1[i]+dotphi2[i])* sin(th[i])
th[i+1] = th[i] + (r*dt/(2.0*L))*(dotphi1[i]-dotphi2[i])
plt.figure()
plt.plot(x,y,'g-', xp, yp, 'bx')
plt.legend(['Original Path', 'Robot Path'],loc='best')
plt.title('Path')
plt.show()
|
[
"pylab.title",
"pylab.show",
"numpy.zeros",
"numpy.ones",
"pylab.figure",
"numpy.linspace",
"pylab.legend",
"pylab.plot",
"numpy.sqrt"
] |
[((87, 109), 'numpy.linspace', 'np.linspace', (['t0', 't1', 'N'], {}), '(t0, t1, N)\n', (98, 109), True, 'import numpy as np\n'), ((129, 139), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (136, 139), True, 'import numpy as np\n'), ((147, 158), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (155, 158), True, 'import numpy as np\n'), ((166, 177), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (174, 177), True, 'import numpy as np\n'), ((185, 196), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (193, 196), True, 'import numpy as np\n'), ((215, 227), 'pylab.figure', 'plt.figure', ([], {}), '()\n', (225, 227), True, 'import pylab as plt\n'), ((228, 248), 'pylab.plot', 'plt.plot', (['x', 'y', '"""g-"""'], {}), "(x, y, 'g-')\n", (236, 248), True, 'import pylab as plt\n'), ((247, 279), 'pylab.legend', 'plt.legend', (["['Path']"], {'loc': '"""best"""'}), "(['Path'], loc='best')\n", (257, 279), True, 'import pylab as plt\n'), ((279, 306), 'pylab.title', 'plt.title', (['"""Quadratic Path"""'], {}), "('Quadratic Path')\n", (288, 306), True, 'import pylab as plt\n'), ((307, 317), 'pylab.show', 'plt.show', ([], {}), '()\n', (315, 317), True, 'import pylab as plt\n'), ((378, 412), 'numpy.sqrt', 'np.sqrt', (['(dotx * dotx + doty * doty)'], {}), '(dotx * dotx + doty * doty)\n', (385, 412), True, 'import numpy as np\n'), ((510, 554), 'pylab.plot', 'plt.plot', (['t', 'dotphi1', '"""b-"""', 't', 'dotphi2', '"""g-"""'], {}), "(t, dotphi1, 'b-', t, dotphi2, 'g-')\n", (518, 554), True, 'import pylab as plt\n'), ((551, 576), 'pylab.title', 'plt.title', (['"""Wheel Speeds"""'], {}), "('Wheel Speeds')\n", (560, 576), True, 'import pylab as plt\n'), ((577, 618), 'pylab.legend', 'plt.legend', (["['Right', 'Left']"], {'loc': '"""best"""'}), "(['Right', 'Left'], loc='best')\n", (587, 618), True, 'import pylab as plt\n'), ((618, 628), 'pylab.show', 'plt.show', ([], {}), '()\n', (626, 628), True, 'import pylab as plt\n'), ((903, 915), 'pylab.figure', 'plt.figure', ([], {}), '()\n', (913, 915), True, 'import pylab as plt\n'), ((916, 950), 'pylab.plot', 'plt.plot', (['x', 'y', '"""g-"""', 'xp', 'yp', '"""bx"""'], {}), "(x, y, 'g-', xp, yp, 'bx')\n", (924, 950), True, 'import pylab as plt\n'), ((949, 1004), 'pylab.legend', 'plt.legend', (["['Original Path', 'Robot Path']"], {'loc': '"""best"""'}), "(['Original Path', 'Robot Path'], loc='best')\n", (959, 1004), True, 'import pylab as plt\n'), ((1004, 1021), 'pylab.title', 'plt.title', (['"""Path"""'], {}), "('Path')\n", (1013, 1021), True, 'import pylab as plt\n'), ((1022, 1032), 'pylab.show', 'plt.show', ([], {}), '()\n', (1030, 1032), True, 'import pylab as plt\n')]
|
#!/usr/bin/env python
# gatherUpper.py
import numpy
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
LENGTH = 3
x = None
x_local = numpy.linspace(rank*LENGTH,(rank+1)*LENGTH, LENGTH)
print(x_local)
if rank == 0:
x = numpy.zeros(size*LENGTH)
print (x)
comm.Gather(x_local, x, root=0)
#you should notice that only the root process has a value for x that
#is not "None"
print ("process", rank, "x:", x)
print ("process", rank, "x_local:", x_local)
|
[
"numpy.zeros",
"numpy.linspace"
] |
[((173, 231), 'numpy.linspace', 'numpy.linspace', (['(rank * LENGTH)', '((rank + 1) * LENGTH)', 'LENGTH'], {}), '(rank * LENGTH, (rank + 1) * LENGTH, LENGTH)\n', (187, 231), False, 'import numpy\n'), ((262, 288), 'numpy.zeros', 'numpy.zeros', (['(size * LENGTH)'], {}), '(size * LENGTH)\n', (273, 288), False, 'import numpy\n')]
|
"""Run simulations for SDC model.
Parameters
----------
N_JOBS
Number of cores used for parallelization.
RANDOM_SEED
Seed for the random numbers generator.
SPACE
Types of social space.
Available values: 'uniform', 'lognormal', 'clusters_normal'.
N
Sizes of networks,
NDIM
Number of dimensions of simulated social spaces.
DATA_REP
Number of independent realizations of social spaces.
SDA_PARAMS
k
Expected average degree.
alpha
Homophily level.
directed
Directed/undirected networks.
p_rewire
Probability of random rewiring.
SDA_REP
Number of independent realizations of adjacency matrices.
SIM_PARAMS
degseq_type
Degree sequence type.
One of: 'poisson', 'negbinom', 'powerlaw'.
degseq_sort
Should degree sequence be sorted by expected node degrees.
"""
import os
import gc
import numpy as np
import pandas as pd
from sklearn.externals.joblib import Memory
import _
# Globals
ROOT = os.path.dirname(os.path.realpath(__file__))
HERE = ROOT
DATAPATH = os.path.join(HERE, 'raw-data')
# Persistence
MEMORY = Memory(location='.cache', verbose=1)
N_JOBS = 4
# Data generation params
RANDOM_SEED = 101
SPACE = ('uniform', 'lognormal', 'clusters_normal')
N = (1000, 2000, 4000, 8000)
NDIM = (1, 2, 4, 8, 16)
CENTERS = (4,)
DATA_REP = 2
# SDA params
SDA_PARAMS = {
'k': (30,),
'alpha': (2, 4, 8, np.inf),
'directed': (False,),
'p_rewire': (.01,)
}
SDA_REP = 3
SIM_PARAMS = {
'degseq_type': ('poisson', 'negbinom', 'powerlaw'),
'sort': (True, False)
}
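# Rough size of the parameter grid above, assuming _.simulate expands a full Cartesian
# product over these settings (illustrative bookkeeping only):
#   spaces : 3 SPACE x 4 N x 5 NDIM x 1 CENTERS x 2 DATA_REP = 120 space realizations
#   SDA    : 1 k x 4 alpha x 1 directed x 1 p_rewire x 3 SDA_REP = 12 adjacency matrices per space
#   SIM    : 3 degseq_type x 2 sort = 6 degree-sequence configurations per network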
@MEMORY.cache(ignore=['n_jobs'])
def simulate_cm(space, dparams, drep, sdaparams, sdarep, simparams, n_jobs):
return _.simulate(space, dparams, drep, sdaparams, sdarep,
simparams, n_jobs, simfunc=_.run_sdac)
# Run simulations
if RANDOM_SEED is not None:
np.random.seed(RANDOM_SEED)
sim = lambda s: simulate_cm(
space=s,
dparams=(N, NDIM, CENTERS),
drep=DATA_REP,
sdaparams=SDA_PARAMS,
sdarep=SDA_REP,
simparams=SIM_PARAMS,
n_jobs=N_JOBS
)
df = None # main data frame
gdf = None # graph data frame
for s in SPACE:
sim(s)
gc.collect()
for s in SPACE:
print(f"\rloading and processing '{s}' space' ...", end="")
_df = sim(s)
_df.drop(columns=['A', 'labels'], inplace=True)
if df is None:
df = _df
else:
df = pd.concat((df, _df), ignore_index=True)
# Save data -------------------------------------------------------------------
# Standard data get saved as feather file, so it can be easily
# shared with R for data analysis and visualization.
# Adjacency matrices data is saved as a separate pickle file.
# It will be used for graph visualizations.
os.makedirs(DATAPATH, exist_ok=True)
# Save main data as a feather file
df.to_feather(os.path.join(DATAPATH, 'sda-data-cm.feather'))
# Save graph data as a pickle file
# joblib.dump(gdf, os.path.join(DATAPATH, 'sda-graphs-cm.pkl'))
|
[
"numpy.random.seed",
"os.makedirs",
"_.simulate",
"os.path.realpath",
"sklearn.externals.joblib.Memory",
"gc.collect",
"os.path.join",
"pandas.concat"
] |
[((1067, 1097), 'os.path.join', 'os.path.join', (['HERE', '"""raw-data"""'], {}), "(HERE, 'raw-data')\n", (1079, 1097), False, 'import os\n'), ((1122, 1158), 'sklearn.externals.joblib.Memory', 'Memory', ([], {'location': '""".cache"""', 'verbose': '(1)'}), "(location='.cache', verbose=1)\n", (1128, 1158), False, 'from sklearn.externals.joblib import Memory\n'), ((2185, 2197), 'gc.collect', 'gc.collect', ([], {}), '()\n', (2195, 2197), False, 'import gc\n'), ((2750, 2786), 'os.makedirs', 'os.makedirs', (['DATAPATH'], {'exist_ok': '(True)'}), '(DATAPATH, exist_ok=True)\n', (2761, 2786), False, 'import os\n'), ((1016, 1042), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1032, 1042), False, 'import os\n'), ((1709, 1803), '_.simulate', '_.simulate', (['space', 'dparams', 'drep', 'sdaparams', 'sdarep', 'simparams', 'n_jobs'], {'simfunc': '_.run_sdac'}), '(space, dparams, drep, sdaparams, sdarep, simparams, n_jobs,\n simfunc=_.run_sdac)\n', (1719, 1803), False, 'import _\n'), ((1874, 1901), 'numpy.random.seed', 'np.random.seed', (['RANDOM_SEED'], {}), '(RANDOM_SEED)\n', (1888, 1901), True, 'import numpy as np\n'), ((2837, 2882), 'os.path.join', 'os.path.join', (['DATAPATH', '"""sda-data-cm.feather"""'], {}), "(DATAPATH, 'sda-data-cm.feather')\n", (2849, 2882), False, 'import os\n'), ((2406, 2445), 'pandas.concat', 'pd.concat', (['(df, _df)'], {'ignore_index': '(True)'}), '((df, _df), ignore_index=True)\n', (2415, 2445), True, 'import pandas as pd\n')]
|
import numpy as np
import scipy.signal as sp
import scipy.spatial.distance as sp_dist
import librosa
class MedianNMF:
y, sr = None,None
n_components = None
def __init__(self,y,sr,n_components = 5):
self.y, self.sr = y,sr
self.n_components = n_components
def decompose(self):
        #isolate the percussive part of the signal
hpss_y = self.hpss()
#Perform Short-time Fourier transform
D = librosa.stft(hpss_y)
# Separate the magnitude and phase
S, phase = librosa.magphase(D)
#NMF decompose to components
components, activations = self.decomposeNMF(hpss_y, S, self.n_components)
#reconstruct and return
return [self.reconstructComponent(
components[:, i], activations[i], phase) for i in range(0,len(activations))]
def hpss(self, margin=4.0):
        #extract percussive components through median filtering
return librosa.effects.percussive(self.y, margin=margin)
def decomposeNMF(self, y, magnitude, n_components):
# Decompose by nmf
return librosa.decompose.decompose(magnitude, n_components, sort=True)
    def reconstructFull(self, components, activations, phase):
#reconstruct all components into one signal
D_k = components.dot(activations)
y_k = librosa.istft(D_k * phase)
return y_k
def reconstructComponent(self, components, activation, phase):
D_k = np.multiply.outer(components, activation)
y_k = librosa.istft(D_k * phase)
#filter out noise using Savitzky-Golay filter
component_filtered = sp.savgol_filter(y_k,11,1)
return component_filtered
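# Minimal usage sketch (illustrative only; assumes an audio file 'example.wav' exists on disk):
if __name__ == '__main__':
    y, sr = librosa.load('example.wav')
    parts = MedianNMF(y, sr, n_components=5).decompose()
    print('reconstructed %d components' % len(parts))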
|
[
"librosa.decompose.decompose",
"scipy.signal.savgol_filter",
"librosa.effects.percussive",
"librosa.istft",
"numpy.multiply.outer",
"librosa.magphase",
"librosa.stft"
] |
[((438, 458), 'librosa.stft', 'librosa.stft', (['hpss_y'], {}), '(hpss_y)\n', (450, 458), False, 'import librosa\n'), ((521, 540), 'librosa.magphase', 'librosa.magphase', (['D'], {}), '(D)\n', (537, 540), False, 'import librosa\n'), ((940, 989), 'librosa.effects.percussive', 'librosa.effects.percussive', (['self.y'], {'margin': 'margin'}), '(self.y, margin=margin)\n', (966, 989), False, 'import librosa\n'), ((1090, 1153), 'librosa.decompose.decompose', 'librosa.decompose.decompose', (['magnitude', 'n_components'], {'sort': '(True)'}), '(magnitude, n_components, sort=True)\n', (1117, 1153), False, 'import librosa\n'), ((1314, 1340), 'librosa.istft', 'librosa.istft', (['(D_k * phase)'], {}), '(D_k * phase)\n', (1327, 1340), False, 'import librosa\n'), ((1446, 1487), 'numpy.multiply.outer', 'np.multiply.outer', (['components', 'activation'], {}), '(components, activation)\n', (1463, 1487), True, 'import numpy as np\n'), ((1502, 1528), 'librosa.istft', 'librosa.istft', (['(D_k * phase)'], {}), '(D_k * phase)\n', (1515, 1528), False, 'import librosa\n'), ((1613, 1641), 'scipy.signal.savgol_filter', 'sp.savgol_filter', (['y_k', '(11)', '(1)'], {}), '(y_k, 11, 1)\n', (1629, 1641), True, 'import scipy.signal as sp\n')]
|
import numpy as np
from collections import defaultdict
class Agent:
def __init__(self, nA=6):
""" Initialize agent.
Params
======
- nA: number of actions available to the agent
"""
self.nA = nA
self.Q = defaultdict(lambda: np.zeros(self.nA))
self.epsilon_start = 1.0
self.i_episode = 1.0
self.alpha = 0.04
self.gamma = 0.9
def epsilon_greedy_probs(self, state, epsilon):
        ''' Calculation of probabilities according to an
        epsilon-greedy policy'''
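        # Worked example (illustrative): with nA = 6 and epsilon = 0.1, every action
        # starts at 0.1/6 ~ 0.0167 and the greedy action gets 1 - 0.1 + 0.1/6 ~ 0.9167,
        # so the probabilities sum to 1.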
probs = np.ones(self.nA) * epsilon / self.nA
best_action = np.argmax(self.Q[state])
probs[best_action] = 1 - epsilon + (epsilon / self.nA)
return probs
def select_action(self, state):
""" Given the state, select an action.
Params
======
- state: the current state of the environment
Returns
=======
- action: an integer, compatible with the task's action space
"""
# Random action
# action = np.random.choice(self.nA)
# Epsilon decay
epsilon = self.epsilon_start / self.i_episode
# Epsilon-greedy policy/probabilities
probs = self.epsilon_greedy_probs(state, epsilon)
# Action selection acc. to epsilon-greedy policy
action = np.random.choice(np.arange(self.nA), p = probs)
return action
def step(self, state, action, reward, next_state, done):
""" Update the agent's knowledge, using the most recently sampled tuple.
Params
======
- state: the previous state of the environment
- action: the agent's previous choice of action
- reward: last reward received
- next_state: the current state of the environment
- done: whether the episode is complete (True or False)
"""
# SARSA method
next_action = self.select_action(next_state)
Gt = reward + self.gamma * self.Q[next_state][next_action]
# Q-learning (SARSAMAX) method
#best_action = np.argmax(self.Q[next_state])
#Gt = reward + self.gamma * self.Q[next_state][best_action]
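        # Side-by-side comparison of the two targets (illustrative):
        #   SARSA    : Gt = r + gamma * Q(s', a'), with a' drawn from the current eps-greedy policy
        #   SARSAMAX : Gt = r + gamma * max_a Q(s', a)
        # Either target feeds the same update below: Q(s,a) += alpha * (Gt - Q(s,a)).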
self.Q[state][action] += self.alpha * (Gt - self.Q[state][action])
# i_episode update for calculation of epsilon decay
self.i_episode += 1.0
|
[
"numpy.zeros",
"numpy.arange",
"numpy.ones",
"numpy.argmax"
] |
[((636, 660), 'numpy.argmax', 'np.argmax', (['self.Q[state]'], {}), '(self.Q[state])\n', (645, 660), True, 'import numpy as np\n'), ((1397, 1415), 'numpy.arange', 'np.arange', (['self.nA'], {}), '(self.nA)\n', (1406, 1415), True, 'import numpy as np\n'), ((286, 303), 'numpy.zeros', 'np.zeros', (['self.nA'], {}), '(self.nA)\n', (294, 303), True, 'import numpy as np\n'), ((577, 593), 'numpy.ones', 'np.ones', (['self.nA'], {}), '(self.nA)\n', (584, 593), True, 'import numpy as np\n')]
|
import numpy as np
class Mesh:
""" Contains all the information about the spatial domain """
def __init__(self,dimension,topology,geometry):
self.Nvoxels = len(topology)
self.dimension = dimension
        self.topology = topology # adjacency matrix (numpy array), 0 along main diagonal, 1 elsewhere
# only really works for regular grids
self.geometry = geometry # numpy array of Nvoxels pairs (volume,x,(y,(z)))
def get_coarseMesh_voxel(voxel,coupling):
# returns the coarse mesh voxel associated with
# voxel by the coupling
    # by convention I take the coarse mesh voxel to be the smallest
# index coupled to voxel according to coupling
i = 0
while coupling[voxel,i]<1:
i = i+1
return i
def make_lattice1d(Nx,L):
# generates uniform 1d lattice on [0,L]
topology = np.zeros((Nx,Nx))
d = np.ones(Nx-1)
topology = np.diag(d,1)+np.diag(d,-1)
geometry = np.zeros((Nx,2))
h = L/Nx
geometry[:,0] = h*np.ones(Nx)
geometry[:,1] = np.linspace(0,L-h,Nx)
mesh = Mesh(1,topology,geometry)
return mesh
def make_lattice1d_coupled(Nx,L,J):
mesh = make_lattice1d(Nx,L)
coupling = np.zeros((Nx,Nx))
for i in range(int(Nx/J)):
coupling[i*J:(i+1)*J,i*J:(i+1)*J] = np.ones((J,J))
return mesh,coupling
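# Example (illustrative): a 12-voxel lattice on [0,1] split into coupling blocks of 3 voxels.
#   mesh, coupling = make_lattice1d_coupled(Nx=12, L=1.0, J=3)
#   Mesh.get_coarseMesh_voxel(5, coupling)   # -> 3, the smallest index coupled to voxel 5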
# need to implement
def make_lattice2d(Nx,Ny,Lx,Ly):
topology = np.zeros((Nx*Ny,Nx*Ny))
d1 = np.ones(Nx-1)
d2 = np.ones(Nx*Ny-Ny)
for i in range(Ny):
topology[i*Ny:(i+1)*Ny,i*Ny:(i+1)*Ny] = np.diag(d1,1)+np.diag(d1,-1)
topology = topology + np.diag(d2,Nx)+np.diag(d2,-Nx)
geometry = np.zeros((Nx*Ny,2))
hx = Nx/Lx
hy = Ny/Ly
#geometry[:,0] = h*np.ones(Nx)
#geometry[:,1] = linspace(0,L-h,Nx)
mesh = Mesh(1,topology,geometry)
return mesh
def make_lattice3d(Nx,Ny):
return None
|
[
"numpy.diag",
"numpy.zeros",
"numpy.ones",
"numpy.linspace"
] |
[((855, 873), 'numpy.zeros', 'np.zeros', (['(Nx, Nx)'], {}), '((Nx, Nx))\n', (863, 873), True, 'import numpy as np\n'), ((881, 896), 'numpy.ones', 'np.ones', (['(Nx - 1)'], {}), '(Nx - 1)\n', (888, 896), True, 'import numpy as np\n'), ((952, 969), 'numpy.zeros', 'np.zeros', (['(Nx, 2)'], {}), '((Nx, 2))\n', (960, 969), True, 'import numpy as np\n'), ((1036, 1061), 'numpy.linspace', 'np.linspace', (['(0)', '(L - h)', 'Nx'], {}), '(0, L - h, Nx)\n', (1047, 1061), True, 'import numpy as np\n'), ((1196, 1214), 'numpy.zeros', 'np.zeros', (['(Nx, Nx)'], {}), '((Nx, Nx))\n', (1204, 1214), True, 'import numpy as np\n'), ((1398, 1426), 'numpy.zeros', 'np.zeros', (['(Nx * Ny, Nx * Ny)'], {}), '((Nx * Ny, Nx * Ny))\n', (1406, 1426), True, 'import numpy as np\n'), ((1431, 1446), 'numpy.ones', 'np.ones', (['(Nx - 1)'], {}), '(Nx - 1)\n', (1438, 1446), True, 'import numpy as np\n'), ((1454, 1475), 'numpy.ones', 'np.ones', (['(Nx * Ny - Ny)'], {}), '(Nx * Ny - Ny)\n', (1461, 1475), True, 'import numpy as np\n'), ((1645, 1667), 'numpy.zeros', 'np.zeros', (['(Nx * Ny, 2)'], {}), '((Nx * Ny, 2))\n', (1653, 1667), True, 'import numpy as np\n'), ((910, 923), 'numpy.diag', 'np.diag', (['d', '(1)'], {}), '(d, 1)\n', (917, 923), True, 'import numpy as np\n'), ((923, 937), 'numpy.diag', 'np.diag', (['d', '(-1)'], {}), '(d, -1)\n', (930, 937), True, 'import numpy as np\n'), ((1004, 1015), 'numpy.ones', 'np.ones', (['Nx'], {}), '(Nx)\n', (1011, 1015), True, 'import numpy as np\n'), ((1289, 1304), 'numpy.ones', 'np.ones', (['(J, J)'], {}), '((J, J))\n', (1296, 1304), True, 'import numpy as np\n'), ((1614, 1630), 'numpy.diag', 'np.diag', (['d2', '(-Nx)'], {}), '(d2, -Nx)\n', (1621, 1630), True, 'import numpy as np\n'), ((1544, 1558), 'numpy.diag', 'np.diag', (['d1', '(1)'], {}), '(d1, 1)\n', (1551, 1558), True, 'import numpy as np\n'), ((1558, 1573), 'numpy.diag', 'np.diag', (['d1', '(-1)'], {}), '(d1, -1)\n', (1565, 1573), True, 'import numpy as np\n'), ((1599, 1614), 'numpy.diag', 'np.diag', (['d2', 'Nx'], {}), '(d2, Nx)\n', (1606, 1614), True, 'import numpy as np\n')]
|
import tensorflow as tf
from tensorflow.python.keras.preprocessing import image as kp_image
# Keras is only used to load VGG19 model as a high level API to TensorFlow
from keras.applications.vgg19 import VGG19
from keras.models import Model
from keras import backend as K
# pillow is used for loading and saving images
from PIL import Image
# numPy is used for manipulation of array of object i.e Image in our case
import numpy as np
##
##
##
# list of layers to be considered for calculation of Content and Style Loss
content_layers = ['block3_conv3']
style_layers = ['block1_conv1','block2_conv2','block4_conv3']
num_content_layers = len(content_layers)
num_style_layers = len(style_layers)
# path where the content and style images are located
content_path = 'content.jpg'
style_path = 'style.jpg'
# Save the result as
save_name = 'generated.jpg'
# path to where Vgg19 model weight is located
vgg_weights = "vgg_weights/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5"
############################################################################################################
############################################################################################################
# UTILS
############################################################################################################
############################################################################################################
def load_img(path_to_img):
max_dim = 512
img = Image.open(path_to_img)
img_size = max(img.size)
scale = max_dim/img_size
img = img.resize((round(img.size[0]*scale), round(img.size[1]*scale)), Image.ANTIALIAS)
img = kp_image.img_to_array(img)
# We need to broadcast the image array such that it has a batch dimension
img = np.expand_dims(img, axis=0)
# preprocess raw images to make it suitable to be used by VGG19 model
out = tf.keras.applications.vgg19.preprocess_input(img)
return tf.convert_to_tensor(out)
def deprocess_img(processed_img):
x = processed_img.copy()
# perform the inverse of the preprocessiing step
x[:, :, 0] += 103.939
x[:, :, 1] += 116.779
x[:, :, 2] += 123.68
x = x[:, :, ::-1]
x = np.clip(x, 0, 255).astype('uint8')
return x
############################################################################################################
############################################################################################################
# Loss Function
############################################################################################################
############################################################################################################
### Content Loss Function
def get_content_loss(content, target):
return tf.reduce_mean(tf.square(content - target)) /2
### Style Loss Function
def gram_matrix(input_tensor):
    # if the input tensor is a 3D array of size Nh x Nw x Nc,
    # reshape it to a 2D array of shape (Nh*Nw) x Nc so that the Gram matrix below has shape Nc x Nc
channels = int(input_tensor.shape[-1])
a = tf.reshape(input_tensor, [-1, channels])
n = tf.shape(a)[0]
# get gram matrix
gram = tf.matmul(a, a, transpose_a=True)
return gram
def get_style_loss(base_style, gram_target):
height, width, channels = base_style.get_shape().as_list()
gram_style = gram_matrix(base_style)
# Original eqn as a constant to divide i.e 1/(4. * (channels ** 2) * (width * height) ** 2)
return tf.reduce_mean(tf.square(gram_style - gram_target)) / (channels**2 * width * height) #(4.0 * (channels ** 2) * (width * height) ** 2)
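# For reference, the two losses above follow Gatys et al. (2015), up to constant factors
# (reduce_mean is used here in place of the plain sums):
#   content loss : L_content = 1/2 * sum_ij (F_ij - P_ij)^2
#   style loss   : E_l = 1/(4 * N_l^2 * M_l^2) * sum_ij (G_ij - A_ij)^2
# where G and A are Gram matrices, N_l is the number of channels and M_l = height*width of layer l.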
### Used to pass the content and style images through the model
def get_feature_representations(model, content_path, style_path, num_content_layers):
# Load our images in
content_image = load_img(content_path)
style_image = load_img(style_path)
# batch compute content and style features
content_outputs = model(content_image)
style_outputs = model(style_image)
# Get the style and content feature representations from our model
style_features = [ style_layer[0] for style_layer in style_outputs[num_content_layers:] ]
content_features = [ content_layer[0] for content_layer in content_outputs[:num_content_layers] ]
return style_features, content_features
### Total Loss
def compute_loss(model, loss_weights, generated_output_activations, gram_style_features, content_features, num_content_layers, num_style_layers):
generated_content_activations = generated_output_activations[:num_content_layers]
generated_style_activations = generated_output_activations[num_content_layers:]
style_weight, content_weight = loss_weights
style_score = 0
content_score = 0
# Accumulate style losses from all layers
# Here, we equally weight each contribution of each loss layer
weight_per_style_layer = 1.0 / float(num_style_layers)
for target_style, comb_style in zip(gram_style_features, generated_style_activations):
temp = get_style_loss(comb_style[0], target_style)
style_score += weight_per_style_layer * temp
# Accumulate content losses from all layers
weight_per_content_layer = 1.0 / float(num_content_layers)
for target_content, comb_content in zip(content_features, generated_content_activations):
temp = get_content_loss(comb_content[0], target_content)
content_score += weight_per_content_layer* temp
# Get total loss
loss = style_weight*style_score + content_weight*content_score
return loss, style_score, content_score
############################################################################################################
############################################################################################################
# CREATE STYLE TRANSFER
############################################################################################################
############################################################################################################
# Load the VGG19 model using Keras
def get_model(content_layers,style_layers):
# Load our model. We load pretrained VGG, trained on imagenet data
vgg19 = VGG19(weights=None, include_top=False)
    # We don't need to (or want to) train any layers of our pre-trained vgg model, so we set its trainable flag to False.
vgg19.trainable = False
style_model_outputs = [vgg19.get_layer(name).output for name in style_layers]
content_model_outputs = [vgg19.get_layer(name).output for name in content_layers]
model_outputs = content_model_outputs + style_model_outputs
# Build model
return Model(inputs = vgg19.input, outputs = model_outputs), vgg19
def run_style_transfer(content_path, style_path, num_iterations=200, content_weight=0.1, style_weight=0.9):
# Create a tensorflow session
sess = tf.Session()
# Assign keras back-end to the TF session which we created
K.set_session(sess)
model, vgg19 = get_model(content_layers,style_layers)
# Get the style and content feature representations (from our specified intermediate layers)
style_features, content_features = get_feature_representations(model, content_path, style_path, num_content_layers)
gram_style_features = [gram_matrix(style_feature) for style_feature in style_features]
# VGG default normalization
norm_means = np.array([103.939, 116.779, 123.68])
min_vals = -norm_means
max_vals = 255 - norm_means
    # In the original paper, the initial stylized image is a random matrix of the same size as the content image,
    # but later implementations start from the content image itself instead of random values
    # because it was found to produce a stylized result faster.
generated_image = load_img(content_path)
# generated_image = np.random.randint(0,255, size=generated_image.shape)
# Create tensorflow variable to hold a stylized/generated image during the training
generated_image = tf.Variable(generated_image, dtype=tf.float32)
model_outputs = model(generated_image)
    # weights of the style and content losses, i.e. alpha & beta
loss_weights = (style_weight, content_weight)
# Create our optimizer
loss = compute_loss(model, loss_weights, model_outputs, gram_style_features, content_features, num_content_layers, num_style_layers)
opt = tf.train.AdamOptimizer(learning_rate=9, beta1=0.9, epsilon=1e-1).minimize( loss[0], var_list = [generated_image])
sess.run(tf.global_variables_initializer())
sess.run(generated_image.initializer)
# loading the weights again because tf.global_variables_initializer() resets the weights
vgg19.load_weights(vgg_weights)
    # Set loss to infinity before training starts and create a variable to hold the best image (i.e. the image with minimum loss)
best_loss, best_img = float('inf'), None
for i in range(num_iterations):
# Do optimization
sess.run(opt)
        # Make sure image values stay in the range of max-min values of the VGG norm
clipped = tf.clip_by_value(generated_image, min_vals, max_vals)
# assign the clipped value to the tensor stylized image
generated_image.assign(clipped)
# Open the Tuple of tensors
total_loss, style_score, content_score = loss
total_loss = total_loss.eval(session=sess)
if total_loss < best_loss:
# Update best loss and best image from total loss.
best_loss = total_loss
# generated image is of shape (1, h, w, 3) convert it to (h, w, 3)
temp_generated_image = sess.run(generated_image)[0]
best_img = deprocess_img(temp_generated_image)
s_loss = sess.run(style_score)
c_loss = sess.run(content_score)
# print best loss
print('best: iteration: ', i ,'loss: ', total_loss ,' style_loss: ', s_loss,' content_loss: ', c_loss)
# Save image after every 100 iterations
if (i+1)%100 == 0:
output = Image.fromarray(best_img)
output.save(str(i+1)+'-'+save_name)
# after num_iterations iterations are completed, close the TF session
sess.close()
return best_img, best_loss
best, best_loss = run_style_transfer(content_path, style_path)
|
[
"tensorflow.clip_by_value",
"tensorflow.reshape",
"keras.models.Model",
"numpy.clip",
"tensorflow.matmul",
"tensorflow.Variable",
"tensorflow.global_variables_initializer",
"keras.backend.set_session",
"tensorflow.Session",
"keras.applications.vgg19.VGG19",
"tensorflow.keras.applications.vgg19.preprocess_input",
"tensorflow.python.keras.preprocessing.image.img_to_array",
"tensorflow.convert_to_tensor",
"numpy.expand_dims",
"PIL.Image.open",
"tensorflow.shape",
"numpy.array",
"tensorflow.square",
"PIL.Image.fromarray",
"tensorflow.train.AdamOptimizer"
] |
[((1538, 1561), 'PIL.Image.open', 'Image.open', (['path_to_img'], {}), '(path_to_img)\n', (1548, 1561), False, 'from PIL import Image\n'), ((1728, 1754), 'tensorflow.python.keras.preprocessing.image.img_to_array', 'kp_image.img_to_array', (['img'], {}), '(img)\n', (1749, 1754), True, 'from tensorflow.python.keras.preprocessing import image as kp_image\n'), ((1841, 1868), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (1855, 1868), True, 'import numpy as np\n'), ((1950, 1999), 'tensorflow.keras.applications.vgg19.preprocess_input', 'tf.keras.applications.vgg19.preprocess_input', (['img'], {}), '(img)\n', (1994, 1999), True, 'import tensorflow as tf\n'), ((2010, 2035), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['out'], {}), '(out)\n', (2030, 2035), True, 'import tensorflow as tf\n'), ((3120, 3160), 'tensorflow.reshape', 'tf.reshape', (['input_tensor', '[-1, channels]'], {}), '(input_tensor, [-1, channels])\n', (3130, 3160), True, 'import tensorflow as tf\n'), ((3213, 3246), 'tensorflow.matmul', 'tf.matmul', (['a', 'a'], {'transpose_a': '(True)'}), '(a, a, transpose_a=True)\n', (3222, 3246), True, 'import tensorflow as tf\n'), ((6230, 6268), 'keras.applications.vgg19.VGG19', 'VGG19', ([], {'weights': 'None', 'include_top': '(False)'}), '(weights=None, include_top=False)\n', (6235, 6268), False, 'from keras.applications.vgg19 import VGG19\n'), ((6888, 6900), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (6898, 6900), True, 'import tensorflow as tf\n'), ((6965, 6984), 'keras.backend.set_session', 'K.set_session', (['sess'], {}), '(sess)\n', (6978, 6984), True, 'from keras import backend as K\n'), ((7392, 7428), 'numpy.array', 'np.array', (['[103.939, 116.779, 123.68]'], {}), '([103.939, 116.779, 123.68])\n', (7400, 7428), True, 'import numpy as np\n'), ((7970, 8016), 'tensorflow.Variable', 'tf.Variable', (['generated_image'], {'dtype': 'tf.float32'}), '(generated_image, dtype=tf.float32)\n', (7981, 8016), True, 'import tensorflow as tf\n'), ((3167, 3178), 'tensorflow.shape', 'tf.shape', (['a'], {}), '(a)\n', (3175, 3178), True, 'import tensorflow as tf\n'), ((6673, 6721), 'keras.models.Model', 'Model', ([], {'inputs': 'vgg19.input', 'outputs': 'model_outputs'}), '(inputs=vgg19.input, outputs=model_outputs)\n', (6678, 6721), False, 'from keras.models import Model\n'), ((8468, 8501), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (8499, 8501), True, 'import tensorflow as tf\n'), ((9004, 9057), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['generated_image', 'min_vals', 'max_vals'], {}), '(generated_image, min_vals, max_vals)\n', (9020, 9057), True, 'import tensorflow as tf\n'), ((2251, 2269), 'numpy.clip', 'np.clip', (['x', '(0)', '(255)'], {}), '(x, 0, 255)\n', (2258, 2269), True, 'import numpy as np\n'), ((2880, 2907), 'tensorflow.square', 'tf.square', (['(content - target)'], {}), '(content - target)\n', (2889, 2907), True, 'import tensorflow as tf\n'), ((3532, 3567), 'tensorflow.square', 'tf.square', (['(gram_style - gram_target)'], {}), '(gram_style - gram_target)\n', (3541, 3567), True, 'import tensorflow as tf\n'), ((8342, 8405), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': '(9)', 'beta1': '(0.9)', 'epsilon': '(0.1)'}), '(learning_rate=9, beta1=0.9, epsilon=0.1)\n', (8364, 8405), True, 'import tensorflow as tf\n'), ((9890, 9915), 'PIL.Image.fromarray', 'Image.fromarray', (['best_img'], {}), '(best_img)\n', (9905, 9915), False, 'from PIL 
import Image\n')]
|
import dynet as dy
import numpy as np
import moire
from moire import Expression
__all__ = [
'zeros', 'ones', 'full', 'normal', 'bernoulli', 'uniform', 'gumbel',
'zeros_like', 'ones_like', 'full_like', 'normal_like', 'bernoulli_like', 'uniform_like', 'gumbel_like',
'eye', 'diagonal',
'where',
]
def zeros(*dim, batch_size: int = 1) -> Expression:
a = np.zeros((*dim, batch_size), dtype=np.float32)
return dy.inputTensor(a, batched=True, device=moire.config.device)
def zeros_like(x: Expression) -> Expression:
dim, batch_size = x.dim()
return zeros(*dim, batch_size=batch_size)
def ones(*dim, batch_size: int = 1) -> Expression:
a = np.ones((*dim, batch_size), dtype=np.float32)
return dy.inputTensor(a, batched=True, device=moire.config.device)
def ones_like(x: Expression) -> Expression:
dim, batch_size = x.dim()
return ones(*dim, batch_size=batch_size)
def eye(N: int, M: int = None, k: int = 0) -> Expression:
return dy.inputTensor(np.eye(N, M, k), batched=False, device=moire.config.device)
def diagonal(x: Expression) -> Expression:
(dim0, dim1), batch_size = x.dim()
return dy.cmult(x, eye(dim0, dim1))
def full(*dim, value, batch_size: int = 1) -> Expression:
a = np.full((*dim, batch_size), fill_value=value, dtype=np.float32)
return dy.inputTensor(a, batched=True, device=moire.config.device)
def full_like(x: Expression, value) -> Expression:
dim, batch_size = x.dim()
return full(*dim, value=value, batch_size=batch_size)
def normal(*dim, mean: float = 0.0, stddev: float = 1.0, batch_size: int = 1) -> Expression:
a = np.random.normal(loc=mean, scale=stddev, size=(*dim, batch_size)).astype(np.float32)
return dy.inputTensor(a, batched=True, device=moire.config.device)
def normal_like(x: Expression, mean: float = 0.0, stddev: float = 1.0) -> Expression:
dim, batch_size = x.dim()
return normal(*dim, mean=mean, stddev=stddev, batch_size=batch_size)
def bernoulli(*dim, p: float, batch_size: int = 1) -> Expression:
a = np.random.uniform(low=0, high=1.0, size=(*dim, batch_size)) < p
return dy.inputTensor(a.astype(np.int32), batched=True, device=moire.config.device)
def bernoulli_like(x: Expression, p: float) -> Expression:
dim, batch_size = x.dim()
return bernoulli(*dim, p=p, batch_size=batch_size)
def uniform(*dim, low: float, high: float, batch_size: int = 1) -> Expression:
a = np.random.uniform(low=low, high=high, size=(*dim, batch_size))
return dy.inputTensor(a, batched=True, device=moire.config.device)
def uniform_like(x: Expression, low: float, high: float) -> Expression:
dim, batch_size = x.dim()
    return uniform(*dim, low=low, high=high, batch_size=batch_size)
def gumbel(*dim, mu: float = 0.0, beta: float = 1.0, batch_size: int = 1) -> Expression:
a = np.random.gumbel(loc=mu, scale=beta, size=(*dim, batch_size))
return dy.inputTensor(a, batched=True, device=moire.config.device)
def gumbel_like(x: Expression, mu: float = 0.0, beta: float = 1.0) -> Expression:
dim, batch_size = x.dim()
return gumbel(*dim, mu=mu, beta=beta, batch_size=batch_size)
def where(cond: Expression, x: Expression, y: Expression) -> Expression:
return dy.cmult(cond, x) + dy.cmult(1.0 - cond, y)
if __name__ == '__main__':
a = dy.inputTensor([[1, 2, 3], [2, 3, 4], ])
moire.debug(f'a :: {a.dim()} => {a.value()}')
b = diagonal(a)
moire.debug(f'b :: {b.dim()} => {b.value()}')
|
[
"numpy.full",
"numpy.random.uniform",
"numpy.random.gumbel",
"dynet.inputTensor",
"dynet.cmult",
"numpy.zeros",
"numpy.ones",
"numpy.random.normal",
"numpy.eye"
] |
[((375, 421), 'numpy.zeros', 'np.zeros', (['(*dim, batch_size)'], {'dtype': 'np.float32'}), '((*dim, batch_size), dtype=np.float32)\n', (383, 421), True, 'import numpy as np\n'), ((433, 492), 'dynet.inputTensor', 'dy.inputTensor', (['a'], {'batched': '(True)', 'device': 'moire.config.device'}), '(a, batched=True, device=moire.config.device)\n', (447, 492), True, 'import dynet as dy\n'), ((677, 722), 'numpy.ones', 'np.ones', (['(*dim, batch_size)'], {'dtype': 'np.float32'}), '((*dim, batch_size), dtype=np.float32)\n', (684, 722), True, 'import numpy as np\n'), ((734, 793), 'dynet.inputTensor', 'dy.inputTensor', (['a'], {'batched': '(True)', 'device': 'moire.config.device'}), '(a, batched=True, device=moire.config.device)\n', (748, 793), True, 'import dynet as dy\n'), ((1253, 1316), 'numpy.full', 'np.full', (['(*dim, batch_size)'], {'fill_value': 'value', 'dtype': 'np.float32'}), '((*dim, batch_size), fill_value=value, dtype=np.float32)\n', (1260, 1316), True, 'import numpy as np\n'), ((1328, 1387), 'dynet.inputTensor', 'dy.inputTensor', (['a'], {'batched': '(True)', 'device': 'moire.config.device'}), '(a, batched=True, device=moire.config.device)\n', (1342, 1387), True, 'import dynet as dy\n'), ((1728, 1787), 'dynet.inputTensor', 'dy.inputTensor', (['a'], {'batched': '(True)', 'device': 'moire.config.device'}), '(a, batched=True, device=moire.config.device)\n', (1742, 1787), True, 'import dynet as dy\n'), ((2442, 2504), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'low', 'high': 'high', 'size': '(*dim, batch_size)'}), '(low=low, high=high, size=(*dim, batch_size))\n', (2459, 2504), True, 'import numpy as np\n'), ((2516, 2575), 'dynet.inputTensor', 'dy.inputTensor', (['a'], {'batched': '(True)', 'device': 'moire.config.device'}), '(a, batched=True, device=moire.config.device)\n', (2530, 2575), True, 'import dynet as dy\n'), ((2846, 2907), 'numpy.random.gumbel', 'np.random.gumbel', ([], {'loc': 'mu', 'scale': 'beta', 'size': '(*dim, batch_size)'}), '(loc=mu, scale=beta, size=(*dim, batch_size))\n', (2862, 2907), True, 'import numpy as np\n'), ((2919, 2978), 'dynet.inputTensor', 'dy.inputTensor', (['a'], {'batched': '(True)', 'device': 'moire.config.device'}), '(a, batched=True, device=moire.config.device)\n', (2933, 2978), True, 'import dynet as dy\n'), ((3325, 3363), 'dynet.inputTensor', 'dy.inputTensor', (['[[1, 2, 3], [2, 3, 4]]'], {}), '([[1, 2, 3], [2, 3, 4]])\n', (3339, 3363), True, 'import dynet as dy\n'), ((1001, 1016), 'numpy.eye', 'np.eye', (['N', 'M', 'k'], {}), '(N, M, k)\n', (1007, 1016), True, 'import numpy as np\n'), ((2055, 2114), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0)', 'high': '(1.0)', 'size': '(*dim, batch_size)'}), '(low=0, high=1.0, size=(*dim, batch_size))\n', (2072, 2114), True, 'import numpy as np\n'), ((3244, 3261), 'dynet.cmult', 'dy.cmult', (['cond', 'x'], {}), '(cond, x)\n', (3252, 3261), True, 'import dynet as dy\n'), ((3264, 3287), 'dynet.cmult', 'dy.cmult', (['(1.0 - cond)', 'y'], {}), '(1.0 - cond, y)\n', (3272, 3287), True, 'import dynet as dy\n'), ((1632, 1697), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'mean', 'scale': 'stddev', 'size': '(*dim, batch_size)'}), '(loc=mean, scale=stddev, size=(*dim, batch_size))\n', (1648, 1697), True, 'import numpy as np\n')]
|
"""Tests for the variant of MT2 by <NAME>."""
from typing import Optional, Union
import numpy
import pytest
from .common import mt2_lester, mt2_tombs
def test_simple_example():
computed_val = mt2_tombs(100, 410, 20, 150, -210, -300, -200, 280, 100, 100)
assert computed_val == pytest.approx(412.628)
def test_near_massless():
# This test is based on Fig 5 of https://arxiv.org/pdf/1411.4312.pdf
m_vis_a = 0
px_a = -42.017340486
py_a = -146.365340528
m_vis_b = 0.087252259
px_b = -9.625614206
py_b = 145.757295514
px_miss = -16.692279406
py_miss = -14.730240471
chi_a = 0
chi_b = 0
computed_val = mt2_tombs(
m_vis_a, px_a, py_a, m_vis_b, px_b, py_b, px_miss, py_miss, chi_a, chi_b
)
assert computed_val == pytest.approx(0.09719971)
def test_fuzz():
batch_size = 100
num_tests = 1000
numpy.random.seed(42)
def _random_batch(min_, max_):
return numpy.random.uniform(min_, max_, (batch_size,))
for _ in range(num_tests):
m_vis_1 = _random_batch(0, 100)
px_vis_1 = _random_batch(-100, 100)
py_vis_1 = _random_batch(-100, 100)
m_vis_2 = _random_batch(0, 100)
px_vis_2 = _random_batch(-100, 100)
py_vis_2 = _random_batch(-100, 100)
px_miss = _random_batch(-100, 100)
py_miss = _random_batch(-100, 100)
m_invis_1 = _random_batch(0, 100)
m_invis_2 = _random_batch(0, 100)
args = (
m_vis_1,
px_vis_1,
py_vis_1,
m_vis_2,
px_vis_2,
py_vis_2,
px_miss,
py_miss,
m_invis_1,
m_invis_2,
)
result_lester = mt2_lester(*args)
result_tombs = mt2_tombs(*args)
numpy.testing.assert_allclose(result_lester, result_tombs, rtol=1e-12)
def test_scale_invariance():
example_args = numpy.array((100, 410, 20, 150, -210, -300, -200, 280, 100, 100))
example_val = mt2_tombs(*example_args)
# mt2 scales with its arguments; check over some orders of magnitude.
for i in range(-100, 100, 10):
scale = 10.0 ** i
with numpy.errstate(over="ignore"):
# Suppress overflow warnings when performing the evaluation; we're happy
# so long as we match approximately in the test below.
computed_val = mt2_tombs(*(example_args * scale))
assert computed_val == pytest.approx(example_val * scale)
def test_negative_masses():
# Any negative mass is unphysical.
# These arguments use negative masses to make both initial bounds negative.
# Check that the result is neither positive nor an infinite loop.
computed_val = mt2_tombs(1, 2, 3, 4, 5, 6, 7, 8, -90, -100)
assert not (computed_val > 0)
|
[
"numpy.random.uniform",
"numpy.random.seed",
"numpy.errstate",
"numpy.array",
"numpy.testing.assert_allclose",
"pytest.approx"
] |
[((880, 901), 'numpy.random.seed', 'numpy.random.seed', (['(42)'], {}), '(42)\n', (897, 901), False, 'import numpy\n'), ((1918, 1983), 'numpy.array', 'numpy.array', (['(100, 410, 20, 150, -210, -300, -200, 280, 100, 100)'], {}), '((100, 410, 20, 150, -210, -300, -200, 280, 100, 100))\n', (1929, 1983), False, 'import numpy\n'), ((290, 312), 'pytest.approx', 'pytest.approx', (['(412.628)'], {}), '(412.628)\n', (303, 312), False, 'import pytest\n'), ((788, 813), 'pytest.approx', 'pytest.approx', (['(0.09719971)'], {}), '(0.09719971)\n', (801, 813), False, 'import pytest\n'), ((953, 1000), 'numpy.random.uniform', 'numpy.random.uniform', (['min_', 'max_', '(batch_size,)'], {}), '(min_, max_, (batch_size,))\n', (973, 1000), False, 'import numpy\n'), ((1797, 1867), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (['result_lester', 'result_tombs'], {'rtol': '(1e-12)'}), '(result_lester, result_tombs, rtol=1e-12)\n', (1826, 1867), False, 'import numpy\n'), ((2176, 2205), 'numpy.errstate', 'numpy.errstate', ([], {'over': '"""ignore"""'}), "(over='ignore')\n", (2190, 2205), False, 'import numpy\n'), ((2452, 2486), 'pytest.approx', 'pytest.approx', (['(example_val * scale)'], {}), '(example_val * scale)\n', (2465, 2486), False, 'import pytest\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# dicomgui.py
"""Main app file that convert DICOM data via a wxPython GUI dialog."""
# Copyright (c) 2018-2020 <NAME>
# Copyright (c) 2009-2017 <NAME>
# Copyright (c) 2009 <NAME>
# This file is part of dicompyler, released under a BSD license.
# See the file license.txt included with this distribution, also
# available at https://github.com/bastula/dicompyler/
#
# It's assumed that the reference (prescription) dose is in cGy.
import hashlib, os, threading, functools, json, warnings
from logging import getLogger, DEBUG, INFO
logger = getLogger('DcmConverter')
import wx
warnings.filterwarnings("ignore", category=wx.wxPyDeprecationWarning)
from wx.xrc import *
import numpy as np
from dicompylercore import dicomparser
from pyDcmConverter import guiutil, util
class DcmConverterApp(wx.App):
"""Prepare to show the dialog that will Import DICOM and DICOM RT files."""
def OnInit(self):
wx.GetApp().SetAppName("DicomConverter")
# Load the XRC file for our gui resources
self.res = XmlResource(util.GetResourcePath('dicomgui.xrc'))
dlgDicomImporter = self.res.LoadDialog(None, "DicomImporterDialog")
dlgDicomImporter.Init(self.res)
# Show the dialog and return the result
ret = dlgDicomImporter.ShowModal()
# Save configure
conf = {}
with open('.dcmconverter.conf', 'w') as f:
conf['path'] = dlgDicomImporter.path
conf['only_export_voldata'] = dlgDicomImporter.only_export_voldata
conf['min_slice_num'] = dlgDicomImporter.min_slice_num
conf['offset'] = dlgDicomImporter.offset
conf['export_mori_format'] = dlgDicomImporter.export_mori_format
conf['export_nii_format'] = dlgDicomImporter.export_nii_format
conf['output_dir'] = dlgDicomImporter.output_dir
conf['output_name'] = dlgDicomImporter.output_name
json.dump(conf, f, indent=2, sort_keys=True)
# Block until the thread is done before destroying the dialog
if dlgDicomImporter:
if hasattr(dlgDicomImporter, 't'):
dlgDicomImporter.t.join()
dlgDicomImporter.Destroy()
os.sys.exit(0)
return 1
class DicomImporterDialog(wx.Dialog):
"""Import DICOM RT files and return a dictionary of data."""
def __init__(self):
wx.Dialog.__init__(self)
def Init(self, res):
"""Method called after the panel has been initialized."""
# Set window icon
if not guiutil.IsMac():
self.SetIcon(guiutil.get_icon())
# Initialize controls
self.txtDicomImport = XRCCTRL(self, 'txtDicomImport')
self.btnDicomImport = XRCCTRL(self, 'btnDicomImport')
self.btnPause = XRCCTRL(self, 'btn_pause')
self.checkSearchSubfolders = XRCCTRL(self, 'checkSearchSubfolders')
self.lblProgressLabel = XRCCTRL(self, 'lblProgressLabel')
self.lblProgress = XRCCTRL(self, 'lblProgress')
self.gaugeProgress = XRCCTRL(self, 'gaugeProgress')
self.lblProgressPercent = XRCCTRL(self, 'lblProgressPercent')
self.lblProgressPercentSym = XRCCTRL(self, 'lblProgressPercentSym')
self.tcPatients = XRCCTRL(self, 'tcPatients')
self.bmpRxDose = XRCCTRL(self, 'bmpRxDose')
self.lblRxDose = XRCCTRL(self, 'lblRxDose')
self.txtRxDose = XRCCTRL(self, 'txtRxDose')
self.lblRxDoseUnits = XRCCTRL(self, 'lblRxDoseUnits')
# Bind interface events to the proper methods
self.Bind(wx.EVT_BUTTON, self.OnBrowseDicomImport, id=XRCID('btnDicomImport'))
self.Bind(wx.EVT_CHECKBOX, self.OnCheckSearchSubfolders, id=XRCID('checkSearchSubfolders'))
self.Bind(wx.EVT_TREE_SEL_CHANGED, self.OnSelectTreeItem, id=XRCID('tcPatients'))
#self.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.OnOK, id=XRCID('tcPatients'))
#added by CL.Wang
self.Bind(wx.EVT_CHECKBOX, self.OnCheckVolFlag, id=XRCID('check_volume'))
self.Bind(wx.EVT_SPINCTRL, self.OnSpinSliceNum, id=XRCID('spin_minslices'))
self.Bind(wx.EVT_SPINCTRL, self.OnSpinOffset, id=XRCID('spin_offset'))
self.Bind(wx.EVT_CHECKBOX, self.OnCheckMoriFormat, id=XRCID('check_mori'))
self.Bind(wx.EVT_CHECKBOX, self.OnCheckNiftiFormat, id=XRCID('check_nifti'))
self.Bind(wx.EVT_DIRPICKER_CHANGED, self.OnPickOutdir, id=XRCID('picker_output'))
self.Bind(wx.EVT_TEXT, self.OnInputName, id=XRCID('text_output_name'))
self.Bind(wx.EVT_BUTTON, self.OnConvert, id=XRCID('btn_convert'))
self.Bind(wx.EVT_BUTTON, self.OnPause, id=XRCID('btn_pause'))
self.Bind(wx.EVT_BUTTON, self.OnRescan, id=XRCID('btn_rescan'))
# Init variables
if os.path.isfile('.dcmconverter.conf'):
logger.info('Loading previous configuration...')
with open('.dcmconverter.conf', 'r') as f:
conf = json.load(f)
self.path = conf['path']
self.txtDicomImport.SetValue(self.path)
self.only_export_voldata = conf['only_export_voldata']
                XRCCTRL(self, 'check_volume').SetValue(self.only_export_voldata)
self.min_slice_num = conf['min_slice_num']
XRCCTRL(self, 'spin_minslices').SetValue(self.min_slice_num)
self.offset = conf['offset']
XRCCTRL(self, 'spin_offset').SetValue(self.offset)
self.export_mori_format = conf['export_mori_format']
XRCCTRL(self, 'check_mori').SetValue(self.export_mori_format)
self.export_nii_format = conf['export_nii_format']
XRCCTRL(self, 'check_nifti').SetValue(self.export_nii_format)
self.output_dir = conf['output_dir']
XRCCTRL(self, 'picker_output').SetPath(self.output_dir)
self.output_name = conf['output_name']
XRCCTRL(self, 'text_output_name').SetValue(self.output_name)
else:
self.path = os.path.expanduser('~')
self.only_export_voldata = XRCCTRL(self, 'check_volume').IsChecked()
self.min_slice_num = int(XRCCTRL(self, 'spin_minslices').GetValue())
self.offset = int(XRCCTRL(self, 'spin_offset').GetValue())
self.export_mori_format = XRCCTRL(self, 'check_mori').IsChecked()
self.export_nii_format = XRCCTRL(self, 'check_nifti').IsChecked()
self.output_dir = ''
self.output_name = ''
# Set the dialog font and bold the font of the directions label
font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
if guiutil.IsMac():
self.txtDicomImport.SetFont(font)
self.btnDicomImport.SetFont(font)
self.checkSearchSubfolders.SetFont(font)
self.lblProgressLabel.SetFont(font)
self.lblProgress.SetFont(font)
self.lblProgressPercent.SetFont(font)
self.lblProgressPercentSym.SetFont(font)
self.tcPatients.SetFont(font)
self.txtRxDose.SetFont(font)
self.lblRxDoseUnits.SetFont(font)
font.SetWeight(wx.FONTWEIGHT_BOLD)
self.lblRxDose.SetFont(font)
# Initialize the patients tree control
self.root = self.InitTree()
# Initialize the patients dictionary
self.patients = {}
# Search subfolders by default
self.import_search_subfolders = True
# Set the threading termination status to false intially
self.terminate = False
# Hide the progress bar until it needs to be shown
self.gaugeProgress.Show(False)
self.lblProgressPercent.Show(False)
self.lblProgressPercentSym.Show(False)
# Start the directory search as soon as the panel loads
#self.OnDirectorySearch()
def OnRescan(self, evt):
self.OnDirectorySearch()
def OnPause(self, evt):
self.terminate = True
def OnSpinOffset(self, evt):
self.offset = evt.GetPosition()
def OnCheckVolFlag(self, evt):
self.only_export_voldata = evt.IsChecked()
try:
self.Check_Export_Files()
except:
            logger.info('Adjusted parameters before the tree was generated.')
def OnSpinSliceNum(self, evt):
self.min_slice_num = evt.GetPosition()
try:
self.Check_Export_Files()
except:
            logger.info('Adjusted parameters before the tree was generated.')
def OnCheckMoriFormat(self, evt):
self.export_mori_format = evt.IsChecked()
def OnCheckNiftiFormat(self, evt):
self.export_nii_format = evt.IsChecked()
def OnPickOutdir(self, evt):
self.output_dir = evt.GetPath()
def OnInputName(self, evt):
self.output_name = evt.GetString()
def AlertDialog(self, msg):
dialog = wx.MessageDialog(self, msg, 'Error', style=wx.OK)
dialog.ShowModal()
dialog.Destroy()
def ChoiceDialog(self, msg):
dialog = wx.MessageDialog(self, msg, 'Warning', style=wx.OK_DEFAULT|wx.CANCEL)
        self.continue_export = dialog.ShowModal()
dialog.Destroy()
def __GetNiftiAffineMatrix__(self, dp):
di = float(dp.ds.PixelSpacing[0])
dj = float(dp.ds.PixelSpacing[1])
orientation = dp.ds.ImageOrientationPatient
dk = float(dp.ds.SliceThickness)
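        # The first two columns of this affine are the in-plane direction cosines
        # from ImageOrientationPatient scaled by PixelSpacing; the third column uses
        # SliceThickness along the slice axis only. The translation column is left
        # at zero, i.e. ImagePositionPatient is not folded into the affine here.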
m = np.array(
[[float(orientation[0])*di, float(orientation[3])*dj, 0, 0],
[float(orientation[1])*di, float(orientation[4])*dj, 0, 0],
[float(orientation[2])*di, float(orientation[5])*dj, dk, 0],
             [0, 0, 0, 1]], dtype=float)
return m
def ExportFunc(self, out_basepath, patient_data, progressFunc=None):
if patient_data is None:
return
# Existence check
if self.export_mori_format:
out_dir = os.path.join(os.path.dirname(out_basepath), 'LabFormat')
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
mori_fname = os.path.join(out_dir, os.path.basename(out_basepath))
if os.path.isfile(mori_fname+'.raw.gz'):
                self.ChoiceDialog('File already exists! Continue?')
                if self.continue_export != wx.ID_OK:
return
if self.export_nii_format:
out_dir = os.path.join(os.path.dirname(out_basepath), 'NiftiFormat')
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
nii_fname = os.path.join(out_dir, os.path.basename(out_basepath)+'.nii.gz')
if os.path.isfile(nii_fname):
                self.ChoiceDialog('File already exists! Continue?')
                if self.continue_export != wx.ID_OK:
return
dp = dicomparser.DicomParser(patient_data['images'][0])
reso = [ float(dp.ds.PixelSpacing[0]), float(dp.ds.PixelSpacing[1]), float(dp.ds.SliceThickness)]
affine = self.__GetNiftiAffineMatrix__(dp)
conv_kernel, hospital, kvp, model_name = dp.ds.ConvolutionKernel, dp.ds.InstitutionName, dp.ds.KVP, dp.ds.ManufacturerModelName
img_ori, pat_ori, pat_pos = np.array(dp.ds.ImageOrientationPatient), dp.ds.PatientOrientation, dp.ds.PatientPosition
        study_date, series_date, acq_date = dp.ds.StudyDate, dp.ds.SeriesDate, dp.ds.AcquisitionDate
if (dp.ds.SamplesPerPixel > 1) or (dp.ds.PhotometricInterpretation == 'RGB'):
logger.info('Cannot handle color image!')
return
if dp.ds.BitsAllocated == 16:
image_array = np.zeros([dp.ds.Rows, dp.ds.Columns, len(patient_data['images'])]).astype(np.int16)
elif dp.ds.BitsAllocated == 32:
image_array = np.zeros([dp.ds.Rows, dp.ds.Columns, len(patient_data['images'])]).astype(np.int32)
elif dp.ds.BitsAllocated == 8:
image_array = np.zeros([dp.ds.Rows, dp.ds.Columns, len(patient_data['images'])]).astype(np.int8)
else:
image_array = np.zeros([dp.ds.Rows, dp.ds.Columns, len(patient_data['images'])])
pos = []
for i, img in enumerate(patient_data['images']):
dp = dicomparser.DicomParser(img)
intercept, slope = dp.GetRescaleInterceptSlope()
pos.append(dp.ds.ImagePositionPatient[2])
pixel_array = dp.ds.pixel_array
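            # DICOM stores raw pixel values; RescaleSlope/RescaleIntercept map them to
            # output units (typically HU for CT), and self.offset adds the extra
            # user-configured shift from the GUI.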
rescaled_image = pixel_array * slope + intercept + self.offset
image_array[:,:,i] = rescaled_image
wx.CallAfter(progressFunc, (i+image_array.shape[-1])//2, image_array.shape[-1]+1, 'Creating image array...')
image_array = np.transpose(image_array, (1,0,2))
if self.export_mori_format:
from utils_cw import write_mori, get_mori_header_fields
logger.info('Exporting image to %s', mori_fname)
header_name = write_mori(image_array, reso, mori_fname, True)
with open(header_name, 'r') as f:
origin_header_lines = f.read().splitlines()
with open(header_name, 'w') as f:
for field in origin_header_lines: # \r\n
if 'Thickness' in field:
f.write('{} {:.6f}\r'.format(field,reso[2]))
elif 'ImagePositionBegin' in field:
f.write('{} {:.6f}\r'.format(field,np.min(pos)))
elif 'ImagePositionEnd' in field:
f.write('{} {:.6f}\r'.format(field,np.max(pos)))
elif 'Hospital' in field:
f.write('{} {}\r'.format(field,hospital))
elif 'KVP' in field:
f.write('{} {}\r'.format(field,kvp))
elif 'KernelFunction' in field:
f.write('{} {}\r'.format(field,conv_kernel))
elif 'ModelName' in field:
f.write('{} {}\r'.format(field,model_name))
elif 'PatientPosition' in field:
f.write('{} {}\r'.format(field,pat_pos))
elif 'PatientOrientation' in field:
f.write('{} {}\r'.format(field,pat_ori))
elif 'ImageOrientation' in field:
f.write('{} {}\r'.format(field,img_ori.tolist()))
elif 'StudyDate' in field:
f.write('{} {}\r'.format(field,study_date))
elif 'SeriesDate' in field:
                        f.write('{} {}\r'.format(field,series_date))
elif 'AcquisitionDate' in field:
f.write('{} {}\r'.format(field,acq_date))
elif 'Orientation' in field:
f.write('{} {}\r'.format(field,'LPF'))
elif '' == field:
pass
else:
f.write('{} \r'.format(field))
wx.CallAfter(progressFunc, 97, 100, 'Export RAW image completed')
if self.export_nii_format:
import nibabel as nib
logger.info('Exporting image to %s', nii_fname)
nib.save(nib.Nifti1Image(image_array, affine=affine), nii_fname)
wx.CallAfter(progressFunc, 98, 100, 'Export Nifti image completed')
def OnConvert(self, evt):
if not self.selected_exports:
            self.AlertDialog('No DICOM series have been selected!')
return
if not self.output_dir:
self.AlertDialog('Please enter valid output dir!')
return
if not self.output_name:
self.AlertDialog('Please enter valid output file name!')
return
if not os.path.isdir(self.output_dir):
logger.info("Output dir not exists! Create new dir [%s]", self.output_dir)
os.makedirs(self.output_dir)
all_export_threads = []
for export in self.selected_exports:
info = self.tcPatients.GetItemData(export)
filearray, series_no = info['filearray'], info['info']['series_number']
basename = os.path.join(self.output_dir, self.output_name+'-'+str(series_no)+'.512')
all_export_threads.append(threading.Thread(target=self.ExportPatientData,
args=(self.path, filearray, self.txtRxDose.GetValue(),
self.SetThreadStatus, self.OnUpdateProgress,
functools.partial(self.ExportFunc, out_basepath=basename))))
[th.start() for th in all_export_threads]
#[th.join() for th in all_export_threads] # wait all threads
#self.AlertDialog('All exports finished!')
def OnCheckSearchSubfolders(self, evt):
"""Determine whether to search subfolders for DICOM data."""
self.import_search_subfolders = evt.IsChecked()
self.terminate = True
self.OnDirectorySearch()
def OnBrowseDicomImport(self, evt):
"""Get the directory selected by the user."""
self.terminate = True
dlg = wx.DirDialog(
self, defaultPath = self.path,
message="Choose a directory containing DICOM RT files...")
if dlg.ShowModal() == wx.ID_OK:
self.path = dlg.GetPath()
self.txtDicomImport.SetValue(self.path)
dlg.Destroy()
#self.OnDirectorySearch()
def OnDirectorySearch(self):
"""Begin directory search."""
self.patients = {}
self.tcPatients.DeleteChildren(self.root)
self.terminate = False
self.gaugeProgress.Show(True)
self.lblProgressPercent.Show(True)
self.lblProgressPercentSym.Show(True)
#self.btnSelect.Enable(False)
# Disable Rx dose controls except on GTK due to control placement oddities
if not guiutil.IsGtk():
self.EnableRxDose(False)
# If a previous search thread exists, block until it is done before
# starting a new thread
if (hasattr(self, 't')):
self.t.join()
del self.t
self.t=threading.Thread(target=self.DirectorySearchThread,
args=(self, self.path, self.import_search_subfolders,
self.SetThreadStatus, self.OnUpdateProgress,
self.AddPatientTree, self.AddPatientDataTree))
self.t.start()
def SetThreadStatus(self):
"""Tell the directory search thread whether to terminate or not."""
return self.terminate
def DirectorySearchThread(self, parent, path, subfolders, terminate,
progressFunc, foundFunc, resultFunc):
"""Thread to start the directory search."""
# Call the progress function to update the gui
wx.CallAfter(progressFunc, 0, 0, 'Searching for patients...')
patients = {}
# Check if the path is valid
if os.path.isdir(path):
files = []
for root, dirs, filenames in os.walk(path):
files += map(lambda f:os.path.join(root, f), filenames)
if (self.import_search_subfolders == False):
break
for n in range(len(files)):
# terminate the thread if the value has changed
# during the loop duration
if terminate():
wx.CallAfter(progressFunc, 0, 0, 'Search terminated.')
return
if (os.path.isfile(files[n])):
try:
logger.debug("Reading: %s", files[n])
dp = dicomparser.DicomParser(files[n])
except (AttributeError, EOFError, IOError, KeyError):
logger.info("%s is not a valid DICOM file.", files[n])
else:
patient = dp.GetDemographics()
h = hashlib.sha1(patient['id'].encode('utf-8')).hexdigest()
if not h in patients:
patients[h] = {}
patients[h]['demographics'] = patient
if not 'studies' in patients[h]:
patients[h]['studies'] = {}
patients[h]['series'] = {}
wx.CallAfter(foundFunc, patient)
# Create each Study but don't create one for RT Dose
# since some vendors use incorrect StudyInstanceUIDs
if not (dp.GetSOPClassUID() == 'rtdose'):
stinfo = dp.GetStudyInfo()
if not stinfo['id'] in patients[h]['studies']:
patients[h]['studies'][stinfo['id']] = stinfo
# Create each Series of images
if (('ImageOrientationPatient' in dp.ds) and \
not (dp.GetSOPClassUID() == 'rtdose')):
seinfo = dp.GetSeriesInfo()
try:
seinfo['series_number'] = dp.ds.SeriesNumber #added by CL.Wang
seinfo['KVP'] = dp.ds.KVP
seinfo['PatientPosition'] = dp.ds.PatientPosition
seinfo['ModelName'] = dp.ds.ManufacturerModelName
seinfo['PixelSpacing'] = dp.ds.PixelSpacing
seinfo['Orientation'] = dp.ds.ImageOrientationPatient
except:
logger.error('Get dcm info error!')
seinfo['numimages'] = 0
seinfo['modality'] = dp.ds.SOPClassUID.name
if not seinfo['id'] in patients[h]['series']:
patients[h]['series'][seinfo['id']] = seinfo
if not 'images' in patients[h]:
patients[h]['images'] = {}
image = {}
image['id'] = dp.GetSOPInstanceUID()
image['filename'] = files[n]
image['series'] = seinfo['id']
image['referenceframe'] = dp.GetFrameOfReferenceUID()
patients[h]['series'][seinfo['id']]['numimages'] = \
patients[h]['series'][seinfo['id']]['numimages'] + 1
patients[h]['images'][image['id']] = image
# Create each RT Structure Set
elif dp.ds.Modality in ['RTSTRUCT']:
if not 'structures' in patients[h]:
patients[h]['structures'] = {}
structure = dp.GetStructureInfo()
structure['id'] = dp.GetSOPInstanceUID()
structure['filename'] = files[n]
structure['series'] = dp.GetReferencedSeries()
structure['referenceframe'] = dp.GetFrameOfReferenceUID()
patients[h]['structures'][structure['id']] = structure
# Create each RT Plan
elif dp.ds.Modality in ['RTPLAN']:
if not 'plans' in patients[h]:
patients[h]['plans'] = {}
plan = dp.GetPlan()
plan['id'] = dp.GetSOPInstanceUID()
plan['filename'] = files[n]
plan['series'] = dp.ds.SeriesInstanceUID
plan['referenceframe'] = dp.GetFrameOfReferenceUID()
plan['beams'] = dp.GetReferencedBeamsInFraction()
plan['rtss'] = dp.GetReferencedStructureSet()
patients[h]['plans'][plan['id']] = plan
# Create each RT Dose
elif dp.ds.Modality in ['RTDOSE']:
if not 'doses' in patients[h]:
patients[h]['doses'] = {}
dose = {}
dose['id'] = dp.GetSOPInstanceUID()
dose['filename'] = files[n]
dose['referenceframe'] = dp.GetFrameOfReferenceUID()
dose['hasdvh'] = dp.HasDVHs()
dose['hasgrid'] = "PixelData" in dp.ds
dose['summationtype'] = dp.ds.DoseSummationType
dose['beam'] = dp.GetReferencedBeamNumber()
dose['rtss'] = dp.GetReferencedStructureSet()
dose['rtplan'] = dp.GetReferencedRTPlan()
patients[h]['doses'][dose['id']] = dose
# Otherwise it is a currently unsupported file
else:
logger.info("%s is a %s file and is not " + \
"currently supported.",
files[n], dp.ds.SOPClassUID.name)
# Call the progress function to update the gui
wx.CallAfter(progressFunc, n, len(files), 'Searching for patients...')
if (len(patients) == 0):
progressStr = 'Found 0 patients.'
elif (len(patients) == 1):
progressStr = 'Found 1 patient. Reading DICOM data...'
elif (len(patients) > 1):
progressStr = 'Found ' + str(len(patients)) + ' patients. Reading DICOM data...'
wx.CallAfter(progressFunc, 0, 1, progressStr)
wx.CallAfter(resultFunc, patients)
# if the path is not valid, display an error message
else:
wx.CallAfter(progressFunc, 0, 0, 'Select a valid location.')
dlg = wx.MessageDialog(
parent,
"The DICOM import location does not exist. Please select a valid location.",
"Invalid DICOM Import Location", wx.OK|wx.ICON_ERROR)
dlg.ShowModal()
def OnUpdateProgress(self, num, length, message):
"""Update the DICOM Import process interface elements."""
if not length:
percentDone = 0
else:
percentDone = int(100 * (num+1) / length)
self.gaugeProgress.SetValue(percentDone)
self.lblProgressPercent.SetLabel(str(percentDone))
self.lblProgress.SetLabel(message)
if not (percentDone == 100):
self.gaugeProgress.Show(True)
self.lblProgressPercent.Show(True)
self.lblProgressPercentSym.Show(True)
else:
self.gaugeProgress.Show(False)
self.lblProgressPercent.Show(False)
self.lblProgressPercentSym.Show(False)
# End the dialog since we are done with the import process
if (message == 'Importing patient complete.'):
self.EndModal(wx.ID_OK)
elif (message == 'Importing patient cancelled.'):
self.EndModal(wx.ID_CANCEL)
def InitTree(self):
"""Initialize the tree control for use."""
iSize = (16,16)
iList = wx.ImageList(iSize[0], iSize[1])
iList.Add(
wx.Bitmap(
util.GetResourcePath('group.png'),
wx.BITMAP_TYPE_PNG))
iList.Add(
wx.Bitmap(
util.GetResourcePath('user.png'),
wx.BITMAP_TYPE_PNG))
iList.Add(
wx.Bitmap(
util.GetResourcePath('book.png'),
wx.BITMAP_TYPE_PNG))
iList.Add(
wx.Bitmap(
util.GetResourcePath('table_multiple.png'),
wx.BITMAP_TYPE_PNG))
iList.Add(
wx.Bitmap(
util.GetResourcePath('pencil.png'),
wx.BITMAP_TYPE_PNG))
iList.Add(
wx.Bitmap(
util.GetResourcePath('chart_bar.png'),
wx.BITMAP_TYPE_PNG))
iList.Add(
wx.Bitmap(
util.GetResourcePath('chart_curve.png'),
wx.BITMAP_TYPE_PNG))
iList.Add(
wx.Bitmap(
util.GetResourcePath('pencil_error.png'),
wx.BITMAP_TYPE_PNG))
iList.Add(
wx.Bitmap(
util.GetResourcePath('chart_bar_error.png'),
wx.BITMAP_TYPE_PNG))
iList.Add(
wx.Bitmap(
util.GetResourcePath('chart_curve_error.png'),
wx.BITMAP_TYPE_PNG))
iList.Add(
wx.Bitmap(
util.GetResourcePath('table_selected.png'),
wx.BITMAP_TYPE_PNG))
self.tcPatients.AssignImageList(iList)
root = self.tcPatients.AddRoot('Patients', image=0)
return root
def AddPatientTree(self, patient):
"""Add a new patient to the tree control."""
# Create a hash for each patient
h = hashlib.sha1(patient['id'].encode('utf-8')).hexdigest()
# Add the patient to the tree if they don't already exist
if not h in self.patients:
self.patients[h] = {}
self.patients[h]['demographics'] = patient
name = str(patient['name']) + ' (' + patient['id'] + ')'
self.patients[h]['treeid'] = \
self.tcPatients.AppendItem(self.root, name, 1)
self.tcPatients.SortChildren(self.root)
self.tcPatients.ExpandAll()
def AddPatientDataTree(self, patients):
"""Add the patient data to the tree control."""
# Now add the specific item to the tree
for key, patient in self.patients.items():
patient.update(patients[key])
if 'studies' in patient:
for studyid, study in patient['studies'].items():
name = 'Study: ' + study['description']
study['treeid'] = self.tcPatients.AppendItem(patient['treeid'], name, 2)
# Search for series and images
if 'series' in patient:
for seriesid, series in patient['series'].items():
if 'studies' in patient:
for studyid, study in patient['studies'].items():
if (studyid == series['study']):
modality = series['modality'].partition(' Image Storage')[0]
name = 'Series {}: {}. ({}, {} {})'.format(series['series_number'], series['description'], modality, series['numimages'], 'image' if series['numimages']==1 else 'images')
#name = 'Series: ' + series['description'] + ' (' + modality + ', '
#numimages = str(series['numimages']) + ' image)' if (series['numimages'] == 1) else str(series['numimages']) + ' images)'
#name = name + numimages
series['treeid'] = self.tcPatients.AppendItem(study['treeid'], name, 3)
self.EnableItemSelection(patient, series, [])
# Search for RT Structure Sets
if 'structures' in patient:
for structureid, structure in patient['structures'].items():
if 'series' in patient:
foundseries = False
name = 'RT Structure Set: ' + structure['label']
for seriesid, series in patient['series'].items():
foundseries = False
if (seriesid == structure['series']):
structure['treeid'] = self.tcPatients.AppendItem(series['treeid'], name, 4)
foundseries = True
# If no series were found, add the rtss to the study
if not foundseries:
structure['treeid'] = self.tcPatients.AppendItem(study['treeid'], name, 4)
filearray = [structure['filename']]
self.EnableItemSelection(patient, structure, filearray)
# Search for RT Plans
if 'plans' in patient:
for planid, plan in patient['plans'].items():
foundstructure = False
planname = ' (' + plan['name'] + ')' if len(plan['name']) else ""
rxdose = plan['rxdose'] if plan['rxdose'] > 0 else "Unknown"
name = 'RT Plan: ' + plan['label'] + planname + \
' - Dose: ' + str(rxdose) + ' cGy'
if 'structures' in patient:
for structureid, structure in patient['structures'].items():
foundstructure = False
if (structureid == plan['rtss']):
plan['treeid'] = self.tcPatients.AppendItem(structure['treeid'], name, 5)
foundstructure = True
# If no structures were found, add the plan to the study/series instead
if not foundstructure:
# If there is an image series, add a fake rtss to it
foundseries = False
for seriesid, series in patient['series'].items():
foundseries = False
if (series['referenceframe'] == plan['referenceframe']):
badstructure = self.tcPatients.AppendItem(
series['treeid'], "RT Structure Set not found", 7)
foundseries = True
# If no series were found, add the rtss to the study
if not foundseries:
badstructure = self.tcPatients.AppendItem(
patient['treeid'], "RT Structure Set not found", 7)
plan['treeid'] = self.tcPatients.AppendItem(badstructure, name, 5)
self.tcPatients.SetItemTextColour(badstructure, wx.RED)
filearray = [plan['filename']]
self.EnableItemSelection(patient, plan, filearray, plan['rxdose'])
# Search for RT Doses
if 'doses' in patient:
for doseid, dose in patient['doses'].items():
foundplan = False
if 'plans' in patient:
for planid, plan in patient['plans'].items():
foundplan = False
if (planid == dose['rtplan']):
foundplan = True
rxdose = None
if dose['hasgrid']:
if dose['hasdvh']:
name = 'RT Dose with DVH'
else:
name = 'RT Dose without DVH'
else:
if dose['hasdvh']:
name = 'RT Dose without Dose Grid (DVH only)'
else:
name = 'RT Dose without Dose Grid or DVH'
if (dose['summationtype'] == "BEAM"):
name += " (Beam " + str(dose['beam']) + ": "
if dose['beam'] in plan['beams']:
b = plan['beams'][dose['beam']]
name += b['name']
if len(b['description']):
name += " - " + b['description']
name += ")"
if "dose" in b:
name += " - Dose: " + str(int(b['dose'])) + " cGy"
rxdose = int(b['dose'])
dose['treeid'] = self.tcPatients.AppendItem(plan['treeid'], name, 6)
filearray = [dose['filename']]
self.EnableItemSelection(patient, dose, filearray, rxdose)
# If no plans were found, add the dose to the structure/study instead
if not foundplan:
if dose['hasgrid']:
if dose['hasdvh']:
name = 'RT Dose with DVH'
else:
name = 'RT Dose without DVH'
else:
if dose['hasdvh']:
name = 'RT Dose without Dose Grid (DVH only)'
else:
name = 'RT Dose without Dose Grid or DVH'
foundstructure = False
if 'structures' in patient:
for structureid, structure in patient['structures'].items():
foundstructure = False
if 'rtss' in dose:
if (structureid == dose['rtss']):
foundstructure = True
if (structure['referenceframe'] == dose['referenceframe']):
foundstructure = True
if foundstructure:
badplan = self.tcPatients.AppendItem(
structure['treeid'], "RT Plan not found", 8)
dose['treeid'] = self.tcPatients.AppendItem(badplan, name, 6)
self.tcPatients.SetItemTextColour(badplan, wx.RED)
filearray = [dose['filename']]
self.EnableItemSelection(patient, dose, filearray)
if not foundstructure:
# If there is an image series, add a fake rtss to it
foundseries = False
for seriesid, series in patient['series'].items():
foundseries = False
if (series['referenceframe'] == dose['referenceframe']):
badstructure = self.tcPatients.AppendItem(
series['treeid'], "RT Structure Set not found", 7)
foundseries = True
# If no series were found, add the rtss to the study
if not foundseries:
badstructure = self.tcPatients.AppendItem(
patient['treeid'], "RT Structure Set not found", 7)
self.tcPatients.SetItemTextColour(badstructure, wx.RED)
badplan = self.tcPatients.AppendItem(
badstructure, "RT Plan not found", 8)
dose['treeid'] = self.tcPatients.AppendItem(badplan, name, 5)
self.tcPatients.SetItemTextColour(badplan, wx.RED)
filearray = [dose['filename']]
self.EnableItemSelection(patient, dose, filearray)
# No RT Dose files were found
else:
if 'structures' in patient:
for structureid, structure in patient['structures'].items():
if 'plans' in patient:
for planid, plan in patient['plans'].items():
name = 'RT Dose not found'
baddose = self.tcPatients.AppendItem(plan['treeid'], name, 9)
self.tcPatients.SetItemTextColour(baddose, wx.RED)
# No RT Plan nor RT Dose files were found
else:
name = 'RT Plan not found'
badplan = self.tcPatients.AppendItem(structure['treeid'], name, 8)
self.tcPatients.SetItemTextColour(badplan, wx.RED)
name = 'RT Dose not found'
baddose = self.tcPatients.AppendItem(badplan, name, 9)
self.tcPatients.SetItemTextColour(baddose, wx.RED)
#self.btnSelect.SetFocus()
self.tcPatients.ExpandAll()
self.lblProgress.SetLabel(
str(self.lblProgress.GetLabel()).replace(' Reading DICOM data...', ''))
#Added by CL.Wang
self.Check_Export_Files()
def Check_Export_Files(self):
def select(child, flag):
if flag:
self.tcPatients.SetItemImage(child, 10)
self.selected_exports.append(child)
else:
self.tcPatients.SetItemImage(child, 3)
def minslice_check(child):
info = self.tcPatients.GetItemData(child)['info']
return int(info['numimages'])>self.min_slice_num
self.selected_exports = []
first_patient = self.tcPatients.GetFirstChild(self.tcPatients.RootItem)[0]
first_study = self.tcPatients.GetFirstChild(first_patient)[0]
child, cookie = self.tcPatients.GetFirstChild(first_study)
while child.IsOk():
if self.only_export_voldata:
title = self.tcPatients.GetItemText(child)
flag = 'vol' in title.lower() and minslice_check(child)
select(child, flag)
else:
select(child, minslice_check(child))
child, cookie = self.tcPatients.GetNextChild(child, cookie)
logger.info('%d files selected!', len(self.selected_exports))
def EnableItemSelection(self, patient, item, filearray = [], rxdose = None):
"""Enable an item to be selected in the tree control."""
# Add the respective images to the filearray if they exist
if 'images' in patient:
for imageid, image in patient['images'].items():
appendImage = False
# used for image series
if 'id' in item:
if (item['id'] == image['series']):
appendImage = True
# used for RT structure set
if 'series' in item:
if (item['series'] == image['series']):
appendImage = True
# used for RT plan / dose
if 'referenceframe' in item:
if (item['referenceframe'] == image['referenceframe']):
if not 'numimages' in item:
appendImage = True
if appendImage:
filearray.append(image['filename'])
# Add the respective rtss files to the filearray if they exist
if 'structures' in patient:
for structureid, structure in patient['structures'].items():
if 'rtss' in item:
if (structureid == item['rtss']):
filearray.append(structure['filename'])
break
elif (structure['referenceframe'] == item['referenceframe']):
filearray.append(structure['filename'])
break
# If no referenced rtss, but ref'd rtplan, check rtplan->rtss
if 'rtplan' in item:
if 'plans' in patient:
for planid, plan in patient['plans'].items():
if (planid == item['rtplan']):
if 'rtss' in plan:
if (structureid == plan['rtss']):
filearray.append(structure['filename'])
# Add the respective rtplan files to the filearray if they exist
if 'plans' in patient:
for planid, plan in patient['plans'].items():
if 'rtplan' in item:
if (planid == item['rtplan']):
filearray.append(plan['filename'])
if not rxdose:
self.tcPatients.SetItemData(item['treeid'], {'filearray':filearray, 'info':item})
else:
self.tcPatients.SetItemData(item['treeid'], {'filearray':filearray, 'info':item, 'rxdose':rxdose})
self.tcPatients.SetItemBold(item['treeid'], True)
self.tcPatients.SelectItem(item['treeid'])
def OnSelectTreeItem(self, evt):
"""Update the interface when the selected item has changed."""
item = evt.GetItem()
# Disable the rx dose message and select button by default
self.EnableRxDose(False)
#self.btnSelect.Enable(False)
# If the item has data, check to see whether there is an rxdose
if not (self.tcPatients.GetItemData(item) == None):
data = self.tcPatients.GetItemData(item)
#self.btnSelect.Enable()
rxdose = 0
parent = self.tcPatients.GetItemParent(item)
if 'rxdose' in data:
rxdose = data['rxdose']
else:
parentdata = self.tcPatients.GetItemData(parent)
if not (parentdata == None):
if 'rxdose' in parentdata:
rxdose = parentdata['rxdose']
# Show the rxdose text box if no rxdose was found
# and if it is an RT plan or RT dose file
self.txtRxDose.SetValue(rxdose)
if (self.tcPatients.GetItemText(item).startswith('RT Plan') or
self.tcPatients.GetItemText(parent).startswith('RT Plan')):
self.EnableRxDose(True)
def EnableRxDose(self, value):
"""Show or hide the prescription dose message."""
self.bmpRxDose.Show(value)
self.lblRxDose.Show(value)
self.txtRxDose.Show(value)
self.lblRxDoseUnits.Show(value)
# if set to hide, reset the rx dose
if not value:
self.txtRxDose.SetValue(1)
def ExportPatientData(self, path, filearray, RxDose, terminate, progressFunc, exportFunc):
"""Get the data of the selected patient from the DICOM importer dialog."""
msgs = ['Scanning patient. Please wait...','Exporting patient cancelled.','Exporting patient...']
wx.CallAfter(progressFunc, -1, 100, msgs[0])
for n in range(0, len(filearray)):
if terminate():
wx.CallAfter(progressFunc, 98, 100, msgs[1])
return
dcmfile = str(os.path.join(self.path, filearray[n]))
dp = dicomparser.DicomParser(dcmfile)
if (n == 0):
patient = {}
patient['rxdose'] = RxDose
if (('ImageOrientationPatient' in dp.ds) and \
not (dp.GetSOPClassUID() == 'rtdose')):
if not 'images' in patient:
patient['images'] = []
patient['images'].append(dp.ds)
elif (dp.ds.Modality in ['RTSTRUCT']):
patient['rtss'] = dp.ds
elif (dp.ds.Modality in ['RTPLAN']):
patient['rtplan'] = dp.ds
elif (dp.ds.Modality in ['RTDOSE']):
patient['rtdose'] = dp.ds
wx.CallAfter(progressFunc, n//2, len(filearray), msgs[0])
# Sort the images based on a sort descriptor:
# (ImagePositionPatient, InstanceNumber or AcquisitionNumber)
if 'images' in patient:
sortedimages = []
unsortednums = []
sortednums = []
images = patient['images']
sort = 'IPP'
# Determine if all images in the series are parallel
# by testing for differences in ImageOrientationPatient
parallel = True
for i, item in enumerate(images):
if (i > 0):
iop0 = np.array(item.ImageOrientationPatient)
iop1 = np.array(images[i-1].ImageOrientationPatient)
if (np.any(np.array(np.round(iop0 - iop1), dtype=np.int32))):
parallel = False
break
# Also test ImagePositionPatient, as some series
# use the same patient position for every slice
ipp0 = np.array(item.ImagePositionPatient)
ipp1 = np.array(images[i-1].ImagePositionPatient)
if not (np.any(np.array(np.round(ipp0 - ipp1), dtype=np.int32))):
parallel = False
break
# If the images are parallel, sort by ImagePositionPatient
if parallel:
sort = 'IPP'
else:
# Otherwise sort by Instance Number
if not (images[0].InstanceNumber == \
images[1].InstanceNumber):
sort = 'InstanceNumber'
# Otherwise sort by Acquisition Number
elif not (images[0].AcquisitionNumber == \
images[1].AcquisitionNumber):
sort = 'AcquisitionNumber'
# Add the sort descriptor to a list to be sorted
for i, image in enumerate(images):
if (sort == 'IPP'):
unsortednums.append(image.ImagePositionPatient[2])
else:
unsortednums.append(image.data_element(sort).value)
# Sort in LPI order! Modified by CL.Wang
# Sort image numbers in descending order for head first patients
if ('hf' in image.PatientPosition.lower()) and (sort == 'IPP'):
sortednums = sorted(unsortednums, reverse=True)
# Otherwise sort image numbers in ascending order
else:
sortednums = sorted(unsortednums, reverse=False)
# Add the images to the array based on the sorted order
for s, slice in enumerate(sortednums):
for i, image in enumerate(images):
if (sort == 'IPP'):
if (slice == image.ImagePositionPatient[2]):
sortedimages.append(image)
elif (slice == image.data_element(sort).value):
sortedimages.append(image)
# Save the images back to the patient dictionary
logger.debug('Slices num: %d', len(sortedimages))
patient['images'] = sortedimages
wx.CallAfter(progressFunc, 49, 100, msgs[2])
if exportFunc:
exportFunc(patient_data=patient, progressFunc=progressFunc)
wx.CallAfter(progressFunc, 99, 100, '')
def GetPatient(self):
"""Return the patient data from the DICOM importer dialog."""
return self.patient
def OnCancel(self, evt):
"""Stop the directory search and close the dialog."""
self.terminate = True
super().OnCancel(evt)
def main():
app = DcmConverterApp(0)
app.MainLoop()
if __name__ == '__main__':
main()
|
[
"wx.Dialog.__init__",
"os.walk",
"wx.CallAfter",
"os.path.isfile",
"os.path.join",
"numpy.round",
"pyDcmConverter.guiutil.get_icon",
"wx.SystemSettings.GetFont",
"os.path.dirname",
"numpy.transpose",
"numpy.max",
"wx.DirDialog",
"wx.GetApp",
"nibabel.Nifti1Image",
"threading.Thread",
"pyDcmConverter.util.GetResourcePath",
"json.dump",
"functools.partial",
"os.path.basename",
"wx.ImageList",
"numpy.min",
"dicompylercore.dicomparser.DicomParser",
"os.sys.exit",
"pyDcmConverter.guiutil.IsGtk",
"json.load",
"os.makedirs",
"warnings.filterwarnings",
"os.path.isdir",
"wx.MessageDialog",
"pyDcmConverter.guiutil.IsMac",
"utils_cw.write_mori",
"numpy.array",
"os.path.expanduser",
"logging.getLogger"
] |
[((592, 617), 'logging.getLogger', 'getLogger', (['"""DcmConverter"""'], {}), "('DcmConverter')\n", (601, 617), False, 'from logging import getLogger, DEBUG, INFO\n'), ((628, 697), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'wx.wxPyDeprecationWarning'}), "('ignore', category=wx.wxPyDeprecationWarning)\n", (651, 697), False, 'import hashlib, os, threading, functools, json, warnings\n'), ((2258, 2272), 'os.sys.exit', 'os.sys.exit', (['(0)'], {}), '(0)\n', (2269, 2272), False, 'import hashlib, os, threading, functools, json, warnings\n'), ((2427, 2451), 'wx.Dialog.__init__', 'wx.Dialog.__init__', (['self'], {}), '(self)\n', (2445, 2451), False, 'import wx\n'), ((4806, 4842), 'os.path.isfile', 'os.path.isfile', (['""".dcmconverter.conf"""'], {}), "('.dcmconverter.conf')\n", (4820, 4842), False, 'import hashlib, os, threading, functools, json, warnings\n'), ((6646, 6696), 'wx.SystemSettings.GetFont', 'wx.SystemSettings.GetFont', (['wx.SYS_DEFAULT_GUI_FONT'], {}), '(wx.SYS_DEFAULT_GUI_FONT)\n', (6671, 6696), False, 'import wx\n'), ((6708, 6723), 'pyDcmConverter.guiutil.IsMac', 'guiutil.IsMac', ([], {}), '()\n', (6721, 6723), False, 'from pyDcmConverter import guiutil, util\n'), ((8925, 8974), 'wx.MessageDialog', 'wx.MessageDialog', (['self', 'msg', '"""Error"""'], {'style': 'wx.OK'}), "(self, msg, 'Error', style=wx.OK)\n", (8941, 8974), False, 'import wx\n'), ((9082, 9153), 'wx.MessageDialog', 'wx.MessageDialog', (['self', 'msg', '"""Warning"""'], {'style': '(wx.OK_DEFAULT | wx.CANCEL)'}), "(self, msg, 'Warning', style=wx.OK_DEFAULT | wx.CANCEL)\n", (9098, 9153), False, 'import wx\n'), ((10857, 10907), 'dicompylercore.dicomparser.DicomParser', 'dicomparser.DicomParser', (["patient_data['images'][0]"], {}), "(patient_data['images'][0])\n", (10880, 10907), False, 'from dicompylercore import dicomparser\n'), ((12686, 12722), 'numpy.transpose', 'np.transpose', (['image_array', '(1, 0, 2)'], {}), '(image_array, (1, 0, 2))\n', (12698, 12722), True, 'import numpy as np\n'), ((17178, 17283), 'wx.DirDialog', 'wx.DirDialog', (['self'], {'defaultPath': 'self.path', 'message': '"""Choose a directory containing DICOM RT files..."""'}), "(self, defaultPath=self.path, message=\n 'Choose a directory containing DICOM RT files...')\n", (17190, 17283), False, 'import wx\n'), ((18200, 18406), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.DirectorySearchThread', 'args': '(self, self.path, self.import_search_subfolders, self.SetThreadStatus, self\n .OnUpdateProgress, self.AddPatientTree, self.AddPatientDataTree)'}), '(target=self.DirectorySearchThread, args=(self, self.path,\n self.import_search_subfolders, self.SetThreadStatus, self.\n OnUpdateProgress, self.AddPatientTree, self.AddPatientDataTree))\n', (18216, 18406), False, 'import hashlib, os, threading, functools, json, warnings\n'), ((18832, 18893), 'wx.CallAfter', 'wx.CallAfter', (['progressFunc', '(0)', '(0)', '"""Searching for patients..."""'], {}), "(progressFunc, 0, 0, 'Searching for patients...')\n", (18844, 18893), False, 'import wx\n'), ((18966, 18985), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (18979, 18985), False, 'import hashlib, os, threading, functools, json, warnings\n'), ((27449, 27481), 'wx.ImageList', 'wx.ImageList', (['iSize[0]', 'iSize[1]'], {}), '(iSize[0], iSize[1])\n', (27461, 27481), False, 'import wx\n'), ((46981, 47025), 'wx.CallAfter', 'wx.CallAfter', (['progressFunc', '(-1)', '(100)', 'msgs[0]'], {}), '(progressFunc, -1, 100, msgs[0])\n', (46993, 47025), 
False, 'import wx\n'), ((51148, 51192), 'wx.CallAfter', 'wx.CallAfter', (['progressFunc', '(49)', '(100)', 'msgs[2]'], {}), '(progressFunc, 49, 100, msgs[2])\n', (51160, 51192), False, 'import wx\n'), ((51297, 51336), 'wx.CallAfter', 'wx.CallAfter', (['progressFunc', '(99)', '(100)', '""""""'], {}), "(progressFunc, 99, 100, '')\n", (51309, 51336), False, 'import wx\n'), ((1083, 1119), 'pyDcmConverter.util.GetResourcePath', 'util.GetResourcePath', (['"""dicomgui.xrc"""'], {}), "('dicomgui.xrc')\n", (1103, 1119), False, 'from pyDcmConverter import guiutil, util\n'), ((1969, 2013), 'json.dump', 'json.dump', (['conf', 'f'], {'indent': '(2)', 'sort_keys': '(True)'}), '(conf, f, indent=2, sort_keys=True)\n', (1978, 2013), False, 'import hashlib, os, threading, functools, json, warnings\n'), ((2586, 2601), 'pyDcmConverter.guiutil.IsMac', 'guiutil.IsMac', ([], {}), '()\n', (2599, 2601), False, 'from pyDcmConverter import guiutil, util\n'), ((6078, 6101), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (6096, 6101), False, 'import hashlib, os, threading, functools, json, warnings\n'), ((10194, 10232), 'os.path.isfile', 'os.path.isfile', (["(mori_fname + '.raw.gz')"], {}), "(mori_fname + '.raw.gz')\n", (10208, 10232), False, 'import hashlib, os, threading, functools, json, warnings\n'), ((10675, 10700), 'os.path.isfile', 'os.path.isfile', (['nii_fname'], {}), '(nii_fname)\n', (10689, 10700), False, 'import hashlib, os, threading, functools, json, warnings\n'), ((11237, 11276), 'numpy.array', 'np.array', (['dp.ds.ImageOrientationPatient'], {}), '(dp.ds.ImageOrientationPatient)\n', (11245, 11276), True, 'import numpy as np\n'), ((12232, 12260), 'dicompylercore.dicomparser.DicomParser', 'dicomparser.DicomParser', (['img'], {}), '(img)\n', (12255, 12260), False, 'from dicompylercore import dicomparser\n'), ((12555, 12674), 'wx.CallAfter', 'wx.CallAfter', (['progressFunc', '((i + image_array.shape[-1]) // 2)', '(image_array.shape[-1] + 1)', '"""Creating image array..."""'], {}), "(progressFunc, (i + image_array.shape[-1]) // 2, image_array.\n shape[-1] + 1, 'Creating image array...')\n", (12567, 12674), False, 'import wx\n'), ((12914, 12961), 'utils_cw.write_mori', 'write_mori', (['image_array', 'reso', 'mori_fname', '(True)'], {}), '(image_array, reso, mori_fname, True)\n', (12924, 12961), False, 'from utils_cw import write_mori, get_mori_header_fields\n'), ((15001, 15066), 'wx.CallAfter', 'wx.CallAfter', (['progressFunc', '(97)', '(100)', '"""Export RAW image completed"""'], {}), "(progressFunc, 97, 100, 'Export RAW image completed')\n", (15013, 15066), False, 'import wx\n'), ((15299, 15366), 'wx.CallAfter', 'wx.CallAfter', (['progressFunc', '(98)', '(100)', '"""Export Nifti image completed"""'], {}), "(progressFunc, 98, 100, 'Export Nifti image completed')\n", (15311, 15366), False, 'import wx\n'), ((15776, 15806), 'os.path.isdir', 'os.path.isdir', (['self.output_dir'], {}), '(self.output_dir)\n', (15789, 15806), False, 'import hashlib, os, threading, functools, json, warnings\n'), ((15907, 15935), 'os.makedirs', 'os.makedirs', (['self.output_dir'], {}), '(self.output_dir)\n', (15918, 15935), False, 'import hashlib, os, threading, functools, json, warnings\n'), ((17939, 17954), 'pyDcmConverter.guiutil.IsGtk', 'guiutil.IsGtk', ([], {}), '()\n', (17952, 17954), False, 'from pyDcmConverter import guiutil, util\n'), ((19052, 19065), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (19059, 19065), False, 'import hashlib, os, threading, functools, json, warnings\n'), ((25856, 
25901), 'wx.CallAfter', 'wx.CallAfter', (['progressFunc', '(0)', '(1)', 'progressStr'], {}), '(progressFunc, 0, 1, progressStr)\n', (25868, 25901), False, 'import wx\n'), ((25914, 25948), 'wx.CallAfter', 'wx.CallAfter', (['resultFunc', 'patients'], {}), '(resultFunc, patients)\n', (25926, 25948), False, 'import wx\n'), ((26037, 26097), 'wx.CallAfter', 'wx.CallAfter', (['progressFunc', '(0)', '(0)', '"""Select a valid location."""'], {}), "(progressFunc, 0, 0, 'Select a valid location.')\n", (26049, 26097), False, 'import wx\n'), ((26116, 26282), 'wx.MessageDialog', 'wx.MessageDialog', (['parent', '"""The DICOM import location does not exist. Please select a valid location."""', '"""Invalid DICOM Import Location"""', '(wx.OK | wx.ICON_ERROR)'], {}), "(parent,\n 'The DICOM import location does not exist. Please select a valid location.'\n , 'Invalid DICOM Import Location', wx.OK | wx.ICON_ERROR)\n", (26132, 26282), False, 'import wx\n'), ((47263, 47295), 'dicompylercore.dicomparser.DicomParser', 'dicomparser.DicomParser', (['dcmfile'], {}), '(dcmfile)\n', (47286, 47295), False, 'from dicompylercore import dicomparser\n'), ((961, 972), 'wx.GetApp', 'wx.GetApp', ([], {}), '()\n', (970, 972), False, 'import wx\n'), ((2628, 2646), 'pyDcmConverter.guiutil.get_icon', 'guiutil.get_icon', ([], {}), '()\n', (2644, 2646), False, 'from pyDcmConverter import guiutil, util\n'), ((4983, 4995), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4992, 4995), False, 'import hashlib, os, threading, functools, json, warnings\n'), ((9974, 10003), 'os.path.dirname', 'os.path.dirname', (['out_basepath'], {}), '(out_basepath)\n', (9989, 10003), False, 'import hashlib, os, threading, functools, json, warnings\n'), ((10037, 10059), 'os.path.isdir', 'os.path.isdir', (['out_dir'], {}), '(out_dir)\n', (10050, 10059), False, 'import hashlib, os, threading, functools, json, warnings\n'), ((10078, 10098), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (10089, 10098), False, 'import hashlib, os, threading, functools, json, warnings\n'), ((10147, 10177), 'os.path.basename', 'os.path.basename', (['out_basepath'], {}), '(out_basepath)\n', (10163, 10177), False, 'import hashlib, os, threading, functools, json, warnings\n'), ((10444, 10473), 'os.path.dirname', 'os.path.dirname', (['out_basepath'], {}), '(out_basepath)\n', (10459, 10473), False, 'import hashlib, os, threading, functools, json, warnings\n'), ((10509, 10531), 'os.path.isdir', 'os.path.isdir', (['out_dir'], {}), '(out_dir)\n', (10522, 10531), False, 'import hashlib, os, threading, functools, json, warnings\n'), ((10550, 10570), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (10561, 10570), False, 'import hashlib, os, threading, functools, json, warnings\n'), ((15231, 15274), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['image_array'], {'affine': 'affine'}), '(image_array, affine=affine)\n', (15246, 15274), True, 'import nibabel as nib\n'), ((19529, 19553), 'os.path.isfile', 'os.path.isfile', (['files[n]'], {}), '(files[n])\n', (19543, 19553), False, 'import hashlib, os, threading, functools, json, warnings\n'), ((27540, 27573), 'pyDcmConverter.util.GetResourcePath', 'util.GetResourcePath', (['"""group.png"""'], {}), "('group.png')\n", (27560, 27573), False, 'from pyDcmConverter import guiutil, util\n'), ((27670, 27702), 'pyDcmConverter.util.GetResourcePath', 'util.GetResourcePath', (['"""user.png"""'], {}), "('user.png')\n", (27690, 27702), False, 'from pyDcmConverter import guiutil, util\n'), ((27799, 27831), 
'pyDcmConverter.util.GetResourcePath', 'util.GetResourcePath', (['"""book.png"""'], {}), "('book.png')\n", (27819, 27831), False, 'from pyDcmConverter import guiutil, util\n'), ((27928, 27970), 'pyDcmConverter.util.GetResourcePath', 'util.GetResourcePath', (['"""table_multiple.png"""'], {}), "('table_multiple.png')\n", (27948, 27970), False, 'from pyDcmConverter import guiutil, util\n'), ((28067, 28101), 'pyDcmConverter.util.GetResourcePath', 'util.GetResourcePath', (['"""pencil.png"""'], {}), "('pencil.png')\n", (28087, 28101), False, 'from pyDcmConverter import guiutil, util\n'), ((28198, 28235), 'pyDcmConverter.util.GetResourcePath', 'util.GetResourcePath', (['"""chart_bar.png"""'], {}), "('chart_bar.png')\n", (28218, 28235), False, 'from pyDcmConverter import guiutil, util\n'), ((28332, 28371), 'pyDcmConverter.util.GetResourcePath', 'util.GetResourcePath', (['"""chart_curve.png"""'], {}), "('chart_curve.png')\n", (28352, 28371), False, 'from pyDcmConverter import guiutil, util\n'), ((28468, 28508), 'pyDcmConverter.util.GetResourcePath', 'util.GetResourcePath', (['"""pencil_error.png"""'], {}), "('pencil_error.png')\n", (28488, 28508), False, 'from pyDcmConverter import guiutil, util\n'), ((28605, 28648), 'pyDcmConverter.util.GetResourcePath', 'util.GetResourcePath', (['"""chart_bar_error.png"""'], {}), "('chart_bar_error.png')\n", (28625, 28648), False, 'from pyDcmConverter import guiutil, util\n'), ((28745, 28790), 'pyDcmConverter.util.GetResourcePath', 'util.GetResourcePath', (['"""chart_curve_error.png"""'], {}), "('chart_curve_error.png')\n", (28765, 28790), False, 'from pyDcmConverter import guiutil, util\n'), ((28887, 28929), 'pyDcmConverter.util.GetResourcePath', 'util.GetResourcePath', (['"""table_selected.png"""'], {}), "('table_selected.png')\n", (28907, 28929), False, 'from pyDcmConverter import guiutil, util\n'), ((47113, 47157), 'wx.CallAfter', 'wx.CallAfter', (['progressFunc', '(98)', '(100)', 'msgs[1]'], {}), '(progressFunc, 98, 100, msgs[1])\n', (47125, 47157), False, 'import wx\n'), ((47207, 47244), 'os.path.join', 'os.path.join', (['self.path', 'filearray[n]'], {}), '(self.path, filearray[n])\n', (47219, 47244), False, 'import hashlib, os, threading, functools, json, warnings\n'), ((10618, 10648), 'os.path.basename', 'os.path.basename', (['out_basepath'], {}), '(out_basepath)\n', (10634, 10648), False, 'import hashlib, os, threading, functools, json, warnings\n'), ((19426, 19480), 'wx.CallAfter', 'wx.CallAfter', (['progressFunc', '(0)', '(0)', '"""Search terminated."""'], {}), "(progressFunc, 0, 0, 'Search terminated.')\n", (19438, 19480), False, 'import wx\n'), ((48556, 48594), 'numpy.array', 'np.array', (['item.ImageOrientationPatient'], {}), '(item.ImageOrientationPatient)\n', (48564, 48594), True, 'import numpy as np\n'), ((48622, 48669), 'numpy.array', 'np.array', (['images[i - 1].ImageOrientationPatient'], {}), '(images[i - 1].ImageOrientationPatient)\n', (48630, 48669), True, 'import numpy as np\n'), ((48985, 49020), 'numpy.array', 'np.array', (['item.ImagePositionPatient'], {}), '(item.ImagePositionPatient)\n', (48993, 49020), True, 'import numpy as np\n'), ((49048, 49092), 'numpy.array', 'np.array', (['images[i - 1].ImagePositionPatient'], {}), '(images[i - 1].ImagePositionPatient)\n', (49056, 49092), True, 'import numpy as np\n'), ((19105, 19126), 'os.path.join', 'os.path.join', (['root', 'f'], {}), '(root, f)\n', (19117, 19126), False, 'import hashlib, os, threading, functools, json, warnings\n'), ((19672, 19705), 'dicompylercore.dicomparser.DicomParser', 
'dicomparser.DicomParser', (['files[n]'], {}), '(files[n])\n', (19695, 19705), False, 'from dicompylercore import dicomparser\n'), ((16572, 16629), 'functools.partial', 'functools.partial', (['self.ExportFunc'], {'out_basepath': 'basename'}), '(self.ExportFunc, out_basepath=basename)\n', (16589, 16629), False, 'import hashlib, os, threading, functools, json, warnings\n'), ((20418, 20450), 'wx.CallAfter', 'wx.CallAfter', (['foundFunc', 'patient'], {}), '(foundFunc, patient)\n', (20430, 20450), False, 'import wx\n'), ((48708, 48729), 'numpy.round', 'np.round', (['(iop0 - iop1)'], {}), '(iop0 - iop1)\n', (48716, 48729), True, 'import numpy as np\n'), ((49135, 49156), 'numpy.round', 'np.round', (['(ipp0 - ipp1)'], {}), '(ipp0 - ipp1)\n', (49143, 49156), True, 'import numpy as np\n'), ((13407, 13418), 'numpy.min', 'np.min', (['pos'], {}), '(pos)\n', (13413, 13418), True, 'import numpy as np\n'), ((13534, 13545), 'numpy.max', 'np.max', (['pos'], {}), '(pos)\n', (13540, 13545), True, 'import numpy as np\n')]
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for estimator.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.contrib.distributions.python.ops import estimator as estimator_lib
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators.head_test import _assert_metrics
from tensorflow.contrib.learn.python.learn.estimators.head_test import _assert_no_variables
from tensorflow.contrib.learn.python.learn.estimators.head_test import _assert_summary_tags
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.platform import test
class EstimatorHeadDistributionRegressionTest(test.TestCase):
def _assert_output_alternatives(self, model_fn_ops):
    self.assertEqual({
None: constants.ProblemType.LINEAR_REGRESSION
}, {
k: v[0] for k, v in six.iteritems(model_fn_ops.output_alternatives)
})
def testNormalLocScaleLogits(self):
# We will bias logits[..., 1] so that: logits[..., 1]=0 implies scale=1.
scale_bias = np.log(np.expm1(1.))
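    # With this bias, softplus(0 + scale_bias) = log(1 + exp(log(e - 1))) = log(e) = 1,
    # so a zero logit in the scale slot corresponds to unit standard deviation.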
def softplus(x):
return np.log1p(np.exp(x))
def actual_loss(logits, labels):
mu = actual_mean(logits)
sigma = actual_stddev(logits)
labels = np.squeeze(labels, -1)
z = (labels - mu) / sigma
loss = 0.5 * (z**2. + np.log(2. * np.pi)) + np.log(sigma)
return loss.mean()
def actual_mean(logits):
return logits[..., 0]
def actual_stddev(logits):
return softplus(logits[..., 1] + scale_bias)
def make_distribution_fn(logits):
return normal_lib.Normal(
loc=logits[..., 0],
scale=nn_ops.softplus(logits[..., 1] + scale_bias))
head = estimator_lib.estimator_head_distribution_regression(
make_distribution_fn,
logits_dimension=2)
labels = np.float32([[-1.],
[0.],
[1.]])
logits = np.float32([[0., -1],
[1, 0.5],
[-1, 1]])
with ops.Graph().as_default(), session.Session():
# Convert to tensor so we can index into head.distributions.
tflogits = ops.convert_to_tensor(logits, name="logits")
model_fn_ops = head.create_model_fn_ops(
{},
labels=labels,
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=head_lib.no_op_train_fn,
logits=tflogits)
self._assert_output_alternatives(model_fn_ops)
_assert_summary_tags(self, ["loss"])
_assert_no_variables(self)
loss = actual_loss(logits, labels)
_assert_metrics(self, loss, {"loss": loss}, model_fn_ops)
# Now we verify the underlying distribution was correctly constructed.
expected_mean = logits[..., 0]
self.assertAllClose(
expected_mean,
head.distribution(tflogits).mean().eval(),
rtol=1e-6, atol=0.)
expected_stddev = softplus(logits[..., 1] + scale_bias)
self.assertAllClose(
expected_stddev,
head.distribution(tflogits).stddev().eval(),
rtol=1e-6, atol=0.)
# Should have created only one distribution.
self.assertEqual(1, len(head.distributions))
if __name__ == "__main__":
test.main()
|
[
"tensorflow.python.platform.test.main",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.client.session.Session",
"numpy.log",
"tensorflow.python.ops.nn_ops.softplus",
"numpy.float32",
"tensorflow.contrib.learn.python.learn.estimators.head_test._assert_no_variables",
"tensorflow.contrib.distributions.python.ops.estimator.estimator_head_distribution_regression",
"numpy.expm1",
"numpy.exp",
"numpy.squeeze",
"tensorflow.contrib.learn.python.learn.estimators.head_test._assert_summary_tags",
"six.iteritems",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.contrib.learn.python.learn.estimators.head_test._assert_metrics"
] |
[((4270, 4281), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (4279, 4281), False, 'from tensorflow.python.platform import test\n'), ((2756, 2854), 'tensorflow.contrib.distributions.python.ops.estimator.estimator_head_distribution_regression', 'estimator_lib.estimator_head_distribution_regression', (['make_distribution_fn'], {'logits_dimension': '(2)'}), '(make_distribution_fn,\n logits_dimension=2)\n', (2808, 2854), True, 'from tensorflow.contrib.distributions.python.ops import estimator as estimator_lib\n'), ((2881, 2915), 'numpy.float32', 'np.float32', (['[[-1.0], [0.0], [1.0]]'], {}), '([[-1.0], [0.0], [1.0]])\n', (2891, 2915), True, 'import numpy as np\n'), ((2976, 3018), 'numpy.float32', 'np.float32', (['[[0.0, -1], [1, 0.5], [-1, 1]]'], {}), '([[0.0, -1], [1, 0.5], [-1, 1]])\n', (2986, 3018), True, 'import numpy as np\n'), ((2107, 2120), 'numpy.expm1', 'np.expm1', (['(1.0)'], {}), '(1.0)\n', (2115, 2120), True, 'import numpy as np\n'), ((2296, 2318), 'numpy.squeeze', 'np.squeeze', (['labels', '(-1)'], {}), '(labels, -1)\n', (2306, 2318), True, 'import numpy as np\n'), ((3103, 3120), 'tensorflow.python.client.session.Session', 'session.Session', ([], {}), '()\n', (3118, 3120), False, 'from tensorflow.python.client import session\n'), ((3206, 3250), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['logits'], {'name': '"""logits"""'}), "(logits, name='logits')\n", (3227, 3250), False, 'from tensorflow.python.framework import ops\n'), ((3510, 3546), 'tensorflow.contrib.learn.python.learn.estimators.head_test._assert_summary_tags', '_assert_summary_tags', (['self', "['loss']"], {}), "(self, ['loss'])\n", (3530, 3546), False, 'from tensorflow.contrib.learn.python.learn.estimators.head_test import _assert_summary_tags\n'), ((3553, 3579), 'tensorflow.contrib.learn.python.learn.estimators.head_test._assert_no_variables', '_assert_no_variables', (['self'], {}), '(self)\n', (3573, 3579), False, 'from tensorflow.contrib.learn.python.learn.estimators.head_test import _assert_no_variables\n'), ((3627, 3684), 'tensorflow.contrib.learn.python.learn.estimators.head_test._assert_metrics', '_assert_metrics', (['self', 'loss', "{'loss': loss}", 'model_fn_ops'], {}), "(self, loss, {'loss': loss}, model_fn_ops)\n", (3642, 3684), False, 'from tensorflow.contrib.learn.python.learn.estimators.head_test import _assert_metrics\n'), ((2165, 2174), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (2171, 2174), True, 'import numpy as np\n'), ((2401, 2414), 'numpy.log', 'np.log', (['sigma'], {}), '(sigma)\n', (2407, 2414), True, 'import numpy as np\n'), ((1912, 1959), 'six.iteritems', 'six.iteritems', (['model_fn_ops.output_alternatives'], {}), '(model_fn_ops.output_alternatives)\n', (1925, 1959), False, 'import six\n'), ((2698, 2742), 'tensorflow.python.ops.nn_ops.softplus', 'nn_ops.softplus', (['(logits[..., 1] + scale_bias)'], {}), '(logits[..., 1] + scale_bias)\n', (2713, 2742), False, 'from tensorflow.python.ops import nn_ops\n'), ((3077, 3088), 'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), '()\n', (3086, 3088), False, 'from tensorflow.python.framework import ops\n'), ((2379, 2398), 'numpy.log', 'np.log', (['(2.0 * np.pi)'], {}), '(2.0 * np.pi)\n', (2385, 2398), True, 'import numpy as np\n')]
|
import numpy as np
import gym
from random import randint
from metaworld.benchmarks import ML1
class ReachML1Env(gym.Env):
def __init__(self, max_episode_steps=150,out_of_distribution=False, n_train_tasks=50, n_test_tasks=10, **kwargs):
super(ReachML1Env, self).__init__()
self.train_env = ML1.get_train_tasks('reach-v1', out_of_distribution=out_of_distribution)
self.test_env = ML1.get_test_tasks('reach-v1', out_of_distribution=out_of_distribution)
self.train_tasks = self.train_env.sample_tasks(n_train_tasks)
self.test_tasks = self.test_env.sample_tasks(n_test_tasks)
self.tasks = self.train_tasks + self.test_tasks
self.env = self.train_env #this env will change depending on the idx
self.observation_space = self.env.observation_space
self.action_space = self.env.action_space
self.goal_space_origin = np.array([0, 0.85, 0.175])
self.current_task_idx = 0
self.episode_steps = 0
self._max_episode_steps = max_episode_steps
# self.get_tasks_goals()
# self.reset_task()
def step(self, action):
self.episode_steps += 1
obs, reward, done, info = self.env.step(action)
if self.episode_steps >= self._max_episode_steps:
done = True
return obs, reward, done, info
def reset(self):
self.episode_steps = 0
return self.env.reset()
def seed(self, seed):
self.train_env.seed(seed)
self.test_env.seed(seed)
def get_all_task_idx(self):
return range(len(self.tasks))
def set_task(self, idx):
self.current_task_idx = idx
self.env = self.train_env if idx < len(self.train_tasks) else self.test_env
self.env.set_task(self.tasks[idx])
self._goal = self.tasks[idx]['goal']
def get_task(self):
return self.tasks[self.current_task_idx]['goal'] # goal_pos
def reset_task(self, task=None, test=False):
        # apparently this is called only without an idx, so tasks are always scrambled;
        # we only need to set the task explicitly at test time
if task is None:
if test:
task = randint(len(self.train_tasks), len(self.tasks) - 1)
else:
task = randint(0, len(self.train_tasks) - 1)
self.set_task(task)
def render(self):
self.env.render()
def get_tasks_goals(self):
for idx in range(len(self.tasks)):
self.reset_task(idx)
_, _, _, info = self.step(self.action_space.sample())
self.tasks[idx]['goal_pos'] = info['goal']
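# --- Usage sketch (not part of the original module) ---
# A minimal illustration, assuming the metaworld ML1 benchmark and its 'reach-v1'
# task are installed and importable as above.
if __name__ == '__main__':
    env = ReachML1Env(max_episode_steps=150)
    env.set_task(0)  # pick the first training task
    obs = env.reset()
    done, episode_return = False, 0.0
    while not done:
        obs, reward, done, info = env.step(env.action_space.sample())
        episode_return += reward
    print('episode return:', episode_return, 'goal:', env.get_task())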
|
[
"metaworld.benchmarks.ML1.get_train_tasks",
"metaworld.benchmarks.ML1.get_test_tasks",
"numpy.array"
] |
[((311, 383), 'metaworld.benchmarks.ML1.get_train_tasks', 'ML1.get_train_tasks', (['"""reach-v1"""'], {'out_of_distribution': 'out_of_distribution'}), "('reach-v1', out_of_distribution=out_of_distribution)\n", (330, 383), False, 'from metaworld.benchmarks import ML1\n'), ((408, 479), 'metaworld.benchmarks.ML1.get_test_tasks', 'ML1.get_test_tasks', (['"""reach-v1"""'], {'out_of_distribution': 'out_of_distribution'}), "('reach-v1', out_of_distribution=out_of_distribution)\n", (426, 479), False, 'from metaworld.benchmarks import ML1\n'), ((893, 919), 'numpy.array', 'np.array', (['[0, 0.85, 0.175]'], {}), '([0, 0.85, 0.175])\n', (901, 919), True, 'import numpy as np\n')]
|
# Copyright (c) 2018 <NAME>
#
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
from phylanx import Phylanx
import numpy as np
@Phylanx
def foo(x, y):
return [x, y]
x = np.array([1, 2])
y = np.array([3, 4])
result = foo(x, y)
assert((result[0] == x).all() and (result[1] == y).all())
|
[
"numpy.array"
] |
[((282, 298), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (290, 298), True, 'import numpy as np\n'), ((303, 319), 'numpy.array', 'np.array', (['[3, 4]'], {}), '([3, 4])\n', (311, 319), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
################################################################
# <NAME> Personality Type Tweets Natural Language Processing
# By <NAME>
# Project can be found at:
# https://www.inertia7.com/projects/109 &
# https://www.inertia7.com/projects/110
################################################################
##################
# Import packages
##################
import sys, os
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
# Confirm the correct directory; break script and prompt user to move to correct directory otherwise
filepath = os.getcwd()
if not filepath.endswith('myersBriggsNLPAnalysis'):
print('\nYou do not appear to be in the correct directory,\
you must be in the \'myersBriggsNLPAnalysis\' directory\
in order to run these scripts. Type \'pwd\' in the command line\
if you are unsure of your location in the terminal.')
sys.exit(1)
raw_data = 'data/mbti_1.csv'
token_data = 'data/mbti_tokenized.csv'
clean_data = 'data/mbti_cleaned.csv'
columns = np.array(['type', 'posts'])
##################################################
# Make different versions of our data for analysis
##################################################
'''
Explanation
-----------
Now we will have various versions of our data:
- Raw, unfiltered data
- Tokenized data with hashtags, mentions, retweets, etc.
- Cleaned tokenized data with stopwords removed
We will now subset the data into various parts to be used in the other scripts
'''
# First check if the data has been generated
# If not prompt user to make it
token_file_exists = os.path.isfile(token_data)
clean_file_exists = os.path.isfile(clean_data)
if not token_file_exists or not clean_file_exists:
print('It looks like no processed data has been generated.\n',
'Please run the \'data_generation.py\' file and follow the prompts.')
sys.exit(1)
# Declare different processed and unprocessed objects for further analysis
raw_df = pd.read_csv(raw_data, header = 0)
raw_type = raw_df['type']
raw_posts = raw_df['posts']
token_df = pd.read_csv(token_data, header = 0)
token_type = token_df['type']
token_posts = token_df['posts']
clean_df = pd.read_csv(clean_data, header = 0)
clean_type = clean_df['type']
clean_posts = clean_df['posts']
# Split up data into training and testing datasets
# To evaluate effectiveness of model training
X_train_token, X_test_token, y_train_token, y_test_token = train_test_split(
token_posts, token_type, test_size = 0.30, random_state = 42)
X_train_clean, X_test_clean, y_train_clean, y_test_clean = train_test_split(
clean_posts, clean_type, test_size = 0.30, random_state = 42)
|
[
"os.getcwd",
"sklearn.model_selection.train_test_split",
"pandas.read_csv",
"os.path.isfile",
"numpy.array",
"sys.exit"
] |
[((615, 626), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (624, 626), False, 'import sys, os\n'), ((1048, 1075), 'numpy.array', 'np.array', (["['type', 'posts']"], {}), "(['type', 'posts'])\n", (1056, 1075), True, 'import numpy as np\n'), ((1616, 1642), 'os.path.isfile', 'os.path.isfile', (['token_data'], {}), '(token_data)\n', (1630, 1642), False, 'import sys, os\n'), ((1663, 1689), 'os.path.isfile', 'os.path.isfile', (['clean_data'], {}), '(clean_data)\n', (1677, 1689), False, 'import sys, os\n'), ((1988, 2019), 'pandas.read_csv', 'pd.read_csv', (['raw_data'], {'header': '(0)'}), '(raw_data, header=0)\n', (1999, 2019), True, 'import pandas as pd\n'), ((2088, 2121), 'pandas.read_csv', 'pd.read_csv', (['token_data'], {'header': '(0)'}), '(token_data, header=0)\n', (2099, 2121), True, 'import pandas as pd\n'), ((2198, 2231), 'pandas.read_csv', 'pd.read_csv', (['clean_data'], {'header': '(0)'}), '(clean_data, header=0)\n', (2209, 2231), True, 'import pandas as pd\n'), ((2453, 2526), 'sklearn.model_selection.train_test_split', 'train_test_split', (['token_posts', 'token_type'], {'test_size': '(0.3)', 'random_state': '(42)'}), '(token_posts, token_type, test_size=0.3, random_state=42)\n', (2469, 2526), False, 'from sklearn.model_selection import train_test_split\n'), ((2597, 2670), 'sklearn.model_selection.train_test_split', 'train_test_split', (['clean_posts', 'clean_type'], {'test_size': '(0.3)', 'random_state': '(42)'}), '(clean_posts, clean_type, test_size=0.3, random_state=42)\n', (2613, 2670), False, 'from sklearn.model_selection import train_test_split\n'), ((920, 931), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (928, 931), False, 'import sys, os\n'), ((1891, 1902), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1899, 1902), False, 'import sys, os\n')]
|
from __future__ import print_function
import csv
import numpy as np
import re
import Spectrum
#import matplotlib.pyplot as plt
def ReadCSVRef(filename):
with open(filename) as csvfile:
reader = csv.reader(csvfile, delimiter=',')
headers = list(filter(None, next(reader)))
data = []
for row in reader:
data.append(row[:-1])
data = np.array(data)
data[data == ''] = np.nan
data = data.astype(float)
dataDict = {}
i = 0
columns_per_data = int(np.shape(data[0])[0]/np.shape(headers)[0])
print(columns_per_data)
for hh in headers:
label = tuple(map(int, re.findall(r'\d+', hh)))
dataDict[label] = data[:, i:i+columns_per_data]
data[:, i:i+columns_per_data]
i+= columns_per_data
return dataDict
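# Illustrative note (an assumption about the expected file layout, not stated in the
# original): ReadCSVRef expects a first row whose non-empty cells each contain the
# integer identifiers of one dataset (e.g. "Grating 800, Center 750"), followed by
# numeric rows; with 2 such headers over 4 data columns, columns_per_data == 2 and
# the returned dict maps e.g. (800, 750) -> an (N, 2) float array, with '' read as NaN.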
# Add error-checking for entering a non-existent grating/wavelength pair
class SystemCorrectionFactor(object):
def __init__(self, grating, center_wavelength, wavelengths = None):
self.grating = grating
self.center_wavelength = center_wavelength
if grating >= 1000:
self.correction_spectrum = self.ImportIR()
elif wavelengths is not None:
self.correction_spectrum = self.ImportVis(wavelengths)
else:
print('No valid reference for system correction!')
def ImportIR(self):
filename = '/home/isobel/Documents/McMaster/CL/SystemResponseFcns/CorrectionFactorSCAlIRCamera_2015_02_26.csv'
dataDict = ReadCSVRef(filename)
d = dataDict[self.grating, self.center_wavelength]
correction_spectrum = Spectrum.CLSpectrum(d[:,1], d[:,0])
return correction_spectrum
def ImportVis(self, wavelengths):
filename = '/home/isobel/Documents/McMaster/CL/SystemResponseFcns/SystemResponseVISInterpolated_20150717.csv'
dataDict = ReadCSVRef(filename)
d = dataDict[(self.grating,)]
spectrum_interp = np.interp(wavelengths, d[:, 0], d[:, 1])
correction_spectrum = Spectrum.CLSpectrum(spectrum_interp, wavelengths)
return correction_spectrum
class WavelengthCorrectionFactor(object):
def __init__(self, grating, center_wavelength):
self.grating = grating
self.center_wavelength = center_wavelength
if self.grating in (1250, 1600, 2000):
self.wavelength = self.importIRwavelengths()
elif self.grating in (500, 800):
self.wavelength = self.importVISwavelengths()
else:
print('No valid reference for wavelength correction!')
def importIRwavelengths(self):
filename = '/home/isobel/Documents/McMaster/CL/SystemResponseFcns/WinspecCorrWavelengthsIR20150428.csv'
dataDict = ReadCSVRef(filename)
correction_spectrum = dataDict[self.grating, self.center_wavelength]
return correction_spectrum
def importVISwavelengths(self):
filename = '/home/isobel/Documents/McMaster/CL/SystemResponseFcns/WinspecCorrWavelengthsVis20150309.csv'
dataDict = ReadCSVRef(filename)
wavelengths = dataDict[self.grating, self.center_wavelength]
return wavelengths
#wvls = np.linspace(400, 980)
#p = SystemCorrectionFactor(800, 750, wvls)
#print(np.shape(p.correction_spectrum.SpectrumRange))
#plt.plot(p.correction_spectrum.SpectrumRange, p.correction_spectrum.intensity)
#plt.show()
|
[
"csv.reader",
"numpy.shape",
"re.findall",
"numpy.array",
"numpy.interp",
"Spectrum.CLSpectrum"
] |
[((383, 397), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (391, 397), True, 'import numpy as np\n'), ((207, 241), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (217, 241), False, 'import csv\n'), ((1620, 1657), 'Spectrum.CLSpectrum', 'Spectrum.CLSpectrum', (['d[:, 1]', 'd[:, 0]'], {}), '(d[:, 1], d[:, 0])\n', (1639, 1657), False, 'import Spectrum\n'), ((1960, 2000), 'numpy.interp', 'np.interp', (['wavelengths', 'd[:, 0]', 'd[:, 1]'], {}), '(wavelengths, d[:, 0], d[:, 1])\n', (1969, 2000), True, 'import numpy as np\n'), ((2031, 2080), 'Spectrum.CLSpectrum', 'Spectrum.CLSpectrum', (['spectrum_interp', 'wavelengths'], {}), '(spectrum_interp, wavelengths)\n', (2050, 2080), False, 'import Spectrum\n'), ((513, 530), 'numpy.shape', 'np.shape', (['data[0]'], {}), '(data[0])\n', (521, 530), True, 'import numpy as np\n'), ((534, 551), 'numpy.shape', 'np.shape', (['headers'], {}), '(headers)\n', (542, 551), True, 'import numpy as np\n'), ((638, 660), 're.findall', 're.findall', (['"""\\\\d+"""', 'hh'], {}), "('\\\\d+', hh)\n", (648, 660), False, 'import re\n')]
|
import warnings
from datetime import datetime
import numpy as np
import xarray as xr
import pytest
import ecco_v4_py
from .test_common import all_mds_datadirs, get_test_ds
@pytest.mark.parametrize("mytype",['xda','nparr','list','single'])
def test_extract_dates(mytype):
dints = [[1991,8,9,13,10,15],[1992,10,20,8,30,5]]
dates = [datetime(year=x[0],month=x[1],day=x[2],
hour=x[3],minute=x[4],second=x[5]) for x in dints]
dates = np.array(dates,dtype='datetime64[s]')
dates = [np.datetime64(x) for x in dates]
if mytype=='xda':
dates = xr.DataArray(np.array(dates))
elif mytype=='nparr':
dates = np.array(dates)
elif mytype=='single':
dints=dints[0]
dates = dates[0]
test_out = ecco_v4_py.extract_yyyy_mm_dd_hh_mm_ss_from_datetime64(dates)
for test,expected in zip(test_out,np.array(dints).T):
print('test: ',test)
print('exp: ',expected)
test = test.values if mytype=='xda' else test
assert np.all(test==expected)
def test_get_grid(get_test_ds):
"""make sure we can make a grid ... that's it"""
grid = ecco_v4_py.get_llc_grid(get_test_ds)
|
[
"numpy.datetime64",
"ecco_v4_py.extract_yyyy_mm_dd_hh_mm_ss_from_datetime64",
"datetime.datetime",
"ecco_v4_py.get_llc_grid",
"numpy.array",
"pytest.mark.parametrize",
"numpy.all"
] |
[((176, 245), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mytype"""', "['xda', 'nparr', 'list', 'single']"], {}), "('mytype', ['xda', 'nparr', 'list', 'single'])\n", (199, 245), False, 'import pytest\n'), ((469, 507), 'numpy.array', 'np.array', (['dates'], {'dtype': '"""datetime64[s]"""'}), "(dates, dtype='datetime64[s]')\n", (477, 507), True, 'import numpy as np\n'), ((770, 831), 'ecco_v4_py.extract_yyyy_mm_dd_hh_mm_ss_from_datetime64', 'ecco_v4_py.extract_yyyy_mm_dd_hh_mm_ss_from_datetime64', (['dates'], {}), '(dates)\n', (824, 831), False, 'import ecco_v4_py\n'), ((1142, 1178), 'ecco_v4_py.get_llc_grid', 'ecco_v4_py.get_llc_grid', (['get_test_ds'], {}), '(get_test_ds)\n', (1165, 1178), False, 'import ecco_v4_py\n'), ((344, 422), 'datetime.datetime', 'datetime', ([], {'year': 'x[0]', 'month': 'x[1]', 'day': 'x[2]', 'hour': 'x[3]', 'minute': 'x[4]', 'second': 'x[5]'}), '(year=x[0], month=x[1], day=x[2], hour=x[3], minute=x[4], second=x[5])\n', (352, 422), False, 'from datetime import datetime\n'), ((520, 536), 'numpy.datetime64', 'np.datetime64', (['x'], {}), '(x)\n', (533, 536), True, 'import numpy as np\n'), ((1021, 1045), 'numpy.all', 'np.all', (['(test == expected)'], {}), '(test == expected)\n', (1027, 1045), True, 'import numpy as np\n'), ((604, 619), 'numpy.array', 'np.array', (['dates'], {}), '(dates)\n', (612, 619), True, 'import numpy as np\n'), ((663, 678), 'numpy.array', 'np.array', (['dates'], {}), '(dates)\n', (671, 678), True, 'import numpy as np\n'), ((870, 885), 'numpy.array', 'np.array', (['dints'], {}), '(dints)\n', (878, 885), True, 'import numpy as np\n')]
|
from abc import ABC, abstractmethod
import copy
import logging
import numpy as np
from scipy.stats import rankdata
from typing import Dict, NamedTuple, NoReturn, Tuple
from ..lux.game import Game
from ..lux.game_constants import GAME_CONSTANTS
from ..lux.game_objects import Player
def count_city_tiles(game_state: Game) -> np.ndarray:
return np.array([player.city_tile_count for player in game_state.players])
def count_units(game_state: Game) -> np.ndarray:
return np.array([len(player.units) for player in game_state.players])
def count_total_fuel(game_state: Game) -> np.ndarray:
return np.array([
sum([city.fuel for city in player.cities.values()])
for player in game_state.players
])
def count_research_points(game_state: Game) -> np.ndarray:
return np.array([player.research_points for player in game_state.players])
def should_early_stop(game_state: Game) -> bool:
ct_count = count_city_tiles(game_state)
unit_count = count_units(game_state)
ct_pct = ct_count / max(ct_count.sum(), 1)
unit_pct = unit_count / max(unit_count.sum(), 1)
return ((ct_count == 0).any() or
(unit_count == 0).any() or
(ct_pct >= 0.75).any() or
(unit_pct >= 0.75).any())
class RewardSpec(NamedTuple):
reward_min: float
reward_max: float
zero_sum: bool
only_once: bool
# All reward spaces defined below
class BaseRewardSpace(ABC):
"""
A class used for defining a reward space and/or done state for either the full game or a sub-task
"""
def __init__(self, **kwargs):
if kwargs:
logging.warning(f"RewardSpace received unexpected kwargs: {kwargs}")
@staticmethod
@abstractmethod
def get_reward_spec() -> RewardSpec:
pass
@abstractmethod
def compute_rewards_and_done(self, game_state: Game, done: bool) -> Tuple[Tuple[float, float], bool]:
pass
def get_info(self) -> Dict[str, np.ndarray]:
return {}
# Full game reward spaces defined below
class FullGameRewardSpace(BaseRewardSpace):
"""
A class used for defining a reward space for the full game.
"""
def compute_rewards_and_done(self, game_state: Game, done: bool) -> Tuple[Tuple[float, float], bool]:
return self.compute_rewards(game_state, done), done
@abstractmethod
def compute_rewards(self, game_state: Game, done: bool) -> Tuple[float, float]:
pass
class GameResultReward(FullGameRewardSpace):
@staticmethod
def get_reward_spec() -> RewardSpec:
return RewardSpec(
reward_min=-1.,
reward_max=1.,
zero_sum=True,
only_once=True
)
def __init__(self, early_stop: bool = False, **kwargs):
super(GameResultReward, self).__init__(**kwargs)
self.early_stop = early_stop
def compute_rewards_and_done(self, game_state: Game, done: bool) -> Tuple[Tuple[float, float], bool]:
if self.early_stop:
done = done or should_early_stop(game_state)
return self.compute_rewards(game_state, done), done
def compute_rewards(self, game_state: Game, done: bool) -> Tuple[float, float]:
if not done:
return 0., 0.
        # reward here is defined by the number of city tiles, with unit count as a tie-breaking mechanism
rewards = [int(GameResultReward.compute_player_reward(p)) for p in game_state.players]
rewards = (rankdata(rewards) - 1.) * 2. - 1.
return tuple(rewards)
@staticmethod
def compute_player_reward(player: Player):
ct_count = player.city_tile_count
unit_count = len(player.units)
        # max board size is 32 x 32 => at most 1024 city tiles and units,
        # so weighting city tiles by 10000 keeps the ordering strict: rank by city tiles first, then break ties by unit count
return ct_count * 10000 + unit_count
class CityTileReward(FullGameRewardSpace):
@staticmethod
def get_reward_spec() -> RewardSpec:
return RewardSpec(
reward_min=0.,
reward_max=1.,
zero_sum=False,
only_once=False
)
def compute_rewards(self, game_state: Game, done: bool) -> Tuple[float, float]:
return tuple(count_city_tiles(game_state) / 1024.)
class StatefulMultiReward(FullGameRewardSpace):
@staticmethod
def get_reward_spec() -> RewardSpec:
return RewardSpec(
reward_min=-1. / GAME_CONSTANTS["PARAMETERS"]["MAX_DAYS"],
reward_max=1. / GAME_CONSTANTS["PARAMETERS"]["MAX_DAYS"],
zero_sum=False,
only_once=False
)
def __init__(
self,
positive_weight: float = 1.,
negative_weight: float = 1.,
early_stop: bool = False,
**kwargs
):
assert positive_weight > 0.
assert negative_weight > 0.
self.positive_weight = positive_weight
self.negative_weight = negative_weight
self.early_stop = early_stop
self.city_count = np.empty((2,), dtype=float)
self.unit_count = np.empty_like(self.city_count)
self.research_points = np.empty_like(self.city_count)
self.total_fuel = np.empty_like(self.city_count)
self.weights = {
"game_result": 10.,
"city": 1.,
"unit": 0.5,
"research": 0.1,
"fuel": 0.005,
# Penalize workers each step that their cargo remains full
# "full_workers": -0.01,
"full_workers": 0.,
# A reward given each step
"step": 0.,
}
self.weights.update({key: val for key, val in kwargs.items() if key in self.weights.keys()})
for key in copy.copy(kwargs).keys():
if key in self.weights.keys():
del kwargs[key]
super(StatefulMultiReward, self).__init__(**kwargs)
self._reset()
def compute_rewards_and_done(self, game_state: Game, done: bool) -> Tuple[Tuple[float, float], bool]:
if self.early_stop:
done = done or should_early_stop(game_state)
return self.compute_rewards(game_state, done), done
def compute_rewards(self, game_state: Game, done: bool) -> Tuple[float, float]:
new_city_count = count_city_tiles(game_state)
new_unit_count = count_units(game_state)
new_research_points = count_research_points(game_state)
new_total_fuel = count_total_fuel(game_state)
reward_items_dict = {
"city": new_city_count - self.city_count,
"unit": new_unit_count - self.unit_count,
"research": new_research_points - self.research_points,
# Don't penalize losing fuel at night
"fuel": np.maximum(new_total_fuel - self.total_fuel, 0),
"full_workers": np.array([
sum(unit.get_cargo_space_left() > 0 for unit in player.units if unit.is_worker())
for player in game_state.players
]),
"step": np.ones(2, dtype=float)
}
if done:
game_result_reward = [int(GameResultReward.compute_player_reward(p)) for p in game_state.players]
game_result_reward = (rankdata(game_result_reward) - 1.) * 2. - 1.
self._reset()
else:
game_result_reward = np.array([0., 0.])
self.city_count = new_city_count
self.unit_count = new_unit_count
self.research_points = new_research_points
self.total_fuel = new_total_fuel
reward_items_dict["game_result"] = game_result_reward
assert self.weights.keys() == reward_items_dict.keys()
reward = np.stack(
[self.weight_rewards(reward_items_dict[key] * w) for key, w in self.weights.items()],
axis=0
).sum(axis=0)
return tuple(reward / 500. / max(self.positive_weight, self.negative_weight))
def weight_rewards(self, reward: np.ndarray) -> np.ndarray:
reward = np.where(
reward > 0.,
self.positive_weight * reward,
reward
)
reward = np.where(
reward < 0.,
self.negative_weight * reward,
reward
)
return reward
def _reset(self) -> NoReturn:
self.city_count = np.ones_like(self.city_count)
self.unit_count = np.ones_like(self.unit_count)
self.research_points = np.zeros_like(self.research_points)
self.total_fuel = np.zeros_like(self.total_fuel)
class ZeroSumStatefulMultiReward(StatefulMultiReward):
@staticmethod
def get_reward_spec() -> RewardSpec:
return RewardSpec(
reward_min=-1.,
reward_max=1.,
zero_sum=True,
only_once=False
)
def compute_rewards(self, game_state: Game, done: bool) -> Tuple[float, float]:
reward = np.array(super(ZeroSumStatefulMultiReward, self).compute_rewards(game_state, done))
return tuple(reward - reward.mean())
class PunishingExponentialReward(BaseRewardSpace):
@staticmethod
def get_reward_spec() -> RewardSpec:
return RewardSpec(
reward_min=-1. / GAME_CONSTANTS["PARAMETERS"]["MAX_DAYS"],
reward_max=1. / GAME_CONSTANTS["PARAMETERS"]["MAX_DAYS"],
zero_sum=False,
only_once=False
)
def __init__(
self,
**kwargs
):
self.city_count = np.empty((2,), dtype=float)
self.unit_count = np.empty_like(self.city_count)
self.research_points = np.empty_like(self.city_count)
self.total_fuel = np.empty_like(self.city_count)
self.weights = {
"game_result": 0.,
"city": 1.,
"unit": 0.5,
"research": 0.01,
"fuel": 0.001,
}
self.weights.update({key: val for key, val in kwargs.items() if key in self.weights.keys()})
for key in copy.copy(kwargs).keys():
if key in self.weights.keys():
del kwargs[key]
super(PunishingExponentialReward, self).__init__(**kwargs)
self._reset()
def compute_rewards_and_done(self, game_state: Game, done: bool) -> Tuple[Tuple[float, float], bool]:
new_city_count = count_city_tiles(game_state)
new_unit_count = count_units(game_state)
new_research_points = count_research_points(game_state)
new_total_fuel = count_total_fuel(game_state)
city_diff = new_city_count - self.city_count
unit_diff = new_unit_count - self.unit_count
reward_items_dict = {
"city": new_city_count,
"unit": new_unit_count,
"research": new_research_points,
"fuel": new_total_fuel,
}
if done:
game_result_reward = [int(GameResultReward.compute_player_reward(p)) for p in game_state.players]
game_result_reward = (rankdata(game_result_reward) - 1.) * 2. - 1.
self._reset()
else:
game_result_reward = np.array([0., 0.])
self.city_count = new_city_count
self.unit_count = new_unit_count
self.research_points = new_research_points
self.total_fuel = new_total_fuel
reward_items_dict["game_result"] = game_result_reward
assert self.weights.keys() == reward_items_dict.keys()
reward = np.stack(
[reward_items_dict[key] * w for key, w in self.weights.items()],
axis=0
).sum(axis=0)
lost_unit_or_city = (city_diff < 0) | (unit_diff < 0)
reward = np.where(
lost_unit_or_city,
-0.1,
reward / 1_000.
)
return tuple(reward), done or lost_unit_or_city.any()
def compute_rewards(self, game_state: Game, done: bool) -> Tuple[float, float]:
raise NotImplementedError
def _reset(self) -> NoReturn:
self.city_count = np.ones_like(self.city_count)
self.unit_count = np.ones_like(self.unit_count)
self.research_points = np.zeros_like(self.research_points)
self.total_fuel = np.zeros_like(self.total_fuel)
# Subtask reward spaces defined below
# NB: Subtasks that are "different enough" should be defined separately since each subtask gets its own embedding
# See obs_spaces.SUBTASK_ENCODING
# TODO: Somehow include target locations for subtasks?
class Subtask(BaseRewardSpace, ABC):
@staticmethod
def get_reward_spec() -> RewardSpec:
"""
Don't override reward_spec or you risk breaking classes like multi_subtask.MultiSubtask
"""
return RewardSpec(
reward_min=0.,
reward_max=1.,
zero_sum=False,
only_once=True
)
def compute_rewards_and_done(self, game_state: Game, done: bool) -> Tuple[Tuple[float, float], bool]:
goal_reached = self.completed_task(game_state)
return tuple(goal_reached.astype(float)), goal_reached.any() or done
@abstractmethod
def completed_task(self, game_state: Game) -> np.ndarray:
pass
def get_subtask_encoding(self, subtask_encoding: dict) -> int:
return subtask_encoding[type(self)]
class CollectNWood(Subtask):
def __init__(self, n: int = GAME_CONSTANTS["PARAMETERS"]["RESOURCE_CAPACITY"]["WORKER"], **kwargs):
super(CollectNWood, self).__init__(**kwargs)
self.n = n
def completed_task(self, game_state: Game) -> np.ndarray:
return np.array([
sum([unit.cargo.wood for unit in player.units])
for player in game_state.players
]) >= self.n
class CollectNCoal(Subtask):
def __init__(self, n: int = GAME_CONSTANTS["PARAMETERS"]["RESOURCE_CAPACITY"]["WORKER"] // 2, **kwargs):
super(CollectNCoal, self).__init__(**kwargs)
self.n = n
def completed_task(self, game_state: Game) -> np.ndarray:
return np.array([
sum([unit.cargo.coal for unit in player.units])
for player in game_state.players
]) >= self.n
class CollectNUranium(Subtask):
def __init__(self, n: int = GAME_CONSTANTS["PARAMETERS"]["RESOURCE_CAPACITY"]["WORKER"] // 5, **kwargs):
super(CollectNUranium, self).__init__(**kwargs)
self.n = n
def completed_task(self, game_state: Game) -> np.ndarray:
return np.array([
sum([unit.cargo.uranium for unit in player.units])
for player in game_state.players
]) >= self.n
class MakeNCityTiles(Subtask):
def __init__(self, n_city_tiles: int = 2, **kwargs):
super(MakeNCityTiles, self).__init__(**kwargs)
assert n_city_tiles > 1, "Players start with 1 city tile already"
self.n_city_tiles = n_city_tiles
def completed_task(self, game_state: Game) -> np.ndarray:
return count_city_tiles(game_state) >= self.n_city_tiles
class MakeNContiguousCityTiles(MakeNCityTiles):
def completed_task(self, game_state: Game) -> np.ndarray:
return np.array([
# Extra -1 is included to avoid taking max of empty sequence
max([len(city.citytiles) for city in player.cities.values()] + [0])
for player in game_state.players
]) >= self.n_city_tiles
class CollectNTotalFuel(Subtask):
def __init__(self, n_total_fuel: int = GAME_CONSTANTS["PARAMETERS"]["LIGHT_UPKEEP"]["CITY"] *
GAME_CONSTANTS["PARAMETERS"]["NIGHT_LENGTH"], **kwargs):
super(CollectNTotalFuel, self).__init__(**kwargs)
self.n_total_fuel = n_total_fuel
def completed_task(self, game_state: Game) -> np.ndarray:
return count_total_fuel(game_state) >= self.n_total_fuel
class SurviveNNights(Subtask):
def __init__(self, n_nights: int = 1, **kwargs):
super(SurviveNNights, self).__init__(**kwargs)
cycle_len = GAME_CONSTANTS["PARAMETERS"]["DAY_LENGTH"] + GAME_CONSTANTS["PARAMETERS"]["NIGHT_LENGTH"]
self.target_step = n_nights * cycle_len
assert self.target_step <= GAME_CONSTANTS["PARAMETERS"]["MAX_DAYS"]
self.city_count = np.empty((2,), dtype=int)
self.unit_count = np.empty_like(self.city_count)
def compute_rewards_and_done(self, game_state: Game, done: bool) -> Tuple[Tuple[float, float], bool]:
failed_task = self.failed_task(game_state)
completed_task = self.completed_task(game_state)
if failed_task.any():
rewards = np.where(
failed_task,
0.,
0.5 + 0.5 * completed_task.astype(float)
)
else:
rewards = completed_task.astype(float)
done = failed_task.any() or completed_task.any() or done
if done:
self._reset()
return tuple(rewards), done
def completed_task(self, game_state: Game) -> np.ndarray:
return np.array([
game_state.turn >= self.target_step
]).repeat(2)
def failed_task(self, game_state: Game) -> np.ndarray:
new_city_count = count_city_tiles(game_state)
new_unit_count = count_units(game_state)
failed = np.logical_or(
new_city_count < self.city_count,
new_unit_count < self.unit_count
)
self.city_count = new_city_count
self.unit_count = new_unit_count
return failed
def _reset(self) -> NoReturn:
self.city_count = np.ones_like(self.city_count)
self.unit_count = np.ones_like(self.unit_count)
class GetNResearchPoints(Subtask):
def __init__(
self,
n_research_points: int = GAME_CONSTANTS["PARAMETERS"]["RESEARCH_REQUIREMENTS"]["COAL"],
**kwargs
):
super(GetNResearchPoints, self).__init__(**kwargs)
self.n_research_points = n_research_points
def completed_task(self, game_state: Game) -> np.ndarray:
return np.array([player.research_points for player in game_state.players]) >= self.n_research_points
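# --- Illustrative sketch (not part of the original module) ---
# A minimal example of how an additional sub-task could be declared by following the
# pattern above; the class name MakeNWorkers and its default threshold are hypothetical.
class MakeNWorkers(Subtask):
    def __init__(self, n_workers: int = 5, **kwargs):
        super(MakeNWorkers, self).__init__(**kwargs)
        self.n_workers = n_workers
    def completed_task(self, game_state: Game) -> np.ndarray:
        # Completed for a player once they control at least n_workers worker units
        return np.array([
            sum(unit.is_worker() for unit in player.units)
            for player in game_state.players
        ]) >= self.n_workers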
|
[
"numpy.zeros_like",
"numpy.ones_like",
"numpy.maximum",
"logging.warning",
"numpy.empty",
"numpy.empty_like",
"numpy.ones",
"copy.copy",
"scipy.stats.rankdata",
"numpy.where",
"numpy.array",
"numpy.logical_or"
] |
[((350, 417), 'numpy.array', 'np.array', (['[player.city_tile_count for player in game_state.players]'], {}), '([player.city_tile_count for player in game_state.players])\n', (358, 417), True, 'import numpy as np\n'), ((801, 868), 'numpy.array', 'np.array', (['[player.research_points for player in game_state.players]'], {}), '([player.research_points for player in game_state.players])\n', (809, 868), True, 'import numpy as np\n'), ((5022, 5049), 'numpy.empty', 'np.empty', (['(2,)'], {'dtype': 'float'}), '((2,), dtype=float)\n', (5030, 5049), True, 'import numpy as np\n'), ((5076, 5106), 'numpy.empty_like', 'np.empty_like', (['self.city_count'], {}), '(self.city_count)\n', (5089, 5106), True, 'import numpy as np\n'), ((5138, 5168), 'numpy.empty_like', 'np.empty_like', (['self.city_count'], {}), '(self.city_count)\n', (5151, 5168), True, 'import numpy as np\n'), ((5195, 5225), 'numpy.empty_like', 'np.empty_like', (['self.city_count'], {}), '(self.city_count)\n', (5208, 5225), True, 'import numpy as np\n'), ((7995, 8056), 'numpy.where', 'np.where', (['(reward > 0.0)', '(self.positive_weight * reward)', 'reward'], {}), '(reward > 0.0, self.positive_weight * reward, reward)\n', (8003, 8056), True, 'import numpy as np\n'), ((8119, 8180), 'numpy.where', 'np.where', (['(reward < 0.0)', '(self.negative_weight * reward)', 'reward'], {}), '(reward < 0.0, self.negative_weight * reward, reward)\n', (8127, 8180), True, 'import numpy as np\n'), ((8309, 8338), 'numpy.ones_like', 'np.ones_like', (['self.city_count'], {}), '(self.city_count)\n', (8321, 8338), True, 'import numpy as np\n'), ((8365, 8394), 'numpy.ones_like', 'np.ones_like', (['self.unit_count'], {}), '(self.unit_count)\n', (8377, 8394), True, 'import numpy as np\n'), ((8426, 8461), 'numpy.zeros_like', 'np.zeros_like', (['self.research_points'], {}), '(self.research_points)\n', (8439, 8461), True, 'import numpy as np\n'), ((8488, 8518), 'numpy.zeros_like', 'np.zeros_like', (['self.total_fuel'], {}), '(self.total_fuel)\n', (8501, 8518), True, 'import numpy as np\n'), ((9450, 9477), 'numpy.empty', 'np.empty', (['(2,)'], {'dtype': 'float'}), '((2,), dtype=float)\n', (9458, 9477), True, 'import numpy as np\n'), ((9504, 9534), 'numpy.empty_like', 'np.empty_like', (['self.city_count'], {}), '(self.city_count)\n', (9517, 9534), True, 'import numpy as np\n'), ((9566, 9596), 'numpy.empty_like', 'np.empty_like', (['self.city_count'], {}), '(self.city_count)\n', (9579, 9596), True, 'import numpy as np\n'), ((9623, 9653), 'numpy.empty_like', 'np.empty_like', (['self.city_count'], {}), '(self.city_count)\n', (9636, 9653), True, 'import numpy as np\n'), ((11605, 11655), 'numpy.where', 'np.where', (['lost_unit_or_city', '(-0.1)', '(reward / 1000.0)'], {}), '(lost_unit_or_city, -0.1, reward / 1000.0)\n', (11613, 11655), True, 'import numpy as np\n'), ((11945, 11974), 'numpy.ones_like', 'np.ones_like', (['self.city_count'], {}), '(self.city_count)\n', (11957, 11974), True, 'import numpy as np\n'), ((12001, 12030), 'numpy.ones_like', 'np.ones_like', (['self.unit_count'], {}), '(self.unit_count)\n', (12013, 12030), True, 'import numpy as np\n'), ((12062, 12097), 'numpy.zeros_like', 'np.zeros_like', (['self.research_points'], {}), '(self.research_points)\n', (12075, 12097), True, 'import numpy as np\n'), ((12124, 12154), 'numpy.zeros_like', 'np.zeros_like', (['self.total_fuel'], {}), '(self.total_fuel)\n', (12137, 12154), True, 'import numpy as np\n'), ((16112, 16137), 'numpy.empty', 'np.empty', (['(2,)'], {'dtype': 'int'}), '((2,), dtype=int)\n', (16120, 16137), 
True, 'import numpy as np\n'), ((16164, 16194), 'numpy.empty_like', 'np.empty_like', (['self.city_count'], {}), '(self.city_count)\n', (16177, 16194), True, 'import numpy as np\n'), ((17140, 17226), 'numpy.logical_or', 'np.logical_or', (['(new_city_count < self.city_count)', '(new_unit_count < self.unit_count)'], {}), '(new_city_count < self.city_count, new_unit_count < self.\n unit_count)\n', (17153, 17226), True, 'import numpy as np\n'), ((17421, 17450), 'numpy.ones_like', 'np.ones_like', (['self.city_count'], {}), '(self.city_count)\n', (17433, 17450), True, 'import numpy as np\n'), ((17477, 17506), 'numpy.ones_like', 'np.ones_like', (['self.unit_count'], {}), '(self.unit_count)\n', (17489, 17506), True, 'import numpy as np\n'), ((1620, 1688), 'logging.warning', 'logging.warning', (['f"""RewardSpace received unexpected kwargs: {kwargs}"""'], {}), "(f'RewardSpace received unexpected kwargs: {kwargs}')\n", (1635, 1688), False, 'import logging\n'), ((6740, 6787), 'numpy.maximum', 'np.maximum', (['(new_total_fuel - self.total_fuel)', '(0)'], {}), '(new_total_fuel - self.total_fuel, 0)\n', (6750, 6787), True, 'import numpy as np\n'), ((7011, 7034), 'numpy.ones', 'np.ones', (['(2)'], {'dtype': 'float'}), '(2, dtype=float)\n', (7018, 7034), True, 'import numpy as np\n'), ((7325, 7345), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (7333, 7345), True, 'import numpy as np\n'), ((11045, 11065), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (11053, 11065), True, 'import numpy as np\n'), ((17896, 17963), 'numpy.array', 'np.array', (['[player.research_points for player in game_state.players]'], {}), '([player.research_points for player in game_state.players])\n', (17904, 17963), True, 'import numpy as np\n'), ((5722, 5739), 'copy.copy', 'copy.copy', (['kwargs'], {}), '(kwargs)\n', (5731, 5739), False, 'import copy\n'), ((9947, 9964), 'copy.copy', 'copy.copy', (['kwargs'], {}), '(kwargs)\n', (9956, 9964), False, 'import copy\n'), ((16879, 16926), 'numpy.array', 'np.array', (['[game_state.turn >= self.target_step]'], {}), '([game_state.turn >= self.target_step])\n', (16887, 16926), True, 'import numpy as np\n'), ((3459, 3476), 'scipy.stats.rankdata', 'rankdata', (['rewards'], {}), '(rewards)\n', (3467, 3476), False, 'from scipy.stats import rankdata\n'), ((7207, 7235), 'scipy.stats.rankdata', 'rankdata', (['game_result_reward'], {}), '(game_result_reward)\n', (7215, 7235), False, 'from scipy.stats import rankdata\n'), ((10927, 10955), 'scipy.stats.rankdata', 'rankdata', (['game_result_reward'], {}), '(game_result_reward)\n', (10935, 10955), False, 'from scipy.stats import rankdata\n')]
|
import numpy as np
import matplotlib.pyplot as plt
# To use LaTeX in the plots
plt.rcParams.update({
"text.usetex": True,
"font.family": "sans-serif",
"font.sans-serif": ["Helvetica"]})
# for Palatino and other serif fonts use:
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif",
"font.serif": ["Palatino"],
})
plt.rcParams.update({
"text.usetex": True,
"font.family": "Helvetica"
})
# constants used in the problem
E = 0.220 # in volts
R = 500 # in Ohms
Vt = 0.025 # in volts
Isa = 0.6e-6 # in Amps
Isb = 1.2e-6 # in Amps
# Calculates the vector F which solves the equation F = 0
def F(v): # v is a 2 x 1 vector which contains the voltage values of the circuit
f1 = (E - v[0]) / R - Isa * (np.exp((v[0] - v[1]) / Vt) - 1)
    f2 = Isa * (np.exp((v[0] - v[1]) / Vt) - 1) - Isb * (np.exp(v[1] / Vt) - 1)
F = np.array([f1, f2])
return F
# compute the Jacobian
def Jacobian(v):
J = np.zeros(shape=(2, 2))
J[0][0] = -1/R - (Isa / Vt) * np.exp((v[0] - v[1]) / Vt)
J[0][1] = (Isa / Vt) * np.exp((v[0] - v[1]) / Vt)
J[1][0] = (Isa / Vt) * np.exp((v[0] - v[1]) / Vt)
J[1][1] = -(Isb / Vt) * np.exp(v[1] / Vt) - (Isa/Vt)*np.exp((v[0]-v[1])/Vt)
return J
# uses the above two functions to calculate the voltage solution to the circuit
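# Descriptive note: the function below performs the Newton-Raphson update
# V_{k+1} = V_k - J(V_k)^{-1} F(V_k), iterating until the 2-norm of the step dV
# falls below the requested tolerance maxerr.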
def newton_raphson(maxerr):
i = 0
Vnew = np.zeros(shape=(2, 1))
dV_vec = []
val_vec = []
conv = False
while not conv:
i += 1
        F_p = Jacobian(Vnew) # calculate the Jacobian given the current voltage values
eff = F(Vnew) # calculate the value of the F vector for the current voltage values
dV = np.multiply(np.dot(np.linalg.inv(F_p), eff), -1) # compute dV
crit_val = np.linalg.norm(dV, 2) # compute the 2-norm of dV for convergence criteria
Vnew = np.add(Vnew, dV) # compute new voltage value for next step
dV_vec.append(crit_val)
val_vec.append(Vnew)
print("------------------------------------")
print("iteration = "+str(i))
print("Jacobian = "+str(F_p))
print("F-vector = "+str(eff))
print("\u0394 V = "+str(dV))
if crit_val < maxerr:
break
return Vnew, dV_vec, i, val_vec
if __name__ == "__main__":
error = 10e-15 # the maximum allowable error
val = newton_raphson(error)
# plot error in the log scale
dV_norm_err = val[1]
iter_no = val[2]
ans = val[3]
print("------------------------------------")
print(ans[7])
# Plot the 10log_10 of dV
x_val = np.linspace(1, iter_no, iter_no)
dV = 10*np.log10(dV_norm_err)
plt.plot(x_val, dV)
plt.xlabel("Number of Iterations")
plt.ylabel("log(2-norm dV)")
plt.show()
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.add",
"numpy.zeros",
"matplotlib.pyplot.rcParams.update",
"numpy.array",
"numpy.exp",
"numpy.linspace",
"numpy.linalg.norm",
"numpy.linalg.inv",
"matplotlib.pyplot.ylabel",
"numpy.log10",
"matplotlib.pyplot.xlabel"
] |
[((80, 189), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'text.usetex': True, 'font.family': 'sans-serif', 'font.sans-serif': [\n 'Helvetica']}"], {}), "({'text.usetex': True, 'font.family': 'sans-serif',\n 'font.sans-serif': ['Helvetica']})\n", (99, 189), True, 'import matplotlib.pyplot as plt\n'), ((241, 339), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'text.usetex': True, 'font.family': 'serif', 'font.serif': ['Palatino']}"], {}), "({'text.usetex': True, 'font.family': 'serif',\n 'font.serif': ['Palatino']})\n", (260, 339), True, 'import matplotlib.pyplot as plt\n'), ((351, 421), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'text.usetex': True, 'font.family': 'Helvetica'}"], {}), "({'text.usetex': True, 'font.family': 'Helvetica'})\n", (370, 421), True, 'import matplotlib.pyplot as plt\n'), ((862, 880), 'numpy.array', 'np.array', (['[f1, f2]'], {}), '([f1, f2])\n', (870, 880), True, 'import numpy as np\n'), ((944, 966), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 2)'}), '(shape=(2, 2))\n', (952, 966), True, 'import numpy as np\n'), ((1360, 1382), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 1)'}), '(shape=(2, 1))\n', (1368, 1382), True, 'import numpy as np\n'), ((2562, 2594), 'numpy.linspace', 'np.linspace', (['(1)', 'iter_no', 'iter_no'], {}), '(1, iter_no, iter_no)\n', (2573, 2594), True, 'import numpy as np\n'), ((2633, 2652), 'matplotlib.pyplot.plot', 'plt.plot', (['x_val', 'dV'], {}), '(x_val, dV)\n', (2641, 2652), True, 'import matplotlib.pyplot as plt\n'), ((2657, 2691), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Iterations"""'], {}), "('Number of Iterations')\n", (2667, 2691), True, 'import matplotlib.pyplot as plt\n'), ((2696, 2724), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""log(2-norm dV)"""'], {}), "('log(2-norm dV)')\n", (2706, 2724), True, 'import matplotlib.pyplot as plt\n'), ((2729, 2739), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2737, 2739), True, 'import matplotlib.pyplot as plt\n'), ((1055, 1081), 'numpy.exp', 'np.exp', (['((v[0] - v[1]) / Vt)'], {}), '((v[0] - v[1]) / Vt)\n', (1061, 1081), True, 'import numpy as np\n'), ((1109, 1135), 'numpy.exp', 'np.exp', (['((v[0] - v[1]) / Vt)'], {}), '((v[0] - v[1]) / Vt)\n', (1115, 1135), True, 'import numpy as np\n'), ((1743, 1764), 'numpy.linalg.norm', 'np.linalg.norm', (['dV', '(2)'], {}), '(dV, 2)\n', (1757, 1764), True, 'import numpy as np\n'), ((1833, 1849), 'numpy.add', 'np.add', (['Vnew', 'dV'], {}), '(Vnew, dV)\n', (1839, 1849), True, 'import numpy as np\n'), ((2607, 2628), 'numpy.log10', 'np.log10', (['dV_norm_err'], {}), '(dV_norm_err)\n', (2615, 2628), True, 'import numpy as np\n'), ((1001, 1027), 'numpy.exp', 'np.exp', (['((v[0] - v[1]) / Vt)'], {}), '((v[0] - v[1]) / Vt)\n', (1007, 1027), True, 'import numpy as np\n'), ((1164, 1181), 'numpy.exp', 'np.exp', (['(v[1] / Vt)'], {}), '(v[1] / Vt)\n', (1170, 1181), True, 'import numpy as np\n'), ((1193, 1219), 'numpy.exp', 'np.exp', (['((v[0] - v[1]) / Vt)'], {}), '((v[0] - v[1]) / Vt)\n', (1199, 1219), True, 'import numpy as np\n'), ((748, 774), 'numpy.exp', 'np.exp', (['((v[0] - v[1]) / Vt)'], {}), '((v[0] - v[1]) / Vt)\n', (754, 774), True, 'import numpy as np\n'), ((794, 820), 'numpy.exp', 'np.exp', (['((v[0] - v[1]) / Vt)'], {}), '((v[0] - v[1]) / Vt)\n', (800, 820), True, 'import numpy as np\n'), ((829, 846), 'numpy.exp', 'np.exp', (['(v[1] / Vt)'], {}), '(v[1] / Vt)\n', (835, 846), True, 'import numpy as np\n'), ((1680, 1698), 'numpy.linalg.inv', 'np.linalg.inv', 
(['F_p'], {}), '(F_p)\n', (1693, 1698), True, 'import numpy as np\n')]
|
import sys
from os.path import join
from threading import Timer
import numpy as np
from openal.audio import SoundSource
assets_root = getattr(sys, '_MEIPASS', '.')
def get_sfx(name):
return join(assets_root, 'assets', name)
def new_pt(*values):
return np.array(values or (0, 0, 0), dtype=float)
def vec_mag(vec: np.array):
return np.sqrt(vec.dot(vec))
def vec_dist(a: np.array, b: np.array):
return vec_mag(a - b)
class ContinuousSoundSource(SoundSource):
def __init__(self, sound_generator):
super().__init__()
def play_sound():
sound = sound_generator()
self.queue(sound)
self.__dict__['timer'] = timer = Timer(self.calc_length(sound), play_sound)
timer.daemon = True
timer.start()
play_sound()
@staticmethod
def calc_length(sound):
return sound.size / (sound.frequency * sound.bitrate / 8)
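# Worked example for calc_length (illustrative values assumed, not from the original):
# for 16-bit mono audio at 44100 Hz, sound.bitrate == 16, so one second occupies
# 44100 * 16 / 8 = 88200 bytes; a buffer with sound.size == 176400 bytes then gives
# calc_length(sound) == 176400 / 88200 == 2.0 seconds.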
|
[
"numpy.array",
"os.path.join"
] |
[((198, 231), 'os.path.join', 'join', (['assets_root', '"""assets"""', 'name'], {}), "(assets_root, 'assets', name)\n", (202, 231), False, 'from os.path import join\n'), ((266, 308), 'numpy.array', 'np.array', (['(values or (0, 0, 0))'], {'dtype': 'float'}), '(values or (0, 0, 0), dtype=float)\n', (274, 308), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
#pylint: disable = C, R
#pylint: disable = E1101 # no-member (generated-members)
#pylint: disable = C0302 # too-many-lines
"""
This code accompanies the article
"Pareto-based evaluation of national responses to COVID-19 pandemic shows
that saving lives and protecting economy are non-trade-off objectives"
by Kochanczyk & Lipniacki (Scientific Reports, 2021).
License: MIT
Last changes: November 09, 2020
"""
# --------------------------------------------------------------------------------------------------
import re
from operator import itemgetter
from multiprocessing import Pool
import pandas as pd
import seaborn as sns
import numpy as np
import scipy.stats
import statsmodels.stats.weightstats as wstats
import matplotlib.pyplot as plt
import matplotlib.dates as dts
import matplotlib.ticker as tckr
import matplotlib.patheffects as pthff
from colorsys import rgb_to_hls
from pandas.plotting import register_matplotlib_converters
import locale
import dill
import gzip
from shared import *
register_matplotlib_converters()
locate_set = False
try:
locale.setlocale(locale.LC_TIME, 'en_US')
locale.setlocale(locale.LC_ALL, 'en_US')
locate_set = True
except:
try:
locale.setlocale(locale.LC_TIME, 'en_US.utf8')
locale.setlocale(locale.LC_ALL, 'en_US.utf8')
locate_set = True
except:
locale.setlocale(locale.LC_TIME, 'POSIX')
locale.setlocale(locale.LC_ALL, 'POSIX')
if not locate_set:
print('Warning: US English locale could not be set. Check tick labels in generated figures.')
# -- Shared plot settings --------------------------------------------------------------------------
plt.rcParams['axes.linewidth'] = 0.5
plt.rcParams['xtick.major.width'] = 0.5
plt.rcParams['ytick.major.width'] = 0.5
plt.rcParams['xtick.minor.width'] = 0.5
plt.rcParams['ytick.minor.width'] = 0.5
plt.rcParams['xtick.major.pad'] = 1.67
plt.rcParams['ytick.major.pad'] = 1.33
plt.rc('font', size=8, family='sans-serif')
plt.rc('text', usetex=True)
plt.rc('text.latex', preamble=r'''\usepackage{cmbright}''')
# -- Plotting auxiliary functions ------------------------------------------------------------------
# manual tweaks:
OUT_OF_FRONT = ['Greece', 'Hungary', 'Canada', 'Netherlands', 'Czechia']
# colors:
SNAIL_GREEN, SNAIL_NONGREEN, SNAIL_ORANGE = '#77ffaa', '#aabbdd', '#885500'
ANNOT_COLOR = '#777777'
def color_of(country, dull_color=(0.15, 0.15, 0.15)):
colors = {
'Austria': plt.cm.tab10(6),
'Belgium': plt.cm.tab10(5),
'Bulgaria': plt.cm.tab10(2),
'Croatia': (0.50, 0.55, 0.00),
'Czechia': plt.cm.tab10(4),
'Denmark': (0.85, 0.20, 0.00),
'Finland': plt.cm.tab10(9),
'France': (0.95, 0.25, 0.75),
'Germany': (0.55, 0.25, 0.70),
'Hungary': (0.35, 0.35, 0.35),
'Greece': (0.45, 0.75, 1.00),
'Italy': plt.cm.tab10(2),
'Netherlands': (0.88, 0.50, 0.00),
'Norway': plt.cm.tab10(0),
'Poland': (0.15, 0.65, 1.00),
'Portugal': (0.95, 0.65, 0.00),
'Romania': plt.cm.tab10(8),
'Russia': (0.80, 0.45, 0.15),
'Slovakia': (0.25, 0.90, 0.50),
'Slovenia': plt.cm.tab10(1),
'Spain': plt.cm.tab10(3),
'Sweden': (0.10, 0.20, 0.90),
'Switzerland': (1.00, 0.05, 0.05),
'United Kingdom': (0.20, 0.00, 0.99),
'Japan': (0.9, 0.00, 0.00),
'South Korea': (0.70, 0.60, 0.65),
'Taiwan': (0.10, 0.80, 0.00),
'California': (0.90, 0.70, 0.00),
'Canada': (0.00, 0.45, 0.80),
'Florida': (0.95, 0.40, 0.00),
'Georgia': (0.80, 0.10, 0.60),
'Illinois': (0.75, 0.50, 0.00),
'Michigan': (0.05, 0.50, 0.15),
'North Carolina': (0.10, 0.00, 0.95),
'New York': (0.60, 0.30, 0.00),
'Ohio': (0.65, 0.00, 0.00),
'Pennsylvania': (0.20, 0.25, 1.00),
'Texas': (0.35, 0.40, 0.40),
'Argentina': (0.30, 0.75, 1.00),
'Bolivia': (0.20, 0.65, 0.00),
'Brazil': (0.00, 0.70, 0.20),
'Chile': (0.65, 0.15, 0.00),
'Colombia': (0.00, 0.10, 0.65),
'Ecuador': (0.65, 0.65, 0.00),
'Mexico': (0.00, 0.50, 0.60),
'Peru': (0.75, 0.50, 0.25),
}
if country in colors.keys():
return colors[country]
else:
return dull_color
def correlations(values, weights):
rho = scipy.stats.pearsonr(values[:,0], values[:,1])[0]
wrho = wstats.DescrStatsW(values, weights=weights).corrcoef[0][1]
return (rho, wrho)
def adjust_spines(ax, spines, left_shift=15, bottom_shift=0):
for loc, spine in ax.spines.items():
if loc in spines:
if loc == 'left':
spine.set_position(('outward', left_shift))
elif loc == 'bottom':
spine.set_position(('outward', bottom_shift))
else:
spine.set_color('none')
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
ax.xaxis.set_ticks([])
def set_ticks_lengths(ax):
ax.tick_params(which='major', length=2., labelsize=7)
ax.tick_params(which='minor', length=1.)
def darken(color, scale=0.5):
lightness = min(1, rgb_to_hls(*color[0:3])[1] * scale)
return sns.set_hls_values(color=color, h=None, l=lightness, s=None)
def pareto_front(data, optima=True):
sorted_data = sorted(data, key=itemgetter(0, 1), reverse=not optima) # x-ascending
front = [ sorted_data[0][2] ]
cutoff = sorted_data[0][1]
for sd in sorted_data[1:]:
if (optima and sd[1] < cutoff) or (not optima and sd[1] > cutoff):
front += [sd[2]]
cutoff = sd[1]
return front
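# --- Illustrative check (not part of the original script) ---
# A toy run of pareto_front on (x, y, label) triples, the input shape used by
# draw_pareto_fronts_ further below; with optima=True a label joins the front only
# when its y strictly undercuts the running minimum as x increases:
#   pareto_front([(1, 5, 'A'), (2, 7, 'B'), (3, 4, 'C')])         # -> ['A', 'C']
#   pareto_front([(1, 5, 'A'), (2, 7, 'B'), (3, 4, 'C')], False)  # -> ['C', 'B']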
def put_final_dot(ax, location, x, y, is_extra_country=False, is_tail_shown=False,
show_population_halo=False, label_shifting='A', italic=False):
label_shifts = {
'Denmark': (940, 1.0 ),
'Norway': ( 20, 0.88 ),
'South Korea': ( 52, 0.59 ),
'Portugal': ( 0, 0.97 ),
'Bulgaria': (830, 0.994),
'Switzerland': ( 80, 0.92 ),
'Ohio': ( 40, 1.014),
'Michigan': (800, 1.018),
'Florida': ( 0, 0.987),
'Illinois': ( 90, 1.016),
'North Carolina': (-10, 0.97 ),
'Pennsylvania': ( 0, 0.999),
'Georgia': (825, 0.991)
} if label_shifting == 'A' else {}
if show_population_halo:
marker_size = 3.5
diameter = np.sqrt(population(location)) * 3
light_color = color_of(location)
ax.plot([x], [y], '-.', marker='8' if is_extra_country else 'o',
linewidth=1, markersize=diameter, markeredgewidth=0, alpha=0.2, clip_on=False,
color=light_color, markerfacecolor=light_color)
else:
marker_size = 6
ax.plot([x], [y], '-.', marker='8' if is_extra_country else 'o',
linewidth=1, markersize=marker_size, markeredgewidth=0, alpha=0.8, clip_on=False,
color=color_of(location), markerfacecolor=color_of(location))
loc = location.replace('United Kingdom', 'UK')
if italic:
loc = r'\textit{' + loc + r'}'
if label_shifting == 'A':
ax.annotate(loc, xycoords='data',
xy=(x + 65 - (0 if location not in label_shifts else label_shifts[location][0]),
y**0.9999 * (1 if location not in label_shifts else label_shifts[location][1])),
color=sns.set_hls_values(color_of(location), l=0.3), clip_on=False)
else:
ax.annotate(loc, xycoords='data',
xy=(x + 0.13,
y + 0.04),
color=sns.set_hls_values(color_of(location), l=0.3), clip_on=False)
def jointly_trimmed_trajs(trajs, locations, cols, force_end=None, skipped=None, cleanup=True,
verbose=False):
assert len(cols) == 2
col1, col2 = cols
days_of_last_available_data = set()
for country in locations:
if skipped and country in skipped:
continue
df = trajs[country]
df_sel = df[ ~df[col1].isnull() & ~df[col2].isnull() ]
last_day = df_sel.iloc[-1].name
days_of_last_available_data.add(last_day)
if verbose:
print(country, last_day.strftime('%b%d'))
day_of_last_available_data = min(days_of_last_available_data)
if force_end is None:
if verbose:
print(f"Last shared available day ({' & '.join(cols)}):",
day_of_last_available_data.strftime('%b%d'))
else:
if verbose:
print(f"Last shared available day ({' & '.join(cols)}):",
day_of_last_available_data.strftime('%b%d'), '==FORCED=>',
force_end.strftime('%b%d'))
day_of_last_available_data = force_end
edited_trajs = {}
assert len(cols) == 2
for country in locations:
df = trajs[country].loc[:day_of_last_available_data]
edited_trajs[country] = df[ ~df[col1].isnull() & ~df[col2].isnull() ] if cleanup else df
return day_of_last_available_data, edited_trajs
def extract_cumulative_immobilization_and_deaths(trajectories, country, interval):
trajectory = trajectories[country]
immobi = -trajectory[['mobility_reduction']]
deaths = trajectory[['new_deaths']].astype('Float64')
ppl = population(country)
if interval == 'monthly':
immobi = immobi.cumsum().groupby(pd.Grouper(freq='M')).nth(0)
deaths = deaths.cumsum().groupby(pd.Grouper(freq='M')).nth(0) / ppl
df = immobi.join(deaths).rename(columns={
'mobility_reduction': f"immobilization_cumul_{country}",
'new_deaths': f"new_deaths_cumul_per_1M_{country}"})
ii = df.index
df.index = [i.replace(day=1) for i in ii]
return df
elif interval == 'weekly':
immobi = immobi.resample('W').sum().cumsum()
deaths = deaths.resample('W').sum().cumsum() / ppl
df = immobi.join(deaths).rename(columns={
'mobility_reduction': f"immobilization_cumul_{country}",
'new_deaths': f"new_deaths_cumul_per_1M_{country}"})
return df
elif interval == 'daily':
immobi = immobi.cumsum()
deaths = deaths.cumsum() / ppl
df = immobi.join(deaths).rename(columns={
'mobility_reduction': f"immobilization_cumul_{country}",
'new_deaths': f"new_deaths_cumul_per_1M_{country}"})
return df
def make_sqrt_deaths_yaxis(ax, ymax=40, sep=5):
ax.set_ylim((0, ymax))
ticks = list(range(0, ymax + sep, sep))
ax.set_yticks(ticks)
ax.set_yticklabels(['0'] + [r'$\sqrt{' + str(t**2) + '}$' for t in ticks[1:]])
def plot_cumulative_immobilization_and_deaths(trajectories, locations, final_day, show_fronts,
show_tail, show_corr_history, show_population_halo,
fig_name='X', scale_deaths=np.sqrt):
def draw_pareto_fronts_(ax, finals, n_fronts, optimal):
fronts = []
for i in range(n_fronts):
fronts_locations = [__ for _ in fronts for __ in _]
finals_remaining = [(*im_de, loc) for loc, im_de in finals.items()
if loc not in fronts_locations and loc not in OUT_OF_FRONT]
front = pareto_front(finals_remaining, optimal)
fronts.append(front)
for front_i, front in enumerate(fronts):
color = sns.set_hls_values('gray', l=0.1 + 0.04*(max(0, front_i - 1*optimal))) # TMP: was 0.15+0.1*
front_coords = np.array([finals[loc] for loc in front]).T
if len(front_coords.T) > 1:
ax.plot(*front_coords, ':' if optimal else '--', c=color, alpha=0.8,
linewidth=1.1 if optimal else 0.8)
else:
if optimal:
front_coords = [[front_coords[0][0] + 0.707*180 + 180*np.cos((180 + i)/360*2*3.14159),
front_coords[1][0] + 0.8 + 1.2*np.sin((180 + i)/360*2*3.14159)]
for i in range(0, 91, 10)]
else:
front_coords = [[front_coords[0][0] - 0.707*180 + 180*np.cos((180 + i)/360*2*3.14159),
front_coords[1][0] - 0.8 + 1.2*np.sin((180 + i)/360*2*3.14159)]
for i in range(180+0, 180+91, 10)]
ax.plot(*np.array(front_coords).T, ':' if optimal else '--', c=color, alpha=0.8,
linewidth=1.1 if optimal else 0.8, clip_on=False)
def make_subplot_(ax, trajs, locations, final_day, show_fronts, panel_letter=None):
adjust_spines(ax, ['left', 'bottom'], left_shift=10)
ax.set_xlim((0, 8e3))
ax.set_xlabel(r'Cumulative lockdown')
ax.set_ylabel(r'$\sqrt{\textrm{\sf Cumulative deaths/M}}$')
make_sqrt_deaths_yaxis(ax)
# plot "flares" (tails are optional)
finals = {}
for loc in locations:
im, de = extract_cumulative_immobilization_and_deaths(trajs, loc, 'monthly').values.T
de = scale_deaths(de)
put_final_dot(ax, loc, im[-1], de[-1], show_population_halo=show_population_halo)
if show_tail:
color = color_of(loc)
darker_color = darken(color_of(loc))
alpha = 0.7
ax.plot(im, de, '-', linewidth=0.8, alpha=alpha, color=color)
for i in range(1, len(im)):
m, ms = [('s', 1.7), ('D', 1.55), ('p', 2.2)][i % 3]
ax.plot(im[-1 - i], de[-1 - i], '.', marker=m, markersize=ms,
fillstyle=None, markeredgewidth=0.33, markerfacecolor=darken(color, 0.9),
markeredgecolor=darker_color, alpha=alpha)
ax.plot(im[-1], de[-1], '.', marker='o', markersize=1., markeredgewidth=0,
markerfacecolor=darker_color, alpha=alpha)
finals[loc] = (im[-1], de[-1])
if show_fronts:
draw_pareto_fronts_(ax, finals, n_fronts=3+2, optimal=True)
draw_pareto_fronts_(ax, finals, n_fronts=2, optimal=False)
# annotation: last day
ax.annotate(str('Date:' if show_corr_history else 'Last day:') + \
f" {final_day.strftime('%B %d, %Y')}", xy=(0.0, 1.01), xycoords='axes fraction',
color=ANNOT_COLOR)
# annotation: correlation coefficients
values = np.array(list(finals.values()))
weights = np.array([population(loc) for loc in finals.keys()])
rho, wrho = correlations(values, weights)
ax.annotate(r'Correlation:',
xy=(0.0, 0.97), xycoords='axes fraction', color=ANNOT_COLOR)
ax.annotate(r"(non-weighted) Pearson's $\rho$ = " + f"{rho:.2f}",
xy=(0.16 - 0.03*show_tail, 0.97), xycoords='axes fraction', color=ANNOT_COLOR)
ax.annotate(r"population-weighted Pearson's $\rho$ = " + f"{wrho:.2f}",
xy=(0.16 - 0.03*show_tail, 0.94), xycoords='axes fraction', color=ANNOT_COLOR)
# export coordinates
if panel_letter is not None:
csv_fn = f"Figure{fig_name}{panel_letter}.csv"
np.savetxt(csv_fn, values, header='lockdown,sqrt_deaths', delimiter=',')
cols = ['mobility', 'new_deaths']
# set up the figure
if show_corr_history:
fig, axes = plt.subplots(ncols=2, figsize=(11, 5))
for i, fday in enumerate(final_day):
last_avail_day, trajs = jointly_trimmed_trajs(trajectories, locations, cols, force_end=fday)
assert fday <= last_avail_day
panel_letter = chr(ord('A') + i)
make_subplot_(axes[i], trajs, locations, fday, show_fronts=show_fronts and i>0,
panel_letter=panel_letter)
axes[i].annotate(r'\large\textbf{' + panel_letter + r'}',
xy=(-0.175, 1.04), xycoords='axes fraction', clip_on=False)
ax = axes[1].inset_axes([0.92, 0.09, 0.45, 0.2])
adjust_spines(ax, ['left', 'bottom'], left_shift=7)
ax.annotate(r'\large\textbf{C}', xy=(-0.275, 1.06), xycoords='axes fraction', clip_on=False)
x, y1, y2 = [], [], []
for i in range(9):
points, weights = [], []
for loc in locations:
im_de = extract_cumulative_immobilization_and_deaths(trajs, loc, 'monthly').iloc[-1 - i]
points.append([im_de[0], scale_deaths(im_de[1])])
weights.append(population(loc))
points = np.array(points)
rho, wrho = correlations(points, weights)
x.append(im_de.name)
y1.append(rho)
y2.append(wrho)
ax.xaxis.set_major_formatter(dts.DateFormatter('%b')) # %d
ax.yaxis.set_major_locator(tckr.MultipleLocator(0.1))
ax.plot(x, y2, '.-', linestyle='dotted', linewidth=0.5, color='#333333', markersize=7,
markerfacecolor='#00000000', markeredgecolor='black', markeredgewidth=0.5,
label=r'population-weighted $\rho$')
ax.plot(x, y1, '.-', linestyle='dashed', linewidth=0.5, color='#333333', markersize=5.5,
label=r'non-weighted $\rho$')
ax.set_ylim((0.5, 0.9))
ax.set_xlabel(r'First days of months of 2020')
ax.set_ylabel(r"Pearson's $\rho$")
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.48), fancybox=False, fontsize=6.75)
for item in (ax.xaxis.label, ax.yaxis.label): item.set_fontsize(7.00)
for label in (ax.get_xticklabels() + ax.get_yticklabels()): label.set_fontsize(6.25)
else:
last_avail_day, trajs = jointly_trimmed_trajs(trajectories, locations, cols, force_end=final_day)
assert final_day <= last_avail_day
fig, axes = plt.subplots(ncols=1, figsize=(6, 5))
make_subplot_(axes, trajs, locations, final_day, show_fronts=False, panel_letter='_')
# export
fig.tight_layout()
fn = f"Figure{fig_name}.pdf" # _{last_day.strftime('%b%d')}
fig.savefig(fn)
print(f"Saved figure file {fn}.")
return fig
def put_legend_cases(ax_leg, thr_weekly_cases_per_1M):
z = [3, 10, 30, 100, 300, 1000, 3000, 10000]
x = np.array(list(range(len(z))))
y1 = np.ones(len(x))*0.62
y2 = np.ones(len(x))*0.31
y3 = np.ones(len(x))*0.0
ax_leg.set_xlim((0 +0, len(z)-1 -0))
ax_leg.set_ylim((0, 1))
# tracer line
for y in [y1, y2, y3]:
xx = [float(x[0]) + 0.125] + list(x[1:-1]) + [float(x[-1]) - 0.125]
ax_leg.plot(xx, y, linestyle='-', linewidth=0.75, alpha=1, solid_capstyle='round',
color='#ffaaee', clip_on=False, zorder=10)
# variable thickness line (BEGIN)
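    # note (added): segment width encodes log(tests per case); lightness grows with
    # width, so better-tested periods render as thicker, lighter segments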
lwidths = [0.7 * (0 + np.log(z))]
points = np.array([x, y1]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
for segi, seg in enumerate(segments):
seg = seg.T
color = sns.set_hls_values(SNAIL_NONGREEN, l=0.15 + (lwidths[0][segi] - 0.)/8)
ax_leg.plot(seg[0]+0.05, seg[1], '-', color=color, linewidth=lwidths[0][segi],
alpha=1, solid_capstyle='butt', zorder=20, clip_on=False)
# variable thickness line (END)
points = np.array([x, y2]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
for segi, seg in enumerate(segments):
seg = seg.T
el = min(1, 0.075 + ((lwidths[0][segi] - 0.)/7)**1.3)
co = sns.set_hls_values(SNAIL_GREEN, l=el)
ax_leg.plot(seg[0]+0.05, seg[1], '-', color=co, linewidth=lwidths[0][segi],
alpha=1, solid_capstyle='butt', zorder=20, clip_on=False)
# dots + thin black
for y in [y1, y2, y3]:
xx, yy = x[:-1], y[:-1]
ax_leg.scatter(xx + 0.5, yy, s=0.025, marker='o', facecolor='#000000', alpha=0.5,
clip_on=False, zorder=30)
ax_leg.plot(xx + 0.5, yy, linestyle='--', linewidth=0.1, color='#000000', alpha=0.33,
clip_on=False, zorder=40)
ax_leg.annotate(text=r'Tests per case:', xy=(0.5, 0.84), xycoords='axes fraction', fontsize=8,
ha="center", va="center")
ax_leg.annotate(text=r'when \textbf{$>$ ' + str(thr_weekly_cases_per_1M) + r'} '
r'new cases /week /M', xy=(0.5, 0.62-0.09),
xycoords='axes fraction', fontsize=6.5, ha="center", va="center")
ax_leg.annotate(text=r'when \textbf{$<$ ' + str(thr_weekly_cases_per_1M) + '} '
r'new cases /week /M', xy=(0.5, 0.31-0.09),
xycoords='axes fraction', fontsize=6.5, ha="center", va="center")
ax_leg.annotate(text=r'no data on testing', xy=(0.5, 0.055), xycoords='axes fraction',
fontsize=6.5, ha="center", va="center")
for vi, v in enumerate(z):
for y in [y1, y2]:
extra_shift = -0.08 if v in [100, 300, 1000] else 0
ax_leg.annotate(text=f"{v}"[::-1].replace('000', 'k')[::-1], color='black',
xy=(x[vi]+extra_shift + 0.5, y[vi]+0.05+0.005*vi), xycoords='data',
fontsize=5.75, ha="center", va="center", zorder=30, clip_on=False)
def put_legend_deaths(ax_leg):
z = [1, 3, 10, 30, 100, 300]
x = np.array(list(range(len(z))))
y2 = np.ones(len(x))*0.37
ax_leg.set_xlim((0-0.1, len(z)-1+0.1))
ax_leg.set_ylim((0, 1))
# variable thickness line (BEGIN)
lwidths = [1*np.log(1 + np.array(z))]
points = np.array([x, y2]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
for segi, seg in enumerate(segments):
seg = seg.T
el = 0.1 + (lwidths[0][segi] - 0.)/14
color = sns.set_hls_values(SNAIL_ORANGE, l=el)
ax_leg.plot(seg[0]-0.025, seg[1], '-', color=color, linewidth=lwidths[0][segi],
alpha=1, solid_capstyle='butt',
zorder=20, clip_on=False)
# dots + thin black
for y in [y2]:
xx, yy = x[:-1], y[:-1]
ax_leg.scatter(xx + 0.5, yy, s=0.025, marker='o', facecolor='black', alpha=0.5,
clip_on=False, zorder=30)
ax_leg.plot(xx + 0.5, yy, linestyle='--', linewidth=0.1, color='black', alpha=0.33,
clip_on=False, zorder=40)
    ax_leg.annotate(text=r'Cases per death:', xy=(0.5, 0.63), xycoords='axes fraction', fontsize=8,
                    ha="center", va="center")
    ax_leg.annotate(text=r'when \textbf{at least 1} new death /week /M', xy=(0.5, 0.22),
                    xycoords='axes fraction', fontsize=6.5, ha="center", va="center")
    for vi, v in enumerate(z):
        for y in [y2]:
            ax_leg.annotate(text=f"{v}", xy=(x[vi] + 0.5, y[vi]+0.05 + 0.005*vi), xycoords='data',
                            fontsize=6, ha="center", va="center", zorder=30, clip_on=False,
                            color='black')
def plot_R_vs_mobility_reduction(trajs, locations, final_day, missing_days, fig_name, kind='cases',
thr_weekly_cases_per_1M=20):
assert kind in ('cases', 'deaths')
trajs_orig = trajs.copy()
low_mortality_locations = ['Taiwan', 'Slovakia', 'New Zealand']
mob_col, Rt_col = f"mobility_historical_{kind}", f"Rt_{kind}"
last_day, trajs_trimmed = jointly_trimmed_trajs(trajs, locations, [mob_col, Rt_col],
force_end=final_day,
skipped=low_mortality_locations)
def by_per_capita(cc):
if kind == 'cases':
assert last_day in trajs[cc].index, \
print(f"Day {last_day} not available for {cc} that ends on",
trajs[cc].tail(1).index)
return trajs[cc].loc[last_day, f"total_{kind}"] / population(cc) + 1e6*is_USA_state(cc)
elif kind == 'deaths':
if cc in low_mortality_locations:
return trajs[cc].loc[last_day, f"total_{kind}"] / 1e9 + 1e6*is_USA_state(cc)
else:
return trajs[cc].loc[last_day, f"total_{kind}"] / population(cc) + 1e6*is_USA_state(cc)
locations = sorted(locations, key=by_per_capita, reverse=True)
facecolor = '#f8f6f4'
ncols = 6
nrows = (len(locations))//ncols + 1
fig, _ = plt.subplots(nrows=nrows, ncols=ncols, figsize=(8/5*ncols, 8/6*nrows))
for ci, country in enumerate(locations):
ax = fig.axes[ci]
ax.set_facecolor(facecolor)
# PLOT: deaths in low-mortality locations
if kind == 'deaths' and country in low_mortality_locations:
            ax.annotate(text=country, xy=(0.5, 0.88), xycoords='axes fraction', fontsize=9,
                        color='#666666', ha="center", va="center", clip_on=False, zorder=100)
            total = trajs_orig[country].loc[last_day, f"total_{kind}"]
            ax.annotate(text="{:d} {:s} in total".format(int(round(total)), kind),
                        xy=(0.5, 0.77), xycoords='axes fraction', fontsize=6.5, color='#666666',
                        ha="center", va="center", clip_on=False, zorder=100)
            ax.annotate(text="(plot not shown)",
                        xy=(0.5, 0.67), xycoords='axes fraction', fontsize=6.5, color='#666666',
                        ha="center", va="center", clip_on=False, zorder=100)
adjust_spines(ax, ['left', 'bottom'] if ax.is_first_col() else ['bottom'])
ax.set_xticks(())
continue
# PLOT: X-axis
row_i = ci//ncols
if row_i == nrows-1:
ax.set_xlabel('Mobility', labelpad=-1)
ax.set_xlim((-100, 0))
ax.set_xticks((-100, 0))
#ax.xaxis.set_major_formatter(tckr.PercentFormatter(decimals=0))
ax.set_xticklabels((r'$-100\%$', r'$0\%$'))
# PLOT: Y-axis
if ax.is_first_col():
ax.set_ylabel(r'$R$')
ax.set_ylim((0, 4))
ax.yaxis.set_major_locator(tckr.MultipleLocator(1))
ax.axhline(1, linestyle='--', linewidth=0.5, color='#666666')
# DATA
df = trajs_trimmed[country].copy()
# DATA: begin each trajectory since 100 cumulative cases
min_cumul = 100
above_min_cumul_indices = df['total_cases'] >= min_cumul # cases even if kind == 'deaths'
df = df[above_min_cumul_indices]
# DATA: nullify missing days to obtain visual discontinuities
for missing_day in missing_days[country]:
if df.index[0] <= missing_day and missing_day <= FINAL_DAY:
df.at[missing_day,mob_col] = np.nan # cannot be pd.NA because used in mpl.plot
df.at[missing_day, Rt_col] = np.nan # cannot be pd.NA because used in mpl.plot
df.sort_index(inplace=True)
if kind == 'cases': # ==---
# PLOT: pink tracer line
ax.plot(*df[[mob_col, Rt_col]].values.T, linestyle='-', linewidth=0.75, alpha=1,
solid_capstyle='round', color='#ffaaee', clip_on=True, zorder=10)
# DATA: partition trajectory into temporally-ordered stretches
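            # note (added): "green" stretches have weekly cases per 1M below the
            # threshold; "non-green" stretches are at or above it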
df_freq = df[f"new_{kind}"].ffill().rolling(window=7, min_periods=7, **ROLL_OPTS).sum()\
/ population(country)
assert len(df_freq) == len(df)
green_indices = df[df_freq < thr_weekly_cases_per_1M].index
nongreen_indices = df[df_freq >= thr_weekly_cases_per_1M].index
green_stretches, nongreen_stretches = [], []
last_index_is_green = None
for index, value in df.iterrows():
if index in green_indices:
if last_index_is_green is None or last_index_is_green == False:
green_stretches += [ [index] ]
elif last_index_is_green == True:
green_stretches[-1] += [index]
last_index_is_green = True
elif index in nongreen_indices:
if last_index_is_green is None or last_index_is_green == True:
if green_stretches:
green_stretches[-1] += [index] # extra point for smooth joins
nongreen_stretches += [ [index] ]
elif last_index_is_green == False:
nongreen_stretches[-1] += [index]
last_index_is_green = False
stretches = [( g, SNAIL_GREEN ) for g in green_stretches] \
+ [(ng, SNAIL_NONGREEN) for ng in nongreen_stretches]
def by_first_day(cs): return cs[0][0]
stretches = sorted(stretches, key=by_first_day)
# PLOT: variable thickness line
for stretch, color in stretches:
x, y = df.loc[stretch, [mob_col, Rt_col]].values.T
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
tests_per_hit = df.loc[stretch, 'tests_per_hit'].values
np.place(tests_per_hit, np.isinf(tests_per_hit) | (tests_per_hit > 10000), 10000)
z = 0.7*np.log(0 + tests_per_hit)
np.place(z, np.isnan(z), 0)
np.place(z, np.isinf(z), 1000)
np.place(z, z < 0, 0)
lwidths = [z]
for segi, seg in enumerate(segments):
seg = seg.T
if kind == 'cases': el = 0.15 + lwidths[0][segi] / 8
else: el = 0.10 + lwidths[0][segi] / 14
co = sns.set_hls_values(color, l=el)
ax.plot(seg[0], seg[1], '-', color=co, linewidth=lwidths[0][segi],
alpha=1, solid_capstyle='round', zorder=20)
elif kind == 'deaths': # ==---
days_back = 14
x, y = df[[mob_col, Rt_col]].values.T
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
de = df[['new_deaths14']]
ca = df[['new_cases14' ]]
            # shift cases 14 days forward in time, so that deaths on day t are
            # paired with cases reported around day t - 14 (onset-to-death lag)
            ca = ca.set_index( ca.index.shift(+days_back, freq='D') )
            # equivalent alternative (not used): shift deaths back by 14 days
            # de = de.set_index( de.index.shift(-days_back, freq='D') )
z = de.join(ca)
z['cases14_per_death14'] = z['new_cases14'] / z['new_deaths14']
z = z['cases14_per_death14'].values
np.place(z, np.isnan(z), 0)
np.place(z, np.isinf(z), 1000)
np.place(z, z < 0, 0)
lwidths = [1*np.log(1 + z)]
for segi, seg in enumerate(segments):
seg = seg.T
if kind == 'cases': el = 0.15 + lwidths[0][segi] / 8
else: el = 0.10 + lwidths[0][segi] / 14
co = sns.set_hls_values(SNAIL_ORANGE, l=el)
ax.plot(seg[0], seg[1], '-', color=co, linewidth=lwidths[0][segi],
alpha=1, solid_capstyle='round', zorder=20)
# PLOT: dots + thin black
x, y = df[[mob_col, Rt_col]].values.T
ax.scatter(x, y, s=0.025, marker='o', facecolor='#000000', alpha=0.5, clip_on=True, zorder=30)
ax.plot(x, y, linestyle='--', linewidth=0.1, color='#000000', alpha=0.33, zorder=40)
# PLOT: panel title
ax.annotate(text=country, xy=(0.5, 0.88), xycoords='axes fraction', fontsize=9, ha="center",
va="center", clip_on=False, zorder=100,
path_effects=[pthff.Stroke(linewidth=2, foreground=facecolor), pthff.Normal()])
pop = population(country)
total_per_1M = trajs_orig[country].loc[last_day, f"total_{kind}"] / pop
heading = "{:d} {:s}/M".format(int(round(total_per_1M)), kind)
ax.annotate(text=heading, xy=(0.5, 0.77), xycoords='axes fraction', fontsize=6.5,
ha="center", va="center", clip_on=False, zorder=100,
path_effects=[pthff.Stroke(linewidth=1.33, foreground=facecolor),
pthff.Normal()])
adjust_spines(ax, ['left', 'bottom'] if ax.is_first_col() else ['bottom'])
set_ticks_lengths(ax)
# PLOT: legend
for ax in fig.axes:
if ax.is_last_row() and ax.is_last_col():
ax.set_axis_off()
if kind == 'cases':
put_legend_cases(fig.axes[-1], thr_weekly_cases_per_1M)
elif kind == 'deaths':
put_legend_deaths(fig.axes[-1])
# PLOT: export and return
fig.tight_layout(w_pad=0.4, h_pad=0.15)
l, b, w, h = fig.axes[-1].get_position().bounds
fig.axes[-1].set_position([l, b - 0.0185, w, h])
fig.axes[-1].annotate('Last day:' + f" {final_day.strftime('%B %d, %Y')}",
xy=(0.0, 1.01), xycoords='axes fraction', color=ANNOT_COLOR)
fn = f"Figure{fig_name}_{last_day.strftime('%b%d')}.pdf"
fig.savefig(fn)
print(f"Saved figure file {fn}.")
return fig
def plot_cumulative_immobilization_and_gdp_drop(trajectories, locations, final_day, gdp_2020h1,
fig_name):
df = pd.DataFrame(columns='location cumul_2020H1_mobility_reduction gdp_2020H1_drop'.split())
df = df.set_index('location')
for loc in locations:
if not loc in gdp_2020h1:
print(f"{loc}: missing GDP data in figure {fig_name}")
continue
gdp_drop = -gdp_2020h1[loc]
immob, _ = extract_cumulative_immobilization_and_deaths(trajectories, loc, 'daily').loc[final_day]
df.loc[loc] = [immob, gdp_drop]
fig, ax = plt.subplots(figsize=(5, 5))
adjust_spines(ax, ['left', 'bottom'], left_shift=10)
set_ticks_lengths(ax)
ax.set_xlabel(r'Cumulated mobility reduction in the 1\textsuperscript{st} half of 2020')
ax.set_ylabel(r'GDP loss in the 1\textsuperscript{st} half of 2020 (year-on-year \%)')
ax.set_xlim((0, 5000))
ax.set_ylim((-2, 14))
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(*df.values.T)
ax.plot([0, 5000], [intercept, intercept + slope*5000],
linewidth=0.75, linestyle='--', color='#aaaaaa', zorder=5)
weights = []
for _, row in df.iterrows():
location = row.name
color = color_of(location)
mob_red, gdp_drop = row[['cumul_2020H1_mobility_reduction', 'gdp_2020H1_drop']]
ax.scatter([mob_red], [gdp_drop], color=color, zorder=10)
ax.annotate(text=location.replace('United Kingdom', 'UK'),
xy=(mob_red + 49, gdp_drop + 0.028), xycoords='data',
color=sns.set_hls_values(color, l=0.3), fontsize=7, zorder=10)
weights.append(population(location))
rho, wrho = correlations(df.values, weights)
ax.annotate(r'Correlation:',
xy=(0.0, 0.97), xycoords='axes fraction', color=ANNOT_COLOR)
ax.annotate(r"(non-weighted) Pearson's $\rho$ = " + f"{rho:.2f}",
xy=(0.15, 0.97), xycoords='axes fraction', color=ANNOT_COLOR)
ax.annotate(r"population-weighted Pearson's $\rho$ = " + f"{wrho:.2f}",
xy=(0.15, 0.94), xycoords='axes fraction', color=ANNOT_COLOR)
# export coordinates
csv_fn = f"Figure{fig_name}.csv"
np.savetxt(csv_fn, df.values, header='lockdown,gdp_loss', delimiter=',')
# export image as PDF
fig.tight_layout()
fn = f"Figure{fig_name}.pdf"
fig.savefig(fn)
print(f"Saved figure file {fn}.")
return fig
def plot_gdp_drop_and_excess_deaths(trajectories, locations, final_day, excess_deaths, gdp_2020h1,
fig_name, scale_deaths=np.sqrt):
fig, ax = plt.subplots(figsize=(5, 5))
adjust_spines(ax, ['left', 'bottom'], left_shift=10)
ax.set_xlabel(r'GDP loss in the 1\textsuperscript{st} half of 2020 (year-on-year \%)')
ax.set_ylabel(r'$\sqrt{\textrm{\sf COVID-19-related deaths in the 1\textsuperscript{st} half of 2020 / M}}$')
ax.set_xlim((-2, 14))
make_sqrt_deaths_yaxis(ax)
ed_locations = excess_deaths.keys()
points, weights = [], []
points_eur, weights_eur = [], []
for loc in locations:
if population(loc) < MIN_POPULATION_M or loc=='Serbia':
print(f"{loc} skipped in figure {fig_name}")
continue
if loc not in ed_locations:
print(f"{loc} in figure {fig_name}: deaths will be used in place of excess deaths")
if loc not in gdp_2020h1:
print(f"{loc} skipped in figure {fig_name} because of missing GDP data")
continue
is_in_Europe = not loc in STATE_TO_ABBREV and not loc in ['Canada', 'Taiwan', 'Japan', 'South Korea']
deaths = max(excess_deaths[loc] if loc in excess_deaths else 0,
trajectories[loc].loc[final_day]['total_deaths'])
x, y = -gdp_2020h1[loc], np.sqrt(deaths / population(loc) )
put_final_dot(ax, loc, x, y, show_population_halo=True, label_shifting=False,
italic=not is_in_Europe)
points.append([x, y])
weights.append(population(loc))
if is_in_Europe:
points_eur.append([x, y])
weights_eur.append(population(loc))
values, values_eur = np.array(points), np.array(points_eur)
rho, wrho = correlations(values, weights)
rho_eur, wrho_eur = correlations(values_eur, weights_eur)
ax.annotate(r'Correlation:',
xy=(-0.01, 0.97), xycoords='axes fraction', color=ANNOT_COLOR)
ax.annotate(r"(non-weighted) Pearson's $\rho$ = " + f"{rho:.2f} (Europe-only: {rho_eur:.2f})",
xy=(0.155, 0.97), xycoords='axes fraction', color=ANNOT_COLOR)
ax.annotate(r"population-weighted Pearson's $\rho$ = " + f"{wrho:.2f} (Europe-only: {wrho_eur:.2f})",
xy=(0.155, 0.94), xycoords='axes fraction', color=ANNOT_COLOR)
# export coordinates
csv_fn = f"Figure{fig_name}_all.csv"
np.savetxt(csv_fn, values, header='gdp_loss,sqrt_deaths', delimiter=',')
csv_fn = f"Figure{fig_name}_eur.csv"
np.savetxt(csv_fn, values_eur, header='gdp_loss,sqrt_deaths', delimiter=',')
# export image as PDF
fig.tight_layout()
fn = f"Figure{fig_name}.pdf"
fig.savefig(fn)
print(f"Saved figure file {fn}.")
return fig
if __name__ == '__main__':
with gzip.open('processed_data.dill.gz', 'rb') as f:
trajectories, locations, final_day, missing_days, excess_deaths, gdp_2020h1 = dill.load(f)
print('Locations count:', len(locations))
jul01 = pd.to_datetime('2020-07-01')
fig1 = plot_cumulative_immobilization_and_deaths(trajectories, locations, [jul01, final_day],
show_fronts=True, show_tail=False, show_corr_history=True, show_population_halo=True,
fig_name='1')
figS1 = plot_cumulative_immobilization_and_deaths(trajectories, locations, final_day,
show_fronts=False, show_tail=True, show_corr_history=False, show_population_halo=False,
fig_name='S1')
fig2 = plot_R_vs_mobility_reduction(trajectories, locations, jul01, missing_days, fig_name='2')
fig4 = plot_cumulative_immobilization_and_gdp_drop(trajectories, locations, jul01, gdp_2020h1,
fig_name='4')
fig5 = plot_gdp_drop_and_excess_deaths(trajectories, locations, jul01, excess_deaths,
gdp_2020h1, fig_name='5')
|
[
"colorsys.rgb_to_hls",
"numpy.isnan",
"numpy.sin",
"pandas.Grouper",
"locale.setlocale",
"matplotlib.patheffects.Normal",
"matplotlib.pyplot.cm.tab10",
"numpy.savetxt",
"dill.load",
"numpy.place",
"matplotlib.dates.DateFormatter",
"matplotlib.patheffects.Stroke",
"matplotlib.pyplot.rc",
"matplotlib.ticker.MultipleLocator",
"matplotlib.pyplot.subplots",
"statsmodels.stats.weightstats.DescrStatsW",
"numpy.isinf",
"pandas.to_datetime",
"numpy.cos",
"numpy.concatenate",
"gzip.open",
"numpy.log",
"pandas.plotting.register_matplotlib_converters",
"numpy.array",
"seaborn.set_hls_values",
"operator.itemgetter"
] |
[((1038, 1070), 'pandas.plotting.register_matplotlib_converters', 'register_matplotlib_converters', ([], {}), '()\n', (1068, 1070), False, 'from pandas.plotting import register_matplotlib_converters\n'), ((1971, 2014), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(8)', 'family': '"""sans-serif"""'}), "('font', size=8, family='sans-serif')\n", (1977, 2014), True, 'import matplotlib.pyplot as plt\n'), ((2015, 2042), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (2021, 2042), True, 'import matplotlib.pyplot as plt\n'), ((2043, 2098), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text.latex"""'], {'preamble': '"""\\\\usepackage{cmbright}"""'}), "('text.latex', preamble='\\\\usepackage{cmbright}')\n", (2049, 2098), True, 'import matplotlib.pyplot as plt\n'), ((1100, 1141), 'locale.setlocale', 'locale.setlocale', (['locale.LC_TIME', '"""en_US"""'], {}), "(locale.LC_TIME, 'en_US')\n", (1116, 1141), False, 'import locale\n'), ((1146, 1186), 'locale.setlocale', 'locale.setlocale', (['locale.LC_ALL', '"""en_US"""'], {}), "(locale.LC_ALL, 'en_US')\n", (1162, 1186), False, 'import locale\n'), ((5691, 5751), 'seaborn.set_hls_values', 'sns.set_hls_values', ([], {'color': 'color', 'h': 'None', 'l': 'lightness', 's': 'None'}), '(color=color, h=None, l=lightness, s=None)\n', (5709, 5751), True, 'import seaborn as sns\n'), ((19487, 19536), 'numpy.concatenate', 'np.concatenate', (['[points[:-1], points[1:]]'], {'axis': '(1)'}), '([points[:-1], points[1:]], axis=1)\n', (19501, 19536), True, 'import numpy as np\n'), ((19954, 20003), 'numpy.concatenate', 'np.concatenate', (['[points[:-1], points[1:]]'], {'axis': '(1)'}), '([points[:-1], points[1:]], axis=1)\n', (19968, 20003), True, 'import numpy as np\n'), ((22231, 22280), 'numpy.concatenate', 'np.concatenate', (['[points[:-1], points[1:]]'], {'axis': '(1)'}), '([points[:-1], points[1:]], axis=1)\n', (22245, 22280), True, 'import numpy as np\n'), ((24974, 25052), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'nrows', 'ncols': 'ncols', 'figsize': '(8 / 5 * ncols, 8 / 6 * nrows)'}), '(nrows=nrows, ncols=ncols, figsize=(8 / 5 * ncols, 8 / 6 * nrows))\n', (24986, 25052), True, 'import matplotlib.pyplot as plt\n'), ((34202, 34230), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (34214, 34230), True, 'import matplotlib.pyplot as plt\n'), ((35836, 35908), 'numpy.savetxt', 'np.savetxt', (['csv_fn', 'df.values'], {'header': '"""lockdown,gdp_loss"""', 'delimiter': '""","""'}), "(csv_fn, df.values, header='lockdown,gdp_loss', delimiter=',')\n", (35846, 35908), True, 'import numpy as np\n'), ((36224, 36252), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (36236, 36252), True, 'import matplotlib.pyplot as plt\n'), ((38475, 38547), 'numpy.savetxt', 'np.savetxt', (['csv_fn', 'values'], {'header': '"""gdp_loss,sqrt_deaths"""', 'delimiter': '""","""'}), "(csv_fn, values, header='gdp_loss,sqrt_deaths', delimiter=',')\n", (38485, 38547), True, 'import numpy as np\n'), ((38593, 38669), 'numpy.savetxt', 'np.savetxt', (['csv_fn', 'values_eur'], {'header': '"""gdp_loss,sqrt_deaths"""', 'delimiter': '""","""'}), "(csv_fn, values_eur, header='gdp_loss,sqrt_deaths', delimiter=',')\n", (38603, 38669), True, 'import numpy as np\n'), ((39074, 39102), 'pandas.to_datetime', 'pd.to_datetime', (['"""2020-07-01"""'], {}), "('2020-07-01')\n", (39088, 39102), True, 'import pandas as pd\n'), ((2505, 2520), 
'matplotlib.pyplot.cm.tab10', 'plt.cm.tab10', (['(6)'], {}), '(6)\n', (2517, 2520), True, 'import matplotlib.pyplot as plt\n'), ((2548, 2563), 'matplotlib.pyplot.cm.tab10', 'plt.cm.tab10', (['(5)'], {}), '(5)\n', (2560, 2563), True, 'import matplotlib.pyplot as plt\n'), ((2591, 2606), 'matplotlib.pyplot.cm.tab10', 'plt.cm.tab10', (['(2)'], {}), '(2)\n', (2603, 2606), True, 'import matplotlib.pyplot as plt\n'), ((2680, 2695), 'matplotlib.pyplot.cm.tab10', 'plt.cm.tab10', (['(4)'], {}), '(4)\n', (2692, 2695), True, 'import matplotlib.pyplot as plt\n'), ((2769, 2784), 'matplotlib.pyplot.cm.tab10', 'plt.cm.tab10', (['(9)'], {}), '(9)\n', (2781, 2784), True, 'import matplotlib.pyplot as plt\n'), ((2996, 3011), 'matplotlib.pyplot.cm.tab10', 'plt.cm.tab10', (['(2)'], {}), '(2)\n', (3008, 3011), True, 'import matplotlib.pyplot as plt\n'), ((3085, 3100), 'matplotlib.pyplot.cm.tab10', 'plt.cm.tab10', (['(0)'], {}), '(0)\n', (3097, 3100), True, 'import matplotlib.pyplot as plt\n'), ((3220, 3235), 'matplotlib.pyplot.cm.tab10', 'plt.cm.tab10', (['(8)'], {}), '(8)\n', (3232, 3235), True, 'import matplotlib.pyplot as plt\n'), ((3355, 3370), 'matplotlib.pyplot.cm.tab10', 'plt.cm.tab10', (['(1)'], {}), '(1)\n', (3367, 3370), True, 'import matplotlib.pyplot as plt\n'), ((3398, 3413), 'matplotlib.pyplot.cm.tab10', 'plt.cm.tab10', (['(3)'], {}), '(3)\n', (3410, 3413), True, 'import matplotlib.pyplot as plt\n'), ((16018, 16056), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(2)', 'figsize': '(11, 5)'}), '(ncols=2, figsize=(11, 5))\n', (16030, 16056), True, 'import matplotlib.pyplot as plt\n'), ((18457, 18494), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(1)', 'figsize': '(6, 5)'}), '(ncols=1, figsize=(6, 5))\n', (18469, 18494), True, 'import matplotlib.pyplot as plt\n'), ((19615, 19688), 'seaborn.set_hls_values', 'sns.set_hls_values', (['SNAIL_NONGREEN'], {'l': '(0.15 + (lwidths[0][segi] - 0.0) / 8)'}), '(SNAIL_NONGREEN, l=0.15 + (lwidths[0][segi] - 0.0) / 8)\n', (19633, 19688), True, 'import seaborn as sns\n'), ((20141, 20178), 'seaborn.set_hls_values', 'sns.set_hls_values', (['SNAIL_GREEN'], {'l': 'el'}), '(SNAIL_GREEN, l=el)\n', (20159, 20178), True, 'import seaborn as sns\n'), ((22405, 22443), 'seaborn.set_hls_values', 'sns.set_hls_values', (['SNAIL_ORANGE'], {'l': 'el'}), '(SNAIL_ORANGE, l=el)\n', (22423, 22443), True, 'import seaborn as sns\n'), ((37782, 37798), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (37790, 37798), True, 'import numpy as np\n'), ((37800, 37820), 'numpy.array', 'np.array', (['points_eur'], {}), '(points_eur)\n', (37808, 37820), True, 'import numpy as np\n'), ((38867, 38908), 'gzip.open', 'gzip.open', (['"""processed_data.dill.gz"""', '"""rb"""'], {}), "('processed_data.dill.gz', 'rb')\n", (38876, 38908), False, 'import gzip\n'), ((39001, 39013), 'dill.load', 'dill.load', (['f'], {}), '(f)\n', (39010, 39013), False, 'import dill\n'), ((1235, 1281), 'locale.setlocale', 'locale.setlocale', (['locale.LC_TIME', '"""en_US.utf8"""'], {}), "(locale.LC_TIME, 'en_US.utf8')\n", (1251, 1281), False, 'import locale\n'), ((1290, 1335), 'locale.setlocale', 'locale.setlocale', (['locale.LC_ALL', '"""en_US.utf8"""'], {}), "(locale.LC_ALL, 'en_US.utf8')\n", (1306, 1335), False, 'import locale\n'), ((5826, 5842), 'operator.itemgetter', 'itemgetter', (['(0)', '(1)'], {}), '(0, 1)\n', (5836, 5842), False, 'from operator import itemgetter\n'), ((15833, 15905), 'numpy.savetxt', 'np.savetxt', (['csv_fn', 'values'], {'header': '"""lockdown,sqrt_deaths"""', 
'delimiter': '""","""'}), "(csv_fn, values, header='lockdown,sqrt_deaths', delimiter=',')\n", (15843, 15905), True, 'import numpy as np\n'), ((17185, 17201), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (17193, 17201), True, 'import numpy as np\n'), ((17381, 17404), 'matplotlib.dates.DateFormatter', 'dts.DateFormatter', (['"""%b"""'], {}), "('%b')\n", (17398, 17404), True, 'import matplotlib.dates as dts\n'), ((17447, 17472), 'matplotlib.ticker.MultipleLocator', 'tckr.MultipleLocator', (['(0.1)'], {}), '(0.1)\n', (17467, 17472), True, 'import matplotlib.ticker as tckr\n'), ((26607, 26630), 'matplotlib.ticker.MultipleLocator', 'tckr.MultipleLocator', (['(1)'], {}), '(1)\n', (26627, 26630), True, 'import matplotlib.ticker as tckr\n'), ((1383, 1424), 'locale.setlocale', 'locale.setlocale', (['locale.LC_TIME', '"""POSIX"""'], {}), "(locale.LC_TIME, 'POSIX')\n", (1399, 1424), False, 'import locale\n'), ((1433, 1473), 'locale.setlocale', 'locale.setlocale', (['locale.LC_ALL', '"""POSIX"""'], {}), "(locale.LC_ALL, 'POSIX')\n", (1449, 1473), False, 'import locale\n'), ((4782, 4825), 'statsmodels.stats.weightstats.DescrStatsW', 'wstats.DescrStatsW', (['values'], {'weights': 'weights'}), '(values, weights=weights)\n', (4800, 4825), True, 'import statsmodels.stats.weightstats as wstats\n'), ((5644, 5667), 'colorsys.rgb_to_hls', 'rgb_to_hls', (['*color[0:3]'], {}), '(*color[0:3])\n', (5654, 5667), False, 'from colorsys import rgb_to_hls\n'), ((12112, 12152), 'numpy.array', 'np.array', (['[finals[loc] for loc in front]'], {}), '([finals[loc] for loc in front])\n', (12120, 12152), True, 'import numpy as np\n'), ((19409, 19418), 'numpy.log', 'np.log', (['z'], {}), '(z)\n', (19415, 19418), True, 'import numpy as np\n'), ((19434, 19451), 'numpy.array', 'np.array', (['[x, y1]'], {}), '([x, y1])\n', (19442, 19451), True, 'import numpy as np\n'), ((19901, 19918), 'numpy.array', 'np.array', (['[x, y2]'], {}), '([x, y2])\n', (19909, 19918), True, 'import numpy as np\n'), ((22178, 22195), 'numpy.array', 'np.array', (['[x, y2]'], {}), '([x, y2])\n', (22186, 22195), True, 'import numpy as np\n'), ((29562, 29611), 'numpy.concatenate', 'np.concatenate', (['[points[:-1], points[1:]]'], {'axis': '(1)'}), '([points[:-1], points[1:]], axis=1)\n', (29576, 29611), True, 'import numpy as np\n'), ((29939, 29960), 'numpy.place', 'np.place', (['z', '(z < 0)', '(0)'], {}), '(z, z < 0, 0)\n', (29947, 29960), True, 'import numpy as np\n'), ((30642, 30691), 'numpy.concatenate', 'np.concatenate', (['[points[:-1], points[1:]]'], {'axis': '(1)'}), '([points[:-1], points[1:]], axis=1)\n', (30656, 30691), True, 'import numpy as np\n'), ((31185, 31206), 'numpy.place', 'np.place', (['z', '(z < 0)', '(0)'], {}), '(z, z < 0, 0)\n', (31193, 31206), True, 'import numpy as np\n'), ((35205, 35237), 'seaborn.set_hls_values', 'sns.set_hls_values', (['color'], {'l': '(0.3)'}), '(color, l=0.3)\n', (35223, 35237), True, 'import seaborn as sns\n'), ((9917, 9937), 'pandas.Grouper', 'pd.Grouper', ([], {'freq': '"""M"""'}), "(freq='M')\n", (9927, 9937), True, 'import pandas as pd\n'), ((22151, 22162), 'numpy.array', 'np.array', (['z'], {}), '(z)\n', (22159, 22162), True, 'import numpy as np\n'), ((29806, 29831), 'numpy.log', 'np.log', (['(0 + tests_per_hit)'], {}), '(0 + tests_per_hit)\n', (29812, 29831), True, 'import numpy as np\n'), ((29860, 29871), 'numpy.isnan', 'np.isnan', (['z'], {}), '(z)\n', (29868, 29871), True, 'import numpy as np\n'), ((29904, 29915), 'numpy.isinf', 'np.isinf', (['z'], {}), '(z)\n', (29912, 29915), True, 
'import numpy as np\n'), ((30252, 30283), 'seaborn.set_hls_values', 'sns.set_hls_values', (['color'], {'l': 'el'}), '(color, l=el)\n', (30270, 30283), True, 'import seaborn as sns\n'), ((31114, 31125), 'numpy.isnan', 'np.isnan', (['z'], {}), '(z)\n', (31122, 31125), True, 'import numpy as np\n'), ((31154, 31165), 'numpy.isinf', 'np.isinf', (['z'], {}), '(z)\n', (31162, 31165), True, 'import numpy as np\n'), ((31488, 31526), 'seaborn.set_hls_values', 'sns.set_hls_values', (['SNAIL_ORANGE'], {'l': 'el'}), '(SNAIL_ORANGE, l=el)\n', (31506, 31526), True, 'import seaborn as sns\n'), ((32178, 32225), 'matplotlib.patheffects.Stroke', 'pthff.Stroke', ([], {'linewidth': '(2)', 'foreground': 'facecolor'}), '(linewidth=2, foreground=facecolor)\n', (32190, 32225), True, 'import matplotlib.patheffects as pthff\n'), ((32227, 32241), 'matplotlib.patheffects.Normal', 'pthff.Normal', ([], {}), '()\n', (32239, 32241), True, 'import matplotlib.patheffects as pthff\n'), ((32626, 32676), 'matplotlib.patheffects.Stroke', 'pthff.Stroke', ([], {'linewidth': '(1.33)', 'foreground': 'facecolor'}), '(linewidth=1.33, foreground=facecolor)\n', (32638, 32676), True, 'import matplotlib.patheffects as pthff\n'), ((32712, 32726), 'matplotlib.patheffects.Normal', 'pthff.Normal', ([], {}), '()\n', (32724, 32726), True, 'import matplotlib.patheffects as pthff\n'), ((9987, 10007), 'pandas.Grouper', 'pd.Grouper', ([], {'freq': '"""M"""'}), "(freq='M')\n", (9997, 10007), True, 'import pandas as pd\n'), ((29724, 29747), 'numpy.isinf', 'np.isinf', (['tests_per_hit'], {}), '(tests_per_hit)\n', (29732, 29747), True, 'import numpy as np\n'), ((31232, 31245), 'numpy.log', 'np.log', (['(1 + z)'], {}), '(1 + z)\n', (31238, 31245), True, 'import numpy as np\n'), ((12998, 13020), 'numpy.array', 'np.array', (['front_coords'], {}), '(front_coords)\n', (13006, 13020), True, 'import numpy as np\n'), ((29498, 29514), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (29506, 29514), True, 'import numpy as np\n'), ((30582, 30598), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (30590, 30598), True, 'import numpy as np\n'), ((12460, 12497), 'numpy.cos', 'np.cos', (['((180 + i) / 360 * 2 * 3.14159)'], {}), '((180 + i) / 360 * 2 * 3.14159)\n', (12466, 12497), True, 'import numpy as np\n'), ((12568, 12605), 'numpy.sin', 'np.sin', (['((180 + i) / 360 * 2 * 3.14159)'], {}), '((180 + i) / 360 * 2 * 3.14159)\n', (12574, 12605), True, 'import numpy as np\n'), ((12761, 12798), 'numpy.cos', 'np.cos', (['((180 + i) / 360 * 2 * 3.14159)'], {}), '((180 + i) / 360 * 2 * 3.14159)\n', (12767, 12798), True, 'import numpy as np\n'), ((12869, 12906), 'numpy.sin', 'np.sin', (['((180 + i) / 360 * 2 * 3.14159)'], {}), '((180 + i) / 360 * 2 * 3.14159)\n', (12875, 12906), True, 'import numpy as np\n')]
|
import numpy as np
from absl.testing import parameterized
from keras.preprocessing import image
from keras.utils import data_utils
from tensorflow.python.platform import test
from ..bit import BiT_S_R50x1, BiT_S_R50x3, BiT_S_R101x1, BiT_S_R101x3, BiT_S_R152x4
from ..bit import BiT_M_R50x1, BiT_M_R50x3, BiT_M_R101x1, BiT_M_R101x3, BiT_M_R152x4
from ..bit import preprocess_input
MODEL_LIST_S = [
BiT_S_R50x1,
# Bad weights
# BiT_S_R50x3, BiT_S_R101x1,
BiT_S_R101x3, BiT_S_R152x4
]
MODEL_LIST_M = [BiT_M_R50x1, BiT_M_R50x3, BiT_M_R101x1, BiT_M_R101x3, BiT_M_R152x4]
TEST_IMAGE_PATH = ('https://storage.googleapis.com/tensorflow/'
'keras-applications/tests/elephant.jpg')
_IMAGENET_CLASSES = 1000
class ApplicationsLoadWeightTest(test.TestCase, parameterized.TestCase):
@parameterized.parameters(*MODEL_LIST_S)
def test_application_predict_odd_s(self, app):
model = app()
_assert_shape_equal(model.output_shape, (None, _IMAGENET_CLASSES))
x = _get_elephant((224, 224))
x = preprocess_input(x)
preds = model.predict(x)
label = np.argmax(preds[0], axis=-1)
self.assertIn(label, [348, 386])
@parameterized.parameters(*MODEL_LIST_S)
def test_application_predict_even_s(self, app):
model = app()
_assert_shape_equal(model.output_shape, (None, _IMAGENET_CLASSES))
x = _get_elephant((299, 299))
x = preprocess_input(x)
preds = model.predict(x)
label = np.argmax(preds[0], axis=-1)
self.assertIn(label, [348, 386])
# @parameterized.parameters(*MODEL_LIST_M)
# def test_application_predict_odd_m(self, app):
# model = app()
# _assert_shape_equal(model.output_shape, (None, 21843))
# x = _get_elephant((224, 224))
# x = preprocess_input(x)
# preds = model.predict(x)
# label = np.argmax(preds[0], axis=-1)
# self.assertIn(label, [3671, 3673, 3674])
#
#
# @parameterized.parameters(*MODEL_LIST_M)
# def test_application_predict_even_m(self, app):
# model = app()
# _assert_shape_equal(model.output_shape, (None, 21843))
# x = _get_elephant((299, 299))
# x = preprocess_input(x)
# preds = model.predict(x)
# label = np.argmax(preds[0], axis=-1)
# self.assertIn(label, [3671, 3673, 3674])
def _get_elephant(target_size):
# For models that don't include a Flatten step,
# the default is to accept variable-size inputs
# even when loading ImageNet weights (since it is possible).
# In this case, default to 299x299.
if target_size[0] is None:
target_size = (299, 299)
test_image = data_utils.get_file('elephant.jpg', TEST_IMAGE_PATH)
img = image.load_img(test_image, target_size=tuple(target_size))
x = image.img_to_array(img)
return np.expand_dims(x, axis=0)
def _assert_shape_equal(shape1, shape2):
if len(shape1) != len(shape2):
raise AssertionError(
'Shapes are different rank: %s vs %s' % (shape1, shape2))
if shape1 != shape2:
raise AssertionError('Shapes differ: %s vs %s' % (shape1, shape2))
if __name__ == '__main__':
test.main()
|
[
"tensorflow.python.platform.test.main",
"numpy.argmax",
"numpy.expand_dims",
"absl.testing.parameterized.parameters",
"keras.utils.data_utils.get_file",
"keras.preprocessing.image.img_to_array"
] |
[((814, 853), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['*MODEL_LIST_S'], {}), '(*MODEL_LIST_S)\n', (838, 853), False, 'from absl.testing import parameterized\n'), ((1197, 1236), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['*MODEL_LIST_S'], {}), '(*MODEL_LIST_S)\n', (1221, 1236), False, 'from absl.testing import parameterized\n'), ((2705, 2757), 'keras.utils.data_utils.get_file', 'data_utils.get_file', (['"""elephant.jpg"""', 'TEST_IMAGE_PATH'], {}), "('elephant.jpg', TEST_IMAGE_PATH)\n", (2724, 2757), False, 'from keras.utils import data_utils\n'), ((2835, 2858), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (2853, 2858), False, 'from keras.preprocessing import image\n'), ((2870, 2895), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (2884, 2895), True, 'import numpy as np\n'), ((3207, 3218), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (3216, 3218), False, 'from tensorflow.python.platform import test\n'), ((1121, 1149), 'numpy.argmax', 'np.argmax', (['preds[0]'], {'axis': '(-1)'}), '(preds[0], axis=-1)\n', (1130, 1149), True, 'import numpy as np\n'), ((1505, 1533), 'numpy.argmax', 'np.argmax', (['preds[0]'], {'axis': '(-1)'}), '(preds[0], axis=-1)\n', (1514, 1533), True, 'import numpy as np\n')]
|
"""
The Colloid_output module contains classes to read LB Colloids simulation
outputs and perform post processing. Many classes are available to
provide plotting functionality. ModelPlot and CCModelPlot are useful for
visualizing colloid-surface forces and colloid-colloid forces respectively.
An example import of the Colloid_output.py module is as follows:
>>> from lb_colloids import ColloidOutput
>>> import matplotlib.pyplot as plt
>>>
>>> hdf = "mymodel.hdf5"
>>> mp = ColloidOutput.ModelPlot(hdf)
>>> # model plot accepts matplotlib args and kwargs!!!
>>> mp.plot('edl_x', cmap='viridis')
>>> plt.show()
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import h5py as H
class Breakthrough(object):
"""
Class to prepare and plot breakthrough curve data from endpoint
files.
Parameters:
----------
:param str filename: <>.endpoint file
Attributes:
----------
:ivar df: (pandas DataFrame): dataframe of endpoint data
:ivar resolution: (float): model resolution
:ivar timestep: (float): model timestep
:ivar continuous: (int): interval of continuous release, 0 means pulse
:ivar ncol: (float): number of colloids per release in simulation
:ivar total_ncol: (int): total number of colloids in simulation
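
    Example:
    -------
    A minimal usage sketch; the endpoint file name is illustrative and it is
    assumed that Breakthrough is imported from this module (or exposed through
    ColloidOutput in the same way as ModelPlot):

    >>> bt = Breakthrough('mymodel.endpoint')
    >>> bt.plot(time=True, color='k')
    >>> plt.show()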
"""
def __init__(self, filename):
if not filename.endswith('.endpoint'):
raise FileTypeError('.endpoint file must be supplied')
reader = ASCIIReader(filename)
self.df = reader.df
self.resolution = reader.resolution
self.timestep = reader.timestep
self.continuous = reader.continuous
# todo: replace this call with something from the header later!
self.ncol = reader.ncol
self.total_ncol = float(self.df.shape[0])
self.__breakthrough_curve = None
self.__reader = reader
@property
def breakthrough_curve(self):
"""
Property method that performs a dynamic
calculation of breakthrough curve data
"""
max_ts = self.df['nts'].max()
if self.__breakthrough_curve is None:
if not self.continuous:
bt_colloids = self.df.loc[self.df['flag'] == 3]
bt_colloids = bt_colloids.sort_values('end-ts')
ncols = []
nts = []
ncol = 0
for index, row in bt_colloids.iterrows():
ncol += 1
ncols.append(float(ncol))
nts.append(row['end-ts'])
ncols.append(float(ncol))
nts.append(max_ts)
df = pd.DataFrame({'nts': nts, 'ncol': ncols}).set_index('ncol')
self.__breakthrough_curve = df
else:
bt_colloids = self.df.loc[self.df['flag'] == 3]
bt_colloids = bt_colloids.sort_values('end-ts')
ncols = []
nts = []
ncol = 0
ncol_per_release = []
for index, row in bt_colloids.iterrows():
lower_ts = row['end-ts'] - self.continuous
upper_ts = row['end-ts']
t = bt_colloids.loc[(bt_colloids['end-ts'] >= lower_ts) & (bt_colloids['end-ts'] <= upper_ts)]
ncol += 1
ncols.append(float(ncol))
ncol_per_release.append(len(t))
nts.append(row['end-ts'])
ncols.append(float(ncol))
nts.append(max_ts)
ncol_per_release.append(len(bt_colloids.loc[(bt_colloids['end-ts'] >= max_ts - self.continuous)
& (bt_colloids['end-ts'] <= max_ts)]))
df = pd.DataFrame({'nts': nts, 'ncol': ncols, 'ncpr': ncol_per_release}).set_index('ncol')
self.__breakthrough_curve = df
return self.__breakthrough_curve
def pore_volume_conversion(self):
"""
        Method to retrieve the pore volume conversion factor
        used to express model time as pore volumes for plotting.
"""
pv_factor = (abs(self.__reader.uy) * self.__reader.velocity_factor) /\
(self.__reader.ylen * self.resolution)
return pv_factor
def plot(self, time=True, *args, **kwargs):
"""
Convience method to plot data into a matplotlib
chart.
Parameters:
----------
:param bool time: if true x-axis is time, false is nts
:param *args: matplotlib args for 1d charts
:param **kwargs: matplotlib keyword arguments for 1d charts
"""
if time:
if self.continuous:
plt.plot(self.breakthrough_curve['nts'] * self.timestep,
self.breakthrough_curve['ncpr'] / float(self.ncol),
*args, **kwargs)
else:
plt.plot(self.breakthrough_curve['nts'] * self.timestep,
self.breakthrough_curve.index.values / float(self.ncol),
*args, **kwargs)
else:
if self.continuous:
                plt.plot(self.breakthrough_curve['nts'],
self.breakthrough_curve['ncpr'] / float(self.ncol),
*args, **kwargs)
else:
plt.plot(self.breakthrough_curve['nts'],
self.breakthrough_curve.index.values / float(self.ncol),
*args, **kwargs)
plt.ylim([0, 1])
def plot_pv(self, *args, **kwargs):
"""
Method to plot breakthrough data with pore
volumes (non-dimensional time)
Parameters:
----------
:param *args: matplotlib args for 1d plotting
:param **kwargs: matplotlib kwargs for 1d plotting
"""
pv_factor = self.pore_volume_conversion()
if self.continuous:
plt.plot(self.breakthrough_curve['nts'] * pv_factor * self.timestep,
self.breakthrough_curve['ncpr'] / float(self.ncol),
*args, **kwargs)
else:
plt.plot(self.breakthrough_curve['nts'] * pv_factor * self.timestep,
self.breakthrough_curve.index.values / float(self.ncol),
*args, **kwargs)
plt.ylim([0, 1])
plt.xlim([0, max(self.breakthrough_curve['nts'] * pv_factor * self.timestep)])
class DistributionFunction(object):
"""
    Class to plot a probability distribution function of colloid breakthrough
from endpoint files.
Parameters:
----------
:param str filename: <>.endpoint file name
:param int nbin: number of bins for pdf calculation
Attributes:
----------
:ivar df: (pandas DataFrame): dataframe of endpoint data
:ivar resolution: (float): model resolution
:ivar timestep: (float): model timestep
:ivar continuous: (int): interval of continuous release, 0 means pulse
:ivar ncol: (float): number of colloids per release in simulation
:ivar total_ncol: (int): total number of colloids in simulation
:ivar pdf: (np.recarray) colloid probability distribution function
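
    Example:
    -------
    A minimal sketch; the endpoint file name is illustrative. reset_pdf()
    re-bins (and optionally normalizes) the distribution after construction:

    >>> dist = DistributionFunction('mymodel.endpoint', nbin=1000)
    >>> dist.reset_pdf(nbin=500, normalize=True)
    >>> dist.plot(time=True, color='r')
    >>> plt.show()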
"""
def __init__(self, filename, nbin=1000):
if not filename.endswith('.endpoint'):
raise FileTypeError('.endpoint file must be supplied')
reader = ASCIIReader(filename)
self.df = reader.df
self.resolution = reader.resolution
self.timestep = reader.timestep
self.continuous = reader.continuous
self.ncol = float(reader.ncol)
self.total_ncol = float(self.df.shape[0])
self.bin = nbin
self.pdf = None
self.reset_pdf(nbin)
self.__normalize = False
self.__reader = reader
def reset_pdf(self, nbin, normalize=False):
"""
Method to generate a probability distribution function
based upon user supplied bin size.
Parameters:
----------
:param int nbin: number of time steps to base bin on
:param bool normalize: method to calculate pdf by residence time or end time
"""
self.bin = nbin
self.__normalize = normalize
ts = []
ncols = []
lower_nts = 0
max_ts = self.df['nts'].max()
pdf_colloids = self.df.loc[self.df['flag'] == 3]
pdf_colloids = pdf_colloids.sort_values('delta-ts')
for upper_nts in range(0, int(max_ts) + 1, nbin):
ncol = 0
for index, row in pdf_colloids.iterrows():
if normalize:
if lower_nts < row['delta-ts'] <= upper_nts:
ncol += 1
else:
if lower_nts < row['end-ts'] <= upper_nts:
ncol += 1
ts.append(upper_nts)
ncols.append(ncol)
lower_nts = upper_nts
        arr = np.recarray((len(ts),), dtype=[('nts', float),
                                             ('ncol', float)])
for idx, value in enumerate(ts):
arr[idx] = tuple([value, ncols[idx]])
self.pdf = arr
def pore_volume_conversion(self):
"""
        Method to retrieve the pore volume conversion factor
        used to express model time as pore volumes for plotting.
"""
pv_factor = (abs(self.__reader.uy) * self.__reader.velocity_factor) /\
(self.__reader.ylen * self.resolution)
return pv_factor
def plot(self, time=True, *args, **kwargs):
"""
Method to plot data into a matplotlib chart.
Parameters:
----------
:param bool time: if true x-axis is time, false is nts
:param *args: matplotlib args for 1d charts
:param **kwargs: matplotlib keyword arguments for 1d charts
"""
if time:
if self.__normalize:
plt.plot(self.pdf['nts'] * self.timestep,
self.pdf['ncol'] / self.total_ncol,
*args, **kwargs)
else:
plt.plot(self.pdf['nts'] * self.timestep,
self.pdf['ncol'] / self.ncol,
*args, **kwargs)
else:
if self.__normalize:
plt.plot(self.pdf['nts'],
self.pdf['ncol'] / self.total_ncol,
*args, **kwargs)
else:
plt.plot(self.pdf['nts'],
self.pdf['ncol'] / self.ncol,
*args, **kwargs)
plt.ylim([0, 1])
def plot_pv(self, *args, **kwargs):
"""
Method to plot pdf data with pore volumes (non-dimensional time)
Parameters:
----------
:param *args: matplotlib args for 1d plotting
:param **kwargs: matplotlib kwargs for 1d plotting
"""
pv_factor = self.pore_volume_conversion()
plt.plot(self.pdf['nts'] * pv_factor * self.timestep,
self.pdf['ncol'] / self.ncol,
*args, **kwargs)
class ADE(object):
"""
Class to calculate macroscopic advection dispersion
equation parameters for field scale model parameterization
Class needs to be re-named and updated to CDE equation
Parameters:
----------
:param str filename: ascii output file name from colloid model
:param int nbin: number of timesteps to bin a pdf for calculation
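
    Example:
    -------
    A sketch of fitting dispersion (D) and retardation (R); the endpoint file
    name and initial guesses are illustrative, and a continuous-release
    endpoint file is assumed for solve_van_genuchten_1986:

    >>> ade = ADE('mymodel.endpoint', nbin=1000)
    >>> result = ade.solve_van_genuchten_1986(D=0.01, R=1.0)
    >>> D_fit, R_fit = result.x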
"""
def __init__(self, filename, nbin=1000):
if not filename.endswith('.endpoint'):
raise FileTypeError('<>.endpoint file must be supplied')
reader = ASCIIReader(filename)
self.timestep = reader.timestep
self.resolution = reader.resolution
self.ylen = reader.ylen
self.ncol = reader.ncol
self.total_ncol = float(reader.df.shape[0])
self.uy = reader.uy
self.pdf = None
self.__dist_func = DistributionFunction(filename, nbin)
self.bt = Breakthrough(filename).breakthrough_curve
self.reset_pdf(nbin)
def __reset(self):
self.pdf = self.__dist_func.pdf
def reset_pdf(self, nbin, normalize=False):
"""
User method to reset values based on changing
the pdf bin values
Parameters:
----------
:param int nbin: number of timesteps to bin a pdf for calculation
:param bool normalize: flag to calculate pdf by residence time or end time
"""
self.__dist_func.reset_pdf(nbin, normalize)
self.pdf = self.__dist_func.pdf
def solve_jury_1991(self, D=0.01, R=0.01, ftol=1e-10,
max_nfev=1000, **kwargs):
"""
        Scipy optimize method to solve least squares
        for Jury 1991. Pulse flux.
Parameters:
----------
:param float D: Diffusivity initial guess. Cannot be 0
:param float R: Retardation initial guess. Cannot be 0
:param float ftol: scipy function tolerance for solution
:param int max_nfev: maximum number of function iterations
:param **kwargs: scipy least squares kwargs
Returns:
-------
:return: scipy least squares dictionary. Answer in dict['x']
"""
# todo: test this method! look up references for clearer examples!
        from scipy.optimize import least_squares
a = self.ncol
l = self.ylen * self.resolution
v = self.uy
pdf, t = self.__prep_data()
x0 = np.array([D, R])
return least_squares(self.__jury_residuals, x0,
args=(a, l, t, v, pdf),
ftol=ftol, max_nfev=max_nfev,
**kwargs)
def __jury_residuals(self, vars, A, L, t, v, pdf):
"""
Method to estimate residuals from jury 1991 equation
using data
Parameters
vars: (np.array) [dispersivity, retardation]
A: ncol
        L: (float) ylen
v: (float) mean fluid_velocity
t: (float) time
pdf: pd.dataframe c/co of colloid pdf
"""
return pdf - self.__jury_1991(vars, A, L, t, v)
def __jury_1991(self, vars, A, L, t, v):
"""
Equation for Jury 1991 calculation of Dispersivity
and Retardation
Parameters
vars: (np.array) [dispersivity, retardation]
A: ncol
        L: (float) ylen
v: (float) mean fluid_velocity
t: (float) time
"""
D = vars[0]
R = vars[1]
eq0 = (A * L * np.sqrt(R))
eq1 = 2 * np.sqrt(np.pi * D * t ** 3)
eq2 = -(R * L - v * t) ** 2
eq3 = 4 * R * D * t
x = (eq0 / eq1) * np.exp(eq2 / eq3)
x[0] = 0
return x
def solve_van_genuchten_1986(self, D=0.01, R=0.01, ftol=1e-10,
max_nfev=1000, **kwargs):
"""
Scipy optimize method to solve least squares
for van genuchten 1986. Miscable displacement.
Parameters:
----------
:param float D: Diffusivity initial guess. Cannot be 0
:param float R: Retardation initial guess. Cannot be 0
:param float ftol: scipy function tolerance for solution
:param int max_nfev: maximum number of function iterations
:param **kwargs: scipy least squares kwargs
Returns:
-------
:return: scipy least squares dictionary. Answer in dict['x']
"""
from scipy.optimize import least_squares
l = self.ylen * self.resolution
v = self.uy
        t = self.bt['nts'].values * self.timestep
        bt = self.bt['ncpr'].values / self.ncol
x0 = np.array([D, R])
return least_squares(self.__van_genuchten_residuals, x0,
args=(l, v, t, bt),
ftol=ftol, max_nfev=max_nfev,
**kwargs)
def __van_genuchten_residuals(self, vars, l, v, t, bt):
"""
        Method to estimate residuals from van Genuchten and Wierenga
1986
Parameters:
vars: (np.array) [dispersivity, retardation]
x: (float) column length
v: (float) mean fluid velocity
t: (float) time
bt: (np.array) breakthrough curve
"""
return bt - self.__van_genuchten_1986(vars, l, v, t)
def __van_genuchten_1986(self, vars, l, v, t):
"""
        Equation for van Genuchten and Wierenga 1986 to calculate
Dispersivity and Retardation from breakthrough data.
Parameters:
vars: (np.array) [dispersivity, retardation]
x: (float) column length
v: (float) mean fluid velocity
t: (float) time
"""
from scipy import special
D = vars[0]
R = vars[1]
eq0 = R * l - v * t
eq1 = np.sqrt(4 * D * R * t)
x = 0.5 * special.erfc(eq0/eq1)
if np.isnan(x[0]):
x[0] = 0
return x
def __prep_data(self):
"""
Prepares breakthrough data by stripping off trailing
zeros.
Returns:
pdf = (np.array) stripped pdf
t = (np.array) times
"""
strip_idx = None
seq = False
bt = False
for idx, rec in enumerate(self.pdf):
if not bt:
if rec['ncol'] != 0:
bt = True
else:
pass
else:
if rec['ncol'] == 0:
if not seq:
strip_idx = idx
seq = True
else:
pass
else:
seq = False
strip_idx = None
if strip_idx is not None:
pdf = self.pdf['ncol'][:strip_idx + 1]
time = self.pdf['nts'][:strip_idx + 1]
else:
pdf = self.pdf['ncol']
time = self.pdf['nts']
return pdf, time
class ModelPlot(object):
"""
Class to retrieve Colloid force arrays
and plot for data analysis.
Parameters:
----------
:param str hdf5: hdf5 file name
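
    Example:
    -------
    A sketch of the quiver plot of velocity magnitude and direction; the hdf5
    file name is illustrative:

    >>> mp = ModelPlot('mymodel.hdf5')
    >>> mp.plot_velocity_magnitude(nbin=10, masked=True)
    >>> plt.show()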
"""
def __init__(self, hdf5):
if not hdf5.endswith('hdf') and\
not hdf5.endswith('hdf5'):
raise FileTypeError('hdf or hdf5 file must be supplied')
self.__hdf = Hdf5Reader(hdf5)
@property
def keys(self):
return self.__hdf.keys
def get_data(self, key):
"""
Get data method to view and analyze colloid
force arrays
Parameters:
----------
:param str key: valid dictionary key from self.keys
Returns:
-------
:return: data <varies>
"""
return self.__hdf.get_data(key)
def get_data_by_path(self, path):
"""
Method to retrieve hdf5 data by specific path
Parameters:
----------
:param str path: hdf5 directory path to data
Returns:
-------
:return: data <varies>
"""
return self.__hdf.get_data_by_path(path)
def plot(self, key, ax=None, masked=False, *args, **kwargs):
"""
Hdf array plotting using Hdf5Reader keys
Parameters:
----------
        :param str key: valid dictionary key from self.keys
        :param object ax: matplotlib pyplot axes object (optional)
        :param bool masked: if True, mask the solid phase of the domain before plotting
        :param *args: matplotlib plotting args
        :param **kwargs: matplotlib plotting kwargs
"""
# todo: create a function_fmt for axis options
mesh = None
if ax is None:
ax = plt.gca()
if key in ('lvdw_x', 'lvdw_y',
'lewis_x', 'lewis_y',
'edl_x', 'edl_y',
'dlvo_x', 'dlvo_y',
'attractive_x', 'attractive_y'):
x_axis = self.__hdf.get_data('distance_array')
arr = self.__hdf.get_data(key)
ax.plot(x_axis, arr, *args, **kwargs)
elif key in ('conversion_factor',
'gravity',
'bouyancy'):
raise KeyError('{}: key not valid for plotting'.format(key))
elif key in ('dlvo_fine', 'edl_fine',
'attractive_fine'):
x_axis = self.__hdf.get_data('distance_fine')
arr = self.__hdf.get_data(key)
ax.plot(x_axis, arr, *args, **kwargs)
elif key == "image":
arr = self.__hdf.get_data(key)
if masked:
arr = np.ma.masked_where(arr == 0, a=arr)
ax.imshow(arr, *args, **kwargs)
else:
arr = self.__hdf.get_data(key)
if masked:
img = self.__hdf.get_data("image")
arr = np.ma.masked_where(img == 1, a=arr)
mesh = ax.imshow(arr, *args, **kwargs)
if mesh is not None:
return mesh
else:
return ax
def plot_velocity_magnitude(self, nbin=10, dimensional=True,
masked=False, *args, **kwargs):
"""
Method to create a quiver plot to display the
magnitude and direction of velocity vectors within
the system.
Parameters:
----------
        :param int nbin: refinement for quiver plotting
        :param bool dimensional: if True plot physical (dimensional) velocities, otherwise lattice Boltzmann velocities
        :param bool masked: if True, mask the solid phase of the domain before plotting
        :param *args: matplotlib plotting args
        :param **kwargs: matplotlib plotting kwargs
"""
if dimensional:
x = self.__hdf.get_data('velocity_x')
y = self.__hdf.get_data('velocity_y')
else:
x = self.__hdf.get_data('lb_velocity_x')
y = self.__hdf.get_data('lb_velocity_y')
xx = np.arange(0, x.shape[1])
yy = np.arange(0, x.shape[0])
xx, yy = np.meshgrid(xx, yy)
if masked:
img = self.__hdf.get_data('image')
xx = np.ma.masked_where(img == 1, a=xx)
yy = np.ma.masked_where(img == 1, a=yy)
x = np.ma.masked_where(img == 1, a=x)
y = np.ma.masked_where(img == 1, a=y)
Q = plt.quiver(xx[::nbin, ::nbin], yy[::nbin, ::nbin],
x[::nbin, ::nbin], y[::nbin, ::nbin],
units='width', *args, **kwargs)
qk = plt.quiverkey(Q, 0.9, 0.9, 0.01, r'$1 \frac{cm}{s}$',
coordinates='figure')
plt.xlim(0, x.shape[1])
plt.ylim(x.shape[0], 0)
class CCModelPlot(object):
"""
Class to query colloid-colloid interactions
and plot data as 1d or as a meshgrid object
More sophisticated than standard ModelPlot
Parameters:
----------
:param str hdf5: hdf5 file name
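
    Example:
    -------
    A minimal sketch; the hdf5 file name is illustrative. plot() draws 1d
    colloid-colloid force profiles and plot_mesh() draws the 2d force field:

    >>> ccp = CCModelPlot('mymodel.hdf5')
    >>> ccp.plot('col_col_x', color='b')
    >>> plt.show()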
"""
data_paths = {'col_col_x': 'colloidcolloid/x',
'col_col_y': 'colloidcolloid/y',
'col_col': None,
'distance_x': 'colloid_colloid/distance/x',
'distance_y': 'colloid_colloid/distance/y',
'distance_fine_x': 'colloid_colloid/fine/distance/x',
'distance_fine_y': 'colloid_colloid/fine/distance/y',
'col_col_fine_x': 'colloid_colloid/fine/x',
'col_col_fine_y': 'colloid_colloid/fine/y',
'col_col_fine': None}
def __init__(self, hdf5):
if not hdf5.endswith('hdf') and\
not hdf5.endswith('hdf5'):
raise FileTypeError('hdf or hdf5 file must be supplied')
self.__hdf5 = Hdf5Reader(hdf5)
@property
def keys(self):
"""
Property method to return valid keys to obtain data
"""
        return list(CCModelPlot.data_paths.keys())
def get_data(self, key):
"""
Method to return data by key
Parameters:
----------
:param str key: valid model key
"""
return self.__hdf5.get_data(key)
def get_data_by_path(self, path):
"""
Method to return data by hdf5 path
Parameters:
----------
:param str path: valid HDF5 data path
"""
return self.__hdf5.get_data_by_path(path)
def plot(self, key, *args, **kwargs):
"""
Plotting method for 1d colloid-colloid dlvo profiles
Parameters:
----------
:param str key: valid data key
:param *args: matplotlib plotting args
:param **kwargs: matplotlib plotting kwargs
"""
if key not in ('col_col_x', 'col_col_y',
'col_col_fine_x', 'col_col_fine_y'):
raise KeyError("{} is not a valid key".format(key))
colcol = self.__hdf5.get_data(key)
shape = colcol.shape
center = shape[0] // 2
if key == "<KEY>":
x = self.__hdf5.get_data('distance_x')
x = x[center, center:]
y = colcol[center, center:]
elif key == "col_col_y":
x = self.__hdf5.get_data('distance_y')
x = x.T[center, center:]
y = colcol.T[center, center:]
elif key == "col_col_fine_x":
x = self.__hdf5.get_data('distance_fine_x')
x = x[center, center:] # * 1e-6
y = colcol[center, center:]
else:
x = self.__hdf5.get_data('distance_fine_y')
x = x[center, center:] # * 1e-6
y = colcol[center, center:]
plt.plot(x, y * -1, *args, **kwargs)
def plot_mesh(self, key, ax=None, *args, **kwargs):
"""
Plotting method for 2d representation of colloid-colloid
dlvo profiles.
Parameters:
----------
:param str key: valid data key
:param object ax: matplotlib axes object (optional)
:param *args: matplotlib plotting args
:param **kwargs: matplotlib plotting kwargs
"""
from matplotlib.colors import LogNorm
if ax is None:
ax = plt.gca()
if key not in ('col_col', 'col_col_fine',
'col_col_x', 'col_col_y',
'col_col_fine_x', 'col_col_fine_y'):
raise KeyError("{} is not a valid key".format(key))
if key == 'col_col':
ccx = np.abs(self.__hdf5.get_data('col_col_x'))
ccy = np.abs(self.__hdf5.get_data('col_col_y'))
mesh = ccx + ccy
elif key == 'col_col_fine':
ccx = np.abs(self.__hdf5.get_data('col_col_fine_x'))
ccy = np.abs(self.__hdf5.get_data('col_col_fine_y'))
mesh = ccx + ccy
else:
mesh = self.__hdf5.get_data(key)
# find center and set to nearby value to prevent log scale crashing
shape = mesh.shape
center = shape[0] // 2
mesh[center, center] = mesh[center, center + 1]
xx, yy = np.meshgrid(np.arange(0, mesh.shape[0]+1),
np.arange(0, mesh.shape[1] + 1))
if mesh.max()/mesh.min() > 10:
vmin = mesh.min()
vmax = mesh.max()
if 'vmin' in kwargs:
vmin = kwargs.pop('vmin')
if 'vmax' in kwargs:
                vmax = kwargs.pop('vmax')
            p = ax.pcolormesh(xx, yy, mesh,
                              norm=LogNorm(vmin=vmin,
                                           vmax=vmax),
                              *args, **kwargs)
else:
p = ax.pcolormesh(xx, yy, mesh,
*args, **kwargs)
ax.set_ylim([0, mesh.shape[0]])
ax.set_xlim([0, mesh.shape[1]])
center = mesh.shape[0] / 2.
ax.plot([center], [center], 'ko')
return p
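# Hedged usage sketch (not part of the original module; the hdf5 file name is
# hypothetical): typical driving of CCModelPlot for one simulation output.
#
#   ccm = CCModelPlot('lb_colloids.hdf5')
#   print(ccm.keys)                      # valid data keys
#   ccm.plot('col_col_x', color='k')     # 1d colloid-colloid profile
#   plt.show()
#   p = ccm.plot_mesh('col_col')         # 2d combined |x| + |y| map
#   plt.colorbar(p)
#   plt.show()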
class ColloidVelocity(object):
"""
    Class to compute colloid velocity and statistics
relating to colloid velocity for a simulation. Class
needs to be rebuilt to work with timeseries and pathline
files for a more precise velocity measurement
Parameters:
----------
:param str filename: endpoint file name
"""
def __init__(self, filename):
if not filename.endswith(".endpoint"):
raise FileTypeError('.endpoint file must be supplied')
reader = ASCIIReader(filename)
self.timestep = reader.timestep
self.resolution = reader.resolution
self.xlen = reader.xlen
self.ylen = reader.ylen
self.df = reader.df
self.ncol = reader.df.shape[0]
self.max_time = max(reader.df['nts']) * self.timestep
self.velocity = None
self.__get_velocity_array()
def __get_velocity_array(self):
"""
Built in method to calculate the mean velocity of
each colloid in the simulation
"""
colloid = []
velocity = []
for index, row in self.df.iterrows():
if np.isnan(row['y-position']):
velocity.append((self.ylen * self.resolution) /
(row['delta-ts'] * self.timestep))
else:
velocity.append((row['y-position'] * self.resolution) /
(row['nts'] * self.timestep))
colloid.append(index)
arr = np.recarray(len(colloid,), dtype=[('colloid', np.int),
('velocity', np.float)])
for idx, value in enumerate(colloid):
arr[idx] = tuple([value, velocity[idx]])
self.velocity = arr
@property
def max(self):
"""
:return: maximum colloid velocity
"""
return self.velocity['velocity'].max()
@property
def min(self):
"""
:return: minimum colloid velocity
"""
return self.velocity['velocity'].min()
@property
def mean(self):
"""
:return: mean colloid velocity
"""
return self.velocity['velocity'].mean()
@property
def var(self):
"""
:return: variance of colloid velocities
"""
return np.var(self.velocity['velocity'])
@property
def stdev(self):
"""
:return: standard deviation of colloid velocities
"""
return np.std(self.velocity['velocity'])
@property
def cv(self):
"""
        :return: coefficient of variation of colloid velocities (%)
"""
return (self.stdev / self.mean) * 100
def plot(self, *args, **kwargs):
"""
        Method to plot the velocity of each colloid
        as a scatter plot against colloid index.
Parameters
----------
:param *args: matplotlib plotting args
:param **kwargs: matplotlib plotting kwargs
"""
plt.scatter(self.velocity['colloid'],
self.velocity['velocity'],
*args, **kwargs)
def plot_histogram(self, nbin=10, width=0.01,
*args, **kwargs):
"""
User method to plot a histogram of velocities using
a bar chart.
Parameters:
----------
:param int nbin: number of specific bins for plotting
:param float width: matplotlib bar width.
:param *args: matplotlib plotting args
:param **kwargs: matplotlib plotting kwargs
"""
adjuster = 0.00001
bins = np.linspace(self.min - adjuster, self.max, nbin)
ncols = []
velocity = []
lower_v = self.min - adjuster
upper_v = 0
for upper_v in bins:
ncol = 0
for v in self.velocity['velocity']:
if lower_v < v <= upper_v:
ncol += 1
velocity.append((lower_v + upper_v)/2.)
ncols.append(ncol)
lower_v = upper_v - adjuster
velocity.append(upper_v + adjuster)
ncols.append(0)
plt.bar(velocity, ncols, width, *args, **kwargs)
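# Hedged usage sketch (illustration only; the endpoint file name is hypothetical):
# summary statistics and a velocity histogram for one LB-Colloids run.
#
#   cv = ColloidVelocity('model.endpoint')
#   print(cv.mean, cv.stdev, cv.cv)      # mean, std. dev., coefficient of variation (%)
#   cv.plot_histogram(nbin=20, width=0.005, color='b')
#   plt.show()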
# todo: think about this one. Does it belong here? Finish class. Integrate into LB
class LBOutput(object):
"""
    Class to analyze LB fluid/solid properties
Parameters:
----------
:param str hdf: hdf5 output filename
"""
data_paths = {'velocity_x': None,
'velocity_y': None,
'lb_velocity_x': None,
'lb_velocity_y': None,
'resolution': None,
'porosity': None,
'pore_diameter': None,
'conversion_factor': None,
'reynolds_number': None}
def __init__(self, hdf5):
if not hdf5.endswith('.hdf') and not\
hdf5.endswith('.hdf5'):
raise FileTypeError('hdf or hdf5 file must be supplied')
self.__hdf5 = Hdf5Reader(hdf5)
@property
def keys(self):
"""
:return: Lattice boltzmann data keys
"""
return LBOutput.data_paths.keys()
def get_data(self, key):
"""
Method to select data from hdf5 file based on key, instead
of data path
Parameters:
----------
:param str key: lattice boltzmann data key
Returns:
-------
:return: data
"""
if key in ("velocity_x", "velocity_y"):
factor = self.__hdf5.get_data("conversion_factor")
key = "lb_{}".format(key)
data = self.__hdf5.get_data(key) * factor
else:
data = self.__hdf5.get_data(key)
return data
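# Hedged usage sketch (illustration only; the file name is hypothetical):
# 'velocity_x'/'velocity_y' are the lattice velocities rescaled by the stored
# conversion factor, so the two calls below differ only by that factor.
#
#   lb = LBOutput('lb_model.hdf5')
#   u_phys = lb.get_data('velocity_x')      # lattice velocity * conversion_factor
#   u_latt = lb.get_data('lb_velocity_x')   # raw lattice units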
class ASCIIReader(object):
"""
Class to read in text based output files <endpoint, timestep, pathline>
to a pandas dataframe
Parameters:
----------
:param str filename: output filename (ie. endpoint, timestep, or pathline)
"""
dtypes = {'colloid': np.int,
'flag': np.int,
'nts': np.int,
'x-position': np.float,
'y-position': np.float,
'x-model': np.float,
'y-model': np.float,
'start-ts': np.int,
'end-ts': np.int,
'delta-ts': np.int,
'continuous': np.int}
def __init__(self, filename):
self.timestep = 0
self.ncol = 0
self.resolution = 0
self.xlen = 0
self.ylen = 0
self.ux = 0
self.uy = 0
self.velocity_factor = 1.
self.continuous = 0
self.__data_startline = 0
self.__header = []
if filename.split('.')[-1] not in ('endpoint', 'timeseries', 'pathline'):
raise FileTypeError("{}: not in supported filetypes".format(filename))
else:
self.read_header(filename)
self.df = self.read_ascii(filename)
def read_header(self, filename):
"""
Method to read the header from ascii output files for LB-Colloids
Parameters:
----------
:param str filename: colloid model output filename (ie. endpoint, timestep, or pathline)
"""
with open(filename) as f:
for idx, line in enumerate(f):
if line.startswith("Timestep"):
t = line.split()
self.timestep = float(t[-1].rstrip())
elif line.startswith("Ncols"):
t = line.split()
self.ncol = int(t[-1].rstrip())
elif line.startswith('Resolution'):
t = line.split()
self.resolution = float(t[-1].rstrip())
elif line.startswith('xlen'):
t = line.split()
self.xlen = float(t[-1].rstrip())
elif line.startswith('ylen'):
t = line.split()
self.ylen = float(t[-1].rstrip())
elif line.startswith('ux'):
t = line.split()
self.ux = float(t[-1].rstrip())
elif line.startswith('uy'):
t = line.split()
self.uy = float(t[-1].rstrip())
elif line.startswith('velocity_factor'):
t = line.split()
self.velocity_factor = float(t[-1].rstrip())
elif line.startswith('Continuous'):
t = line.split()
self.continuous = int(t[-1].rstrip())
elif line.startswith("#"*10):
self.__data_startline = idx + 1
break
else:
pass
def read_ascii(self, filename):
"""
Method to read endpoint file data from from ascii files for LB-Colloids
Sets data to pandas dataframe
Parameters:
----------
:param str filename: colloid model output filename (ie. endpoint, timestep, or pathline)
"""
with open(filename) as f:
t = []
for idx, line in enumerate(f):
if idx < self.__data_startline:
pass
elif idx == self.__data_startline:
self.__header = [i.rstrip() for i in line.split()
if i not in ('\t', '', ' ', '\n')]
else:
t.append([self.__try_float(i.rstrip()) for i
in line.split() if i not in ('\t', '', ' ', '\n')])
temp = np.array(t).T
temp = {self.__header[idx]: data for idx, data in enumerate(temp)}
df = pd.DataFrame(temp)
df = df.reindex_axis(self.__header, axis=1)
df = df.set_index('colloid')
return df
@staticmethod
def __try_float(val):
try:
return float(val)
except ValueError:
return float('nan')
class Hdf5Reader(object):
"""
Reader object to read in HDF5 stored outputs
from colloid models. Contains a data_paths dictionary
which allows the user to use keys to access data
Parameters:
----------
:param str hdf5: LB-Colloid hdf5 file name
"""
data_paths = {'ac': "colloids/model_dict/ac",
'image': 'Binary_image',
'lb_velocity_x': 'results/uarray',
'lb_velocity_y': 'results/uarray',
'lb_mean_velocity_x': 'results/mean_ux',
'lb_mean_velocity_y': 'results/mean_uy',
'conversion_factor': 'results/velocity_factor',
'pore_diameter': 'results/pore_diameter',
'porosity': 'results/porosity',
'reynolds_number': 'results/reynolds_number',
'brownian_x': 'colloids/brownian/x',
'brownian_y': 'colloids/brownian/y',
'lvdw_x': 'colloids/lvdw/x',
'lvdw_y': 'colloids/lvdw/y',
'edl_x': 'colloids/edl/x',
'edl_y': 'colloids/edl/y',
'attractive_x': 'colloids/attractive/x',
'attractive_y': 'colloids/attractive/y',
'lewis_x': 'colloids/lewis_acid_base/x',
'lewis_y': 'colloids/lewis_acid_base/y',
'velocity_x': 'colloids/ux',
'velocity_y': 'colloids/uy',
'gravity': 'colloids/gravity',
'bouyancy': 'colloids/bouyancy',
'ionic': 'colloids/chemical_dict/I',
'distance_array': 'colloids/distance_arr',
'dlvo_x': None,
'dlvo_y': None,
'col_col_x': 'colloid_colloid/x',
'col_col_y': 'colloid_colloid/y',
'col_col': None,
'distance_x': 'colloid_colloid/distance/x',
'distance_y': 'colloid_colloid/distance/y',
'distance_fine_x': 'colloid_colloid/fine/distance/x',
'distance_fine_y': 'colloid_colloid/fine/distance/y',
'col_col_fine_x': 'colloid_colloid/fine/x',
'col_col_fine_y': 'colloid_colloid/fine/y',
'col_col_fine': None,
'edl_fine': 'colloids/edl_fine',
'attractive_fine': 'colloids/attractive_fine',
'dlvo_fine': None,
'distance_fine': 'colloids/distance_fine'}
def __init__(self, hdf5):
if not hdf5.endswith('hdf') and\
not hdf5.endswith('hdf5'):
raise FileTypeError('hdf or hdf5 file must be supplied')
self.file_name = hdf5
@property
def keys(self):
"""
:return: list of valid hdf5 data keys
"""
return [i for i in Hdf5Reader.data_paths]
def get_data(self, key):
"""
Method to retrieve hdf5 data by dict. key
Parameters:
----------
:param str key: valid dictionary key from self.keys
Returns:
-------
:return: data <varies>
"""
if key not in Hdf5Reader.data_paths:
raise KeyError('Dictionary key not in valid keys. Use get_data_by_path')
hdf = H.File(self.file_name, 'r')
if key == 'lb_velocity_x':
data = hdf[Hdf5Reader.data_paths[key]][()][1]
elif key == 'lb_velocity_y':
data = hdf[Hdf5Reader.data_paths[key]][()][0]
elif key == 'dlvo_x':
data = hdf[Hdf5Reader.data_paths['edl_x']][()] +\
hdf[Hdf5Reader.data_paths['attractive_x']][()]
# hdf[Hdf5Reader.data_paths['lewis_x']][()] +\
# hdf[Hdf5Reader.data_paths['lvdw_x']][()]
data = data[0]
elif key == 'dlvo_y':
data = hdf[Hdf5Reader.data_paths['edl_y']][()] +\
hdf[Hdf5Reader.data_paths['attractive_y']][()]
# hdf[Hdf5Reader.data_paths['lewis_y']][()] +\
# hdf[Hdf5Reader.data_paths['lvdw_y']][()]
data = data[0]
elif key == 'dlvo_fine':
data = hdf[Hdf5Reader.data_paths['edl_fine']][()] + \
hdf[Hdf5Reader.data_paths['attractive_fine']][()]
data = data[0]
elif key in ('lvdw_x', 'lvdw_y',
'lewis_x', 'lewis_y',
'edl_x', 'edl_y',
'dlvo_x', 'dlvo_y',
'attractive_x',
'attractive_y',
'distance_array',
'edl_fine',
'attractive_fine',
'distance_fine'):
data = hdf[Hdf5Reader.data_paths[key]][()][0]
else:
data = hdf[Hdf5Reader.data_paths[key]][()]
hdf.close()
return data
def get_data_by_path(self, path):
"""
Method to retrieve hdf5 data by specific hdf5 path
Parameters:
----------
:param str path: hdf5 directory path to data
Returns:
------
:return: data <varies>
"""
hdf = H.File(self.file_name, 'r')
data = hdf[path][()]
hdf.close()
return data
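# Hedged usage sketch (illustration only; the file name is hypothetical):
# key-based access for mapped datasets, path-based access for anything else.
#
#   reader = Hdf5Reader('lb_colloids.hdf5')
#   img = reader.get_data('image')                    # binary solid/pore image
#   dlvo = reader.get_data('dlvo_x')                  # edl_x + attractive_x
#   edl = reader.get_data_by_path('colloids/edl/x')   # explicit HDF5 path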
class FileTypeError(Exception):
pass
|
[
"matplotlib.pyplot.bar",
"matplotlib.pyplot.quiver",
"numpy.isnan",
"numpy.arange",
"numpy.exp",
"matplotlib.pyplot.gca",
"pandas.DataFrame",
"numpy.meshgrid",
"numpy.std",
"scipy.special.erfc",
"scipy.optimize.least_squares",
"numpy.linspace",
"numpy.var",
"h5py.File",
"numpy.ma.masked_where",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.scatter",
"numpy.array",
"matplotlib.pyplot.quiverkey",
"numpy.sqrt"
] |
[((5546, 5562), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (5554, 5562), True, 'import matplotlib.pyplot as plt\n'), ((6361, 6377), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (6369, 6377), True, 'import matplotlib.pyplot as plt\n'), ((10592, 10608), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (10600, 10608), True, 'import matplotlib.pyplot as plt\n'), ((10958, 11062), 'matplotlib.pyplot.plot', 'plt.plot', (["(self.pdf['nts'] * pv_factor * self.timestep)", "(self.pdf['ncol'] / self.ncol)", '*args'], {}), "(self.pdf['nts'] * pv_factor * self.timestep, self.pdf['ncol'] /\n self.ncol, *args, **kwargs)\n", (10966, 11062), True, 'import matplotlib.pyplot as plt\n'), ((13541, 13557), 'numpy.array', 'np.array', (['[D, R]'], {}), '([D, R])\n', (13549, 13557), True, 'import numpy as np\n'), ((13574, 13682), 'scipy.optimize.least_squares', 'least_squares', (['self.__jury_residuals', 'x0'], {'args': '(a, l, t, v, pdf)', 'ftol': 'ftol', 'max_nfev': 'max_nfev'}), '(self.__jury_residuals, x0, args=(a, l, t, v, pdf), ftol=ftol,\n max_nfev=max_nfev, **kwargs)\n', (13587, 13682), False, 'from scipy.optimize import least_squares\n'), ((15775, 15791), 'numpy.array', 'np.array', (['[D, R]'], {}), '([D, R])\n', (15783, 15791), True, 'import numpy as np\n'), ((15808, 15922), 'scipy.optimize.least_squares', 'least_squares', (['self.__van_genuchten_residuals', 'x0'], {'args': '(l, v, t, bt)', 'ftol': 'ftol', 'max_nfev': 'max_nfev'}), '(self.__van_genuchten_residuals, x0, args=(l, v, t, bt), ftol=\n ftol, max_nfev=max_nfev, **kwargs)\n', (15821, 15922), False, 'from scipy.optimize import least_squares\n'), ((16964, 16986), 'numpy.sqrt', 'np.sqrt', (['(4 * D * R * t)'], {}), '(4 * D * R * t)\n', (16971, 16986), True, 'import numpy as np\n'), ((17039, 17053), 'numpy.isnan', 'np.isnan', (['x[0]'], {}), '(x[0])\n', (17047, 17053), True, 'import numpy as np\n'), ((21834, 21858), 'numpy.arange', 'np.arange', (['(0)', 'x.shape[1]'], {}), '(0, x.shape[1])\n', (21843, 21858), True, 'import numpy as np\n'), ((21872, 21896), 'numpy.arange', 'np.arange', (['(0)', 'x.shape[0]'], {}), '(0, x.shape[0])\n', (21881, 21896), True, 'import numpy as np\n'), ((21915, 21934), 'numpy.meshgrid', 'np.meshgrid', (['xx', 'yy'], {}), '(xx, yy)\n', (21926, 21934), True, 'import numpy as np\n'), ((22220, 22345), 'matplotlib.pyplot.quiver', 'plt.quiver', (['xx[::nbin, ::nbin]', 'yy[::nbin, ::nbin]', 'x[::nbin, ::nbin]', 'y[::nbin, ::nbin]', '*args'], {'units': '"""width"""'}), "(xx[::nbin, ::nbin], yy[::nbin, ::nbin], x[::nbin, ::nbin], y[::\n nbin, ::nbin], *args, units='width', **kwargs)\n", (22230, 22345), True, 'import matplotlib.pyplot as plt\n'), ((22400, 22475), 'matplotlib.pyplot.quiverkey', 'plt.quiverkey', (['Q', '(0.9)', '(0.9)', '(0.01)', '"""$1 \\\\frac{cm}{s}$"""'], {'coordinates': '"""figure"""'}), "(Q, 0.9, 0.9, 0.01, '$1 \\\\frac{cm}{s}$', coordinates='figure')\n", (22413, 22475), True, 'import matplotlib.pyplot as plt\n'), ((22511, 22534), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'x.shape[1]'], {}), '(0, x.shape[1])\n', (22519, 22534), True, 'import matplotlib.pyplot as plt\n'), ((22543, 22566), 'matplotlib.pyplot.ylim', 'plt.ylim', (['x.shape[0]', '(0)'], {}), '(x.shape[0], 0)\n', (22551, 22566), True, 'import matplotlib.pyplot as plt\n'), ((25466, 25502), 'matplotlib.pyplot.plot', 'plt.plot', (['x', '(y * -1)', '*args'], {}), '(x, y * -1, *args, **kwargs)\n', (25474, 25502), True, 'import matplotlib.pyplot as plt\n'), ((30048, 30081), 
'numpy.var', 'np.var', (["self.velocity['velocity']"], {}), "(self.velocity['velocity'])\n", (30054, 30081), True, 'import numpy as np\n'), ((30215, 30248), 'numpy.std', 'np.std', (["self.velocity['velocity']"], {}), "(self.velocity['velocity'])\n", (30221, 30248), True, 'import numpy as np\n'), ((30714, 30800), 'matplotlib.pyplot.scatter', 'plt.scatter', (["self.velocity['colloid']", "self.velocity['velocity']", '*args'], {}), "(self.velocity['colloid'], self.velocity['velocity'], *args, **\n kwargs)\n", (30725, 30800), True, 'import matplotlib.pyplot as plt\n'), ((31327, 31375), 'numpy.linspace', 'np.linspace', (['(self.min - adjuster)', 'self.max', 'nbin'], {}), '(self.min - adjuster, self.max, nbin)\n', (31338, 31375), True, 'import numpy as np\n'), ((31850, 31898), 'matplotlib.pyplot.bar', 'plt.bar', (['velocity', 'ncols', 'width', '*args'], {}), '(velocity, ncols, width, *args, **kwargs)\n', (31857, 31898), True, 'import matplotlib.pyplot as plt\n'), ((37440, 37458), 'pandas.DataFrame', 'pd.DataFrame', (['temp'], {}), '(temp)\n', (37452, 37458), True, 'import pandas as pd\n'), ((41030, 41057), 'h5py.File', 'H.File', (['self.file_name', '"""r"""'], {}), "(self.file_name, 'r')\n", (41036, 41057), True, 'import h5py as H\n'), ((42895, 42922), 'h5py.File', 'H.File', (['self.file_name', '"""r"""'], {}), "(self.file_name, 'r')\n", (42901, 42922), True, 'import h5py as H\n'), ((14642, 14652), 'numpy.sqrt', 'np.sqrt', (['R'], {}), '(R)\n', (14649, 14652), True, 'import numpy as np\n'), ((14672, 14699), 'numpy.sqrt', 'np.sqrt', (['(np.pi * D * t ** 3)'], {}), '(np.pi * D * t ** 3)\n', (14679, 14699), True, 'import numpy as np\n'), ((14790, 14807), 'numpy.exp', 'np.exp', (['(eq2 / eq3)'], {}), '(eq2 / eq3)\n', (14796, 14807), True, 'import numpy as np\n'), ((17006, 17029), 'scipy.special.erfc', 'special.erfc', (['(eq0 / eq1)'], {}), '(eq0 / eq1)\n', (17018, 17029), False, 'from scipy import special\n'), ((19768, 19777), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (19775, 19777), True, 'import matplotlib.pyplot as plt\n'), ((22019, 22053), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(img == 1)'], {'a': 'xx'}), '(img == 1, a=xx)\n', (22037, 22053), True, 'import numpy as np\n'), ((22071, 22105), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(img == 1)'], {'a': 'yy'}), '(img == 1, a=yy)\n', (22089, 22105), True, 'import numpy as np\n'), ((22122, 22155), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(img == 1)'], {'a': 'x'}), '(img == 1, a=x)\n', (22140, 22155), True, 'import numpy as np\n'), ((22172, 22205), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(img == 1)'], {'a': 'y'}), '(img == 1, a=y)\n', (22190, 22205), True, 'import numpy as np\n'), ((25997, 26006), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (26004, 26006), True, 'import matplotlib.pyplot as plt\n'), ((26887, 26918), 'numpy.arange', 'np.arange', (['(0)', '(mesh.shape[0] + 1)'], {}), '(0, mesh.shape[0] + 1)\n', (26896, 26918), True, 'import numpy as np\n'), ((26947, 26978), 'numpy.arange', 'np.arange', (['(0)', '(mesh.shape[1] + 1)'], {}), '(0, mesh.shape[1] + 1)\n', (26956, 26978), True, 'import numpy as np\n'), ((28868, 28895), 'numpy.isnan', 'np.isnan', (["row['y-position']"], {}), "(row['y-position'])\n", (28876, 28895), True, 'import numpy as np\n'), ((37337, 37348), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (37345, 37348), True, 'import numpy as np\n'), ((9914, 10013), 'matplotlib.pyplot.plot', 'plt.plot', (["(self.pdf['nts'] * self.timestep)", "(self.pdf['ncol'] / 
self.total_ncol)", '*args'], {}), "(self.pdf['nts'] * self.timestep, self.pdf['ncol'] / self.\n total_ncol, *args, **kwargs)\n", (9922, 10013), True, 'import matplotlib.pyplot as plt\n'), ((10094, 10187), 'matplotlib.pyplot.plot', 'plt.plot', (["(self.pdf['nts'] * self.timestep)", "(self.pdf['ncol'] / self.ncol)", '*args'], {}), "(self.pdf['nts'] * self.timestep, self.pdf['ncol'] / self.ncol, *\n args, **kwargs)\n", (10102, 10187), True, 'import matplotlib.pyplot as plt\n'), ((10297, 10375), 'matplotlib.pyplot.plot', 'plt.plot', (["self.pdf['nts']", "(self.pdf['ncol'] / self.total_ncol)", '*args'], {}), "(self.pdf['nts'], self.pdf['ncol'] / self.total_ncol, *args, **kwargs)\n", (10305, 10375), True, 'import matplotlib.pyplot as plt\n'), ((10460, 10532), 'matplotlib.pyplot.plot', 'plt.plot', (["self.pdf['nts']", "(self.pdf['ncol'] / self.ncol)", '*args'], {}), "(self.pdf['nts'], self.pdf['ncol'] / self.ncol, *args, **kwargs)\n", (10468, 10532), True, 'import matplotlib.pyplot as plt\n'), ((2642, 2683), 'pandas.DataFrame', 'pd.DataFrame', (["{'nts': nts, 'ncol': ncols}"], {}), "({'nts': nts, 'ncol': ncols})\n", (2654, 2683), True, 'import pandas as pd\n'), ((3779, 3846), 'pandas.DataFrame', 'pd.DataFrame', (["{'nts': nts, 'ncol': ncols, 'ncpr': ncol_per_release}"], {}), "({'nts': nts, 'ncol': ncols, 'ncpr': ncol_per_release})\n", (3791, 3846), True, 'import pandas as pd\n'), ((20681, 20716), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(arr == 0)'], {'a': 'arr'}), '(arr == 0, a=arr)\n', (20699, 20716), True, 'import numpy as np\n'), ((20915, 20950), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(img == 1)'], {'a': 'arr'}), '(img == 1, a=arr)\n', (20933, 20950), True, 'import numpy as np\n')]
|
import geopandas as gpd
# required for MAUP: https://github.com/geopandas/geopandas/issues/2199
gpd.options.use_pygeos = False
import pandas as pd
import numpy as np
import shapely
import shapely.geometry
from shapely.geometry import Polygon, Point
from tqdm import tqdm
import maup
import os
#INTRO - need to edit values here for new city deployment
data_source = "census" #"census" or "ghsl"
city_crs = 32712
blocks_gdf_crs = gpd.read_file('prep_pop/tabblock2010_04_pophu.zip').to_crs(city_crs)
block_groups_gdf_crs = gpd.read_file('prep_pop/tl_2019_04_bg.zip').to_crs(city_crs)
veh_avail = pd.read_csv('prep_pop/B25044.csv').iloc[1:,]
bounds_gdf_latlon = gpd.GeoDataFrame(geometry = [
shapely.geometry.box(-111.124649,32.059300,-110.690002,32.366043)],
crs = 4326)
bounds_gdf_crs = bounds_gdf_latlon.to_crs(city_crs)
#define exception polygon
#this is the area within which the grid will be higher-resolution
point = Point(-71.411479,41.823544)
point_latlon = gpd.GeoDataFrame(geometry=[point], crs = 4326)
point_crs = point_latlon.to_crs(city_crs)
poly_crs = point_crs.buffer(1000).unary_union
exception_gdf_crs = gpd.GeoDataFrame(geometry = [poly_crs], crs=city_crs)
high_res = 250 #m to a side
low_res = 1000 #m to a side
# END INTRO actual code
def summarize_veh_avail(row):
total_pop = int(row['B25044_001E'])
if total_pop < 1:
        return 0, 0, 1  # no households reported, so default the (empty) group to two-plus cars
pct_carfree = (int(row['B25044_003E']) + int(row['B25044_010E'])) / total_pop
pct_onecar = (int(row['B25044_004E']) + int(row['B25044_011E'])) / total_pop
pct_twopluscars = 1 - pct_carfree - pct_onecar
return pct_carfree, pct_onecar, pct_twopluscars
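# Worked example (hypothetical counts, illustration only): for a block group row
# with B25044_001E = 100 total households, 10 + 10 car-free households
# (B25044_003E/B25044_010E) and 20 + 20 one-car households
# (B25044_004E/B25044_011E), the function returns (0.2, 0.4, 0.4), i.e.
# 20% car-free, 40% one car, 40% two or more cars.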
def build_grid(bounds_poly_crs, low_resolution, exception_gdf_crs=None, high_resolution=None):
xmin,ymin,xmax,ymax = bounds_poly_crs.bounds
# thank you Faraz (https://gis.stackexchange.com/questions/269243/creating-polygon-grid-using-geopandas)
rows = int(np.ceil((ymax-ymin) / low_resolution))
cols = int(np.ceil((xmax-xmin) / low_resolution))
XleftOrigin = xmin
XrightOrigin = xmin + low_resolution
YtopOrigin = ymax
YbottomOrigin = ymax - low_resolution
lowres_cells = []
exception_cells = []
for i in range(cols):
Ytop = YtopOrigin
Ybottom = YbottomOrigin
for j in range(rows):
cell = Polygon([(XleftOrigin, Ytop), (XrightOrigin, Ytop), (XrightOrigin, Ybottom), (XleftOrigin, Ybottom)])
cell = cell.intersection(bounds_poly_crs)
if exception_gdf_crs is not None:
if not cell.intersects(exception_gdf_crs.unary_union):
lowres_cells.append(cell)
else:
exception_cells.append(cell)
else:
lowres_cells.append(cell)
Ytop = Ytop - low_resolution
Ybottom = Ybottom - low_resolution
XleftOrigin = XleftOrigin + low_resolution
XrightOrigin = XrightOrigin + low_resolution
highres_cells = []
if exception_gdf_crs is not None:
for exception_cell in exception_cells:
highres_cells += build_grid(exception_cell, high_resolution)
return lowres_cells + highres_cells
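# Hedged sketch (illustration only, hypothetical 2 km x 2 km box): with
# low_resolution=1000 this returns ceil(2000/1000)**2 = 4 one-kilometre cells;
# when exception_gdf_crs is given, each low-res cell that intersects it is
# re-gridded at high_resolution instead (e.g. a 4x4 block of 250 m cells).
#
#   demo_box = shapely.geometry.box(0, 0, 2000, 2000)
#   demo_cells = build_grid(demo_box, low_resolution=1000)
#   assert len(demo_cells) == 4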
def populate_grid(grid, blocks, block_groups):
#blocks first, for simple population
blocks_pieces = maup.intersections(blocks, grid, area_cutoff=0)
blocks_weights = blocks['POP10'].groupby(maup.assign(blocks, blocks_pieces)).sum()
blocks_weights = maup.normalize(blocks_weights, level=0)
grid['POP10'] = maup.prorate(
blocks_pieces,
blocks['POP10'],
weights=blocks_weights,
)
#then block groups for car ownership
bg_pieces = maup.intersections(block_groups, grid)
bg_weights = grid['POP10'].groupby(maup.assign(grid, bg_pieces)).sum()
bg_weights = maup.normalize(bg_weights, level=0)
columns = ['pct_carfree', 'pct_onecar','pct_twopluscars']
grid[columns] = maup.prorate(
bg_pieces,
block_groups[columns],
weights=bg_weights,
aggregate_by='mean',
)
return grid
#clip blocks and block groups
blocks_gdf_crs = gpd.clip(blocks_gdf_crs, bounds_gdf_crs)
block_groups_gdf_crs = gpd.clip(block_groups_gdf_crs, bounds_gdf_crs)
#assign veh_avail and block_groups the same index
block_groups_gdf_crs.index = block_groups_gdf_crs.GEOID
newidx = []
for bgidx in veh_avail.GEO_ID:
newidx.append(bgidx[9:])
veh_avail.index = newidx
for bgidx in block_groups_gdf_crs.index:
pct_carfree, pct_onecar, pct_twopluscars = summarize_veh_avail(veh_avail.loc[bgidx])
total_pop = float(veh_avail.loc[bgidx,'B25044_001E'])
block_groups_gdf_crs.loc[bgidx,'total_pop'] = total_pop
block_groups_gdf_crs.loc[bgidx,'pct_carfree'] = pct_carfree
block_groups_gdf_crs.loc[bgidx,'pct_onecar'] = pct_onecar
block_groups_gdf_crs.loc[bgidx,'pct_twopluscars'] = pct_twopluscars
grid_cells = build_grid(bounds_gdf_crs.unary_union, 1000, exception_gdf_crs, 250)
grid_gdf_crs = gpd.GeoDataFrame(geometry=grid_cells, crs=city_crs)
grid_gdf_latlon = grid_gdf_crs.to_crs(4326)
grid_pop_gdf_crs = populate_grid(
grid_gdf_crs,
blocks_gdf_crs,
block_groups_gdf_crs,
)
grid_pop_gdf_crs['pop_dens'] = grid_pop_gdf_crs['POP10'] / grid_pop_gdf_crs.geometry.area
grid_pop_gdf_latlon = grid_pop_gdf_crs.to_crs(4326)
points = pd.DataFrame()
for idx in grid_pop_gdf_latlon.index:
if not np.isnan(grid_pop_gdf_latlon.loc[idx,'POP10']):
grid_pop_gdf_latlon.loc[idx,'id'] = idx
points.loc[idx,'id'] = idx
centroid = grid_pop_gdf_latlon.loc[idx,'geometry'].centroid
points.loc[idx,'lat'] = centroid.y
points.loc[idx,'lon'] = centroid.x
for col in ['POP10','pct_carfree','pct_onecar','pct_twopluscars','pop_dens']:
points.loc[idx, col] = grid_pop_gdf_latlon.loc[idx, col]
points.to_csv('pop_points.csv')
grid_pop_gdf_latlon.to_file('grid_pop.geojson',driver='GeoJSON')
|
[
"pandas.DataFrame",
"maup.normalize",
"shapely.geometry.Point",
"numpy.ceil",
"shapely.geometry.Polygon",
"pandas.read_csv",
"maup.assign",
"maup.prorate",
"numpy.isnan",
"geopandas.GeoDataFrame",
"maup.intersections",
"geopandas.clip",
"shapely.geometry.box",
"geopandas.read_file"
] |
[((934, 962), 'shapely.geometry.Point', 'Point', (['(-71.411479)', '(41.823544)'], {}), '(-71.411479, 41.823544)\n', (939, 962), False, 'from shapely.geometry import Polygon, Point\n'), ((977, 1021), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', ([], {'geometry': '[point]', 'crs': '(4326)'}), '(geometry=[point], crs=4326)\n', (993, 1021), True, 'import geopandas as gpd\n'), ((1132, 1183), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', ([], {'geometry': '[poly_crs]', 'crs': 'city_crs'}), '(geometry=[poly_crs], crs=city_crs)\n', (1148, 1183), True, 'import geopandas as gpd\n'), ((4172, 4212), 'geopandas.clip', 'gpd.clip', (['blocks_gdf_crs', 'bounds_gdf_crs'], {}), '(blocks_gdf_crs, bounds_gdf_crs)\n', (4180, 4212), True, 'import geopandas as gpd\n'), ((4236, 4282), 'geopandas.clip', 'gpd.clip', (['block_groups_gdf_crs', 'bounds_gdf_crs'], {}), '(block_groups_gdf_crs, bounds_gdf_crs)\n', (4244, 4282), True, 'import geopandas as gpd\n'), ((5036, 5087), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', ([], {'geometry': 'grid_cells', 'crs': 'city_crs'}), '(geometry=grid_cells, crs=city_crs)\n', (5052, 5087), True, 'import geopandas as gpd\n'), ((5387, 5401), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5399, 5401), True, 'import pandas as pd\n'), ((3347, 3394), 'maup.intersections', 'maup.intersections', (['blocks', 'grid'], {'area_cutoff': '(0)'}), '(blocks, grid, area_cutoff=0)\n', (3365, 3394), False, 'import maup\n'), ((3503, 3542), 'maup.normalize', 'maup.normalize', (['blocks_weights'], {'level': '(0)'}), '(blocks_weights, level=0)\n', (3517, 3542), False, 'import maup\n'), ((3563, 3631), 'maup.prorate', 'maup.prorate', (['blocks_pieces', "blocks['POP10']"], {'weights': 'blocks_weights'}), "(blocks_pieces, blocks['POP10'], weights=blocks_weights)\n", (3575, 3631), False, 'import maup\n'), ((3725, 3763), 'maup.intersections', 'maup.intersections', (['block_groups', 'grid'], {}), '(block_groups, grid)\n', (3743, 3763), False, 'import maup\n'), ((3856, 3891), 'maup.normalize', 'maup.normalize', (['bg_weights'], {'level': '(0)'}), '(bg_weights, level=0)\n', (3870, 3891), False, 'import maup\n'), ((3974, 4065), 'maup.prorate', 'maup.prorate', (['bg_pieces', 'block_groups[columns]'], {'weights': 'bg_weights', 'aggregate_by': '"""mean"""'}), "(bg_pieces, block_groups[columns], weights=bg_weights,\n aggregate_by='mean')\n", (3986, 4065), False, 'import maup\n'), ((432, 483), 'geopandas.read_file', 'gpd.read_file', (['"""prep_pop/tabblock2010_04_pophu.zip"""'], {}), "('prep_pop/tabblock2010_04_pophu.zip')\n", (445, 483), True, 'import geopandas as gpd\n'), ((524, 567), 'geopandas.read_file', 'gpd.read_file', (['"""prep_pop/tl_2019_04_bg.zip"""'], {}), "('prep_pop/tl_2019_04_bg.zip')\n", (537, 567), True, 'import geopandas as gpd\n'), ((597, 631), 'pandas.read_csv', 'pd.read_csv', (['"""prep_pop/B25044.csv"""'], {}), "('prep_pop/B25044.csv')\n", (608, 631), True, 'import pandas as pd\n'), ((1973, 2012), 'numpy.ceil', 'np.ceil', (['((ymax - ymin) / low_resolution)'], {}), '((ymax - ymin) / low_resolution)\n', (1980, 2012), True, 'import numpy as np\n'), ((2028, 2067), 'numpy.ceil', 'np.ceil', (['((xmax - xmin) / low_resolution)'], {}), '((xmax - xmin) / low_resolution)\n', (2035, 2067), True, 'import numpy as np\n'), ((5451, 5498), 'numpy.isnan', 'np.isnan', (["grid_pop_gdf_latlon.loc[idx, 'POP10']"], {}), "(grid_pop_gdf_latlon.loc[idx, 'POP10'])\n", (5459, 5498), True, 'import numpy as np\n'), ((697, 763), 'shapely.geometry.box', 'shapely.geometry.box', (['(-111.124649)', '(32.0593)', 
'(-110.690002)', '(32.366043)'], {}), '(-111.124649, 32.0593, -110.690002, 32.366043)\n', (717, 763), False, 'import shapely\n'), ((2375, 2480), 'shapely.geometry.Polygon', 'Polygon', (['[(XleftOrigin, Ytop), (XrightOrigin, Ytop), (XrightOrigin, Ybottom), (\n XleftOrigin, Ybottom)]'], {}), '([(XleftOrigin, Ytop), (XrightOrigin, Ytop), (XrightOrigin, Ybottom),\n (XleftOrigin, Ybottom)])\n', (2382, 2480), False, 'from shapely.geometry import Polygon, Point\n'), ((3440, 3474), 'maup.assign', 'maup.assign', (['blocks', 'blocks_pieces'], {}), '(blocks, blocks_pieces)\n', (3451, 3474), False, 'import maup\n'), ((3803, 3831), 'maup.assign', 'maup.assign', (['grid', 'bg_pieces'], {}), '(grid, bg_pieces)\n', (3814, 3831), False, 'import maup\n')]
|
#!/usr/bin/env python
# coding: utf-8
# ## SIMPLE CONVOLUTIONAL NEURAL NETWORK
# In[1]:
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
get_ipython().run_line_magic('matplotlib', 'inline')
print ("PACKAGES LOADED")
# # LOAD MNIST
# In[2]:
mnist = input_data.read_data_sets('data/', one_hot=True)
trainimg = mnist.train.images
trainlabel = mnist.train.labels
testimg = mnist.test.images
testlabel = mnist.test.labels
print ("MNIST ready")
# # SELECT DEVICE TO BE USED
# In[3]:
device_type = "/gpu:1"
# # DEFINE CNN
# In[4]:
with tf.device(device_type): # <= This is optional
n_input = 784
n_output = 10
weights = {
'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64], stddev=0.1)),
'wd1': tf.Variable(tf.random_normal([14*14*64, n_output], stddev=0.1))
}
biases = {
'bc1': tf.Variable(tf.random_normal([64], stddev=0.1)),
'bd1': tf.Variable(tf.random_normal([n_output], stddev=0.1))
}
def conv_simple(_input, _w, _b):
# Reshape input
_input_r = tf.reshape(_input, shape=[-1, 28, 28, 1])
# Convolution
_conv1 = tf.nn.conv2d(_input_r, _w['wc1'], strides=[1, 1, 1, 1], padding='SAME')
# Add-bias
_conv2 = tf.nn.bias_add(_conv1, _b['bc1'])
        # Pass ReLU
_conv3 = tf.nn.relu(_conv2)
# Max-pooling
_pool = tf.nn.max_pool(_conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# Vectorize
_dense = tf.reshape(_pool, [-1, _w['wd1'].get_shape().as_list()[0]])
# Fully-connected layer
_out = tf.add(tf.matmul(_dense, _w['wd1']), _b['bd1'])
# Return everything
out = {
'input_r': _input_r, 'conv1': _conv1, 'conv2': _conv2, 'conv3': _conv3
, 'pool': _pool, 'dense': _dense, 'out': _out
}
return out
print ("CNN ready")
# # DEFINE COMPUTATIONAL GRAPH
# In[5]:
# tf Graph input
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_output])
# Parameters
learning_rate = 0.001
training_epochs = 10
batch_size = 100
display_step = 1
# Functions!
with tf.device(device_type): # <= This is optional
_pred = conv_simple(x, weights, biases)['out']
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(_pred, y))
optm = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
_corr = tf.equal(tf.argmax(_pred,1), tf.argmax(y,1)) # Count corrects
accr = tf.reduce_mean(tf.cast(_corr, tf.float32)) # Accuracy
init = tf.initialize_all_variables()
# Saver
save_step = 1;
savedir = "nets/"
saver = tf.train.Saver(max_to_keep=3)
print ("Network Ready to Go!")
# # OPTIMIZE
# ## DO TRAIN OR NOT
# In[6]:
do_train = 1
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
sess.run(init)
# In[7]:
if do_train == 1:
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(mnist.train.num_examples/batch_size)
# Loop over all batches
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# Fit training using batch data
sess.run(optm, feed_dict={x: batch_xs, y: batch_ys})
# Compute average loss
avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch
# Display logs per epoch step
if epoch % display_step == 0:
print ("Epoch: %03d/%03d cost: %.9f" % (epoch, training_epochs, avg_cost))
train_acc = sess.run(accr, feed_dict={x: batch_xs, y: batch_ys})
print (" Training accuracy: %.3f" % (train_acc))
test_acc = sess.run(accr, feed_dict={x: testimg, y: testlabel})
print (" Test accuracy: %.3f" % (test_acc))
# Save Net
if epoch % save_step == 0:
saver.save(sess, "nets/cnn_mnist_simple.ckpt-" + str(epoch))
print ("Optimization Finished.")
# # RESTORE
# In[8]:
if do_train == 0:
epoch = training_epochs-1
saver.restore(sess, "nets/cnn_mnist_simple.ckpt-" + str(epoch))
print ("NETWORK RESTORED")
# # LET'S SEE HOW CNN WORKS
# In[9]:
with tf.device(device_type):
conv_out = conv_simple(x, weights, biases)
input_r = sess.run(conv_out['input_r'], feed_dict={x: trainimg[0:1, :]})
conv1 = sess.run(conv_out['conv1'], feed_dict={x: trainimg[0:1, :]})
conv2 = sess.run(conv_out['conv2'], feed_dict={x: trainimg[0:1, :]})
conv3 = sess.run(conv_out['conv3'], feed_dict={x: trainimg[0:1, :]})
pool = sess.run(conv_out['pool'], feed_dict={x: trainimg[0:1, :]})
dense = sess.run(conv_out['dense'], feed_dict={x: trainimg[0:1, :]})
out = sess.run(conv_out['out'], feed_dict={x: trainimg[0:1, :]})
# # Input
# In[10]:
# Let's see 'input_r'
print ("Size of 'input_r' is %s" % (input_r.shape,))
label = np.argmax(trainlabel[0, :])
print ("Label is %d" % (label))
# Plot !
plt.matshow(input_r[0, :, :, 0], cmap=plt.get_cmap('gray'))
plt.title("Label of this image is " + str(label) + "")
plt.colorbar()
plt.show()
# # Conv1 (convolution)
# In[11]:
# Let's see 'conv1'
print ("Size of 'conv1' is %s" % (conv1.shape,))
# Plot !
for i in range(3):
plt.matshow(conv1[0, :, :, i], cmap=plt.get_cmap('gray'))
plt.title(str(i) + "th conv1")
plt.colorbar()
plt.show()
# # Conv2 (+bias)
# In[12]:
# Let's see 'conv2'
print ("Size of 'conv2' is %s" % (conv2.shape,))
# Plot !
for i in range(3):
plt.matshow(conv2[0, :, :, i], cmap=plt.get_cmap('gray'))
plt.title(str(i) + "th conv2")
plt.colorbar()
plt.show()
# # Conv3 (ReLU)
# In[13]:
# Let's see 'conv3'
print ("Size of 'conv3' is %s" % (conv3.shape,))
# Plot !
for i in range(3):
plt.matshow(conv3[0, :, :, i], cmap=plt.get_cmap('gray'))
plt.title(str(i) + "th conv3")
plt.colorbar()
plt.show()
# # Pool (max_pool)
# In[14]:
# Let's see 'pool'
print ("Size of 'pool' is %s" % (pool.shape,))
# Plot !
for i in range(3):
plt.matshow(pool[0, :, :, i], cmap=plt.get_cmap('gray'))
plt.title(str(i) + "th pool")
plt.colorbar()
plt.show()
# # Dense
# In[15]:
# Let's see 'dense'
print ("Size of 'dense' is %s" % (dense.shape,))
# Let's see 'out'
print ("Size of 'out' is %s" % (out.shape,))
# # Convolution filters
# In[16]:
# Let's see weight!
wc1 = sess.run(weights['wc1'])
print ("Size of 'wc1' is %s" % (wc1.shape,))
# Plot !
for i in range(3):
plt.matshow(wc1[:, :, 0, i], cmap=plt.get_cmap('gray'))
plt.title(str(i) + "th conv filter")
plt.colorbar()
plt.show()
|
[
"numpy.argmax",
"tensorflow.reshape",
"tensorflow.ConfigProto",
"tensorflow.matmul",
"tensorflow.nn.conv2d",
"tensorflow.nn.relu",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"matplotlib.pyplot.colorbar",
"tensorflow.placeholder",
"tensorflow.cast",
"tensorflow.initialize_all_variables",
"tensorflow.nn.bias_add",
"matplotlib.pyplot.show",
"matplotlib.pyplot.get_cmap",
"tensorflow.train.Saver",
"tensorflow.nn.max_pool",
"tensorflow.random_normal",
"tensorflow.argmax",
"tensorflow.device",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.train.AdamOptimizer"
] |
[((342, 390), 'tensorflow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', (['"""data/"""'], {'one_hot': '(True)'}), "('data/', one_hot=True)\n", (367, 390), False, 'from tensorflow.examples.tutorials.mnist import input_data\n'), ((2026, 2069), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, n_input]'], {}), '(tf.float32, [None, n_input])\n', (2040, 2069), True, 'import tensorflow as tf\n'), ((2074, 2118), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, n_output]'], {}), '(tf.float32, [None, n_output])\n', (2088, 2118), True, 'import tensorflow as tf\n'), ((2720, 2749), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(3)'}), '(max_to_keep=3)\n', (2734, 2749), True, 'import tensorflow as tf\n'), ((4950, 4977), 'numpy.argmax', 'np.argmax', (['trainlabel[0, :]'], {}), '(trainlabel[0, :])\n', (4959, 4977), True, 'import numpy as np\n'), ((5136, 5150), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (5148, 5150), True, 'import matplotlib.pyplot as plt\n'), ((5151, 5161), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5159, 5161), True, 'import matplotlib.pyplot as plt\n'), ((641, 663), 'tensorflow.device', 'tf.device', (['device_type'], {}), '(device_type)\n', (650, 663), True, 'import tensorflow as tf\n'), ((2238, 2260), 'tensorflow.device', 'tf.device', (['device_type'], {}), '(device_type)\n', (2247, 2260), True, 'import tensorflow as tf\n'), ((2640, 2669), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (2667, 2669), True, 'import tensorflow as tf\n'), ((4274, 4296), 'tensorflow.device', 'tf.device', (['device_type'], {}), '(device_type)\n', (4283, 4296), True, 'import tensorflow as tf\n'), ((5401, 5415), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (5413, 5415), True, 'import matplotlib.pyplot as plt\n'), ((5420, 5430), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5428, 5430), True, 'import matplotlib.pyplot as plt\n'), ((5665, 5679), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (5677, 5679), True, 'import matplotlib.pyplot as plt\n'), ((5684, 5694), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5692, 5694), True, 'import matplotlib.pyplot as plt\n'), ((5928, 5942), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (5940, 5942), True, 'import matplotlib.pyplot as plt\n'), ((5947, 5957), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5955, 5957), True, 'import matplotlib.pyplot as plt\n'), ((6189, 6203), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (6201, 6203), True, 'import matplotlib.pyplot as plt\n'), ((6208, 6218), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6216, 6218), True, 'import matplotlib.pyplot as plt\n'), ((6648, 6662), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (6660, 6662), True, 'import matplotlib.pyplot as plt\n'), ((6667, 6677), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6675, 6677), True, 'import matplotlib.pyplot as plt\n'), ((1135, 1176), 'tensorflow.reshape', 'tf.reshape', (['_input'], {'shape': '[-1, 28, 28, 1]'}), '(_input, shape=[-1, 28, 28, 1])\n', (1145, 1176), True, 'import tensorflow as tf\n'), ((1216, 1287), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['_input_r', "_w['wc1']"], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(_input_r, _w['wc1'], strides=[1, 1, 1, 1], padding='SAME')\n", (1228, 1287), True, 'import tensorflow as tf\n'), ((1324, 1357), 
'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['_conv1', "_b['bc1']"], {}), "(_conv1, _b['bc1'])\n", (1338, 1357), True, 'import tensorflow as tf\n'), ((1395, 1413), 'tensorflow.nn.relu', 'tf.nn.relu', (['_conv2'], {}), '(_conv2)\n', (1405, 1413), True, 'import tensorflow as tf\n'), ((1453, 1538), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['_conv3'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(_conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME'\n )\n", (1467, 1538), True, 'import tensorflow as tf\n'), ((2361, 2410), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', (['_pred', 'y'], {}), '(_pred, y)\n', (2400, 2410), True, 'import tensorflow as tf\n'), ((2511, 2530), 'tensorflow.argmax', 'tf.argmax', (['_pred', '(1)'], {}), '(_pred, 1)\n', (2520, 2530), True, 'import tensorflow as tf\n'), ((2531, 2546), 'tensorflow.argmax', 'tf.argmax', (['y', '(1)'], {}), '(y, 1)\n', (2540, 2546), True, 'import tensorflow as tf\n'), ((2590, 2616), 'tensorflow.cast', 'tf.cast', (['_corr', 'tf.float32'], {}), '(_corr, tf.float32)\n', (2597, 2616), True, 'import tensorflow as tf\n'), ((2868, 2909), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (2882, 2909), True, 'import tensorflow as tf\n'), ((5059, 5079), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""gray"""'], {}), "('gray')\n", (5071, 5079), True, 'import matplotlib.pyplot as plt\n'), ((768, 811), 'tensorflow.random_normal', 'tf.random_normal', (['[3, 3, 1, 64]'], {'stddev': '(0.1)'}), '([3, 3, 1, 64], stddev=0.1)\n', (784, 811), True, 'import tensorflow as tf\n'), ((841, 895), 'tensorflow.random_normal', 'tf.random_normal', (['[14 * 14 * 64, n_output]'], {'stddev': '(0.1)'}), '([14 * 14 * 64, n_output], stddev=0.1)\n', (857, 895), True, 'import tensorflow as tf\n'), ((943, 977), 'tensorflow.random_normal', 'tf.random_normal', (['[64]'], {'stddev': '(0.1)'}), '([64], stddev=0.1)\n', (959, 977), True, 'import tensorflow as tf\n'), ((1007, 1047), 'tensorflow.random_normal', 'tf.random_normal', (['[n_output]'], {'stddev': '(0.1)'}), '([n_output], stddev=0.1)\n', (1023, 1047), True, 'import tensorflow as tf\n'), ((1685, 1713), 'tensorflow.matmul', 'tf.matmul', (['_dense', "_w['wd1']"], {}), "(_dense, _w['wd1'])\n", (1694, 1713), True, 'import tensorflow as tf\n'), ((2423, 2474), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (2445, 2474), True, 'import tensorflow as tf\n'), ((5340, 5360), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""gray"""'], {}), "('gray')\n", (5352, 5360), True, 'import matplotlib.pyplot as plt\n'), ((5604, 5624), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""gray"""'], {}), "('gray')\n", (5616, 5624), True, 'import matplotlib.pyplot as plt\n'), ((5867, 5887), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""gray"""'], {}), "('gray')\n", (5879, 5887), True, 'import matplotlib.pyplot as plt\n'), ((6129, 6149), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""gray"""'], {}), "('gray')\n", (6141, 6149), True, 'import matplotlib.pyplot as plt\n'), ((6581, 6601), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""gray"""'], {}), "('gray')\n", (6593, 6601), True, 'import matplotlib.pyplot as plt\n')]
|
import mxnet as mx
import numpy as np
from rcnn.config import config
class LogLossMetric(mx.metric.EvalMetric):
def __init__(self):
super(LogLossMetric, self).__init__('LogLoss')
def update(self, labels, preds):
pred_cls = preds[0].asnumpy()
label = labels[0].asnumpy().astype('int32')
cls = pred_cls[np.arange(label.shape[0]), label]
cls += config.EPS
cls_loss = -1 * np.log(cls)
cls_loss = np.sum(cls_loss)
self.sum_metric += cls_loss
self.num_inst += label.shape[0]
class SmoothL1LossMetric(mx.metric.EvalMetric):
def __init__(self):
super(SmoothL1LossMetric, self).__init__('SmoothL1Loss')
def update(self, labels, preds):
bbox_loss = preds[0].asnumpy()
label = labels[0].asnumpy()
bbox_loss = np.sum(bbox_loss)
self.sum_metric += bbox_loss
self.num_inst += label.shape[0]
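# Hedged usage sketch (illustration only; the module object `mod` is hypothetical):
# both classes follow the mx.metric.EvalMetric interface, so they can be combined
# and handed to a training loop through `eval_metric`.
#
#   eval_metrics = mx.metric.CompositeEvalMetric()
#   eval_metrics.add(LogLossMetric())
#   eval_metrics.add(SmoothL1LossMetric())
#   # mod.fit(train_data, eval_metric=eval_metrics, ...)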
|
[
"numpy.arange",
"numpy.sum",
"numpy.log"
] |
[((460, 476), 'numpy.sum', 'np.sum', (['cls_loss'], {}), '(cls_loss)\n', (466, 476), True, 'import numpy as np\n'), ((825, 842), 'numpy.sum', 'np.sum', (['bbox_loss'], {}), '(bbox_loss)\n', (831, 842), True, 'import numpy as np\n'), ((429, 440), 'numpy.log', 'np.log', (['cls'], {}), '(cls)\n', (435, 440), True, 'import numpy as np\n'), ((345, 370), 'numpy.arange', 'np.arange', (['label.shape[0]'], {}), '(label.shape[0])\n', (354, 370), True, 'import numpy as np\n')]
|
import numpy as np
from pytest_cases import parametrize_with_cases, case
from snake_learner.direction import Direction
from snake_learner.linalg_util import block_distance, closest_direction, \
project_to_direction
CLOSEST_DIRECTION = "closest_direction"
PROJECT_TO_DIRECTION = "project_to_direction"
@case(tags=[CLOSEST_DIRECTION])
def case_closest_direction_up_direction():
vec = Direction.UP.to_array()
distance = 1
direction = Direction.UP
return vec, distance, direction
@case(tags=[CLOSEST_DIRECTION])
def case_closest_direction_down_direction():
vec = Direction.DOWN.to_array()
distance = 1
direction = Direction.DOWN
return vec, distance, direction
@case(tags=[CLOSEST_DIRECTION])
def case_closest_direction_left_direction():
vec = Direction.LEFT.to_array()
distance = 1
direction = Direction.LEFT
return vec, distance, direction
@case(tags=[CLOSEST_DIRECTION])
def case_closest_direction_right_direction():
vec = Direction.RIGHT.to_array()
distance = 1
direction = Direction.RIGHT
return vec, distance, direction
@case(tags=[CLOSEST_DIRECTION])
def case_closest_direction_long_up_direction():
n = 8
vec = n * Direction.UP.to_array()
distance = n
direction = Direction.UP
return vec, distance, direction
@case(tags=[CLOSEST_DIRECTION])
def case_closest_direction_long_down_direction():
n = 8
vec = n * Direction.DOWN.to_array()
distance = n
direction = Direction.DOWN
return vec, distance, direction
@case(tags=[CLOSEST_DIRECTION])
def case_closest_direction_long_right_direction():
n = 8
vec = n * Direction.RIGHT.to_array()
distance = n
direction = Direction.RIGHT
return vec, distance, direction
@case(tags=[CLOSEST_DIRECTION])
def case_closest_direction_long_left_direction():
n = 8
vec = n * Direction.LEFT.to_array()
distance = n
direction = Direction.LEFT
return vec, distance, direction
@case(tags=[CLOSEST_DIRECTION])
def case_closest_direction_up_right_direction():
n, m = 3, 2
vec = n * Direction.UP.to_array() + m * Direction.RIGHT.to_array()
distance = n + m
direction = Direction.UP
return vec, distance, direction
@case(tags=[CLOSEST_DIRECTION])
def case_closest_direction_right_up_direction():
n, m = 3, 2
vec = m * Direction.UP.to_array() + n * Direction.RIGHT.to_array()
distance = n + m
direction = Direction.RIGHT
return vec, distance, direction
@case(tags=[PROJECT_TO_DIRECTION])
def case_project_to_direction_up():
vec = np.array([5, 2])
direction = Direction.UP
result = np.array([5, 2])
return vec, direction, result
@case(tags=[PROJECT_TO_DIRECTION])
def case_project_to_direction_right():
vec = np.array([5, 2])
direction = Direction.RIGHT
result = np.array([2, -5])
return vec, direction, result
@case(tags=[PROJECT_TO_DIRECTION])
def case_project_to_direction_down():
vec = np.array([5, 2])
direction = Direction.DOWN
result = np.array([-5, -2])
return vec, direction, result
@case(tags=[PROJECT_TO_DIRECTION])
def case_project_to_direction_left():
vec = np.array([5, 2])
direction = Direction.LEFT
result = np.array([-2, 5])
return vec, direction, result
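# Pattern visible in the four projection cases above (observation, not part of the
# library docs): project_to_direction appears to rotate the sight vector into the
# given direction's frame, e.g. vec = [5, 2] maps to UP: [5, 2], RIGHT: [2, -5],
# DOWN: [-5, -2], LEFT: [-2, 5]; each is a further 90-degree rotation of the previous result.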
@parametrize_with_cases(
argnames=["vec", "distance", "direction"], cases=".", has_tag=CLOSEST_DIRECTION
)
def test_closest_direction(vec, distance, direction):
assert block_distance(vec) == distance
assert closest_direction(vec) == direction
@parametrize_with_cases(
argnames=["vec", "direction", "result"], cases=".", has_tag=PROJECT_TO_DIRECTION
)
def test_project_to_direction(vec, direction, result):
np.testing.assert_array_equal(
project_to_direction(sight_vector=vec, direction=direction), result
)
|
[
"snake_learner.direction.Direction.RIGHT.to_array",
"snake_learner.linalg_util.block_distance",
"snake_learner.linalg_util.closest_direction",
"snake_learner.direction.Direction.UP.to_array",
"snake_learner.linalg_util.project_to_direction",
"numpy.array",
"pytest_cases.case",
"pytest_cases.parametrize_with_cases",
"snake_learner.direction.Direction.DOWN.to_array",
"snake_learner.direction.Direction.LEFT.to_array"
] |
[((310, 340), 'pytest_cases.case', 'case', ([], {'tags': '[CLOSEST_DIRECTION]'}), '(tags=[CLOSEST_DIRECTION])\n', (314, 340), False, 'from pytest_cases import parametrize_with_cases, case\n'), ((503, 533), 'pytest_cases.case', 'case', ([], {'tags': '[CLOSEST_DIRECTION]'}), '(tags=[CLOSEST_DIRECTION])\n', (507, 533), False, 'from pytest_cases import parametrize_with_cases, case\n'), ((702, 732), 'pytest_cases.case', 'case', ([], {'tags': '[CLOSEST_DIRECTION]'}), '(tags=[CLOSEST_DIRECTION])\n', (706, 732), False, 'from pytest_cases import parametrize_with_cases, case\n'), ((901, 931), 'pytest_cases.case', 'case', ([], {'tags': '[CLOSEST_DIRECTION]'}), '(tags=[CLOSEST_DIRECTION])\n', (905, 931), False, 'from pytest_cases import parametrize_with_cases, case\n'), ((1103, 1133), 'pytest_cases.case', 'case', ([], {'tags': '[CLOSEST_DIRECTION]'}), '(tags=[CLOSEST_DIRECTION])\n', (1107, 1133), False, 'from pytest_cases import parametrize_with_cases, case\n'), ((1315, 1345), 'pytest_cases.case', 'case', ([], {'tags': '[CLOSEST_DIRECTION]'}), '(tags=[CLOSEST_DIRECTION])\n', (1319, 1345), False, 'from pytest_cases import parametrize_with_cases, case\n'), ((1533, 1563), 'pytest_cases.case', 'case', ([], {'tags': '[CLOSEST_DIRECTION]'}), '(tags=[CLOSEST_DIRECTION])\n', (1537, 1563), False, 'from pytest_cases import parametrize_with_cases, case\n'), ((1754, 1784), 'pytest_cases.case', 'case', ([], {'tags': '[CLOSEST_DIRECTION]'}), '(tags=[CLOSEST_DIRECTION])\n', (1758, 1784), False, 'from pytest_cases import parametrize_with_cases, case\n'), ((1972, 2002), 'pytest_cases.case', 'case', ([], {'tags': '[CLOSEST_DIRECTION]'}), '(tags=[CLOSEST_DIRECTION])\n', (1976, 2002), False, 'from pytest_cases import parametrize_with_cases, case\n'), ((2228, 2258), 'pytest_cases.case', 'case', ([], {'tags': '[CLOSEST_DIRECTION]'}), '(tags=[CLOSEST_DIRECTION])\n', (2232, 2258), False, 'from pytest_cases import parametrize_with_cases, case\n'), ((2487, 2520), 'pytest_cases.case', 'case', ([], {'tags': '[PROJECT_TO_DIRECTION]'}), '(tags=[PROJECT_TO_DIRECTION])\n', (2491, 2520), False, 'from pytest_cases import parametrize_with_cases, case\n'), ((2680, 2713), 'pytest_cases.case', 'case', ([], {'tags': '[PROJECT_TO_DIRECTION]'}), '(tags=[PROJECT_TO_DIRECTION])\n', (2684, 2713), False, 'from pytest_cases import parametrize_with_cases, case\n'), ((2880, 2913), 'pytest_cases.case', 'case', ([], {'tags': '[PROJECT_TO_DIRECTION]'}), '(tags=[PROJECT_TO_DIRECTION])\n', (2884, 2913), False, 'from pytest_cases import parametrize_with_cases, case\n'), ((3079, 3112), 'pytest_cases.case', 'case', ([], {'tags': '[PROJECT_TO_DIRECTION]'}), '(tags=[PROJECT_TO_DIRECTION])\n', (3083, 3112), False, 'from pytest_cases import parametrize_with_cases, case\n'), ((3277, 3384), 'pytest_cases.parametrize_with_cases', 'parametrize_with_cases', ([], {'argnames': "['vec', 'distance', 'direction']", 'cases': '"""."""', 'has_tag': 'CLOSEST_DIRECTION'}), "(argnames=['vec', 'distance', 'direction'], cases='.',\n has_tag=CLOSEST_DIRECTION)\n", (3299, 3384), False, 'from pytest_cases import parametrize_with_cases, case\n'), ((3535, 3643), 'pytest_cases.parametrize_with_cases', 'parametrize_with_cases', ([], {'argnames': "['vec', 'direction', 'result']", 'cases': '"""."""', 'has_tag': 'PROJECT_TO_DIRECTION'}), "(argnames=['vec', 'direction', 'result'], cases='.',\n has_tag=PROJECT_TO_DIRECTION)\n", (3557, 3643), False, 'from pytest_cases import parametrize_with_cases, case\n'), ((394, 417), 'snake_learner.direction.Direction.UP.to_array', 
'Direction.UP.to_array', ([], {}), '()\n', (415, 417), False, 'from snake_learner.direction import Direction\n'), ((589, 614), 'snake_learner.direction.Direction.DOWN.to_array', 'Direction.DOWN.to_array', ([], {}), '()\n', (612, 614), False, 'from snake_learner.direction import Direction\n'), ((788, 813), 'snake_learner.direction.Direction.LEFT.to_array', 'Direction.LEFT.to_array', ([], {}), '()\n', (811, 813), False, 'from snake_learner.direction import Direction\n'), ((988, 1014), 'snake_learner.direction.Direction.RIGHT.to_array', 'Direction.RIGHT.to_array', ([], {}), '()\n', (1012, 1014), False, 'from snake_learner.direction import Direction\n'), ((2567, 2583), 'numpy.array', 'np.array', (['[5, 2]'], {}), '([5, 2])\n', (2575, 2583), True, 'import numpy as np\n'), ((2626, 2642), 'numpy.array', 'np.array', (['[5, 2]'], {}), '([5, 2])\n', (2634, 2642), True, 'import numpy as np\n'), ((2763, 2779), 'numpy.array', 'np.array', (['[5, 2]'], {}), '([5, 2])\n', (2771, 2779), True, 'import numpy as np\n'), ((2825, 2842), 'numpy.array', 'np.array', (['[2, -5]'], {}), '([2, -5])\n', (2833, 2842), True, 'import numpy as np\n'), ((2962, 2978), 'numpy.array', 'np.array', (['[5, 2]'], {}), '([5, 2])\n', (2970, 2978), True, 'import numpy as np\n'), ((3023, 3041), 'numpy.array', 'np.array', (['[-5, -2]'], {}), '([-5, -2])\n', (3031, 3041), True, 'import numpy as np\n'), ((3161, 3177), 'numpy.array', 'np.array', (['[5, 2]'], {}), '([5, 2])\n', (3169, 3177), True, 'import numpy as np\n'), ((3222, 3239), 'numpy.array', 'np.array', (['[-2, 5]'], {}), '([-2, 5])\n', (3230, 3239), True, 'import numpy as np\n'), ((1206, 1229), 'snake_learner.direction.Direction.UP.to_array', 'Direction.UP.to_array', ([], {}), '()\n', (1227, 1229), False, 'from snake_learner.direction import Direction\n'), ((1420, 1445), 'snake_learner.direction.Direction.DOWN.to_array', 'Direction.DOWN.to_array', ([], {}), '()\n', (1443, 1445), False, 'from snake_learner.direction import Direction\n'), ((1639, 1665), 'snake_learner.direction.Direction.RIGHT.to_array', 'Direction.RIGHT.to_array', ([], {}), '()\n', (1663, 1665), False, 'from snake_learner.direction import Direction\n'), ((1859, 1884), 'snake_learner.direction.Direction.LEFT.to_array', 'Direction.LEFT.to_array', ([], {}), '()\n', (1882, 1884), False, 'from snake_learner.direction import Direction\n'), ((3452, 3471), 'snake_learner.linalg_util.block_distance', 'block_distance', (['vec'], {}), '(vec)\n', (3466, 3471), False, 'from snake_learner.linalg_util import block_distance, closest_direction, project_to_direction\n'), ((3495, 3517), 'snake_learner.linalg_util.closest_direction', 'closest_direction', (['vec'], {}), '(vec)\n', (3512, 3517), False, 'from snake_learner.linalg_util import block_distance, closest_direction, project_to_direction\n'), ((3744, 3803), 'snake_learner.linalg_util.project_to_direction', 'project_to_direction', ([], {'sight_vector': 'vec', 'direction': 'direction'}), '(sight_vector=vec, direction=direction)\n', (3764, 3803), False, 'from snake_learner.linalg_util import block_distance, closest_direction, project_to_direction\n'), ((2082, 2105), 'snake_learner.direction.Direction.UP.to_array', 'Direction.UP.to_array', ([], {}), '()\n', (2103, 2105), False, 'from snake_learner.direction import Direction\n'), ((2112, 2138), 'snake_learner.direction.Direction.RIGHT.to_array', 'Direction.RIGHT.to_array', ([], {}), '()\n', (2136, 2138), False, 'from snake_learner.direction import Direction\n'), ((2338, 2361), 'snake_learner.direction.Direction.UP.to_array', 
'Direction.UP.to_array', ([], {}), '()\n', (2359, 2361), False, 'from snake_learner.direction import Direction\n'), ((2368, 2394), 'snake_learner.direction.Direction.RIGHT.to_array', 'Direction.RIGHT.to_array', ([], {}), '()\n', (2392, 2394), False, 'from snake_learner.direction import Direction\n')]
|
import os
import re
import itertools
import cv2
import time
import numpy as np
import torch
from torch.autograd import Variable
from utils.craft_utils import getDetBoxes, adjustResultCoordinates
from data import imgproc
from data.dataset import SynthTextDataSet
import math
import xml.etree.ElementTree as elemTree
#-------------------------------------------------------------------------------------------------------------------#
def rotatePoint(xc, yc, xp, yp, theta):
xoff = xp - xc
yoff = yp - yc
cosTheta = math.cos(theta)
sinTheta = math.sin(theta)
pResx = cosTheta * xoff + sinTheta * yoff
pResy = - sinTheta * xoff + cosTheta * yoff
# pRes = (xc + pResx, yc + pResy)
return int(xc + pResx), int(yc + pResy)
def addRotatedShape(cx, cy, w, h, angle):
p0x, p0y = rotatePoint(cx, cy, cx - w / 2, cy - h / 2, -angle)
p1x, p1y = rotatePoint(cx, cy, cx + w / 2, cy - h / 2, -angle)
p2x, p2y = rotatePoint(cx, cy, cx + w / 2, cy + h / 2, -angle)
p3x, p3y = rotatePoint(cx, cy, cx - w / 2, cy + h / 2, -angle)
points = [[p0x, p0y], [p1x, p1y], [p2x, p2y], [p3x, p3y]]
return points
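# Illustrative usage sketch (not part of the original file): a 20x10 box centred
# at (50, 50) and rotated by 30 degrees yields four corner points in the same
# [[x, y], ...] order that xml_parsing() stores under 'box_coodi'.
def _example_rotated_box():
    return addRotatedShape(50, 50, 20, 10, math.pi / 6)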
def xml_parsing(xml):
tree = elemTree.parse(xml)
annotations = [] # Initialize the list to store labels
iter_element = tree.iter(tag="object")
for element in iter_element:
annotation = {} # Initialize the dict to store labels
annotation['name'] = element.find("name").text # Save the name tag value
box_coords = element.iter(tag="robndbox")
for box_coord in box_coords:
cx = float(box_coord.find("cx").text)
cy = float(box_coord.find("cy").text)
w = float(box_coord.find("w").text)
h = float(box_coord.find("h").text)
angle = float(box_coord.find("angle").text)
convertcoodi = addRotatedShape(cx, cy, w, h, angle)
annotation['box_coodi'] = convertcoodi
annotations.append(annotation)
box_coords = element.iter(tag="bndbox")
for box_coord in box_coords:
xmin = int(box_coord.find("xmin").text)
ymin = int(box_coord.find("ymin").text)
xmax = int(box_coord.find("xmax").text)
ymax = int(box_coord.find("ymax").text)
# annotation['bndbox'] = [xmin,ymin,xmax,ymax]
annotation['box_coodi'] = [[xmin, ymin], [xmax, ymin], [xmax, ymax],
[xmin, ymax]]
annotations.append(annotation)
bounds = []
for i in range(len(annotations)):
box_info_dict = {"points": None, "text": None, "ignore": None}
box_info_dict["points"] = np.array(annotations[i]['box_coodi'])
if annotations[i]['name'] == "dnc":
box_info_dict["text"] = "###"
box_info_dict["ignore"] = True
else:
box_info_dict["text"] = annotations[i]['name']
box_info_dict["ignore"] = False
bounds.append(box_info_dict)
return bounds
#-------------------------------------------------------------------------------------------------------------------#
def load_prescription_gt(dataFolder):
total_img_path = []
total_imgs_bboxes = []
for (root, directories, files) in os.walk(dataFolder):
for file in files:
if '.jpg' in file:
img_path = os.path.join(root, file)
total_img_path.append(img_path)
if '.xml' in file:
gt_path = os.path.join(root, file)
total_imgs_bboxes.append(gt_path)
total_imgs_parsing_bboxes = []
for img_path, bbox in zip(sorted(total_img_path), sorted(total_imgs_bboxes)):
# check file
assert img_path.split(".jpg")[0] == bbox.split(".xml")[0]
result_label = xml_parsing(bbox)
total_imgs_parsing_bboxes.append(result_label)
return total_imgs_parsing_bboxes, sorted(total_img_path)
# NOTE
def load_prescription_cleval_gt(dataFolder):
total_img_path = []
total_gt_path = []
for (root, directories, files) in os.walk(dataFolder):
for file in files:
if '.jpg' in file:
img_path = os.path.join(root, file)
total_img_path.append(img_path)
if '_cl.txt' in file:
gt_path = os.path.join(root, file)
total_gt_path.append(gt_path)
total_imgs_parsing_bboxes = []
for img_path, gt_path in zip(sorted(total_img_path), sorted(total_gt_path)):
# check file
assert img_path.split(".jpg")[0] == gt_path.split('_label_cl.txt')[0]
lines = open(gt_path, encoding="utf-8").readlines()
word_bboxes = []
for line in lines:
box_info_dict = {"points": None, "text": None, "ignore": None}
box_info = line.strip().encode("utf-8").decode("utf-8-sig").split(",")
box_points = [int(box_info[i]) for i in range(8)]
box_info_dict["points"] = np.array(box_points)
word_bboxes.append(box_info_dict)
total_imgs_parsing_bboxes.append(word_bboxes)
return total_imgs_parsing_bboxes, sorted(total_img_path)
def load_synthtext_gt(data_folder):
synth_dataset = SynthTextDataSet(
output_size=768, data_dir=data_folder, saved_gt_dir=data_folder, logging=False
)
img_names, img_bbox, img_words = synth_dataset.load_data(bbox="word")
total_img_path = []
total_imgs_bboxes = []
for index in range(len(img_bbox[:100])):
img_path = os.path.join(data_folder, img_names[index][0])
total_img_path.append(img_path)
try:
wordbox = img_bbox[index].transpose((2, 1, 0))
except:
wordbox = np.expand_dims(img_bbox[index], axis=0)
wordbox = wordbox.transpose((0, 2, 1))
words = [re.split(" \n|\n |\n| ", t.strip()) for t in img_words[index]]
words = list(itertools.chain(*words))
words = [t for t in words if len(t) > 0]
if len(words) != len(wordbox):
import ipdb
ipdb.set_trace()
single_img_bboxes = []
for j in range(len(words)):
box_info_dict = {"points": None, "text": None, "ignore": None}
box_info_dict["points"] = wordbox[j]
box_info_dict["text"] = words[j]
box_info_dict["ignore"] = False
single_img_bboxes.append(box_info_dict)
total_imgs_bboxes.append(single_img_bboxes)
return total_imgs_bboxes, total_img_path
def load_icdar2015_gt(dataFolder, isTraing=False):
if isTraing:
img_folderName = "ch4_training_images"
gt_folderName = "ch4_training_localization_transcription_gt"
else:
img_folderName = "ch4_test_images"
gt_folderName = "ch4_test_localization_transcription_gt"
gt_folder_path = os.listdir(os.path.join(dataFolder, gt_folderName))
total_imgs_bboxes = []
total_img_path = []
for gt_path in gt_folder_path:
gt_path = os.path.join(os.path.join(dataFolder, gt_folderName), gt_path)
img_path = (
gt_path.replace(gt_folderName, img_folderName)
.replace(".txt", ".jpg")
.replace("gt_", "")
)
image = cv2.imread(img_path)
lines = open(gt_path, encoding="utf-8").readlines()
single_img_bboxes = []
for line in lines:
box_info_dict = {"points": None, "text": None, "ignore": None}
box_info = line.strip().encode("utf-8").decode("utf-8-sig").split(",")
box_points = [int(box_info[j]) for j in range(8)]
word = box_info[8:]
word = ",".join(word)
box_points = np.array(box_points, np.int32).reshape(4, 2)
cv2.polylines(
image, [np.array(box_points).astype(np.int)], True, (0, 0, 255), 1
)
box_info_dict["points"] = box_points
box_info_dict["text"] = word
if word == "###":
box_info_dict["ignore"] = True
else:
box_info_dict["ignore"] = False
single_img_bboxes.append(box_info_dict)
total_imgs_bboxes.append(single_img_bboxes)
total_img_path.append(img_path)
return total_imgs_bboxes, total_img_path
def load_icdar2013_gt(dataFolder, isTraing=False):
    # choose test dataset
if isTraing:
img_folderName = "Challenge2_Test_Task12_Images"
gt_folderName = "Challenge2_Test_Task1_GT"
else:
img_folderName = "Challenge2_Test_Task12_Images"
gt_folderName = "Challenge2_Test_Task1_GT"
gt_folder_path = os.listdir(os.path.join(dataFolder, gt_folderName))
total_imgs_bboxes = []
total_img_path = []
for gt_path in gt_folder_path:
gt_path = os.path.join(os.path.join(dataFolder, gt_folderName), gt_path)
img_path = (
gt_path.replace(gt_folderName, img_folderName)
.replace(".txt", ".jpg")
.replace("gt_", "")
)
image = cv2.imread(img_path)
lines = open(gt_path, encoding="utf-8").readlines()
single_img_bboxes = []
for line in lines:
box_info_dict = {"points": None, "text": None, "ignore": None}
box_info = line.strip().encode("utf-8").decode("utf-8-sig").split(",")
box = [int(box_info[j]) for j in range(4)]
word = box_info[4:]
word = ",".join(word)
box = [
[box[0], box[1]],
[box[2], box[1]],
[box[2], box[3]],
[box[0], box[3]],
]
box_info_dict["points"] = box
box_info_dict["text"] = word
if word == "###":
box_info_dict["ignore"] = True
else:
box_info_dict["ignore"] = False
single_img_bboxes.append(box_info_dict)
total_imgs_bboxes.append(single_img_bboxes)
total_img_path.append(img_path)
return total_imgs_bboxes, total_img_path
def test_net(
net,
image,
text_threshold,
link_threshold,
low_text,
cuda,
poly,
canvas_size=1280,
mag_ratio=1.5,
):
# resize
img_resized, target_ratio, size_heatmap = imgproc.resize_aspect_ratio(
image, canvas_size, interpolation=cv2.INTER_LINEAR, mag_ratio=mag_ratio
)
ratio_h = ratio_w = 1 / target_ratio
# preprocessing
x = imgproc.normalizeMeanVariance(img_resized)
x = torch.from_numpy(x).permute(2, 0, 1) # [h, w, c] to [c, h, w]
x = Variable(x.unsqueeze(0)) # [c, h, w] to [b, c, h, w]
if cuda:
x = x.cuda()
# forward pass
with torch.no_grad():
y, feature = net(x)
# make score and link map
score_text = y[0, :, :, 0].cpu().data.numpy().astype(np.float32)
score_link = y[0, :, :, 1].cpu().data.numpy().astype(np.float32)
# NOTE
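    # the model output covers the padded, resized canvas; crop to the valid
    # heatmap area so the padded border is not post-processed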
score_text = score_text[: size_heatmap[0], : size_heatmap[1]]
score_link = score_link[: size_heatmap[0], : size_heatmap[1]]
# Post-processing
boxes, polys = getDetBoxes(
score_text, score_link, text_threshold, link_threshold, low_text, poly
)
# coordinate adjustment
boxes = adjustResultCoordinates(boxes, ratio_w, ratio_h)
polys = adjustResultCoordinates(polys, ratio_w, ratio_h)
for k in range(len(polys)):
if polys[k] is None:
polys[k] = boxes[k]
# render results (optional)
score_text = score_text.copy()
render_score_text = imgproc.cvt2HeatmapImg(score_text)
render_score_link = imgproc.cvt2HeatmapImg(score_link)
render_img = [render_score_text, render_score_link]
# ret_score_text = imgproc.cvt2HeatmapImg(render_img)
return boxes, polys, render_img
|
[
"data.imgproc.resize_aspect_ratio",
"xml.etree.ElementTree.parse",
"data.imgproc.cvt2HeatmapImg",
"ipdb.set_trace",
"data.imgproc.normalizeMeanVariance",
"os.walk",
"numpy.expand_dims",
"math.sin",
"utils.craft_utils.adjustResultCoordinates",
"cv2.imread",
"numpy.array",
"math.cos",
"itertools.chain",
"data.dataset.SynthTextDataSet",
"utils.craft_utils.getDetBoxes",
"torch.no_grad",
"os.path.join",
"torch.from_numpy"
] |
[((531, 546), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (539, 546), False, 'import math\n'), ((562, 577), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (570, 577), False, 'import math\n'), ((1181, 1200), 'xml.etree.ElementTree.parse', 'elemTree.parse', (['xml'], {}), '(xml)\n', (1195, 1200), True, 'import xml.etree.ElementTree as elemTree\n'), ((3274, 3293), 'os.walk', 'os.walk', (['dataFolder'], {}), '(dataFolder)\n', (3281, 3293), False, 'import os\n'), ((4093, 4112), 'os.walk', 'os.walk', (['dataFolder'], {}), '(dataFolder)\n', (4100, 4112), False, 'import os\n'), ((5237, 5338), 'data.dataset.SynthTextDataSet', 'SynthTextDataSet', ([], {'output_size': '(768)', 'data_dir': 'data_folder', 'saved_gt_dir': 'data_folder', 'logging': '(False)'}), '(output_size=768, data_dir=data_folder, saved_gt_dir=\n data_folder, logging=False)\n', (5253, 5338), False, 'from data.dataset import SynthTextDataSet\n'), ((10253, 10358), 'data.imgproc.resize_aspect_ratio', 'imgproc.resize_aspect_ratio', (['image', 'canvas_size'], {'interpolation': 'cv2.INTER_LINEAR', 'mag_ratio': 'mag_ratio'}), '(image, canvas_size, interpolation=cv2.\n INTER_LINEAR, mag_ratio=mag_ratio)\n', (10280, 10358), False, 'from data import imgproc\n'), ((10438, 10480), 'data.imgproc.normalizeMeanVariance', 'imgproc.normalizeMeanVariance', (['img_resized'], {}), '(img_resized)\n', (10467, 10480), False, 'from data import imgproc\n'), ((11077, 11164), 'utils.craft_utils.getDetBoxes', 'getDetBoxes', (['score_text', 'score_link', 'text_threshold', 'link_threshold', 'low_text', 'poly'], {}), '(score_text, score_link, text_threshold, link_threshold,\n low_text, poly)\n', (11088, 11164), False, 'from utils.craft_utils import getDetBoxes, adjustResultCoordinates\n'), ((11216, 11264), 'utils.craft_utils.adjustResultCoordinates', 'adjustResultCoordinates', (['boxes', 'ratio_w', 'ratio_h'], {}), '(boxes, ratio_w, ratio_h)\n', (11239, 11264), False, 'from utils.craft_utils import getDetBoxes, adjustResultCoordinates\n'), ((11277, 11325), 'utils.craft_utils.adjustResultCoordinates', 'adjustResultCoordinates', (['polys', 'ratio_w', 'ratio_h'], {}), '(polys, ratio_w, ratio_h)\n', (11300, 11325), False, 'from utils.craft_utils import getDetBoxes, adjustResultCoordinates\n'), ((11511, 11545), 'data.imgproc.cvt2HeatmapImg', 'imgproc.cvt2HeatmapImg', (['score_text'], {}), '(score_text)\n', (11533, 11545), False, 'from data import imgproc\n'), ((11570, 11604), 'data.imgproc.cvt2HeatmapImg', 'imgproc.cvt2HeatmapImg', (['score_link'], {}), '(score_link)\n', (11592, 11604), False, 'from data import imgproc\n'), ((2682, 2719), 'numpy.array', 'np.array', (["annotations[i]['box_coodi']"], {}), "(annotations[i]['box_coodi'])\n", (2690, 2719), True, 'import numpy as np\n'), ((5538, 5584), 'os.path.join', 'os.path.join', (['data_folder', 'img_names[index][0]'], {}), '(data_folder, img_names[index][0])\n', (5550, 5584), False, 'import os\n'), ((6865, 6904), 'os.path.join', 'os.path.join', (['dataFolder', 'gt_folderName'], {}), '(dataFolder, gt_folderName)\n', (6877, 6904), False, 'import os\n'), ((7248, 7268), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (7258, 7268), False, 'import cv2\n'), ((8647, 8686), 'os.path.join', 'os.path.join', (['dataFolder', 'gt_folderName'], {}), '(dataFolder, gt_folderName)\n', (8659, 8686), False, 'import os\n'), ((9031, 9051), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (9041, 9051), False, 'import cv2\n'), ((10677, 10692), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', 
(10690, 10692), False, 'import torch\n'), ((4994, 5014), 'numpy.array', 'np.array', (['box_points'], {}), '(box_points)\n', (5002, 5014), True, 'import numpy as np\n'), ((5928, 5951), 'itertools.chain', 'itertools.chain', (['*words'], {}), '(*words)\n', (5943, 5951), False, 'import itertools\n'), ((6079, 6095), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (6093, 6095), False, 'import ipdb\n'), ((7023, 7062), 'os.path.join', 'os.path.join', (['dataFolder', 'gt_folderName'], {}), '(dataFolder, gt_folderName)\n', (7035, 7062), False, 'import os\n'), ((8806, 8845), 'os.path.join', 'os.path.join', (['dataFolder', 'gt_folderName'], {}), '(dataFolder, gt_folderName)\n', (8818, 8845), False, 'import os\n'), ((10489, 10508), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (10505, 10508), False, 'import torch\n'), ((3380, 3404), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (3392, 3404), False, 'import os\n'), ((3510, 3534), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (3522, 3534), False, 'import os\n'), ((4199, 4223), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (4211, 4223), False, 'import os\n'), ((4332, 4356), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (4344, 4356), False, 'import os\n'), ((5735, 5774), 'numpy.expand_dims', 'np.expand_dims', (['img_bbox[index]'], {'axis': '(0)'}), '(img_bbox[index], axis=0)\n', (5749, 5774), True, 'import numpy as np\n'), ((7699, 7729), 'numpy.array', 'np.array', (['box_points', 'np.int32'], {}), '(box_points, np.int32)\n', (7707, 7729), True, 'import numpy as np\n'), ((7795, 7815), 'numpy.array', 'np.array', (['box_points'], {}), '(box_points)\n', (7803, 7815), True, 'import numpy as np\n')]
|
from PIL import Image
import pytesseract
import os
import openpyxl as xl
from pytesseract import Output
from pytesseract import pytesseract as pt
import numpy as np
from matplotlib import pyplot as plt
import cv2
from imutils.object_detection import non_max_suppression
class Scan():
def __init__(self,folder,readfile,writefile):
self.folder=folder
self.readfile=readfile
self.writefile=writefile
def text_en(self):
os.chdir(self.folder)
img = Image.open(self.readfile)
        img.load()
        text = pytesseract.image_to_string(img, lang='eng')
        print(text)
        # a plain string has no save(); write the recognised text to the output file
        with open(self.writefile, 'w', encoding='utf-8') as out:
            out.write(text)
def text_ar(self):
os.chdir(self.folder)
img = Image.open(self.readfile)
img.load()
        text = pytesseract.image_to_string(img, lang='ara')
print(text)
wb2 = xl.load_workbook(self.writefile)
ws2 = wb2.get_sheet_by_name("Sheet1")
for row in ws2:
for cell in row:
ws2[cell.coordinate].value = text
wb2.save(self.writefile)
def pdf_extract_table(self):
import camelot
os.chdir(self.folder)
#table file must be pdf file
tables = camelot.read_pdf(self.readfile)
#TableList
#self.writefile must be csv file
n=1
tables.export(self.writefile, f='csv', compress=True) # json, excel, html,csv
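        # the statements below echo the camelot documentation example (table
        # indexing, shape and parsing report); only the final to_csv call writes output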
tables[1]
Table_shape=(7, 7)
tables[1].parsing_report
{
'accuracy': 99.02,
'whitespace': 12.24,
'order': 1,
'page': 1}
tables[1].to_csv(self.writefile) # to_json, to_excel, to_html,to_csv
def boxes(self):
os.chdir(self.folder)
# read the image and get the dimensions
img = cv2.imread(self.readfile)
h, w, _ = img.shape # assumes color image
# run tesseract, returning the bounding boxes
boxes = pytesseract.image_to_boxes(img) # also include any config options you use
# draw the bounding boxes on the image
for b in boxes.splitlines():
b = b.split(' ')
img = cv2.rectangle(img, (int(b[1]), h - int(b[2])), (int(b[3]), h - int(b[4])), (0, 255, 0), 2)
cv2.imshow('img', img)
cv2.waitKey(0)
def all_boxes(self):
os.chdir(self.folder)
# read the image and get the dimensions
img = cv2.imread(self.readfile)
gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
contours,hierarchy = cv2.findContours(gray,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
idx =0
for cnt in contours:
idx += 1
x,y,w,h = cv2.boundingRect(cnt)
roi=img[y:y+h,x:x+w]
cv2.imwrite(str(idx) + '.jpg', roi)
#cv2.rectangle(im,(x,y),(x+w,y+h),(200,0,0),2)
cv2.imshow('img',img)
cv2.waitKey(0)
def select_box(self):
        '''This function is useful for cropping images: select a region, then press Ctrl+C and paste it anywhere with Ctrl+V.'''
os.chdir(self.folder)
# read the image and get the dimensions
# Read image
img = cv2.imread(self.readfile)
# Select ROI
        showCrosshair = False  # hide the crosshair lines while selecting
        fromCenter = True  # True: draw the selection from the centre; False: drag corner to corner
r = cv2.selectROI('image',img, fromCenter, showCrosshair) #to select from center
# Crop image
imCrop = img[int(r[1]):int(r[1]+r[3]), int(r[0]):int(r[0]+r[2])]
# Display cropped image
cv2.imshow("Image", imCrop)
cv2.waitKey(0)
def hand_writing_digit(self):
os.chdir(self.folder)
        # digit recognition by the kNN technique
img = cv2.imread(self.readfile)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# Now we split the image to 5000 cells, each 20x20 size
cells = [np.hsplit(row,100) for row in np.vsplit(gray,50)]
# Make it into a Numpy array. It size will be (50,100,20,20)
x = np.array(cells)
# Now we prepare train_data and test_data.
train = x[:,:50].reshape(-1,400).astype(np.float32) # Size = (2500,400)
test = x[:,50:100].reshape(-1,400).astype(np.float32) # Size = (2500,400)
# Create labels for train and test data
k = np.arange(10)
train_labels = np.repeat(k,250)[:,np.newaxis]
test_labels = train_labels.copy()
# Initiate kNN, train the data, then test it with test data for k=1
knn = cv2.KNearest()
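        # cv2.KNearest is the OpenCV 2.4 API; on OpenCV 3+ use cv2.ml.KNearest_create()
        # with knn.train(train, cv2.ml.ROW_SAMPLE, train_labels) and
        # knn.findNearest(test, k=5)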
knn.train(train,train_labels)
ret,result,neighbours,dist = knn.find_nearest(test,k=5)
# Now we check the accuracy of classification
# For that, compare the result with test_labels and check which are wrong
matches = result==test_labels
correct = np.count_nonzero(matches)
accuracy = correct*100.0/result.size
print (accuracy)
# save the data
np.savez('knn_data.npz',train=train, train_labels=train_labels)
# Now load the data
with np.load('knn_data.npz') as data:
print (data.files)
train = data['train']
train_labels = data['train_labels']
def line_detection(self):
#Reading the required image in
# which operations are to be done.
# Make sure that the image is in the same
# directory in which this python program is
os.chdir(self.folder)
        # line detection with the Hough transform
img = cv2.imread(self.readfile)
# Convert the img to grayscale
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# Apply edge detection method on the image
edges = cv2.Canny(gray,50,150,apertureSize = 3)
# This returns an array of r and theta values
lines = cv2.HoughLines(edges,1,np.pi/180, 200)
# The below for loop runs till r and theta values
# are in the range of the 2d array
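        # note: on OpenCV 3+ HoughLines returns an array of shape (N, 1, 2), so this
        # loop only draws the first detected line; with the 2.x API (used elsewhere in
        # this class) lines[0] holds every detected line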
for r,theta in lines[0]:
# Stores the value of cos(theta) in a
a = np.cos(theta)
# Stores the value of sin(theta) in b
b = np.sin(theta)
# x0 stores the value rcos(theta)
x0 = a*r
# y0 stores the value rsin(theta)
y0 = b*r
# x1 stores the rounded off value of (rcos(theta)-1000sin(theta))
x1 = int(x0 + 1000*(-b))
# y1 stores the rounded off value of (rsin(theta)+1000cos(theta))
y1 = int(y0 + 1000*(a))
# x2 stores the rounded off value of (rcos(theta)+1000sin(theta))
x2 = int(x0 - 1000*(-b))
# y2 stores the rounded off value of (rsin(theta)-1000cos(theta))
y2 = int(y0 - 1000*(a))
# cv2.line draws a line in img from the point(x1,y1) to (x2,y2).
# (0,0,255) denotes the colour of the line to be
#drawn. In this case, it is red.
cv2.line(img,(x1,y1), (x2,y2), (0,0,255),2)
# All the changes made in the input image are finally
# written on a new image houghlines.jpg
cv2.imwrite('linesDetected.jpg', img)
cv2.imshow('img', img)
cv2.waitKey(0)
def spilt_cells_of_table(self):
os.chdir(self.folder)
        # locate horizontal table lines and crop the first row
img = cv2.imread(self.readfile)
# find edges in the image
edges = cv2.Laplacian(img, cv2.CV_8U)
# kernel used to remove vetical and small horizontal lines using erosion
kernel = np.zeros((5, 11), np.uint8)
kernel[2, :] = 1
eroded = cv2.morphologyEx(edges, cv2.MORPH_ERODE,
kernel) # erode image to remove unwanted lines
# find (x,y) position of the horizontal lines
indices = np.nonzero(eroded)
# As indices contain all the points along horizontal line, so get unique rows only (indices[0] contains rows or y coordinate)
rows = np.unique(indices[0])
# now you have unique rows but edges are more than 1 pixel thick
# so remove lines which are near to each other using a certain threshold
filtered_rows = []
for ii in range(len(rows)):
if ii == 0:
filtered_rows.append(rows[ii])
else:
if np.abs(rows[ii] - rows[ii - 1]) >= 10:
filtered_rows.append(rows[ii])
print(filtered_rows)
# crop first row of table
first_cropped_row = img[filtered_rows[0]:filtered_rows[1], :, :]
#cv2.resize(img, (960, 540)
cv2.imshow('Image', eroded)
cv2.imshow('Cropped_Row', first_cropped_row)
cv2.waitKey(0)
|
[
"numpy.load",
"numpy.abs",
"openpyxl.load_workbook",
"numpy.hsplit",
"numpy.sin",
"numpy.arange",
"cv2.imshow",
"os.chdir",
"numpy.unique",
"cv2.line",
"cv2.selectROI",
"cv2.cvtColor",
"cv2.imwrite",
"cv2.boundingRect",
"cv2.Laplacian",
"numpy.repeat",
"pytesseract.image_to_boxes",
"cv2.Canny",
"cv2.waitKey",
"cv2.morphologyEx",
"cv2.HoughLines",
"camelot.read_pdf",
"numpy.cos",
"numpy.savez",
"cv2.KNearest",
"numpy.vsplit",
"numpy.count_nonzero",
"numpy.zeros",
"pytesseract.image_to_string",
"PIL.Image.open",
"numpy.nonzero",
"cv2.imread",
"numpy.array",
"cv2.findContours"
] |
[((484, 505), 'os.chdir', 'os.chdir', (['self.folder'], {}), '(self.folder)\n', (492, 505), False, 'import os\n'), ((520, 545), 'PIL.Image.open', 'Image.open', (['self.readfile'], {}), '(self.readfile)\n', (530, 545), False, 'from PIL import Image\n'), ((579, 622), 'pytesseract.image_to_string', 'pytesseract.image_to_string', (['im'], {'lang': '"""eng"""'}), "(im, lang='eng')\n", (606, 622), False, 'import pytesseract\n'), ((707, 728), 'os.chdir', 'os.chdir', (['self.folder'], {}), '(self.folder)\n', (715, 728), False, 'import os\n'), ((743, 768), 'PIL.Image.open', 'Image.open', (['self.readfile'], {}), '(self.readfile)\n', (753, 768), False, 'from PIL import Image\n'), ((803, 846), 'pytesseract.image_to_string', 'pytesseract.image_to_string', (['im'], {'lang': '"""ara"""'}), "(im, lang='ara')\n", (830, 846), False, 'import pytesseract\n'), ((880, 912), 'openpyxl.load_workbook', 'xl.load_workbook', (['self.writefile'], {}), '(self.writefile)\n', (896, 912), True, 'import openpyxl as xl\n'), ((1160, 1181), 'os.chdir', 'os.chdir', (['self.folder'], {}), '(self.folder)\n', (1168, 1181), False, 'import os\n'), ((1236, 1267), 'camelot.read_pdf', 'camelot.read_pdf', (['self.readfile'], {}), '(self.readfile)\n', (1252, 1267), False, 'import camelot\n'), ((1735, 1756), 'os.chdir', 'os.chdir', (['self.folder'], {}), '(self.folder)\n', (1743, 1756), False, 'import os\n'), ((1823, 1848), 'cv2.imread', 'cv2.imread', (['self.readfile'], {}), '(self.readfile)\n', (1833, 1848), False, 'import cv2\n'), ((1971, 2002), 'pytesseract.image_to_boxes', 'pytesseract.image_to_boxes', (['img'], {}), '(img)\n', (1997, 2002), False, 'import pytesseract\n'), ((2272, 2294), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (2282, 2294), False, 'import cv2\n'), ((2303, 2317), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2314, 2317), False, 'import cv2\n'), ((2360, 2381), 'os.chdir', 'os.chdir', (['self.folder'], {}), '(self.folder)\n', (2368, 2381), False, 'import os\n'), ((2448, 2473), 'cv2.imread', 'cv2.imread', (['self.readfile'], {}), '(self.readfile)\n', (2458, 2473), False, 'import cv2\n'), ((2488, 2525), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (2500, 2525), False, 'import cv2\n'), ((2554, 2616), 'cv2.findContours', 'cv2.findContours', (['gray', 'cv2.RETR_LIST', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(gray, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n', (2570, 2616), False, 'import cv2\n'), ((2873, 2895), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (2883, 2895), False, 'import cv2\n'), ((2903, 2917), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2914, 2917), False, 'import cv2\n'), ((3069, 3090), 'os.chdir', 'os.chdir', (['self.folder'], {}), '(self.folder)\n', (3077, 3090), False, 'import os\n'), ((3196, 3221), 'cv2.imread', 'cv2.imread', (['self.readfile'], {}), '(self.readfile)\n', (3206, 3221), False, 'import cv2\n'), ((3411, 3465), 'cv2.selectROI', 'cv2.selectROI', (['"""image"""', 'img', 'fromCenter', 'showCrosshair'], {}), "('image', img, fromCenter, showCrosshair)\n", (3424, 3465), False, 'import cv2\n'), ((3651, 3678), 'cv2.imshow', 'cv2.imshow', (['"""Image"""', 'imCrop'], {}), "('Image', imCrop)\n", (3661, 3678), False, 'import cv2\n'), ((3687, 3701), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3698, 3701), False, 'import cv2\n'), ((3744, 3765), 'os.chdir', 'os.chdir', (['self.folder'], {}), '(self.folder)\n', (3752, 3765), False, 'import os\n'), ((3810, 3835), 
'cv2.imread', 'cv2.imread', (['self.readfile'], {}), '(self.readfile)\n', (3820, 3835), False, 'import cv2\n'), ((3851, 3888), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (3863, 3888), False, 'import cv2\n'), ((4102, 4117), 'numpy.array', 'np.array', (['cells'], {}), '(cells)\n', (4110, 4117), True, 'import numpy as np\n'), ((4393, 4406), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (4402, 4406), True, 'import numpy as np\n'), ((4594, 4608), 'cv2.KNearest', 'cv2.KNearest', ([], {}), '()\n', (4606, 4608), False, 'import cv2\n'), ((4904, 4929), 'numpy.count_nonzero', 'np.count_nonzero', (['matches'], {}), '(matches)\n', (4920, 4929), True, 'import numpy as np\n'), ((5032, 5096), 'numpy.savez', 'np.savez', (['"""knn_data.npz"""'], {'train': 'train', 'train_labels': 'train_labels'}), "('knn_data.npz', train=train, train_labels=train_labels)\n", (5040, 5096), True, 'import numpy as np\n'), ((5513, 5534), 'os.chdir', 'os.chdir', (['self.folder'], {}), '(self.folder)\n', (5521, 5534), False, 'import os\n'), ((5579, 5604), 'cv2.imread', 'cv2.imread', (['self.readfile'], {}), '(self.readfile)\n', (5589, 5604), False, 'import cv2\n'), ((5677, 5714), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (5689, 5714), False, 'import cv2\n'), ((5792, 5832), 'cv2.Canny', 'cv2.Canny', (['gray', '(50)', '(150)'], {'apertureSize': '(3)'}), '(gray, 50, 150, apertureSize=3)\n', (5801, 5832), False, 'import cv2\n'), ((5913, 5955), 'cv2.HoughLines', 'cv2.HoughLines', (['edges', '(1)', '(np.pi / 180)', '(200)'], {}), '(edges, 1, np.pi / 180, 200)\n', (5927, 5955), False, 'import cv2\n'), ((7355, 7392), 'cv2.imwrite', 'cv2.imwrite', (['"""linesDetected.jpg"""', 'img'], {}), "('linesDetected.jpg', img)\n", (7366, 7392), False, 'import cv2\n'), ((7402, 7424), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (7412, 7424), False, 'import cv2\n'), ((7433, 7447), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (7444, 7447), False, 'import cv2\n'), ((7492, 7513), 'os.chdir', 'os.chdir', (['self.folder'], {}), '(self.folder)\n', (7500, 7513), False, 'import os\n'), ((7558, 7583), 'cv2.imread', 'cv2.imread', (['self.readfile'], {}), '(self.readfile)\n', (7568, 7583), False, 'import cv2\n'), ((7636, 7665), 'cv2.Laplacian', 'cv2.Laplacian', (['img', 'cv2.CV_8U'], {}), '(img, cv2.CV_8U)\n', (7649, 7665), False, 'import cv2\n'), ((7764, 7791), 'numpy.zeros', 'np.zeros', (['(5, 11)', 'np.uint8'], {}), '((5, 11), np.uint8)\n', (7772, 7791), True, 'import numpy as np\n'), ((7834, 7882), 'cv2.morphologyEx', 'cv2.morphologyEx', (['edges', 'cv2.MORPH_ERODE', 'kernel'], {}), '(edges, cv2.MORPH_ERODE, kernel)\n', (7850, 7882), False, 'import cv2\n'), ((8028, 8046), 'numpy.nonzero', 'np.nonzero', (['eroded'], {}), '(eroded)\n', (8038, 8046), True, 'import numpy as np\n'), ((8196, 8217), 'numpy.unique', 'np.unique', (['indices[0]'], {}), '(indices[0])\n', (8205, 8217), True, 'import numpy as np\n'), ((8814, 8841), 'cv2.imshow', 'cv2.imshow', (['"""Image"""', 'eroded'], {}), "('Image', eroded)\n", (8824, 8841), False, 'import cv2\n'), ((8850, 8894), 'cv2.imshow', 'cv2.imshow', (['"""Cropped_Row"""', 'first_cropped_row'], {}), "('Cropped_Row', first_cropped_row)\n", (8860, 8894), False, 'import cv2\n'), ((8903, 8917), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (8914, 8917), False, 'import cv2\n'), ((2703, 2724), 'cv2.boundingRect', 'cv2.boundingRect', (['cnt'], {}), '(cnt)\n', (2719, 
2724), False, 'import cv2\n'), ((3970, 3989), 'numpy.hsplit', 'np.hsplit', (['row', '(100)'], {}), '(row, 100)\n', (3979, 3989), True, 'import numpy as np\n'), ((4430, 4447), 'numpy.repeat', 'np.repeat', (['k', '(250)'], {}), '(k, 250)\n', (4439, 4447), True, 'import numpy as np\n'), ((5138, 5161), 'numpy.load', 'np.load', (['"""knn_data.npz"""'], {}), "('knn_data.npz')\n", (5145, 5161), True, 'import numpy as np\n'), ((6180, 6193), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (6186, 6193), True, 'import numpy as np\n'), ((6271, 6284), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (6277, 6284), True, 'import numpy as np\n'), ((7177, 7226), 'cv2.line', 'cv2.line', (['img', '(x1, y1)', '(x2, y2)', '(0, 0, 255)', '(2)'], {}), '(img, (x1, y1), (x2, y2), (0, 0, 255), 2)\n', (7185, 7226), False, 'import cv2\n'), ((4000, 4019), 'numpy.vsplit', 'np.vsplit', (['gray', '(50)'], {}), '(gray, 50)\n', (4009, 4019), True, 'import numpy as np\n'), ((8543, 8574), 'numpy.abs', 'np.abs', (['(rows[ii] - rows[ii - 1])'], {}), '(rows[ii] - rows[ii - 1])\n', (8549, 8574), True, 'import numpy as np\n')]
|
import os
import sys
import numpy as np
import scipy.io as sio
from skimage import io
import time
import math
import skimage
import src.faceutil
from src.faceutil import mesh
from src.faceutil.morphable_model import MorphabelModel
from src.util.matlabutil import NormDirection
from math import sin, cos, asin, acos, atan, atan2
from PIL import Image
import matplotlib.pyplot as plt
# global data
bfm = MorphabelModel('data/Out/BFM.mat')
def get_transform_matrix(s, angles, t, height):
"""
:param s: scale
:param angles: [3] rad
:param t: [3]
    :param height: image height, used to flip the y axis
    :return: 4x4 transform matrix
"""
x, y, z = angles[0], angles[1], angles[2]
Rx = np.array([[1, 0, 0],
[0, cos(x), sin(x)],
[0, -sin(x), cos(x)]])
Ry = np.array([[cos(y), 0, -sin(y)],
[0, 1, 0],
[sin(y), 0, cos(y)]])
Rz = np.array([[cos(z), sin(z), 0],
[-sin(z), cos(z), 0],
[0, 0, 1]])
# rotate
R = Rx.dot(Ry).dot(Rz)
R = R.astype(np.float32)
T = np.zeros((4, 4))
T[0:3, 0:3] = R
T[3, 3] = 1.
# scale
S = np.diagflat([s, s, s, 1.])
T = S.dot(T)
# offset move
M = np.diagflat([1., 1., 1., 1.])
M[0:3, 3] = t.astype(np.float32)
T = M.dot(T)
# revert height
# x[:,1]=height-x[:,1]
H = np.diagflat([1., 1., 1., 1.])
H[1, 1] = -1.0
H[1, 3] = height
T = H.dot(T)
return T.astype(np.float32)
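# Illustrative sketch (assumption, not part of the original pipeline): with unit
# scale, zero rotation and zero translation the transform reduces to the final
# y-flip, i.e. y' = height - y.
def _example_identity_transform(height=256):
    return get_transform_matrix(1.0, np.zeros(3), np.zeros(3), height)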
|
[
"numpy.diagflat",
"numpy.zeros",
"src.faceutil.morphable_model.MorphabelModel",
"math.sin",
"math.cos"
] |
[((404, 438), 'src.faceutil.morphable_model.MorphabelModel', 'MorphabelModel', (['"""data/Out/BFM.mat"""'], {}), "('data/Out/BFM.mat')\n", (418, 438), False, 'from src.faceutil.morphable_model import MorphabelModel\n'), ((1060, 1076), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (1068, 1076), True, 'import numpy as np\n'), ((1134, 1161), 'numpy.diagflat', 'np.diagflat', (['[s, s, s, 1.0]'], {}), '([s, s, s, 1.0])\n', (1145, 1161), True, 'import numpy as np\n'), ((1204, 1237), 'numpy.diagflat', 'np.diagflat', (['[1.0, 1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0, 1.0])\n', (1215, 1237), True, 'import numpy as np\n'), ((1343, 1376), 'numpy.diagflat', 'np.diagflat', (['[1.0, 1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0, 1.0])\n', (1354, 1376), True, 'import numpy as np\n'), ((700, 706), 'math.cos', 'cos', (['x'], {}), '(x)\n', (703, 706), False, 'from math import sin, cos, asin, acos, atan, atan2\n'), ((708, 714), 'math.sin', 'sin', (['x'], {}), '(x)\n', (711, 714), False, 'from math import sin, cos, asin, acos, atan, atan2\n'), ((749, 755), 'math.cos', 'cos', (['x'], {}), '(x)\n', (752, 755), False, 'from math import sin, cos, asin, acos, atan, atan2\n'), ((779, 785), 'math.cos', 'cos', (['y'], {}), '(y)\n', (782, 785), False, 'from math import sin, cos, asin, acos, atan, atan2\n'), ((850, 856), 'math.sin', 'sin', (['y'], {}), '(y)\n', (853, 856), False, 'from math import sin, cos, asin, acos, atan, atan2\n'), ((861, 867), 'math.cos', 'cos', (['y'], {}), '(y)\n', (864, 867), False, 'from math import sin, cos, asin, acos, atan, atan2\n'), ((891, 897), 'math.cos', 'cos', (['z'], {}), '(z)\n', (894, 897), False, 'from math import sin, cos, asin, acos, atan, atan2\n'), ((899, 905), 'math.sin', 'sin', (['z'], {}), '(z)\n', (902, 905), False, 'from math import sin, cos, asin, acos, atan, atan2\n'), ((940, 946), 'math.cos', 'cos', (['z'], {}), '(z)\n', (943, 946), False, 'from math import sin, cos, asin, acos, atan, atan2\n'), ((741, 747), 'math.sin', 'sin', (['x'], {}), '(x)\n', (744, 747), False, 'from math import sin, cos, asin, acos, atan, atan2\n'), ((791, 797), 'math.sin', 'sin', (['y'], {}), '(y)\n', (794, 797), False, 'from math import sin, cos, asin, acos, atan, atan2\n'), ((932, 938), 'math.sin', 'sin', (['z'], {}), '(z)\n', (935, 938), False, 'from math import sin, cos, asin, acos, atan, atan2\n')]
|
##
# train eeg data of mind commands
# (beta)
#
##
import json
import os
import sys
import time
import pickle
import numpy as np
from mindFunctions import filterDownsampleData
import codecs, json
from scipy.signal import butter, lfilter
from sklearn import svm, preprocessing, metrics
from sklearn.model_selection import GridSearchCV, StratifiedShuffleSplit
from pathlib import Path
# enable/disable debug Mode
debug = False
# the 5 commands from player
commands = ['volup', 'playpause', 'next', 'prev', 'voldown']
cmdCount = len(commands) # nr of commands
def main():
# read training data from files
    # default path with stored training data
    # filepath example: '<your project path>/data/mind/training-playpause.json'
cwd = os.getcwd()
traindataFolder = cwd + '/data/mind/'
# default path if python script runs standalone
if (os.path.basename(cwd) == "pyscripts"):
traindataFolder = cwd + '/../../data/mind/'
traindata = []
for cmd in range(cmdCount):
filepath = Path(traindataFolder + 'training-' + commands[cmd] + '.json')
# read file of trainingCmd
with open(filepath) as f:
data = json.load(f)
traindata.append(np.array(data, dtype='f'))
# read in baseline from file
baseline = []
blpath = Path(traindataFolder + 'training-baseline.json')
# read file of baseline
with open(blpath) as blf:
bl = json.load(blf)
baseline = np.array(bl, dtype='f')
## read in test data
with open(traindataFolder + 'test-baseline.json') as f:
baselineTest = json.load(f)
with open(traindataFolder + 'test-volts.json') as f:
voltsTest = json.load(f)
# create a numpy array
voltsTest = np.array(voltsTest, dtype='f')
baselineTest = np.array(baselineTest, dtype='f')
if debug:
print("\n------ Training Data ------")
print("traindata length should be 5 (cmds): " + str(len(traindata)))
print("traindata[0] length should be 1500 (samples): " + str(len(traindata[0])))
print("traindata[0][0] length should be 8 (channels): " + str(len(traindata[0][0])))
# 1. Filter and Downsample Trainingdata and Baseline
[filterdTraindata, baselineDataBP] = filterDownsampleData(traindata, baseline, commands, debug)
if debug:
print("\n------ Filtered Training Data ------")
print("filterdTraindata length should be 5 (cmds): " + str(len(filterdTraindata)))
print("filterdTraindata[0] length is now 8 (channels): " + str(len(filterdTraindata[0])))
print("filterdTraindata[0][0] length is now 250 (samples): " + str(len(filterdTraindata[0][0])))
# # save filterd Data
# filterdTraindata = np.array(filterdTraindata)
# baselineDataBP = np.array(baselineDataBP)
# outfile = '../../data/mind/model/filterdTraingdata.txt'
# json.dump(filterdTraindata.tolist(), codecs.open(outfile, 'w', encoding='utf-8'), separators=(',', ':'), sort_keys=True,
# indent=4) ### this saves the array in .json format
# outfile = '../../data/mind/model/baselineDataBP.txt'
# json.dump(baselineDataBP.tolist(), codecs.open(outfile, 'w', encoding='utf-8'), separators=(',', ':'), sort_keys=True,
# indent=4) ### this saves the array in .json format
## 2. Extract Features for Trainingdata (only commands)
[X, y] = extractFeature(filterdTraindata)
if debug:
print("Anz. Features: " + str(len(X)))
print("y: " + str(y))
## 3. Train Model with features
# gamma: defines how far the influence of a single training example reaches, with low values meaning ‘far’ and high values meaning ‘close’.
# C: trades off misclassification of training examples against simplicity of the decision surface.
# A low C makes the decision surface smooth, while a high C aims at classifying all training examples correctly by giving the model freedom to select more samples as support vectors.
# Find optimal gamma and C parameters: http://scikit-learn.org/stable/auto_examples/svm/plot_rbf_parameters.html
# TODO: Set correct SVM params
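    # findTrainClassifier (defined below) grid-searches log-spaced C and gamma values
    # with stratified shuffle-split cross-validation and returns the best pair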
[C, gamma] = findTrainClassifier(X, y)
clf = svm.SVC(kernel='rbf', gamma=gamma, C=C)
clf.fit(X, y)
## save model
with open('../../data/mind/model/svm_model-mind.txt', 'wb') as outfile:
pickle.dump(clf, outfile)
    ## Check if training data gets 100% accuracy
if debug:
[accuracy, _, _] = modelAccuracy(y, clf.predict(X))
if (accuracy == 1.0):
print("Correct classification with traingdata")
else:
print("Wrong classification with traingdata. check SVM algorithm")
print("\n------ Test Data ------")
## 4. Filter and Downsample Testdata
    [filterdTestdata, _] = filterDownsampleData(voltsTest, baselineTest, commands, debug)
## 5. Extract Features from Testdata
targetCmd = 1 # Playpause===1
    [X_test, y_test] = extractFeatureTest(filterdTestdata, targetCmd)
print("Anz. Features X_Test: " + str(len(X_test)))
print("y_Test: " + str(y_test))
## 6. Check Model Accuracy
print("\n------ Model Accuracy ------")
y_pred = clf.predict(X_test) # Predict the response for test dataset
if debug: print("predicted y " + str(y_pred))
[accuracy, precision, recall] = modelAccuracy(y_test, y_pred)
print("Accuracy: " + str(accuracy))
print("Precision: " + str(precision))
print("Recall: " + str(recall))
# send success back to node
# TODO: implement real success boolean return
print('true')
def extractFeature(dataFilterd):
## Create X and Y data for SVM training
X = []
y = []
# TODO: Extract Features
## Reshape Data
reshapedData = []
dataFilterdNp = np.array(dataFilterd)
trainCmd, nx, ny = dataFilterdNp.shape
reshapedData = dataFilterdNp.reshape((trainCmd, nx * ny))
if (debug):
print("\n-- Reshaped Data ---")
print("len(reshapedData) aka 5 cmds: " + str(len(reshapedData)))
print("len(reshapedData[0]) channels*samples aka 8*250=2000 : " + str(len(reshapedData[0])))
for cmd in range(cmdCount):
X.append(reshapedData[cmd][0:2000])
X.append(reshapedData[cmd][2000:4000])
X.append(reshapedData[cmd][4000:6000])
y.append(cmd)
y.append(cmd)
y.append(cmd)
# Feature Standardization
X = preprocessing.scale(X)
return X, y
def extractFeatureTest(dataDownSample, cmd):
## Create X and Y data for SVM test
X = []
y = []
print(len(X))
X.append(dataDownSample)
y.append(cmd)
if debug:
print("\n-- X and Y Data ---")
print("y : " + str(y))
## Feature Standardization
X = preprocessing.scale(X)
return X, y
def modelAccuracy(y_test, y_pred):
# Model Accuracy: how often is the classifier correct
accuracy = metrics.accuracy_score(y_test, y_pred)
# Model Precision: what percentage of positive tuples are labeled as such?
precision = metrics.precision_score(y_test, y_pred)
# Model Recall: what percentage of positive tuples are labelled as such?
recall = metrics.recall_score(y_test, y_pred)
return [accuracy, precision, recall]
def findTrainClassifier(X, y):
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(n_splits=5, test_size=0.2, random_state=42)
grid = GridSearchCV(svm.SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
if debug:
print("The best parameters are %s with a score of %0.2f" % (grid.best_params_, grid.best_score_))
return grid.best_params_['C'], grid.best_params_['gamma']
# start process
if __name__ == '__main__':
main()
|
[
"pickle.dump",
"json.load",
"sklearn.preprocessing.scale",
"os.getcwd",
"numpy.logspace",
"sklearn.metrics.accuracy_score",
"os.path.basename",
"sklearn.metrics.recall_score",
"sklearn.model_selection.StratifiedShuffleSplit",
"pathlib.Path",
"numpy.array",
"sklearn.metrics.precision_score",
"sklearn.svm.SVC",
"mindFunctions.filterDownsampleData"
] |
[((745, 756), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (754, 756), False, 'import os\n'), ((1302, 1350), 'pathlib.Path', 'Path', (["(traindataFolder + 'training-baseline.json')"], {}), "(traindataFolder + 'training-baseline.json')\n", (1306, 1350), False, 'from pathlib import Path\n'), ((1452, 1475), 'numpy.array', 'np.array', (['bl'], {'dtype': '"""f"""'}), "(bl, dtype='f')\n", (1460, 1475), True, 'import numpy as np\n'), ((1732, 1762), 'numpy.array', 'np.array', (['voltsTest'], {'dtype': '"""f"""'}), "(voltsTest, dtype='f')\n", (1740, 1762), True, 'import numpy as np\n'), ((1782, 1815), 'numpy.array', 'np.array', (['baselineTest'], {'dtype': '"""f"""'}), "(baselineTest, dtype='f')\n", (1790, 1815), True, 'import numpy as np\n'), ((2236, 2294), 'mindFunctions.filterDownsampleData', 'filterDownsampleData', (['traindata', 'baseline', 'commands', 'debug'], {}), '(traindata, baseline, commands, debug)\n', (2256, 2294), False, 'from mindFunctions import filterDownsampleData\n'), ((4184, 4223), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""rbf"""', 'gamma': 'gamma', 'C': 'C'}), "(kernel='rbf', gamma=gamma, C=C)\n", (4191, 4223), False, 'from sklearn import svm, preprocessing, metrics\n'), ((5826, 5847), 'numpy.array', 'np.array', (['dataFilterd'], {}), '(dataFilterd)\n', (5834, 5847), True, 'import numpy as np\n'), ((6462, 6484), 'sklearn.preprocessing.scale', 'preprocessing.scale', (['X'], {}), '(X)\n', (6481, 6484), False, 'from sklearn import svm, preprocessing, metrics\n'), ((6800, 6822), 'sklearn.preprocessing.scale', 'preprocessing.scale', (['X'], {}), '(X)\n', (6819, 6822), False, 'from sklearn import svm, preprocessing, metrics\n'), ((6950, 6988), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (6972, 6988), False, 'from sklearn import svm, preprocessing, metrics\n'), ((7085, 7124), 'sklearn.metrics.precision_score', 'metrics.precision_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (7108, 7124), False, 'from sklearn import svm, preprocessing, metrics\n'), ((7216, 7252), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (7236, 7252), False, 'from sklearn import svm, preprocessing, metrics\n'), ((7342, 7365), 'numpy.logspace', 'np.logspace', (['(-2)', '(10)', '(13)'], {}), '(-2, 10, 13)\n', (7353, 7365), True, 'import numpy as np\n'), ((7384, 7406), 'numpy.logspace', 'np.logspace', (['(-9)', '(3)', '(13)'], {}), '(-9, 3, 13)\n', (7395, 7406), True, 'import numpy as np\n'), ((7468, 7534), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {'n_splits': '(5)', 'test_size': '(0.2)', 'random_state': '(42)'}), '(n_splits=5, test_size=0.2, random_state=42)\n', (7490, 7534), False, 'from sklearn.model_selection import GridSearchCV, StratifiedShuffleSplit\n'), ((860, 881), 'os.path.basename', 'os.path.basename', (['cwd'], {}), '(cwd)\n', (876, 881), False, 'import os\n'), ((1022, 1083), 'pathlib.Path', 'Path', (["(traindataFolder + 'training-' + commands[cmd] + '.json')"], {}), "(traindataFolder + 'training-' + commands[cmd] + '.json')\n", (1026, 1083), False, 'from pathlib import Path\n'), ((1422, 1436), 'json.load', 'json.load', (['blf'], {}), '(blf)\n', (1431, 1436), False, 'import codecs, json\n'), ((1585, 1597), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1594, 1597), False, 'import codecs, json\n'), ((1675, 1687), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1684, 1687), False, 'import codecs, json\n'), ((4345, 4370), 
'pickle.dump', 'pickle.dump', (['clf', 'outfile'], {}), '(clf, outfile)\n', (4356, 4370), False, 'import pickle\n'), ((4794, 4856), 'mindFunctions.filterDownsampleData', 'filterDownsampleData', (['voltsTest', 'baselineTest', 'commands', 'debug'], {}), '(voltsTest, baselineTest, commands, debug)\n', (4814, 4856), False, 'from mindFunctions import filterDownsampleData\n'), ((7559, 7568), 'sklearn.svm.SVC', 'svm.SVC', ([], {}), '()\n', (7566, 7568), False, 'from sklearn import svm, preprocessing, metrics\n'), ((1172, 1184), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1181, 1184), False, 'import codecs, json\n'), ((1210, 1235), 'numpy.array', 'np.array', (['data'], {'dtype': '"""f"""'}), "(data, dtype='f')\n", (1218, 1235), True, 'import numpy as np\n')]
|
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
from matplotlib.figure import Figure
from gui.utils import MessageBox
import numpy as np
class MplCanvas(FigureCanvasQTAgg):
"""
A canvas for matplotlib plots. Contains all plot functionality for Plot Mode
"""
def __init__(self, components, plotting_preferences):
self.fig = Figure(dpi=100)
self.empty = True
self.components = components
self.isenthalps = None
self.isentropes = None
self.isotherms = None
self.isobars = None
super(MplCanvas, self).__init__(figure=self.fig)
self.plotting_preferences = plotting_preferences
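        # NOTE: self.axes is assumed to be created elsewhere (by the Plot Mode setup)
        # before any of the plot_* methods below are used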
def toggle_isenthalps(self, is_checked):
"""
Hides / shows isenthalp lines in the plot if a plot exists
:param is_checked: Status of isenthalp button (bool)
"""
if not self.empty and self.isenthalps:
if is_checked:
for line in self.isenthalps:
line.set_linestyle("solid")
else:
for line in self.isenthalps:
line.set_linestyle("None")
self.draw()
def toggle_isentropes(self, is_checked):
"""
Hides / shows isentrope lines in the plot if a plot exists
:param is_checked: Status of isentrope button (bool)
"""
if not self.empty and self.isentropes:
if is_checked:
for line in self.isentropes:
line.set_linestyle("solid")
else:
for line in self.isentropes:
line.set_linestyle("None")
self.draw()
else:
return
def toggle_isotherms(self, is_checked):
"""
Hides / shows isotherm lines in the plot if a plot exists
:param is_checked: Status of isotherm button (bool)
"""
if not self.empty and self.isotherms:
if is_checked:
for line in self.isotherms:
line.set_linestyle("solid")
else:
for line in self.isotherms:
line.set_linestyle("None")
self.draw()
else:
return
def toggle_isobars(self, is_checked):
"""
Hides / shows isobar lines in the plot if a plot exists
:param is_checked: Status of isobar button (bool)
"""
if not self.empty and self.isobars:
if is_checked:
for line in self.isobars:
line.set_linestyle("solid")
else:
for line in self.isobars:
line.set_linestyle("None")
self.draw()
else:
return
def plot_envelope(self, tp, prim_vars, fractions):
"""
Plots a phase envelope
:param tp: Thermopack instance
:param prim_vars: Primary variables for the plot (e.g. PT, PH, ..)
:param fractions: List of molar fractions for the components
"""
tpv_settings = self.plotting_preferences["Phase envelope"]["TPV"]
isopleth_settings = self.plotting_preferences["Phase envelope"]["Isopleths"]
critical_settings = self.plotting_preferences["Phase envelope"]["Critical"]
plot_settings = self.plotting_preferences["Phase envelope"]["Plotting"]
p_initial = tpv_settings["Initial pressure"]
t_min = tpv_settings["Minimum temperature"]
p_max = tpv_settings["Maximum pressure"]
step_size = tpv_settings["Step size"]
# Calculate T, P, V
T, P, V = tp.get_envelope_twophase(initial_pressure=p_initial, z=fractions, maximum_pressure=p_max,
minimum_temperature=t_min, step_size=step_size, calc_v=True)
H = np.array([tp.enthalpy_tv(T[i], V[i], fractions) for i in range(len(T))])
S = np.array([tp.entropy_tv(T[i], V[i], fractions) for i in range(len(T))])
global H_list
global T_list
global S_list
global P_list
n_isopleths = isopleth_settings["Number of isopleths"]
H_list = np.linspace(np.min(H), np.max(H), n_isopleths)
S_list = np.linspace(np.min(S), np.max(S), n_isopleths)
T_list = np.linspace(np.min(T) * 0.60, np.max(T) * 1.40, n_isopleths)
P_list = np.linspace(np.min(P) * 0.60, np.max(P) * 1.40, n_isopleths)
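        # widen the isopleth temperature/pressure ranges by 40 % beyond the envelope
        # extrema so the isotherms and isobars extend past the phase envelope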
temp = critical_settings["Temperature"]
v = critical_settings["Volume"]
tol = critical_settings["Error tolerance"]
# Calculate critical variables
try:
T_c, V_c, P_c = tp.critical(n=fractions, temp=temp, v=v, tol=tol)
H_c = tp.enthalpy_tv(T_c, V_c, fractions)
S_c = tp.entropy_tv(T_c, V_c, fractions)
except Exception as e:
msg = MessageBox("Error", str(e))
msg.exec_()
T_c, V_c, P_c, H_c, S_c = None, None, None, None, None
# Set global variables, so that they are accessible in all phase envelope plot functions
global isopleth_1_color
global isopleth_2_color
global P_min
global P_max
global T_min
global T_max
global nmax
isopleth_1_color = plot_settings["Colors"][2]
isopleth_2_color = plot_settings["Colors"][3]
P_min = isopleth_settings["Minimum pressure"]
P_max = isopleth_settings["Maximum pressure"]
T_min = isopleth_settings["Minimum temperature"]
T_max = isopleth_settings["Maximum temperature"]
nmax = isopleth_settings["N max"]
# Plot depending on which primary variables are chosen
if prim_vars == "PT":
x, y, crit_x, crit_y = self.plot_envelope_PT(tp, T, P, T_c, P_c, fractions)
elif prim_vars == "PH":
x, y, crit_x, crit_y = self.plot_envelope_PH(tp, P, H, P_c, H_c, fractions)
elif prim_vars == "PS":
x, y, crit_x, crit_y = self.plot_envelope_PS(tp, P, S, P_c, S_c, fractions)
elif prim_vars == "TH":
x, y, crit_x, crit_y = self.plot_envelope_TH(tp, T, H, T_c, H_c, fractions)
elif prim_vars == "TS":
x, y, crit_x, crit_y = self.plot_envelope_TS(tp, T, S, T_c, S_c, fractions)
else:
return
# Plotting
line_color = plot_settings["Colors"][0]
point_color = plot_settings["Colors"][1]
grid_on = plot_settings["Grid on"]
xlabel = plot_settings["x label"]
ylabel = plot_settings["y label"]
title = plot_settings["Title"]
self.axes.plot(x, y, color=line_color, label="Phase envelope")
self.axes.scatter([crit_x], [crit_y], color=point_color, label="Critical point")
self.axes.set_title(title)
self.axes.grid(grid_on)
self.axes.set_xlabel(xlabel)
self.axes.set_ylabel(ylabel)
# Sort entries in the legend
legend = True
if legend:
if n_isopleths > 0:
handles, labels = self.axes.get_legend_handles_labels()
self.axes.legend([handles[3], handles[2], handles[0], handles[1]],
[labels[3], labels[2], labels[0], labels[1]],
loc="best")
else:
self.axes.legend()
self.draw()
def plot_envelope_PT(self, tp, T, P, T_c, P_c, fractions):
"""
Return plot data for a PT phase envelope
:param tp: Thermopack instance
:param T: Temperature values
:param P: Pressure values
:param T_c: Critical temperature
:param P_c: Critical pressure
:param fractions: List of molar fractions
:return: x: x values for plot,
y: y values for plot,
crit_x: x value for critical point,
crit_y: y value for critical point,
"""
# Display correct buttons
self.parent().parent().parent().isopleth_btn_stack.setCurrentIndex(0)
self.parent().parent().parent().PT_H_btn.setChecked(True)
self.parent().parent().parent().PT_S_btn.setChecked(True)
x = T
y = P
crit_x = T_c
crit_y = P_c
# Isenthalps, isentropes
enthalpies = H_list
entropies = S_list
self.isenthalps = []
self.isentropes = []
for i in range(len(enthalpies)):
t_vals, p_vals, v_vals, s_vals = tp.get_isenthalp(enthalpies[i], fractions, minimum_pressure=P_min,
maximum_pressure=P_max, minimum_temperature=T_min,
maximum_temperature=T_max, nmax=nmax)
if i == 0:
h_line, = self.axes.plot(t_vals, p_vals, color=isopleth_1_color, label="Isenthalp")
else:
h_line, = self.axes.plot(t_vals, p_vals, color=isopleth_1_color)
self.isenthalps.append(h_line)
t_vals, p_vals, v_vals, h_vals = tp.get_isentrope(entropies[i], fractions, minimum_pressure=P_min,
maximum_pressure=P_max, minimum_temperature=T_min,
maximum_temperature=T_max, nmax=nmax)
if i == 0:
s_line, = self.axes.plot(t_vals, p_vals, color=isopleth_2_color, label="Isentrope")
else:
s_line, = self.axes.plot(t_vals, p_vals, color=isopleth_2_color)
self.isentropes.append(s_line)
self.isotherms = None
self.isobars = None
return x, y, crit_x, crit_y
def plot_envelope_PH(self, tp, P, H, P_c, H_c, fractions):
"""
Return plot data for a PH phase envelope
:param tp: Thermopack instance
:param P: Pressure values
:param H: Enthalpy values
:param P_c: Critical pressure
:param H_c: Critical enthalpy
:param fractions: List of molar fractions
:return: x: x values for plot,
y: y values for plot,
crit_x: x value for critical point,
crit_y: y value for critical point,
"""
# Display correct buttons
self.parent().parent().parent().isopleth_btn_stack.setCurrentIndex(1)
self.parent().parent().parent().PH_T_btn.setChecked(True)
self.parent().parent().parent().PH_S_btn.setChecked(True)
x = H
y = P
crit_x = H_c
crit_y = P_c
# isotherms, isentropes
temperatures = T_list
entropies = S_list
self.isotherms = []
self.isentropes = []
for i in range(len(temperatures)):
p_vals, v_vals, s_vals, h_vals = tp.get_isotherm(temperatures[i], fractions, minimum_pressure=P_min,
maximum_pressure=P_max, nmax=nmax)
if i == 0:
t_line, = self.axes.plot(h_vals, p_vals, color=isopleth_1_color, label="Isotherm")
else:
t_line, = self.axes.plot(h_vals, p_vals, color=isopleth_1_color)
self.isotherms.append(t_line)
t_vals, p_vals, v_vals, h_vals = tp.get_isentrope(entropies[i], fractions, minimum_pressure=P_min,
maximum_pressure=P_max, minimum_temperature=T_min,
maximum_temperature=T_max, nmax=nmax)
if i == 0:
s_line, = self.axes.plot(h_vals, p_vals, color=isopleth_2_color, label="Isentrope")
else:
s_line, = self.axes.plot(h_vals, p_vals, color=isopleth_2_color)
self.isentropes.append(s_line)
self.isenthalps = None
self.isobars = None
return x, y, crit_x, crit_y
def plot_envelope_PS(self, tp, P, S, P_c, S_c, fractions):
"""
Return plot data for a PS phase envelope
:param tp: Thermopack instance
:param P: Pressure values
:param S: Entropy values
:param P_c: Critical pressure
:param S_c: Critical entropy
:param fractions: List of molar fractions
:return: x: x values for plot,
y: y values for plot,
crit_x: x value for critical point,
crit_y: y value for critical point,
"""
# Display correct buttons
self.parent().parent().parent().isopleth_btn_stack.setCurrentIndex(2)
self.parent().parent().parent().PS_T_btn.setChecked(True)
self.parent().parent().parent().PS_H_btn.setChecked(True)
x = S
y = P
crit_x = S_c
crit_y = P_c
# isotherms, isenthalps
temperatures = T_list
enthalpies = H_list
self.isotherms = []
self.isenthalps = []
for i in range(len(temperatures)):
p_vals, v_vals, s_vals, h_vals = tp.get_isotherm(temperatures[i], fractions, minimum_pressure=P_min,
maximum_pressure=P_max, nmax=nmax)
if i == 0:
t_line, = self.axes.plot(s_vals, p_vals, color=isopleth_1_color, label="Isotherm")
else:
t_line, = self.axes.plot(s_vals, p_vals, color=isopleth_1_color)
self.isotherms.append(t_line)
t_vals, p_vals, v_vals, s_vals = tp.get_isenthalp(enthalpies[i], fractions, minimum_pressure=P_min,
maximum_pressure=P_max, minimum_temperature=T_min,
maximum_temperature=T_max, nmax=nmax)
if i == 0:
h_line, = self.axes.plot(s_vals, p_vals, color=isopleth_2_color, label="Isenthalp")
else:
h_line, = self.axes.plot(s_vals, p_vals, color=isopleth_2_color)
self.isenthalps.append(h_line)
self.isentropes = None
self.isobars = None
return x, y, crit_x, crit_y
def plot_envelope_TH(self, tp, T, H, T_c, H_c, fractions):
"""
        Return plot data for a TH phase envelope
:param tp: Thermopack instance
:param T: Temperature values
:param H: Enthalpy values
:param T_c: Critical temperature
:param H_c: Critical enthalpy
:param fractions: List of molar fractions
:return: x: x values for plot,
y: y values for plot,
crit_x: x value for critical point,
crit_y: y value for critical point,
"""
# Display correct buttons
self.parent().parent().parent().isopleth_btn_stack.setCurrentIndex(3)
self.parent().parent().parent().TH_S_btn.setChecked(True)
self.parent().parent().parent().TH_P_btn.setChecked(True)
x = H
y = T
crit_x = H_c
crit_y = T_c
# isobars, isentropes
pressures = P_list
entropies = S_list
self.isobars = []
self.isentropes = []
for i in range(len(pressures)):
t_vals, v_vals, s_vals, h_vals = tp.get_isobar(pressures[i], fractions, minimum_temperature=200.0,
maximum_temperature=500.0, nmax=100)
if i == 0:
p_line, = self.axes.plot(h_vals, t_vals, color=isopleth_1_color, label="Isobar")
else:
p_line, = self.axes.plot(h_vals, t_vals, color=isopleth_1_color)
self.isobars.append(p_line)
t_vals, p_vals, v_vals, h_vals = tp.get_isentrope(entropies[i], fractions, minimum_pressure=P_min,
maximum_pressure=P_max, minimum_temperature=T_min,
maximum_temperature=T_max, nmax=nmax)
if i == 0:
s_line, = self.axes.plot(h_vals, t_vals, color=isopleth_2_color, label="Isentrope")
else:
s_line, = self.axes.plot(h_vals, t_vals, color=isopleth_2_color)
self.isentropes.append(s_line)
self.isenthalps = None
self.isotherms = None
return x, y, crit_x, crit_y
def plot_envelope_TS(self, tp, T, S, T_c, S_c, fractions):
"""
        Return plot data for a TS phase envelope
:param tp: Thermopack instance
:param T: Temperature values
:param S: Entropy values
:param T_c: Critical temperature
:param S_c: Critical entropy
:param fractions: List of molar fractions
:return: x: x values for plot,
y: y values for plot,
crit_x: x value for critical point,
crit_y: y value for critical point,
"""
# Display correct buttons
self.parent().parent().parent().isopleth_btn_stack.setCurrentIndex(4)
self.parent().parent().parent().TS_P_btn.setChecked(True)
self.parent().parent().parent().TS_H_btn.setChecked(True)
x = S
y = T
crit_x = S_c
crit_y = T_c
# Isenthalps, isobars
pressures = P_list
enthalpies = H_list
self.isenthalps = []
self.isobars = []
for i in range(len(pressures)):
t_vals, v_vals, s_vals, h_vals = tp.get_isobar(pressures[i], fractions, minimum_temperature=T_min,
maximum_temperature=T_max)
if i == 0:
p_line, = self.axes.plot(s_vals, t_vals, color="#ffd2d2", label="Isobar")
else:
p_line, = self.axes.plot(s_vals, t_vals, color="#ffd2d2")
self.isobars.append(p_line)
t_vals, p_vals, v_vals, s_vals = tp.get_isenthalp(enthalpies[i], fractions, minimum_pressure=P_min,
maximum_pressure=P_max, minimum_temperature=T_min,
maximum_temperature=T_max, nmax=nmax)
if i == 0:
                h_line, = self.axes.plot(s_vals, t_vals, color="#d5d3ff", label="Isenthalp")
            else:
                h_line, = self.axes.plot(s_vals, t_vals, color="#d5d3ff")
self.isenthalps.append(h_line)
self.isentropes = None
self.isotherms = None
return x, y, crit_x, crit_y
def plot_binary_pxy(self, tp):
"""
Plots a binary pxy plot
:param tp: Thermopack instance
"""
calc_settings = self.plotting_preferences["Binary pxy"]["Calc"]
plot_settings = self.plotting_preferences["Binary pxy"]["Plotting"]
T = calc_settings["Temperature"]
p_max = calc_settings["Maximum pressure"]
p_min = calc_settings["Minimum pressure"]
dz_max = calc_settings["Maximum dz"]
dlns_max = calc_settings["Maximum dlns"]
LLE, L1VE, L2VE = tp.get_binary_pxy(temp=T, maximum_pressure=p_max, minimum_pressure=p_min,
maximum_dz=dz_max, maximum_dlns=dlns_max)
line_color = plot_settings["Colors"][0]
if LLE[0] is not None:
self.axes.plot(LLE[0], LLE[2], color=line_color)
self.axes.plot(LLE[1], LLE[2], color=line_color)
if L1VE[0] is not None:
self.axes.plot(L1VE[0], L1VE[2], color=line_color)
self.axes.plot(L1VE[1], L1VE[2], color=line_color)
if L2VE[0] is not None:
self.axes.plot(L2VE[0], L2VE[2], color=line_color)
self.axes.plot(L2VE[1], L2VE[2], color=line_color)
grid_on = plot_settings["Grid on"]
title = plot_settings["Title"]
xlabel = plot_settings["x label"]
ylabel = plot_settings["y label"]
self.axes.grid(grid_on)
self.axes.set_title(title)
self.axes.set_xlabel(xlabel)
self.axes.set_ylabel(ylabel)
self.draw()
def plot_pressure_density(self, tp, fractions):
"""
Plots a pressure density plot
:param tp: Thermopack instance
:param fractions: List of molar fractions
"""
calc_settings = self.plotting_preferences["Pressure density"]["Calc"]
tpv_settings = self.plotting_preferences["Pressure density"]["TPV"]
crit_settings = self.plotting_preferences["Pressure density"]["Critical"]
plot_settings = self.plotting_preferences["Pressure density"]["Plotting"]
p_initial = tpv_settings["Initial pressure"]
t_min = tpv_settings["Minimum temperature"]
p_max = tpv_settings["Maximum pressure"]
step_size = tpv_settings["Step size"]
# Calculate T, P, V
T_ph_env, P_ph_env, V_ph_env = tp.get_envelope_twophase(initial_pressure=p_initial, z=fractions,
maximum_pressure=p_max,
minimum_temperature=t_min, step_size=step_size,
calc_v=True)
crit_t_guess = crit_settings["Temperature"]
crit_v_guess = crit_settings["Volume"]
crit_tol = crit_settings["Error tolerance"]
# Calculate critical T, V, P
T_c, V_c, P_c = tp.critical(n=fractions, temp=crit_t_guess, v=crit_v_guess, tol=crit_tol)
T_list = calc_settings["Temperatures"]
V_start = V_c * calc_settings["Volume range start"]
V_end = V_c * calc_settings["Volume range end"]
V_num_points = calc_settings["Num points"]
V_list = np.linspace(V_start, V_end, V_num_points)
P_lists = []
for T in T_list:
P_list = []
for V in V_list:
P, = tp.pressure_tv(temp=T, volume=V, n=fractions)
P_list.append(P)
P_lists.append(P_list)
rho_list = 1 / V_list
title = plot_settings["Title"]
grid_on = plot_settings["Grid on"]
xlabel = plot_settings["x label"]
ylabel = plot_settings["y label"]
self.axes.plot([1 / v for v in V_ph_env], P_ph_env, label="Phase envelope")
self.axes.scatter([1 / V_c], [P_c], label="Critical point")
for i in range(len(P_lists)):
self.axes.plot(rho_list, P_lists[i], label=str(T_list[i]) + " K")
self.axes.set_title(title)
self.axes.grid(grid_on)
self.axes.set_xlabel(xlabel)
self.axes.set_ylabel(ylabel)
self.axes.legend(loc="best")
self.draw()
def plot_global_binary(self, tp):
"""
        Plots a global binary phase diagram
:param tp: Thermopack instance
"""
calc_settings = self.plotting_preferences["Global binary"]["Calc"]
plot_settings = self.plotting_preferences["Global binary"]["Plotting"]
min_press = calc_settings["Minimum pressure"]
min_temp = calc_settings["Minimum temperature"]
azeotropes = calc_settings["Azeotropes"]
KSTYPE, VLE, LLVE, CRIT, AZ = tp.global_binary_plot(minimum_pressure=min_press, minimum_temperature=min_temp,
include_azeotropes=azeotropes)
colors = plot_settings["Colors"]
linestyles = ["-", "--", ":", "-."]
label = "VLE"
for i in range(len(VLE)):
self.axes.plot(VLE[i][:, 0], VLE[i][:, 1], linestyle=linestyles[0], color=colors[0], label=label)
label = None
label = "LLVE"
for i in range(len(LLVE)):
self.axes.plot(LLVE[i][:, 0], LLVE[i][:, 1], linestyle=linestyles[1], color=colors[1], label=label)
label = None
label = "CRIT"
for i in range(len(CRIT)):
self.axes.plot(CRIT[i][:, 0], CRIT[i][:, 1], linestyle=linestyles[2], color=colors[2], label=label)
label = None
label = "AZ"
for i in range(len(AZ)):
self.axes.plot(AZ[i][:, 0], AZ[i][:, 1], linestyle=linestyles[3], color=colors[3], label=label)
label = None
ks_strings = {
1: "I",
2: "II",
3: "III",
4: "IV",
5: "V"
}
title = plot_settings["Title"]
xlabel = plot_settings["x label"]
ylabel = plot_settings["y label"]
grid_on = plot_settings["Grid on"]
if title == "<NAME> and Scott type: ":
title += ks_strings[KSTYPE]
self.axes.set_title(title)
legend = self.axes.legend(loc="best", numpoints=1)
legend.get_frame().set_linewidth(0.0)
self.axes.set_xlabel(xlabel)
self.axes.set_ylabel(ylabel)
self.axes.grid(grid_on)
self.draw()
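
# Illustrative note (not part of the original class): the isopleth loops above
# attach a legend label only to the first curve of each family (i == 0), so the
# legend shows one entry per family rather than one entry per curve. Below is a
# minimal, standalone matplotlib sketch of that labeling pattern; it assumes
# nothing about the thermopack API.
def _isopleth_label_demo():
    import numpy as np
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    xs = np.linspace(0.0, 1.0, 50)
    for i, level in enumerate([1.0, 2.0, 3.0]):
        # Only the first curve of the family carries the legend label.
        ax.plot(xs, level * xs, color="tab:blue",
                label="Isopleth" if i == 0 else None)
    ax.legend(loc="best")
    return fig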
|
[
"matplotlib.figure.Figure",
"numpy.max",
"numpy.min",
"numpy.linspace"
] |
[((369, 384), 'matplotlib.figure.Figure', 'Figure', ([], {'dpi': '(100)'}), '(dpi=100)\n', (375, 384), False, 'from matplotlib.figure import Figure\n'), ((21817, 21858), 'numpy.linspace', 'np.linspace', (['V_start', 'V_end', 'V_num_points'], {}), '(V_start, V_end, V_num_points)\n', (21828, 21858), True, 'import numpy as np\n'), ((4167, 4176), 'numpy.min', 'np.min', (['H'], {}), '(H)\n', (4173, 4176), True, 'import numpy as np\n'), ((4178, 4187), 'numpy.max', 'np.max', (['H'], {}), '(H)\n', (4184, 4187), True, 'import numpy as np\n'), ((4231, 4240), 'numpy.min', 'np.min', (['S'], {}), '(S)\n', (4237, 4240), True, 'import numpy as np\n'), ((4242, 4251), 'numpy.max', 'np.max', (['S'], {}), '(S)\n', (4248, 4251), True, 'import numpy as np\n'), ((4295, 4304), 'numpy.min', 'np.min', (['T'], {}), '(T)\n', (4301, 4304), True, 'import numpy as np\n'), ((4313, 4322), 'numpy.max', 'np.max', (['T'], {}), '(T)\n', (4319, 4322), True, 'import numpy as np\n'), ((4373, 4382), 'numpy.min', 'np.min', (['P'], {}), '(P)\n', (4379, 4382), True, 'import numpy as np\n'), ((4391, 4400), 'numpy.max', 'np.max', (['P'], {}), '(P)\n', (4397, 4400), True, 'import numpy as np\n')]
|
import argparse
import json
from time import time
import os
import shutil
import numpy as np
import torch
from datasets.oxford import get_dataloaders
from datasets.boreas import get_dataloaders_boreas
from datasets.radiate import get_dataloaders_radiate
from networks.under_the_radar import UnderTheRadar
from networks.hero import HERO
from utils.utils import get_transform2, get_T_ba, computeKittiMetrics, computeMedianError
from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, \
draw_weights, draw_keypoints, draw_src_tgt_matches
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.enabled = True
torch.backends.cudnn.deterministic = True
def build_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, required=True)
parser.add_argument('--checkpoint', type=str, required=True)
parser.add_argument('-no-vis', '--no-visualization', action='store_true')
parser.add_argument('-out-fld', '--out-folder', type=str, required=True)
return parser
def makedirs_for_visualization(out_folder):
os.makedirs(os.path.join(out_folder, 'radar'), exist_ok=True)
os.makedirs(os.path.join(out_folder, 'mask'), exist_ok=True)
os.makedirs(os.path.join(out_folder, 'masked_radar_vis'), exist_ok=True)
os.makedirs(os.path.join(out_folder, 'detector_scores'), exist_ok=True)
os.makedirs(os.path.join(out_folder, 'weights'), exist_ok=True)
os.makedirs(os.path.join(out_folder, 'keypoints'), exist_ok=True)
os.makedirs(os.path.join(out_folder, 'keypoints_only_masked'), exist_ok=True)
os.makedirs(os.path.join(out_folder, 'keypoints_all'), exist_ok=True)
os.makedirs(os.path.join(out_folder, 'keypoints_on_detector_scores'), exist_ok=True)
os.makedirs(os.path.join(out_folder, 'keypoints_on_detector_scores_only_masked'), exist_ok=True)
os.makedirs(os.path.join(out_folder, 'keypoints_on_detector_scores_all'), exist_ok=True)
os.makedirs(os.path.join(out_folder, 'src_tgt_matches'), exist_ok=True)
os.makedirs(os.path.join(out_folder, 'src_tgt_matches_only_masked'), exist_ok=True)
os.makedirs(os.path.join(out_folder, 'src_tgt_matches_all'), exist_ok=True)
os.makedirs(os.path.join(out_folder, 'src_tgt_matches_on_detector_scores'), exist_ok=True)
os.makedirs(os.path.join(out_folder, 'src_tgt_matches_on_detector_scores_only_masked'), exist_ok=True)
os.makedirs(os.path.join(out_folder, 'src_tgt_matches_on_detector_scores_all'), exist_ok=True)
def visualize(batchi, batch, out, config, out_folder):
radar_img = draw_radar(batch, i=1)
radar_img.save(os.path.join(out_folder, 'radar/radar_{}.png'.format(batchi+1)))
mask_img = draw_mask(batch, i=1)
mask_img.save(os.path.join(out_folder, 'mask/mask_{}.png'.format(batchi+1)))
masked_radar_img = draw_masked_radar(batch, i=1)
masked_radar_img.save(os.path.join(out_folder, 'masked_radar_vis/masked_radar_vis_{}.png'.format(batchi+1)))
detector_scores_img = draw_detector_scores(out, i=1)
detector_scores_img.save(os.path.join(out_folder, 'detector_scores/detector_scores_{}.png'.format(batchi+1)))
weights_img = draw_weights(out, i=1)
weights_img.save(os.path.join(out_folder, 'weights/weights_{}.png'.format(batchi+1)))
keypoints_img = draw_keypoints(batch, out, config, i=1, draw_uncertainty_scale=20)
keypoints_img.save(os.path.join(out_folder, 'keypoints/keypoints_{}.png'.format(batchi+1)))
keypoints_only_masked_img = draw_keypoints(batch, out, config, i=1, filtering='mask')
keypoints_only_masked_img.save(os.path.join(out_folder, 'keypoints_only_masked/keypoints_only_masked_{}.png'.format(batchi+1)))
keypoints_all_img = draw_keypoints(batch, out, config, i=1, filtering='none')
keypoints_all_img.save(os.path.join(out_folder, 'keypoints_all/keypoints_all_{}.png'.format(batchi+1)))
keypoints_on_detector_scores_img = draw_keypoints(batch, out, config, i=1, draw_on='detector_scores', draw_uncertainty_scale=20)
keypoints_on_detector_scores_img.save(os.path.join(out_folder,
'keypoints_on_detector_scores/keypoints_on_detector_scores_{}.png'.format(batchi+1)))
keypoints_on_detector_scores_only_masked_img = draw_keypoints(batch, out, config, i=1, draw_on='detector_scores', filtering='mask')
keypoints_on_detector_scores_only_masked_img.save(os.path.join(out_folder,
'keypoints_on_detector_scores_only_masked/keypoints_on_detector_scores_only_masked_{}.png'.format(batchi+1)))
keypoints_on_detector_scores_all_img = draw_keypoints(batch, out, config, i=1, draw_on='detector_scores', filtering='none')
keypoints_on_detector_scores_all_img.save(os.path.join(out_folder,
'keypoints_on_detector_scores_all/keypoints_on_detector_scores_all_{}.png'.format(batchi+1)))
src_tgt_matches_img = draw_src_tgt_matches(batch, out, config, draw_uncertainty_scale=20)
src_tgt_matches_img.save(os.path.join(out_folder,
'src_tgt_matches/src_tgt_matches_{}.png'.format(batchi)))
src_tgt_matches_only_masked_img = draw_src_tgt_matches(batch, out, config, filtering='mask')
src_tgt_matches_only_masked_img.save(os.path.join(out_folder,
'src_tgt_matches_only_masked/src_tgt_matches_only_masked_{}.png'.format(batchi)))
src_tgt_matches_all_img = draw_src_tgt_matches(batch, out, config, filtering='none')
src_tgt_matches_all_img.save(os.path.join(out_folder,
'src_tgt_matches_all/src_tgt_matches_all_{}.png'.format(batchi)))
src_tgt_matches_on_detector_scores_img = draw_src_tgt_matches(batch, out, config, draw_on='detector_scores', draw_uncertainty_scale=20)
src_tgt_matches_on_detector_scores_img.save(os.path.join(out_folder,
'src_tgt_matches_on_detector_scores/src_tgt_matches_on_detector_scores_{}.png'.format(batchi)))
src_tgt_matches_on_detector_scores_only_masked_img = draw_src_tgt_matches(batch, out, config, draw_on='detector_scores', filtering='mask')
src_tgt_matches_on_detector_scores_only_masked_img.save(os.path.join(out_folder,
'src_tgt_matches_on_detector_scores_only_masked/src_tgt_matches_on_detector_scores_only_masked_{}.png'.format(batchi)))
src_tgt_matches_on_detector_scores_all_img = draw_src_tgt_matches(batch, out, config, draw_on='detector_scores', filtering='none')
src_tgt_matches_on_detector_scores_all_img.save(os.path.join(out_folder,
'src_tgt_matches_on_detector_scores_all/src_tgt_matches_on_detector_scores_all_{}.png'.format(batchi)))
def print_used_time(model):
print("Time used:")
print(" All: {} s".format(np.mean(model.time_used['all'])))
print(" Feature map extraction: {} s".format(np.mean(model.time_used['feature_map_extraction'])))
print(" Keypoint extraction: {} s".format(np.mean(model.time_used['keypoint_extraction'])))
print(" Keypoint matching: {} s".format(np.mean(model.time_used['keypoint_matching'])))
print(" Optimization: {} s".format(np.mean(model.time_used['optimization'])))
if __name__ == '__main__':
torch.set_num_threads(8)
parser = build_parser()
args = parser.parse_args()
out_folder = args.out_folder
with_visualization = not args.no_visualization
os.makedirs(out_folder, exist_ok=True)
with open(args.config) as f:
config = json.load(f)
config_copy = os.path.join(out_folder, os.path.basename(args.config))
if args.config != config_copy:
shutil.copy(args.config, config_copy)
if config['model'] == 'UnderTheRadar':
model = UnderTheRadar(config).to(config['gpuid'])
elif config['model'] == 'HERO':
model = HERO(config).to(config['gpuid'])
model.solver.sliding_flag = False
checkpoint = torch.load(args.checkpoint, map_location=torch.device(config['gpuid']))
failed = False
try:
model.load_state_dict(checkpoint['model_state_dict'], strict=False)
except Exception as e:
print(e)
failed = True
if failed:
model.load_state_dict(checkpoint, strict=False)
model.eval()
model.no_throw = True
seq_name_all = list()
time_used_all = list()
T_gt_all = list()
T_pred_all = list()
t_err_all = list()
r_err_all = list()
seq_nums = config['test_split']
for seq_num in seq_nums:
config['test_split'] = [seq_num]
if config['dataset'] == 'oxford':
_, _, test_loader = get_dataloaders(config)
elif config['dataset'] == 'boreas':
_, _, test_loader = get_dataloaders_boreas(config)
elif config['dataset'] == 'radiate':
_, _, test_loader = get_dataloaders_radiate(config)
seq_len = test_loader.dataset.seq_lens[0]
seq_name = test_loader.dataset.sequences[0]
time_used = list()
T_gt = list()
T_pred = list()
print('Evaluating sequence {} (len {}): {}'.format(seq_num, seq_len, seq_name))
if with_visualization:
out_vis_folder = os.path.join(out_folder, seq_name)
makedirs_for_visualization(out_vis_folder)
model.solver.solver_cpp.resetTraj()
for batchi, batch in enumerate(test_loader):
ts = time()
with torch.no_grad():
out = model(batch)
if out['exception'] is not None:
fail_folder = os.path.join(out_folder, 'failed_{}'.format(batchi))
os.makedirs(fail_folder, exist_ok=True)
makedirs_for_visualization(fail_folder)
visualize(batchi, batch, out, config, fail_folder)
print_used_time(model)
raise out['exception']
if with_visualization and batchi % config['vis_rate'] == 0:
visualize(batchi, batch, out, config, out_vis_folder)
if config['model'] == 'UnderTheRadar':
if 'T_21' in batch:
T_gt.append(batch['T_21'][0].numpy().squeeze())
R_pred = out['R'][0].detach().cpu().numpy().squeeze()
t_pred = out['t'][0].detach().cpu().numpy().squeeze()
T_pred.append(get_transform2(R_pred, t_pred))
elif config['model'] == 'HERO':
if batchi == len(test_loader) - 1:
for w in range(config['window_size'] - 1):
if 'T_21' in batch:
T_gt.append(batch['T_21'][w].numpy().squeeze())
T_pred.append(get_T_ba(out, a=w, b=w+1))
else:
w = 0
if 'T_21' in batch:
T_gt.append(batch['T_21'][w].numpy().squeeze())
T_pred.append(get_T_ba(out, a=w, b=w+1))
time_used.append(time() - ts)
if (batchi + 1) % config['print_rate'] == 0:
print('Eval Batch {} / {}: {:.2}s'.format(batchi, len(test_loader), np.mean(time_used[-config['print_rate']:])))
time_used_all.extend(time_used)
if len(T_gt) > 0:
seq_name_all.append(seq_name)
T_gt_all.extend(T_gt)
T_pred_all.extend(T_pred)
t_err, r_err = computeKittiMetrics(T_gt, T_pred, [len(T_gt)])
print('SEQ: {} : {}'.format(seq_num, seq_name))
print('KITTI t_err: {} %'.format(t_err))
print('KITTI r_err: {} deg/m'.format(r_err))
t_err_all.append(t_err)
r_err_all.append(r_err)
fname = os.path.join(out_folder, seq_name + '.png')
if len(T_gt) > 0:
plot_sequences(T_gt, T_pred, [len(T_pred)], returnTensor=False, savePDF=True, fnames=[fname])
else:
plot_sequences(T_pred, T_pred, [len(T_pred)], returnTensor=False, savePDF=True, fnames=[fname])
print('time_used: {}'.format(sum(time_used_all) / len(time_used_all)))
if len(T_gt_all) > 0:
results = computeMedianError(T_gt_all, T_pred_all)
print('dt: {} sigma_dt: {} dr: {} sigma_dr: {}'.format(results[0], results[1], results[2], results[3]))
t_err_mean = np.mean(t_err_all)
r_err_mean = np.mean(r_err_all)
print('Average KITTI metrics over all test sequences:')
print('KITTI t_err: {} %'.format(t_err_mean))
print('KITTI r_err: {} deg/m'.format(r_err_mean))
with open(os.path.join(out_folder, 'metrics.txt'), 'w') as f:
f.write('sequence name: translation error (%) rotation error (deg/m)\n')
for seq_name, t_err, r_err in zip(seq_name_all, t_err_all, r_err_all):
line = '{}: {} {}\n'.format(seq_name, t_err, r_err)
f.write(line)
f.write("\n")
f.write("mean: {} {}\n".format(t_err_mean, r_err_mean))
print_used_time(model)
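
# Example invocation (illustrative; the script and file names below are
# hypothetical and must point at an existing config JSON, checkpoint and
# output folder):
#
#   python3 evaluate_odometry.py \
#       --config config/steam.json \
#       --checkpoint logs/latest.pt \
#       --out-folder results/eval_run \
#       --no-visualization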
|
[
"argparse.ArgumentParser",
"utils.vis.draw_masked_radar",
"utils.vis.draw_src_tgt_matches",
"utils.vis.draw_radar",
"torch.set_num_threads",
"numpy.mean",
"datasets.boreas.get_dataloaders_boreas",
"torch.device",
"datasets.radiate.get_dataloaders_radiate",
"torch.no_grad",
"os.path.join",
"shutil.copy",
"utils.vis.draw_mask",
"networks.under_the_radar.UnderTheRadar",
"utils.utils.computeMedianError",
"datasets.oxford.get_dataloaders",
"os.path.basename",
"utils.utils.get_transform2",
"utils.utils.get_T_ba",
"utils.vis.draw_weights",
"json.load",
"os.makedirs",
"utils.vis.draw_detector_scores",
"time.time",
"networks.hero.HERO",
"utils.vis.draw_keypoints"
] |
[((739, 764), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (762, 764), False, 'import argparse\n'), ((2589, 2611), 'utils.vis.draw_radar', 'draw_radar', (['batch'], {'i': '(1)'}), '(batch, i=1)\n', (2599, 2611), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((2712, 2733), 'utils.vis.draw_mask', 'draw_mask', (['batch'], {'i': '(1)'}), '(batch, i=1)\n', (2721, 2733), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((2839, 2868), 'utils.vis.draw_masked_radar', 'draw_masked_radar', (['batch'], {'i': '(1)'}), '(batch, i=1)\n', (2856, 2868), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((3009, 3039), 'utils.vis.draw_detector_scores', 'draw_detector_scores', (['out'], {'i': '(1)'}), '(out, i=1)\n', (3029, 3039), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((3173, 3195), 'utils.vis.draw_weights', 'draw_weights', (['out'], {'i': '(1)'}), '(out, i=1)\n', (3185, 3195), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((3307, 3373), 'utils.vis.draw_keypoints', 'draw_keypoints', (['batch', 'out', 'config'], {'i': '(1)', 'draw_uncertainty_scale': '(20)'}), '(batch, out, config, i=1, draw_uncertainty_scale=20)\n', (3321, 3373), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((3503, 3560), 'utils.vis.draw_keypoints', 'draw_keypoints', (['batch', 'out', 'config'], {'i': '(1)', 'filtering': '"""mask"""'}), "(batch, out, config, i=1, filtering='mask')\n", (3517, 3560), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((3718, 3775), 'utils.vis.draw_keypoints', 'draw_keypoints', (['batch', 'out', 'config'], {'i': '(1)', 'filtering': '"""none"""'}), "(batch, out, config, i=1, filtering='none')\n", (3732, 3775), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((3924, 4021), 'utils.vis.draw_keypoints', 'draw_keypoints', (['batch', 'out', 'config'], {'i': '(1)', 'draw_on': '"""detector_scores"""', 'draw_uncertainty_scale': '(20)'}), "(batch, out, config, i=1, draw_on='detector_scores',\n draw_uncertainty_scale=20)\n", (3938, 4021), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((4231, 4319), 'utils.vis.draw_keypoints', 'draw_keypoints', (['batch', 'out', 'config'], {'i': '(1)', 'draw_on': '"""detector_scores"""', 'filtering': '"""mask"""'}), "(batch, out, config, i=1, draw_on='detector_scores',\n filtering='mask')\n", (4245, 4319), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((4557, 4645), 'utils.vis.draw_keypoints', 
'draw_keypoints', (['batch', 'out', 'config'], {'i': '(1)', 'draw_on': '"""detector_scores"""', 'filtering': '"""none"""'}), "(batch, out, config, i=1, draw_on='detector_scores',\n filtering='none')\n", (4571, 4645), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((4842, 4909), 'utils.vis.draw_src_tgt_matches', 'draw_src_tgt_matches', (['batch', 'out', 'config'], {'draw_uncertainty_scale': '(20)'}), '(batch, out, config, draw_uncertainty_scale=20)\n', (4862, 4909), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((5069, 5127), 'utils.vis.draw_src_tgt_matches', 'draw_src_tgt_matches', (['batch', 'out', 'config'], {'filtering': '"""mask"""'}), "(batch, out, config, filtering='mask')\n", (5089, 5127), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((5315, 5373), 'utils.vis.draw_src_tgt_matches', 'draw_src_tgt_matches', (['batch', 'out', 'config'], {'filtering': '"""none"""'}), "(batch, out, config, filtering='none')\n", (5335, 5373), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((5552, 5650), 'utils.vis.draw_src_tgt_matches', 'draw_src_tgt_matches', (['batch', 'out', 'config'], {'draw_on': '"""detector_scores"""', 'draw_uncertainty_scale': '(20)'}), "(batch, out, config, draw_on='detector_scores',\n draw_uncertainty_scale=20)\n", (5572, 5650), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((5882, 5971), 'utils.vis.draw_src_tgt_matches', 'draw_src_tgt_matches', (['batch', 'out', 'config'], {'draw_on': '"""detector_scores"""', 'filtering': '"""mask"""'}), "(batch, out, config, draw_on='detector_scores',\n filtering='mask')\n", (5902, 5971), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((6231, 6320), 'utils.vis.draw_src_tgt_matches', 'draw_src_tgt_matches', (['batch', 'out', 'config'], {'draw_on': '"""detector_scores"""', 'filtering': '"""none"""'}), "(batch, out, config, draw_on='detector_scores',\n filtering='none')\n", (6251, 6320), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((7034, 7058), 'torch.set_num_threads', 'torch.set_num_threads', (['(8)'], {}), '(8)\n', (7055, 7058), False, 'import torch\n'), ((7207, 7245), 'os.makedirs', 'os.makedirs', (['out_folder'], {'exist_ok': '(True)'}), '(out_folder, exist_ok=True)\n', (7218, 7245), False, 'import os\n'), ((1126, 1159), 'os.path.join', 'os.path.join', (['out_folder', '"""radar"""'], {}), "(out_folder, 'radar')\n", (1138, 1159), False, 'import os\n'), ((1192, 1224), 'os.path.join', 'os.path.join', (['out_folder', '"""mask"""'], {}), "(out_folder, 'mask')\n", (1204, 1224), False, 'import os\n'), ((1257, 1301), 'os.path.join', 'os.path.join', (['out_folder', '"""masked_radar_vis"""'], {}), "(out_folder, 'masked_radar_vis')\n", (1269, 1301), False, 'import os\n'), ((1334, 1377), 'os.path.join', 'os.path.join', 
(['out_folder', '"""detector_scores"""'], {}), "(out_folder, 'detector_scores')\n", (1346, 1377), False, 'import os\n'), ((1410, 1445), 'os.path.join', 'os.path.join', (['out_folder', '"""weights"""'], {}), "(out_folder, 'weights')\n", (1422, 1445), False, 'import os\n'), ((1478, 1515), 'os.path.join', 'os.path.join', (['out_folder', '"""keypoints"""'], {}), "(out_folder, 'keypoints')\n", (1490, 1515), False, 'import os\n'), ((1548, 1597), 'os.path.join', 'os.path.join', (['out_folder', '"""keypoints_only_masked"""'], {}), "(out_folder, 'keypoints_only_masked')\n", (1560, 1597), False, 'import os\n'), ((1630, 1671), 'os.path.join', 'os.path.join', (['out_folder', '"""keypoints_all"""'], {}), "(out_folder, 'keypoints_all')\n", (1642, 1671), False, 'import os\n'), ((1704, 1760), 'os.path.join', 'os.path.join', (['out_folder', '"""keypoints_on_detector_scores"""'], {}), "(out_folder, 'keypoints_on_detector_scores')\n", (1716, 1760), False, 'import os\n'), ((1793, 1861), 'os.path.join', 'os.path.join', (['out_folder', '"""keypoints_on_detector_scores_only_masked"""'], {}), "(out_folder, 'keypoints_on_detector_scores_only_masked')\n", (1805, 1861), False, 'import os\n'), ((1894, 1954), 'os.path.join', 'os.path.join', (['out_folder', '"""keypoints_on_detector_scores_all"""'], {}), "(out_folder, 'keypoints_on_detector_scores_all')\n", (1906, 1954), False, 'import os\n'), ((1987, 2030), 'os.path.join', 'os.path.join', (['out_folder', '"""src_tgt_matches"""'], {}), "(out_folder, 'src_tgt_matches')\n", (1999, 2030), False, 'import os\n'), ((2063, 2118), 'os.path.join', 'os.path.join', (['out_folder', '"""src_tgt_matches_only_masked"""'], {}), "(out_folder, 'src_tgt_matches_only_masked')\n", (2075, 2118), False, 'import os\n'), ((2151, 2198), 'os.path.join', 'os.path.join', (['out_folder', '"""src_tgt_matches_all"""'], {}), "(out_folder, 'src_tgt_matches_all')\n", (2163, 2198), False, 'import os\n'), ((2231, 2293), 'os.path.join', 'os.path.join', (['out_folder', '"""src_tgt_matches_on_detector_scores"""'], {}), "(out_folder, 'src_tgt_matches_on_detector_scores')\n", (2243, 2293), False, 'import os\n'), ((2326, 2400), 'os.path.join', 'os.path.join', (['out_folder', '"""src_tgt_matches_on_detector_scores_only_masked"""'], {}), "(out_folder, 'src_tgt_matches_on_detector_scores_only_masked')\n", (2338, 2400), False, 'import os\n'), ((2433, 2499), 'os.path.join', 'os.path.join', (['out_folder', '"""src_tgt_matches_on_detector_scores_all"""'], {}), "(out_folder, 'src_tgt_matches_on_detector_scores_all')\n", (2445, 2499), False, 'import os\n'), ((7297, 7309), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7306, 7309), False, 'import json\n'), ((7353, 7382), 'os.path.basename', 'os.path.basename', (['args.config'], {}), '(args.config)\n', (7369, 7382), False, 'import os\n'), ((7427, 7464), 'shutil.copy', 'shutil.copy', (['args.config', 'config_copy'], {}), '(args.config, config_copy)\n', (7438, 7464), False, 'import shutil\n'), ((11434, 11477), 'os.path.join', 'os.path.join', (['out_folder', "(seq_name + '.png')"], {}), "(out_folder, seq_name + '.png')\n", (11446, 11477), False, 'import os\n'), ((11853, 11893), 'utils.utils.computeMedianError', 'computeMedianError', (['T_gt_all', 'T_pred_all'], {}), '(T_gt_all, T_pred_all)\n', (11871, 11893), False, 'from utils.utils import get_transform2, get_T_ba, computeKittiMetrics, computeMedianError\n'), ((12028, 12046), 'numpy.mean', 'np.mean', (['t_err_all'], {}), '(t_err_all)\n', (12035, 12046), True, 'import numpy as np\n'), ((12068, 12086), 'numpy.mean', 
'np.mean', (['r_err_all'], {}), '(r_err_all)\n', (12075, 12086), True, 'import numpy as np\n'), ((6591, 6622), 'numpy.mean', 'np.mean', (["model.time_used['all']"], {}), "(model.time_used['all'])\n", (6598, 6622), True, 'import numpy as np\n'), ((6675, 6725), 'numpy.mean', 'np.mean', (["model.time_used['feature_map_extraction']"], {}), "(model.time_used['feature_map_extraction'])\n", (6682, 6725), True, 'import numpy as np\n'), ((6775, 6822), 'numpy.mean', 'np.mean', (["model.time_used['keypoint_extraction']"], {}), "(model.time_used['keypoint_extraction'])\n", (6782, 6822), True, 'import numpy as np\n'), ((6870, 6915), 'numpy.mean', 'np.mean', (["model.time_used['keypoint_matching']"], {}), "(model.time_used['keypoint_matching'])\n", (6877, 6915), True, 'import numpy as np\n'), ((6958, 6998), 'numpy.mean', 'np.mean', (["model.time_used['optimization']"], {}), "(model.time_used['optimization'])\n", (6965, 6998), True, 'import numpy as np\n'), ((7753, 7782), 'torch.device', 'torch.device', (["config['gpuid']"], {}), "(config['gpuid'])\n", (7765, 7782), False, 'import torch\n'), ((8394, 8417), 'datasets.oxford.get_dataloaders', 'get_dataloaders', (['config'], {}), '(config)\n', (8409, 8417), False, 'from datasets.oxford import get_dataloaders\n'), ((8959, 8993), 'os.path.join', 'os.path.join', (['out_folder', 'seq_name'], {}), '(out_folder, seq_name)\n', (8971, 8993), False, 'import os\n'), ((9164, 9170), 'time.time', 'time', ([], {}), '()\n', (9168, 9170), False, 'from time import time\n'), ((7525, 7546), 'networks.under_the_radar.UnderTheRadar', 'UnderTheRadar', (['config'], {}), '(config)\n', (7538, 7546), False, 'from networks.under_the_radar import UnderTheRadar\n'), ((8494, 8524), 'datasets.boreas.get_dataloaders_boreas', 'get_dataloaders_boreas', (['config'], {}), '(config)\n', (8516, 8524), False, 'from datasets.boreas import get_dataloaders_boreas\n'), ((9189, 9204), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9202, 9204), False, 'import torch\n'), ((9386, 9425), 'os.makedirs', 'os.makedirs', (['fail_folder'], {'exist_ok': '(True)'}), '(fail_folder, exist_ok=True)\n', (9397, 9425), False, 'import os\n'), ((12282, 12321), 'os.path.join', 'os.path.join', (['out_folder', '"""metrics.txt"""'], {}), "(out_folder, 'metrics.txt')\n", (12294, 12321), False, 'import os\n'), ((7619, 7631), 'networks.hero.HERO', 'HERO', (['config'], {}), '(config)\n', (7623, 7631), False, 'from networks.hero import HERO\n'), ((8602, 8633), 'datasets.radiate.get_dataloaders_radiate', 'get_dataloaders_radiate', (['config'], {}), '(config)\n', (8625, 8633), False, 'from datasets.radiate import get_dataloaders_radiate\n'), ((10096, 10126), 'utils.utils.get_transform2', 'get_transform2', (['R_pred', 't_pred'], {}), '(R_pred, t_pred)\n', (10110, 10126), False, 'from utils.utils import get_transform2, get_T_ba, computeKittiMetrics, computeMedianError\n'), ((10722, 10728), 'time.time', 'time', ([], {}), '()\n', (10726, 10728), False, 'from time import time\n'), ((10876, 10918), 'numpy.mean', 'np.mean', (["time_used[-config['print_rate']:]"], {}), "(time_used[-config['print_rate']:])\n", (10883, 10918), True, 'import numpy as np\n'), ((10665, 10692), 'utils.utils.get_T_ba', 'get_T_ba', (['out'], {'a': 'w', 'b': '(w + 1)'}), '(out, a=w, b=w + 1)\n', (10673, 10692), False, 'from utils.utils import get_transform2, get_T_ba, computeKittiMetrics, computeMedianError\n'), ((10444, 10471), 'utils.utils.get_T_ba', 'get_T_ba', (['out'], {'a': 'w', 'b': '(w + 1)'}), '(out, a=w, b=w + 1)\n', (10452, 10471), False, 'from 
utils.utils import get_transform2, get_T_ba, computeKittiMetrics, computeMedianError\n')]
|
import logging
import numpy as np
import scipy.sparse
from typing import Union
from .external import closedform_glm_mean, closedform_glm_scale
logger = logging.getLogger("batchglm")
def closedform_norm_glm_mean(
x: Union[np.ndarray, scipy.sparse.csr_matrix],
design_loc: np.ndarray,
constraints_loc,
size_factors=None,
link_fn=lambda x: x,
inv_link_fn=lambda x: x
):
r"""
Calculates a closed-form solution for the `mean` parameters of normal GLMs.
:param x: The sample data
:param design_loc: design matrix for location
:param constraints_loc: tensor (all parameters x dependent parameters)
        Tensor that encodes how the complete parameter set, which includes dependent
        parameters, arises from the independent parameters: all = <constraints, indep>.
This form of constraints is used in vector generalized linear models (VGLMs).
:param size_factors: size factors for X
:return: tuple: (groupwise_means, mean, rmsd)
"""
return closedform_glm_mean(
x=x,
dmat=design_loc,
constraints=constraints_loc,
size_factors=size_factors,
link_fn=link_fn,
inv_link_fn=inv_link_fn
)
def closedform_norm_glm_logsd(
x: Union[np.ndarray, scipy.sparse.csr_matrix],
design_scale: np.ndarray,
constraints=None,
size_factors=None,
groupwise_means=None,
link_fn=np.log
):
r"""
Calculates a closed-form solution for the log-scale parameters of normal GLMs.
:param x: The sample data
:param design_scale: design matrix for scale
:param constraints: some design constraints
:param size_factors: size factors for X
:param groupwise_means: optional, in case if already computed this can be specified to spare double-calculation
:return: tuple (groupwise_scales, logsd, rmsd)
"""
def compute_scales_fun(variance, mean):
groupwise_scales = np.sqrt(variance)
return groupwise_scales
return closedform_glm_scale(
x=x,
design_scale=design_scale,
constraints=constraints,
size_factors=size_factors,
groupwise_means=groupwise_means,
link_fn=link_fn,
compute_scales_fun=compute_scales_fun
)
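
# Illustrative sketch (not part of batchglm): with an identity location link,
# the closed-form location estimate of a normal GLM reduces to least squares on
# the design matrix, and a simple scale estimate follows from the residual
# standard deviation. Shapes and names below are assumptions made for this
# example only.
def _closedform_normal_sketch(x: np.ndarray, design_loc: np.ndarray):
    # x: (observations x features), design_loc: (observations x parameters)
    theta_loc, *_ = np.linalg.lstsq(design_loc, x, rcond=None)
    residuals = x - design_loc @ theta_loc
    # Per-feature log standard deviation as a pooled scale estimate.
    logsd = np.log(residuals.std(axis=0))
    return theta_loc, logsd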
|
[
"logging.getLogger",
"numpy.sqrt"
] |
[((154, 183), 'logging.getLogger', 'logging.getLogger', (['"""batchglm"""'], {}), "('batchglm')\n", (171, 183), False, 'import logging\n'), ((1961, 1978), 'numpy.sqrt', 'np.sqrt', (['variance'], {}), '(variance)\n', (1968, 1978), True, 'import numpy as np\n')]
|
import time
from multiworld.core.image_env import ImageEnv, unormalize_image, normalize_image
from rlkit.core import logger
import cv2
import numpy as np
import os.path as osp
from rlkit.samplers.data_collector.scalor_env import WrappedEnvPathCollector as SCALORWrappedEnvPathCollector
from rlkit.torch.scalor.scalor import SCALOR
from rlkit.util.video import dump_video
from rlkit.util.io import load_local_or_remote_file
import rlkit.torch.pytorch_util as ptu
import gym
import multiworld
def generate_scalor_dataset(variant):
env_kwargs = variant.get('env_kwargs', None)
env_id = variant.get('env_id', None)
N = variant.get('N', 100)
rollout_length = variant.get('rollout_length', 100)
test_p = variant.get('test_p', 0.9)
use_cached = variant.get('use_cached', True)
imsize = variant.get('imsize', 64)
num_channels = variant.get('num_channels', 3)
show = variant.get('show', False)
init_camera = variant.get('init_camera', None)
dataset_path = variant.get('dataset_path', None)
oracle_dataset_using_set_to_goal = variant.get(
'oracle_dataset_using_set_to_goal', False)
random_rollout_data = variant.get('random_rollout_data', False)
random_and_oracle_policy_data = variant.get('random_and_oracle_policy_data',
False)
random_and_oracle_policy_data_split = variant.get(
'random_and_oracle_policy_data_split', 0)
policy_file = variant.get('policy_file', None)
n_random_steps = 1
scalor_dataset_specific_env_kwargs = variant.get(
'scalor_dataset_specific_env_kwargs', None)
save_file_prefix = variant.get('save_file_prefix', None)
tag = variant.get('tag', '')
if env_kwargs is None:
env_kwargs = {}
if save_file_prefix is None:
save_file_prefix = env_id
filename = "./data/tmp/{}_N{}_rollout_length{}_imsize{}_{}{}.npz".format(
save_file_prefix,
str(N),
str(rollout_length),
        imsize,
        init_camera.__name__ if init_camera else '',
tag,
)
    import os
    if not osp.exists('./data/tmp/'):
        os.makedirs('./data/tmp/')
    info = {}
if use_cached and osp.isfile(filename):
dataset = np.load(filename)
print("loaded data from saved file", filename)
else:
now = time.time()
multiworld.register_all_envs()
env = gym.make(env_id)
if not isinstance(env, ImageEnv):
env = ImageEnv(
env,
imsize,
init_camera=init_camera,
transpose=True,
normalize=True,
non_presampled_goal_img_is_garbage=True,
)
env.reset()
act_dim = env.action_space.low.size
info['env'] = env
imgs = np.zeros((N, rollout_length, imsize * imsize * num_channels),
dtype=np.uint8)
actions = np.zeros((N, rollout_length, act_dim))
for i in range(N):
env.reset()
for j in range(rollout_length):
action = env.action_space.sample()
obs = env.step(action)[0]
img = obs['image_observation']
imgs[i, j, :] = unormalize_image(img)
actions[i,j, :] = action
if show:
img = img.reshape(3, imsize, imsize).transpose()
img = img[::-1, :, ::-1]
cv2.imshow('img', img)
cv2.waitKey(1)
print("done making training data", filename, time.time() - now)
dataset = {"imgs": imgs, "actions": actions}
print(imgs.shape)
# np.savez(filename, **dataset)
return dataset, info
def scalor_training(variant):
scalor_params = variant.get("scalor_params", dict())
scalor_params["logdir"] = logger.get_snapshot_dir()
scalor = SCALOR(**scalor_params)
data, info = generate_scalor_dataset(variant['generate_scalor_dataset_kwargs'])
imgs, actions = data["imgs"], data["actions"]
imgs = normalize_image(imgs)
scalor.train(imgs=imgs, actions=actions)
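
# Illustrative helper (not part of the original script): builds a minimal
# `variant` dict containing only the keys that scalor_training() and
# generate_scalor_dataset() read above. The env id is a placeholder for any
# registered multiworld image env.
def example_variant(env_id="SawyerPushNIPSEasy-v0"):
    return {
        "scalor_params": {},
        "generate_scalor_dataset_kwargs": {
            "env_id": env_id,
            "N": 100,
            "rollout_length": 100,
            "imsize": 64,
            "show": False,
        },
    }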
|
[
"numpy.load",
"multiworld.register_all_envs",
"multiworld.core.image_env.unormalize_image",
"os.makedirs",
"rlkit.torch.scalor.scalor.SCALOR",
"gym.make",
"cv2.waitKey",
"os.path.exists",
"numpy.zeros",
"multiworld.core.image_env.normalize_image",
"time.time",
"multiworld.core.image_env.ImageEnv",
"os.path.isfile",
"rlkit.core.logger.get_snapshot_dir",
"cv2.imshow"
] |
[((3941, 3966), 'rlkit.core.logger.get_snapshot_dir', 'logger.get_snapshot_dir', ([], {}), '()\n', (3964, 3966), False, 'from rlkit.core import logger\n'), ((3980, 4003), 'rlkit.torch.scalor.scalor.SCALOR', 'SCALOR', ([], {}), '(**scalor_params)\n', (3986, 4003), False, 'from rlkit.torch.scalor.scalor import SCALOR\n'), ((4149, 4170), 'multiworld.core.image_env.normalize_image', 'normalize_image', (['imgs'], {}), '(imgs)\n', (4164, 4170), False, 'from multiworld.core.image_env import ImageEnv, unormalize_image, normalize_image\n'), ((2095, 2120), 'os.path.exists', 'osp.exists', (['"""./data/tmp/"""'], {}), "('./data/tmp/')\n", (2105, 2120), True, 'import os.path as osp\n'), ((2130, 2156), 'os.makedirs', 'os.makedirs', (['"""./data/tmp/"""'], {}), "('./data/tmp/')\n", (2141, 2156), False, 'import os\n'), ((2196, 2225), 'os.path.exists', 'os.path.exists', (['"""./data/tmp/"""'], {}), "('./data/tmp/')\n", (2210, 2225), False, 'import os\n'), ((2235, 2261), 'os.makedirs', 'os.makedirs', (['"""./data/tmp/"""'], {}), "('./data/tmp/')\n", (2246, 2261), False, 'import os\n'), ((2284, 2304), 'os.path.isfile', 'osp.isfile', (['filename'], {}), '(filename)\n', (2294, 2304), True, 'import os.path as osp\n'), ((2324, 2341), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (2331, 2341), True, 'import numpy as np\n'), ((2421, 2432), 'time.time', 'time.time', ([], {}), '()\n', (2430, 2432), False, 'import time\n'), ((2441, 2471), 'multiworld.register_all_envs', 'multiworld.register_all_envs', ([], {}), '()\n', (2469, 2471), False, 'import multiworld\n'), ((2486, 2502), 'gym.make', 'gym.make', (['env_id'], {}), '(env_id)\n', (2494, 2502), False, 'import gym\n'), ((2908, 2985), 'numpy.zeros', 'np.zeros', (['(N, rollout_length, imsize * imsize * num_channels)'], {'dtype': 'np.uint8'}), '((N, rollout_length, imsize * imsize * num_channels), dtype=np.uint8)\n', (2916, 2985), True, 'import numpy as np\n'), ((3020, 3058), 'numpy.zeros', 'np.zeros', (['(N, rollout_length, act_dim)'], {}), '((N, rollout_length, act_dim))\n', (3028, 3058), True, 'import numpy as np\n'), ((2563, 2687), 'multiworld.core.image_env.ImageEnv', 'ImageEnv', (['env', 'imsize'], {'init_camera': 'init_camera', 'transpose': '(True)', 'normalize': '(True)', 'non_presampled_goal_img_is_garbage': '(True)'}), '(env, imsize, init_camera=init_camera, transpose=True, normalize=\n True, non_presampled_goal_img_is_garbage=True)\n', (2571, 2687), False, 'from multiworld.core.image_env import ImageEnv, unormalize_image, normalize_image\n'), ((3326, 3347), 'multiworld.core.image_env.unormalize_image', 'unormalize_image', (['img'], {}), '(img)\n', (3342, 3347), False, 'from multiworld.core.image_env import ImageEnv, unormalize_image, normalize_image\n'), ((3659, 3670), 'time.time', 'time.time', ([], {}), '()\n', (3668, 3670), False, 'import time\n'), ((3548, 3570), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (3558, 3570), False, 'import cv2\n'), ((3591, 3605), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3602, 3605), False, 'import cv2\n')]
|
import perceptron as pc
import numpy as np
def mnist_load(file, samples):
raw_data = np.array(np.genfromtxt(file, delimiter=',', max_rows=samples))
labels = raw_data[:,0]
data = np.delete(raw_data, 0, 1)/255.0
return (data, labels)
def main():
print("loading data...")
samples = 10000
batch_size = 20
train = mnist_load("mnist_train.csv", samples)
validate = mnist_load("mnist_test.csv", samples)
restart_params = (.0001, 0.01, 0.01, 2*samples/batch_size) #lower bound, upper bound, decay rate, cycle length.
structure = [784, 256, 128, 10, 10]
activation_functions = ("elu", "elu", "elu", "softmax")
network = pc.network(structure, activation_functions, train, validate)
network.train(dropout=[.5, .2, 0], beta=0.9, lr_func="warm restarts", lr_params=restart_params, batch_size=batch_size, epochs=10, cost_func="cross entropy")
main()
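
# Illustrative sketch of a "warm restarts" learning-rate schedule matching the
# (lower bound, upper bound, decay rate, cycle length) tuple above. This is an
# assumption about a typical cosine-annealing-with-restarts shape, not
# necessarily the exact formula implemented inside perceptron.py.
def warm_restart_lr(step, lr_min, lr_max, decay, cycle_len):
    cycle = step // int(cycle_len)              # which restart cycle we are in
    t = (step % int(cycle_len)) / cycle_len     # progress within the cycle, 0..1
    peak = lr_max * (1.0 - decay) ** cycle      # peak LR shrinks after each restart
    return lr_min + 0.5 * (peak - lr_min) * (1 + np.cos(np.pi * t))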
|
[
"numpy.delete",
"numpy.genfromtxt",
"perceptron.network"
] |
[((666, 726), 'perceptron.network', 'pc.network', (['structure', 'activation_functions', 'train', 'validate'], {}), '(structure, activation_functions, train, validate)\n', (676, 726), True, 'import perceptron as pc\n'), ((99, 151), 'numpy.genfromtxt', 'np.genfromtxt', (['file'], {'delimiter': '""","""', 'max_rows': 'samples'}), "(file, delimiter=',', max_rows=samples)\n", (112, 151), True, 'import numpy as np\n'), ((191, 216), 'numpy.delete', 'np.delete', (['raw_data', '(0)', '(1)'], {}), '(raw_data, 0, 1)\n', (200, 216), True, 'import numpy as np\n')]
|
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
from gym import spaces, Env
class NXColoringEnv(Env):
def __init__(self, generator=nx.barabasi_albert_graph, **kwargs):
'''
        generator — networkx graph generator,
kwargs — generator named arguments
'''
self.G = generator(**kwargs)
self.pos = nx.spring_layout(self.G, iterations=1000) #determine by n and m (?)
self.edges = np.array(self.G.edges())
self.n = len(self.G.nodes())
self.m = len(self.edges)
self.action_space = spaces.Box(low=0, high=self.n-1, shape=(self.n,2), dtype=np.uint32)
self.used_colors = []
self.current_state = np.full(self.n, self.n, dtype=np.uint32)
self.done = False
self.total_reward = 0
def get_graph(self):
return self.G.copy()
def step(self, action):
def is_action_available(action):
node, color = action
adjacent_nodes = np.unique(self.edges[np.sum(np.isin(self.edges, node), axis=1, dtype=bool)])
return ~np.any(self.current_state[adjacent_nodes]==color)
reward = 0
if is_action_available(action):
node, color = action
self.current_state[node] = color
if color not in self.used_colors:
reward = -1
self.total_reward -= 1
self.used_colors.append(color)
if self.n not in np.unique(self.current_state):
self.done = True
info = {}
return self.current_state, reward, self.done, info
def reset(self):
self.used_colors = []
self.current_state = np.full(self.n, self.n, dtype=np.uint32)
self.done = False
self.total_reward = 0
def render(self, mode='human', close=False):
nx.draw(self.G, self.pos, node_color=self.current_state, cmap=plt.cm.tab20)
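
# Illustrative usage sketch (not part of the original class): greedily colour a
# small Barabasi-Albert graph by trying colours in ascending order for each
# node. The generator arguments are example values only.
def _greedy_rollout_demo(n=10, m=2):
    env = NXColoringEnv(n=n, m=m)
    for node in range(env.n):
        for color in range(env.n):
            state, reward, done, _ = env.step((node, color))
            if state[node] != env.n:    # the colour was accepted for this node
                break
    return env.total_reward, env.done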
|
[
"numpy.full",
"numpy.isin",
"numpy.any",
"networkx.spring_layout",
"networkx.draw",
"gym.spaces.Box",
"numpy.unique"
] |
[((342, 383), 'networkx.spring_layout', 'nx.spring_layout', (['self.G'], {'iterations': '(1000)'}), '(self.G, iterations=1000)\n', (358, 383), True, 'import networkx as nx\n'), ((539, 609), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(self.n - 1)', 'shape': '(self.n, 2)', 'dtype': 'np.uint32'}), '(low=0, high=self.n - 1, shape=(self.n, 2), dtype=np.uint32)\n', (549, 609), False, 'from gym import spaces, Env\n'), ((658, 698), 'numpy.full', 'np.full', (['self.n', 'self.n'], {'dtype': 'np.uint32'}), '(self.n, self.n, dtype=np.uint32)\n', (665, 698), True, 'import numpy as np\n'), ((1528, 1568), 'numpy.full', 'np.full', (['self.n', 'self.n'], {'dtype': 'np.uint32'}), '(self.n, self.n, dtype=np.uint32)\n', (1535, 1568), True, 'import numpy as np\n'), ((1669, 1744), 'networkx.draw', 'nx.draw', (['self.G', 'self.pos'], {'node_color': 'self.current_state', 'cmap': 'plt.cm.tab20'}), '(self.G, self.pos, node_color=self.current_state, cmap=plt.cm.tab20)\n', (1676, 1744), True, 'import networkx as nx\n'), ((1328, 1357), 'numpy.unique', 'np.unique', (['self.current_state'], {}), '(self.current_state)\n', (1337, 1357), True, 'import numpy as np\n'), ((1002, 1053), 'numpy.any', 'np.any', (['(self.current_state[adjacent_nodes] == color)'], {}), '(self.current_state[adjacent_nodes] == color)\n', (1008, 1053), True, 'import numpy as np\n'), ((939, 964), 'numpy.isin', 'np.isin', (['self.edges', 'node'], {}), '(self.edges, node)\n', (946, 964), True, 'import numpy as np\n')]
|
import numpy as np
# Note: please don't import any new package. You should solve this problem using only the package(s) above.
#-------------------------------------------------------------------------
'''
Problem 1: Multi-Armed Bandit Problem (15 points)
In this problem, you will implement the epsilon-greedy method for Multi-armed bandit problem.
A list of all variables being used in this problem is provided at the end of this file.
'''
#--------------------------
def Terms_and_Conditions():
'''
By submitting this homework or changing this function, you agree with the following terms:
(1) Not sharing your code/solution with any student before and after the homework due. For example, sending your code segment to another student, putting your solution online or lending your laptop (if your laptop contains your solution or your Dropbox automatically copied your solution from your desktop computer and your laptop) to another student to work on this homework will violate this term.
(2) Not using anyone's code in this homework and building your own solution. For example, using some code segments from another student or online resources due to any reason (like too busy recently) will violate this term. Changing other's code as your solution (such as changing the variable names) will also violate this term.
(3) When discussing with any other students about this homework, only discuss high-level ideas or use pseudo-code. Don't discuss about the solution at the code level. For example, two students discuss about the solution of a function (which needs 5 lines of code to solve) and they then work on the solution "independently", however the code of the two solutions are exactly the same, or only with minor differences (variable names are different). In this case, the two students violate this term.
All violations of (1),(2) or (3) will be handled in accordance with the WPI Academic Honesty Policy. For more details, please visit: https://www.wpi.edu/about/policies/academic-integrity/dishonesty
Note: we may use the Stanford Moss system to check your code for code similarity. https://theory.stanford.edu/~aiken/moss/
Historical Data: in one year, we ended up finding 25% of the students in that class violating this term in their homework submissions and we handled ALL of these violations according to the WPI Academic Honesty Policy.
'''
#*******************************************
# CHANGE HERE: if you have read and agree with the term above, change "False" to "True".
Read_and_Agree = True
#*******************************************
return Read_and_Agree
#----------------------------------------------------
'''
Given the player's memory about the previous results in the game and the action chosen and reward received at the current time step, update the player's memory.
---- Inputs: --------
* a: the index of the action being chosen by the player, an integer scalar between 0 and c-1.
* r: the reward received at the current time step, a float scalar.
* Rt: (player's memory) the total rewards (i.e., sum of rewards) collected for each action, a numpy float vector of length c. Rt_1[i] represents the sum of total rewards collected on the i-th action.
* Ct: (player's memory) the counts on how many times each action has been tried, a numpy integer vector of length c. Ct_1[i] represents the total number of samples collected on the i-th action, i.e., how many times the i-th action has been tried before".
---- Hints: --------
* This problem can be solved using 2 line(s) of code.
'''
#---------------------
def update_memory(a, r, Rt, Ct):
#########################################
## INSERT YOUR CODE HERE (3 points)
Rt[a] = Rt[a] + r
Ct[a] = Ct[a] + 1
#########################################
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test1.py:test_update_memory
--- OR ----
python3 -m nose -v test1.py:test_update_memory
--- OR ----
python -m nose -v test1.py:test_update_memory
---------------------------------------------------
'''
#----------------------------------------------------
'''
(Explore-only) Given a multi-armed bandit game, choose an action at the current time step using explore-only strategy. Randomly pick an action with uniform distribution: equal probability for all actions.
---- Inputs: --------
* c: the number of possible actions in a multi-armed bandit problem, an integer scalar.
---- Outputs: --------
* a: the index of the action being chosen by the player, an integer scalar between 0 and c-1.
---- Hints: --------
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def choose_action_explore(c):
#########################################
## INSERT YOUR CODE HERE (3 points)
a = np.random.randint(0, c)
#########################################
return a
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test1.py:test_choose_action_explore
--- OR ----
python3 -m nose -v test1.py:test_choose_action_explore
--- OR ----
python -m nose -v test1.py:test_choose_action_explore
---------------------------------------------------
'''
#----------------------------------------------------
'''
(Exploit-only) Given a multi-armed bandit game and the player's memory about the previous results, choose an action at the current time step using exploit-only strategy: choose the action with the highest average reward.
---- Inputs: --------
* Rt: (player's memory) the total rewards (i.e., sum of rewards) collected for each action, a numpy float vector of length c. Rt_1[i] represents the sum of total rewards collected on the i-th action.
* Ct: (player's memory) the counts on how many times each action has been tried, a numpy integer vector of length c. Ct_1[i] represents the total number of samples collected on the i-th action, i.e., how many times the i-th action has been tried before".
---- Outputs: --------
* a: the index of the action being chosen by the player, an integer scalar between 0 and c-1.
---- Hints: --------
* If the count in Ct[i] for the i-th action is 0, we can assume the average reward for the i-th action is 0. For example, if the count Ct for 3 actions are [0,1,1], we can assume the average reward for the first action is 0.
    * You could use the argmax() function in numpy to return the index of the largest value in a vector.
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def choose_action_exploit(Rt, Ct):
#########################################
## INSERT YOUR CODE HERE (3 points)
a = np.argmax([0 if Ct[i] == 0 else Rt[i] / Ct[i] for i in range(Rt.size)])
#########################################
return a
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test1.py:test_choose_action_exploit
--- OR ----
python3 -m nose -v test1.py:test_choose_action_exploit
--- OR ----
python -m nose -v test1.py:test_choose_action_exploit
---------------------------------------------------
'''
#----------------------------------------------------
'''
Given a multi-armed bandit game and the player's memory about the previous results, choose an action at the current step of the game using epsilon-greedy method: with a small probability (epsilon) to follow explore-only method (randomly choose an action) and with a large probability (1-epsilon) to follow exploit-only method (choose the action with the highest average reward).
---- Inputs: --------
    * Rt: (player's memory) the total rewards (i.e., sum of rewards) collected for each action, a numpy float vector of length c. Rt[i] represents the sum of total rewards collected on the i-th action.
    * Ct: (player's memory) the counts of how many times each action has been tried, a numpy integer vector of length c. Ct[i] represents the total number of samples collected on the i-th action, i.e., how many times the i-th action has been tried before.
* e: (epsilon) the probability of the player to follow the exploration-only strategy. e is a float scalar between 0 and 1. The player has 1-e probability in each time step to follow the exploitation-only strategy.
---- Outputs: --------
* a: the index of the action being chosen by the player, an integer scalar between 0 and c-1.
---- Hints: --------
* You could use the random.rand() function in numpy to sample a number randomly using uniform distribution between 0 and 1.
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def choose_action(Rt, Ct, e=0.05):
#########################################
## INSERT YOUR CODE HERE (6 points)
a = choose_action_explore(Ct.size) if np.random.random() < e else choose_action_exploit(Rt, Ct)
#########################################
return a
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test1.py:test_choose_action
--- OR ----
python3 -m nose -v test1.py:test_choose_action
--- OR ----
python -m nose -v test1.py:test_choose_action
---------------------------------------------------
'''
#--------------------------------------------
'''
TEST problem 1:
Now you can test the correctness of all the above functions by typing the following in the terminal:
---------------------------------------------------
nosetests -v test1.py
--- OR ----
python3 -m nose -v test1.py
--- OR ----
python -m nose -v test1.py
---------------------------------------------------
If your code passed all the tests, you will see the following message in the terminal:
----------- Problem 1 (15 points in total)--------------------- ... ok
* (3 points) update_memory ... ok
* (3 points) choose_action_explore ... ok
* (3 points) choose_action_exploit ... ok
* (6 points) choose_action ... ok
----------------------------------------------------------------------
Ran 4 tests in 0.586s
OK
'''
#--------------------------------------------
#--------------------------------------------
'''
List of All Variables
* c: the number of possible actions in a multi-armed bandit problem, an integer scalar.
* e: (epsilon) the probability of the player to follow the exploration-only strategy. e is a float scalar between 0 and 1. The player has 1-e probability in each time step to follow the exploitation-only strategy.
* Rt: (player's memory) the total rewards (i.e., sum of rewards) collected for each action, a numpy float vector of length c. Rt[i] represents the sum of total rewards collected on the i-th action.
* Ct: (player's memory) the counts of how many times each action has been tried, a numpy integer vector of length c. Ct[i] represents the total number of samples collected on the i-th action, i.e., how many times the i-th action has been tried before.
* a: the index of the action being chosen by the player, an integer scalar between 0 and c-1.
* r: the reward received at the current time step, a float scalar.
'''
#--------------------------------------------
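#--------------------------------------------
# Hedged usage sketch (not part of the graded assignment): a tiny simulation loop
# showing how the three functions above fit together. The reward probabilities
# `true_p`, the step count and the helper itself are made up for illustration; the
# assignment's own update_memory() (defined earlier) could be used in place of the
# manual Rt/Ct updates below, depending on its exact signature.
def simulate_bandit(true_p, n_steps=1000, e=0.05):
    c = len(true_p)
    Rt = np.zeros(c)                # total reward collected per action
    Ct = np.zeros(c, dtype=int)     # number of times each action was tried
    for _ in range(n_steps):
        a = choose_action(Rt, Ct, e)                   # epsilon-greedy choice
        r = float(np.random.rand() < true_p[a])      # Bernoulli reward
        Rt[a] += r
        Ct[a] += 1
    return Rt, Ct
#--------------------------------------------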
|
[
"numpy.random.randint",
"numpy.random.random"
] |
[((5139, 5162), 'numpy.random.randint', 'np.random.randint', (['(0)', 'c'], {}), '(0, c)\n', (5156, 5162), True, 'import numpy as np\n'), ((9556, 9574), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (9572, 9574), True, 'import numpy as np\n')]
|
import scipy.io
import numpy as np
import os
import random
import json
import pdb
def check_image_voxel_match(cls):
root = os.path.abspath('.')
out_dir = os.path.join(root, '../output', cls)
# out_dir = '/Users/heqian/Research/projects/primitive-based_3d/data/all_classes/chair'
voxel_txt_dir = os.path.join(out_dir, 'voxeltxt')
voxel_dirs = {x: os.path.join(voxel_txt_dir, 'voxel_{}.txt'.format(x))
for x in ['train', 'val', 'test']}
img_dirs = {x: os.path.join(voxel_txt_dir, '{}.txt'.format(x))
for x in ['train', 'val', 'test']}
with open(os.path.join(out_dir, 'voxels_dir_{}.txt'.format(cls)), 'r') as f:
voxel_all = f.readlines()
voxel_names = {}
img_names = {}
for phase in ['train', 'val', 'test']:
with open(os.path.join(voxel_dirs[phase]), 'r') as f:
voxel_names[phase] = f.readlines()
with open(os.path.join(img_dirs[phase]), 'r') as f:
img_names[phase] = f.readlines()
# pix3d_dir = os.path.join(root, '../input/pix3d.json')
# pix3d = json.load(open(pix3d_dir, 'r'))
match_id = scipy.io.loadmat(os.path.join(out_dir, 'img_voxel_idxs.mat'))
img_match_vox = {x: [] for x in ['train', 'val', 'test']}
for phase in ['train', 'val', 'test']:
for img in img_names[phase]:
id_img_ori = int(img.split('.')[0]) # 1-3839
img_id_real = list(match_id['img_idxs'][0]).index(id_img_ori) # 0-3493
voxel_id_ori = match_id['voxel_idxs'][0, img_id_real] # 1-216
vox = voxel_all[voxel_id_ori - 1]
img_match_vox[phase].append(vox)
# img_match_vox[phase].append('model/'+vox)
img_match_vox = {x: sorted(set(img_match_vox[x])) for x in ['train', 'val', 'test']}
# pdb.set_trace()
for phase in ['train', 'val', 'test']:
if len(set(voxel_names[phase]).difference(set(img_match_vox[phase]))) > 0:
print('error')
if len(set(img_match_vox[phase]).difference(set(voxel_names[phase]))) > 0:
print('error')
for name in voxel_names[phase]:
if name not in img_match_vox[phase]:
print(name)
for name in img_match_vox[phase]:
if name not in voxel_names[phase]:
print(name)
def split_voxel_then_image(cls):
# data_dir = '/Users/heqian/Research/projects/3dprnn/data/pix3d'
split_by_model = True # True-split by 216 models, False-split by 34 images
## split voxels into train, val, test
root = os.path.abspath('.')
out_dir = os.path.join(root, '../output', cls)
voxel_txt_dir = os.path.join(out_dir, 'voxeltxt')
if not os.path.exists(voxel_txt_dir):
os.makedirs(voxel_txt_dir)
voxel_train_txtpath = os.path.join(voxel_txt_dir, 'voxel_train.txt')
voxel_val_txtpath = os.path.join(voxel_txt_dir, 'voxel_val.txt')
voxel_test_txtpath = os.path.join(voxel_txt_dir, 'voxel_test.txt')
voxel_ftrain = open(voxel_train_txtpath, 'w')
voxel_fval = open(voxel_val_txtpath, 'w')
voxel_ftest = open(voxel_test_txtpath, 'w')
voxel_ltrain = []
voxel_lval = []
voxel_ltest = []
voxel_ctrain = 0
voxel_cval = 0
voxel_ctest = 0
with open(os.path.join(out_dir, 'voxels_dir_{}.txt'.format(cls)), 'r') as f:
voxel_dirs = f.readlines()
for i in range(len(voxel_dirs)):
voxel_dirs[i] = voxel_dirs[i].strip()
voxel_dirs[i] = voxel_dirs[i]
tmp = random.random()
if tmp < 0.65:
voxel_ftrain.write(voxel_dirs[i]+'\n')
voxel_ltrain.append(voxel_dirs[i])
voxel_ctrain += 1
elif tmp >= 0.65 and tmp < 0.8:
voxel_fval.write(voxel_dirs[i]+'\n')
voxel_lval.append(voxel_dirs[i])
voxel_cval += 1
else:
voxel_ftest.write(voxel_dirs[i]+'\n')
voxel_ltest.append(voxel_dirs[i])
voxel_ctest += 1
voxel_ftrain.close()
voxel_fval.close()
voxel_ftest.close()
## split images into train, val, test, according to voxels
# img_voxel_idxs = []
img_idxs = []
voxel_idxs = []
train_txtpath = os.path.join(voxel_txt_dir, 'train.txt')
val_txtpath = os.path.join(voxel_txt_dir, 'val.txt')
test_txtpath = os.path.join(voxel_txt_dir, 'test.txt')
ftrain = open(train_txtpath, 'w')
fval = open(val_txtpath, 'w')
ftest = open(test_txtpath, 'w')
ctrain = 0
cval = 0
ctest = 0
pix3d_dir = os.path.join(root, '../input/pix3d.json')
pix3d = json.load(open(pix3d_dir, 'r'))
for i in range(len(pix3d)):
# if json_file[i]['img'][4:9] == 'chair' and json_file[i]['voxel'] not in voxel_dirs:
# print(json_file[i]['img'], json_file[i]['voxel'])
voxel_dir = pix3d[i]['voxel'][6:]
if voxel_dir in voxel_dirs:
# pdb.set_trace()
img_file = pix3d[i]['img'].split('/')[-1] #[10:]
img_id = int(img_file.split('.')[0]) #int(pix3d[i]['img'][10:14])
img_idxs.append(img_id)
voxel_idxs.append(voxel_dirs.index(voxel_dir) + 1)
# img_voxel_idxs.append(voxel_dirs.index(voxel_dir))
# if img_id != len(img_voxel_idxs):
# print('Error!!!=======', img_id)
if split_by_model:
if voxel_dir in voxel_ltrain:
ftrain.write(img_file+'\n')
ctrain += 1
elif voxel_dir in voxel_lval:
fval.write(img_file+'\n')
cval += 1
elif voxel_dir in voxel_ltest:
ftest.write(img_file+'\n')
ctest += 1
else:
tmp = random.random()
if tmp < 0.65:
ftrain.write(img_file+'\n')
ctrain += 1
elif tmp >= 0.65 and tmp < 0.8:
fval.write(img_file+'\n')
cval += 1
else:
ftest.write(img_file+'\n')
ctest += 1
ftrain.close()
fval.close()
ftest.close()
# scipy.io.savemat(os.path.join(out_dir, 'img_voxel_idxs.mat'),
# {'img_voxel_idxs': np.array(img_voxel_idxs)})
scipy.io.savemat(os.path.join(out_dir, 'img_voxel_idxs.mat'),
{'img_idxs': np.array(img_idxs), 'voxel_idxs': np.array(voxel_idxs)})
print(voxel_ctrain+voxel_cval+voxel_ctest, voxel_ctrain, voxel_cval, voxel_ctest)
print(ctrain+cval+ctest, ctrain, cval, ctest)
print(len(img_idxs))
if __name__ == '__main__':
cls_all = ['chair', 'bed', 'bookcase', 'desk', 'misc', 'sofa', 'table', 'tool', 'wardrobe']
cls = 'table'
# for cls in cls_all:
split_voxel_then_image(cls)
check_image_voxel_match(cls)
|
[
"os.path.abspath",
"os.makedirs",
"os.path.exists",
"random.random",
"numpy.array",
"os.path.join"
] |
[((129, 149), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (144, 149), False, 'import os\n'), ((164, 200), 'os.path.join', 'os.path.join', (['root', '"""../output"""', 'cls'], {}), "(root, '../output', cls)\n", (176, 200), False, 'import os\n'), ((313, 346), 'os.path.join', 'os.path.join', (['out_dir', '"""voxeltxt"""'], {}), "(out_dir, 'voxeltxt')\n", (325, 346), False, 'import os\n'), ((2540, 2560), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (2555, 2560), False, 'import os\n'), ((2575, 2611), 'os.path.join', 'os.path.join', (['root', '"""../output"""', 'cls'], {}), "(root, '../output', cls)\n", (2587, 2611), False, 'import os\n'), ((2632, 2665), 'os.path.join', 'os.path.join', (['out_dir', '"""voxeltxt"""'], {}), "(out_dir, 'voxeltxt')\n", (2644, 2665), False, 'import os\n'), ((2769, 2815), 'os.path.join', 'os.path.join', (['voxel_txt_dir', '"""voxel_train.txt"""'], {}), "(voxel_txt_dir, 'voxel_train.txt')\n", (2781, 2815), False, 'import os\n'), ((2840, 2884), 'os.path.join', 'os.path.join', (['voxel_txt_dir', '"""voxel_val.txt"""'], {}), "(voxel_txt_dir, 'voxel_val.txt')\n", (2852, 2884), False, 'import os\n'), ((2910, 2955), 'os.path.join', 'os.path.join', (['voxel_txt_dir', '"""voxel_test.txt"""'], {}), "(voxel_txt_dir, 'voxel_test.txt')\n", (2922, 2955), False, 'import os\n'), ((4163, 4203), 'os.path.join', 'os.path.join', (['voxel_txt_dir', '"""train.txt"""'], {}), "(voxel_txt_dir, 'train.txt')\n", (4175, 4203), False, 'import os\n'), ((4222, 4260), 'os.path.join', 'os.path.join', (['voxel_txt_dir', '"""val.txt"""'], {}), "(voxel_txt_dir, 'val.txt')\n", (4234, 4260), False, 'import os\n'), ((4280, 4319), 'os.path.join', 'os.path.join', (['voxel_txt_dir', '"""test.txt"""'], {}), "(voxel_txt_dir, 'test.txt')\n", (4292, 4319), False, 'import os\n'), ((4486, 4527), 'os.path.join', 'os.path.join', (['root', '"""../input/pix3d.json"""'], {}), "(root, '../input/pix3d.json')\n", (4498, 4527), False, 'import os\n'), ((1145, 1188), 'os.path.join', 'os.path.join', (['out_dir', '"""img_voxel_idxs.mat"""'], {}), "(out_dir, 'img_voxel_idxs.mat')\n", (1157, 1188), False, 'import os\n'), ((2677, 2706), 'os.path.exists', 'os.path.exists', (['voxel_txt_dir'], {}), '(voxel_txt_dir)\n', (2691, 2706), False, 'import os\n'), ((2716, 2742), 'os.makedirs', 'os.makedirs', (['voxel_txt_dir'], {}), '(voxel_txt_dir)\n', (2727, 2742), False, 'import os\n'), ((3474, 3489), 'random.random', 'random.random', ([], {}), '()\n', (3487, 3489), False, 'import random\n'), ((6281, 6324), 'os.path.join', 'os.path.join', (['out_dir', '"""img_voxel_idxs.mat"""'], {}), "(out_dir, 'img_voxel_idxs.mat')\n", (6293, 6324), False, 'import os\n'), ((6360, 6378), 'numpy.array', 'np.array', (['img_idxs'], {}), '(img_idxs)\n', (6368, 6378), True, 'import numpy as np\n'), ((6394, 6414), 'numpy.array', 'np.array', (['voxel_idxs'], {}), '(voxel_idxs)\n', (6402, 6414), True, 'import numpy as np\n'), ((811, 842), 'os.path.join', 'os.path.join', (['voxel_dirs[phase]'], {}), '(voxel_dirs[phase])\n', (823, 842), False, 'import os\n'), ((920, 949), 'os.path.join', 'os.path.join', (['img_dirs[phase]'], {}), '(img_dirs[phase])\n', (932, 949), False, 'import os\n'), ((5718, 5733), 'random.random', 'random.random', ([], {}), '()\n', (5731, 5733), False, 'import random\n')]
|
# Numpy Exercises - 15
#*******************
import numpy as np
arr=np.ones((10,10))
arr[1:-1,1:-1]=0
print(arr)
print()
arr_zero=np.zeros((8,8))
arr_zero=np.pad(arr_zero,pad_width=1,mode='constant',constant_values=1)
print(arr_zero)
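# Hedged check (not part of the original exercise): both constructions above build
# the same 10x10 "border of ones" pattern, which can be verified directly.
print(np.array_equal(arr, arr_zero))  # expected: True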
|
[
"numpy.pad",
"numpy.zeros",
"numpy.ones"
] |
[((67, 84), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (74, 84), True, 'import numpy as np\n'), ((132, 148), 'numpy.zeros', 'np.zeros', (['(8, 8)'], {}), '((8, 8))\n', (140, 148), True, 'import numpy as np\n'), ((158, 223), 'numpy.pad', 'np.pad', (['arr_zero'], {'pad_width': '(1)', 'mode': '"""constant"""', 'constant_values': '(1)'}), "(arr_zero, pad_width=1, mode='constant', constant_values=1)\n", (164, 223), True, 'import numpy as np\n')]
|
import numpy as np
class AccelerationSensor:
def __init__(self, measurement_covariance):
self.R_meas = measurement_covariance
def getMeasurements(self, true_accel):
return np.random.multivariate_normal(true_accel, self.R_meas)
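# Hedged usage sketch (the covariance and true acceleration below are illustrative,
# not part of the original module): simulate one noisy 3-axis accelerometer reading.
if __name__ == "__main__":
    sensor = AccelerationSensor(measurement_covariance=0.01 * np.eye(3))
    print(sensor.getMeasurements(np.array([0.0, 0.0, 9.81])))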
|
[
"numpy.random.multivariate_normal"
] |
[((192, 246), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['true_accel', 'self.R_meas'], {}), '(true_accel, self.R_meas)\n', (221, 246), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# coding: utf-8
# # Loan Classification Project
# In[1]:
# Libraries we need
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
from sklearn.metrics import confusion_matrix, classification_report, precision_recall_curve,recall_score
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
# In[2]:
df = pd.read_csv("Dataset.csv")
# In[3]:
df.head()
# In[4]:
df.info()
# In[5]:
df.nunique()
# - Above we can see that Reason and Bad are binary variables
# - Nothing needs to be dropped
# In[6]:
df.describe()
# In[7]:
plt.hist(df['BAD'], bins=3)
plt.show()
# In[8]:
df['LOAN'].plot(kind='density')
plt.show()
# In[9]:
plt.pie(df['REASON'].value_counts(), labels=['DebtCon', 'HomeImp'], autopct='%.1f')
plt.show()
df['REASON'].value_counts()
# In[10]:
correlation = df.corr()
sns.heatmap(correlation)
plt.show()
# In[11]:
df['BAD'].value_counts(normalize=True)
# In[12]:
df.fillna(df.mean(), inplace=True)
# In[13]:
one_hot_encoding = pd.get_dummies(df['REASON'])
df = df.drop('REASON', axis=1)
df = df.join(one_hot_encoding)
df
# In[14]:
one_hot_encoding2 = pd.get_dummies(df['JOB'])
df = df.drop('JOB', axis=1)
df = df.join(one_hot_encoding2)
df
# In[15]:
dependent = df['BAD']
independent = df.drop(['BAD'], axis=1)
x_train, x_test, y_train, y_test = train_test_split(independent, dependent, test_size=0.3, random_state=1)
# In[16]:
def metrics_score(actual, predicted):
print(classification_report(actual, predicted))
cm = confusion_matrix(actual, predicted)
plt.figure(figsize=(8,5))
sns.heatmap(cm, annot=True, fmt='.2f', xticklabels=['Not Default', 'Default'], yticklabels=['Not Default', 'Default'])
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show()
# In[17]:
dtree = DecisionTreeClassifier(class_weight={0:0.20, 1:0.80}, random_state=1)
# In[18]:
dtree.fit(x_train, y_train)
# In[19]:
dependent_performance_dt = dtree.predict(x_train)
metrics_score(y_train, dependent_performance_dt)
# - The above is perfect because we are using the train values, not the test
# - Let's test on the test data
# In[20]:
dependent_test_performance_dt = dtree.predict(x_test)
metrics_score(y_test,dependent_test_performance_dt)
# - As we can see, we got decent performance from this model; let's see if we can do better
# - Self-note: look at feature importances next
# In[21]:
important = dtree.feature_importances_
columns = independent.columns
important_items_df = pd.DataFrame(important, index=columns, columns=['Importance']).sort_values(by='Importance', ascending=False)
plt.figure(figsize=(13,13))
sns.barplot(important_items_df.Importance, important_items_df.index)
plt.show()
# - I followed this from a previous project to see the most important features
# - We can see that the most important features are DEBTINC, CLAGE and CLNO
# In[22]:
tree_estimator = DecisionTreeClassifier(class_weight={0:0.20, 1:0.80}, random_state=1)
parameters = {
'max_depth':np.arange(2,7),
'criterion':['gini', 'entropy'],
'min_samples_leaf':[5,10,20,25]
}
score = metrics.make_scorer(recall_score, pos_label=1)
gridCV= GridSearchCV(tree_estimator, parameters, scoring=score,cv=10)
gridCV = gridCV.fit(x_train, y_train)
tree_estimator = gridCV.best_estimator_
tree_estimator.fit(x_train, y_train)
# In[23]:
dependent_performance_dt = tree_estimator.predict(x_train)
metrics_score(y_train, dependent_performance_dt)
# - We increased the less harmful error but decreased the harmful error
# In[24]:
dependent_test_performance_dt = tree_estimator.predict(x_test)
metrics_score(y_test, dependent_test_performance_dt)
# - Although the performance is slightly worse, we still reduce harmful error
# In[25]:
important = tree_estimator.feature_importances_
columns=independent.columns
importance_df=pd.DataFrame(important,index=columns,columns=['Importance']).sort_values(by='Importance',ascending=False)
plt.figure(figsize=(13,13))
sns.barplot(importance_df.Importance,importance_df.index)
plt.show()
# In[26]:
features = list(independent.columns)
plt.figure(figsize=(30,20))
tree.plot_tree(dtree,max_depth=4,feature_names=features,filled=True,fontsize=12,node_ids=True,class_names=True)
plt.show()
# - A visualization is one of the advantages that decision trees offer; we can show this to the client to illustrate the model's thought process
# In[27]:
forest_estimator = RandomForestClassifier(class_weight={0:0.20, 1:0.80}, random_state=1)
forest_estimator.fit(x_train, y_train)
# In[28]:
y_predict_training_forest = forest_estimator.predict(x_train)
metrics_score(y_train, y_predict_training_forest)
# - A perfect classification
# - This implies overfitting
# In[29]:
y_predict_test_forest = forest_estimator.predict(x_test)
metrics_score(y_test, y_predict_test_forest)
# - The performance is a lot better than the original single tree
# - Let's fix the overfitting
# In[30]:
forest_estimator_tuned = RandomForestClassifier(class_weight={0:0.20,1:0.80}, random_state=1)
parameters_rf = {
"n_estimators": [100,250,500],
"min_samples_leaf": np.arange(1, 4,1),
"max_features": [0.7,0.9,'auto'],
}
score = metrics.make_scorer(recall_score, pos_label=1)
# Run the grid search
grid_obj = GridSearchCV(forest_estimator_tuned, parameters_rf, scoring=score, cv=5)
grid_obj = grid_obj.fit(x_train, y_train)
# Set the clf to the best combination of parameters
forest_estimator_tuned = grid_obj.best_estimator_
# In[31]:
forest_estimator_tuned.fit(x_train, y_train)
# In[32]:
y_predict_train_forest_tuned = forest_estimator_tuned.predict(x_train)
metrics_score(y_train, y_predict_train_forest_tuned)
# In[33]:
y_predict_test_forest_tuned = forest_estimator_tuned.predict(x_test)
metrics_score(y_test, y_predict_test_forest_tuned)
# - We now have very good performance
# - We can submit this to the company
# ### Conclusion
# - I made many models to get the best results.
# - The first one I made was a decision tree; this is not as good as a random forest, but it is transparent because it lets us visualize the decision process. This first model had decent performance.
# - To improve its performance we tuned the model, which reduced the harmful error.
# - Then, to improve even further, I created a random forest model; it had excellent performance once we created a tuned second version that removed the overfitting.
# ### Recommendations
# - The biggest thing that affects defaulting on a loan is the debt-to-income ratio. If someone has a lot of debt and a lower income, they may have a harder time paying back a loan.
# - Something else that affects defaulting on a loan is the number of delinquent credit lines. This means that someone who cannot make their credit card payments will have a hard time paying back a loan.
# - Years at job is also a driver of a loan's outcome. A large number of years at a job could indicate financial stability.
# - DEROG, or a history of delinquent payments, is also a warning sign of not being able to pay back a loan.
# - Those are some of the warning signs (and good signs) that should be looked for when deciding which candidates to give loans to.
#
# I will now apply SHAP to look more into this model.
# In[34]:
get_ipython().system('pip install shap')
import shap
# In[35]:
shap.initjs()
# In[36]:
explain = shap.TreeExplainer(forest_estimator_tuned)
shap_vals = explain(x_train)
# In[37]:
type(shap_vals)
# In[38]:
shap.plots.bar(shap_vals[:, :, 0])
# In[39]:
shap.plots.heatmap(shap_vals[:, :, 0])
# In[40]:
shap.summary_plot(shap_vals[:, :, 0], x_train)
# In[53]:
print(forest_estimator_tuned.predict(x_test.iloc[107].to_numpy().reshape(1,-1))) # Predict for a single row: 0 means no default predicted (approve), 1 means predicted default (decline).
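# In[54]:
# Hedged follow-up sketch (not in the original notebook): predict_proba exposes the
# estimated probability of default (BAD = 1), which can be more useful than the hard
# 0/1 label when deciding whether to approve a borderline application.
default_probability = forest_estimator_tuned.predict_proba(x_test)[:, 1]
print(default_probability[:5])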
|
[
"sklearn.model_selection.GridSearchCV",
"seaborn.heatmap",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"shap.plots.heatmap",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.metrics.classification_report",
"matplotlib.pyplot.figure",
"numpy.arange",
"sklearn.tree.plot_tree",
"pandas.DataFrame",
"shap.TreeExplainer",
"sklearn.metrics.make_scorer",
"sklearn.ensemble.RandomForestClassifier",
"matplotlib.pyplot.show",
"pandas.get_dummies",
"seaborn.barplot",
"matplotlib.pyplot.ylabel",
"shap.summary_plot",
"shap.plots.bar",
"matplotlib.pyplot.hist",
"shap.initjs",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.xlabel"
] |
[((919, 945), 'pandas.read_csv', 'pd.read_csv', (['"""Dataset.csv"""'], {}), "('Dataset.csv')\n", (930, 945), True, 'import pandas as pd\n'), ((1153, 1180), 'matplotlib.pyplot.hist', 'plt.hist', (["df['BAD']"], {'bins': '(3)'}), "(df['BAD'], bins=3)\n", (1161, 1180), True, 'import matplotlib.pyplot as plt\n'), ((1181, 1191), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1189, 1191), True, 'import matplotlib.pyplot as plt\n'), ((1237, 1247), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1245, 1247), True, 'import matplotlib.pyplot as plt\n'), ((1345, 1355), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1353, 1355), True, 'import matplotlib.pyplot as plt\n'), ((1422, 1446), 'seaborn.heatmap', 'sns.heatmap', (['correlation'], {}), '(correlation)\n', (1433, 1446), True, 'import seaborn as sns\n'), ((1447, 1457), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1455, 1457), True, 'import matplotlib.pyplot as plt\n'), ((1593, 1621), 'pandas.get_dummies', 'pd.get_dummies', (["df['REASON']"], {}), "(df['REASON'])\n", (1607, 1621), True, 'import pandas as pd\n'), ((1721, 1746), 'pandas.get_dummies', 'pd.get_dummies', (["df['JOB']"], {}), "(df['JOB'])\n", (1735, 1746), True, 'import pandas as pd\n'), ((1920, 1991), 'sklearn.model_selection.train_test_split', 'train_test_split', (['independent', 'dependent'], {'test_size': '(0.3)', 'random_state': '(1)'}), '(independent, dependent, test_size=0.3, random_state=1)\n', (1936, 1991), False, 'from sklearn.model_selection import train_test_split\n'), ((2385, 2458), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'class_weight': '{(0): 0.2, (1): 0.8}', 'random_state': '(1)'}), '(class_weight={(0): 0.2, (1): 0.8}, random_state=1)\n', (2407, 2458), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((3182, 3210), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(13, 13)'}), '(figsize=(13, 13))\n', (3192, 3210), True, 'import matplotlib.pyplot as plt\n'), ((3210, 3278), 'seaborn.barplot', 'sns.barplot', (['important_items_df.Importance', 'important_items_df.index'], {}), '(important_items_df.Importance, important_items_df.index)\n', (3221, 3278), True, 'import seaborn as sns\n'), ((3279, 3289), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3287, 3289), True, 'import matplotlib.pyplot as plt\n'), ((3477, 3550), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'class_weight': '{(0): 0.2, (1): 0.8}', 'random_state': '(1)'}), '(class_weight={(0): 0.2, (1): 0.8}, random_state=1)\n', (3499, 3550), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((3691, 3737), 'sklearn.metrics.make_scorer', 'metrics.make_scorer', (['recall_score'], {'pos_label': '(1)'}), '(recall_score, pos_label=1)\n', (3710, 3737), False, 'from sklearn import metrics\n'), ((3746, 3808), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['tree_estimator', 'parameters'], {'scoring': 'score', 'cv': '(10)'}), '(tree_estimator, parameters, scoring=score, cv=10)\n', (3758, 3808), False, 'from sklearn.model_selection import GridSearchCV\n'), ((4538, 4566), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(13, 13)'}), '(figsize=(13, 13))\n', (4548, 4566), True, 'import matplotlib.pyplot as plt\n'), ((4566, 4624), 'seaborn.barplot', 'sns.barplot', (['importance_df.Importance', 'importance_df.index'], {}), '(importance_df.Importance, importance_df.index)\n', (4577, 4624), True, 'import seaborn as sns\n'), ((4624, 4634), 'matplotlib.pyplot.show', 'plt.show', ([], 
{}), '()\n', (4632, 4634), True, 'import matplotlib.pyplot as plt\n'), ((4687, 4715), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(30, 20)'}), '(figsize=(30, 20))\n', (4697, 4715), True, 'import matplotlib.pyplot as plt\n'), ((4716, 4837), 'sklearn.tree.plot_tree', 'tree.plot_tree', (['dtree'], {'max_depth': '(4)', 'feature_names': 'features', 'filled': '(True)', 'fontsize': '(12)', 'node_ids': '(True)', 'class_names': '(True)'}), '(dtree, max_depth=4, feature_names=features, filled=True,\n fontsize=12, node_ids=True, class_names=True)\n', (4730, 4837), False, 'from sklearn import tree\n'), ((4828, 4838), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4836, 4838), True, 'import matplotlib.pyplot as plt\n'), ((4996, 5069), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'class_weight': '{(0): 0.2, (1): 0.8}', 'random_state': '(1)'}), '(class_weight={(0): 0.2, (1): 0.8}, random_state=1)\n', (5018, 5069), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((5539, 5612), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'class_weight': '{(0): 0.2, (1): 0.8}', 'random_state': '(1)'}), '(class_weight={(0): 0.2, (1): 0.8}, random_state=1)\n', (5561, 5612), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((5768, 5814), 'sklearn.metrics.make_scorer', 'metrics.make_scorer', (['recall_score'], {'pos_label': '(1)'}), '(recall_score, pos_label=1)\n', (5787, 5814), False, 'from sklearn import metrics\n'), ((5849, 5921), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['forest_estimator_tuned', 'parameters_rf'], {'scoring': 'score', 'cv': '(5)'}), '(forest_estimator_tuned, parameters_rf, scoring=score, cv=5)\n', (5861, 5921), False, 'from sklearn.model_selection import GridSearchCV\n'), ((7869, 7882), 'shap.initjs', 'shap.initjs', ([], {}), '()\n', (7880, 7882), False, 'import shap\n'), ((7907, 7949), 'shap.TreeExplainer', 'shap.TreeExplainer', (['forest_estimator_tuned'], {}), '(forest_estimator_tuned)\n', (7925, 7949), False, 'import shap\n'), ((8023, 8057), 'shap.plots.bar', 'shap.plots.bar', (['shap_vals[:, :, 0]'], {}), '(shap_vals[:, :, 0])\n', (8037, 8057), False, 'import shap\n'), ((8072, 8110), 'shap.plots.heatmap', 'shap.plots.heatmap', (['shap_vals[:, :, 0]'], {}), '(shap_vals[:, :, 0])\n', (8090, 8110), False, 'import shap\n'), ((8125, 8171), 'shap.summary_plot', 'shap.summary_plot', (['shap_vals[:, :, 0]', 'x_train'], {}), '(shap_vals[:, :, 0], x_train)\n', (8142, 8171), False, 'import shap\n'), ((2105, 2140), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['actual', 'predicted'], {}), '(actual, predicted)\n', (2121, 2140), False, 'from sklearn.metrics import confusion_matrix, classification_report, precision_recall_curve, recall_score\n'), ((2145, 2171), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (2155, 2171), True, 'import matplotlib.pyplot as plt\n'), ((2175, 2297), 'seaborn.heatmap', 'sns.heatmap', (['cm'], {'annot': '(True)', 'fmt': '""".2f"""', 'xticklabels': "['Not Default', 'Default']", 'yticklabels': "['Not Default', 'Default']"}), "(cm, annot=True, fmt='.2f', xticklabels=['Not Default',\n 'Default'], yticklabels=['Not Default', 'Default'])\n", (2186, 2297), True, 'import seaborn as sns\n'), ((2299, 2319), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Actual"""'], {}), "('Actual')\n", (2309, 2319), True, 'import matplotlib.pyplot as plt\n'), ((2324, 2347), 'matplotlib.pyplot.xlabel', 'plt.xlabel', 
(['"""Predicted"""'], {}), "('Predicted')\n", (2334, 2347), True, 'import matplotlib.pyplot as plt\n'), ((2352, 2362), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2360, 2362), True, 'import matplotlib.pyplot as plt\n'), ((3579, 3594), 'numpy.arange', 'np.arange', (['(2)', '(7)'], {}), '(2, 7)\n', (3588, 3594), True, 'import numpy as np\n'), ((5696, 5714), 'numpy.arange', 'np.arange', (['(1)', '(4)', '(1)'], {}), '(1, 4, 1)\n', (5705, 5714), True, 'import numpy as np\n'), ((2054, 2094), 'sklearn.metrics.classification_report', 'classification_report', (['actual', 'predicted'], {}), '(actual, predicted)\n', (2075, 2094), False, 'from sklearn.metrics import confusion_matrix, classification_report, precision_recall_curve, recall_score\n'), ((3073, 3135), 'pandas.DataFrame', 'pd.DataFrame', (['important'], {'index': 'columns', 'columns': "['Importance']"}), "(important, index=columns, columns=['Importance'])\n", (3085, 3135), True, 'import pandas as pd\n'), ((4432, 4494), 'pandas.DataFrame', 'pd.DataFrame', (['important'], {'index': 'columns', 'columns': "['Importance']"}), "(important, index=columns, columns=['Importance'])\n", (4444, 4494), True, 'import pandas as pd\n')]
|
import datetime
from floodsystem.datafetcher import fetch_measure_levels
from floodsystem.stationdata import build_station_list
from floodsystem.plot import plot_water_levels, plot_water_level_with_fit
stations = build_station_list()
import numpy as np
def test_plot_water_level_with_fit():
    x = np.linspace(1, 1000, 100000)
    y = []
    for i in x:
        y.append(3*i**2 + 5)
    p_coeff = np.polyfit(x, y, 2)
    poly = np.poly1d(p_coeff)
    # the data follow y = 3*x**2 + 5, so the fitted leading coefficient should be ~3
    assert round(p_coeff[0]) == 3
|
[
"floodsystem.stationdata.build_station_list",
"numpy.poly1d",
"numpy.linspace",
"numpy.polyfit"
] |
[((214, 234), 'floodsystem.stationdata.build_station_list', 'build_station_list', ([], {}), '()\n', (232, 234), False, 'from floodsystem.stationdata import build_station_list\n'), ((302, 330), 'numpy.linspace', 'np.linspace', (['(1)', '(1000)', '(100000)'], {}), '(1, 1000, 100000)\n', (313, 330), True, 'import numpy as np\n'), ((401, 420), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(2)'], {}), '(x, y, 2)\n', (411, 420), True, 'import numpy as np\n'), ((432, 450), 'numpy.poly1d', 'np.poly1d', (['p_coeff'], {}), '(p_coeff)\n', (441, 450), True, 'import numpy as np\n')]
|
from CSIKit.csi import CSIFrame
import ast
import numpy as np
class ESP32CSIFrame(CSIFrame):
# https://docs.espressif.com/projects/esp-idf/en/latest/esp32/api-reference/network/esp_wifi.html#_CPPv418wifi_pkt_rx_ctrl_t
__slots__ = ["type", "role", "mac", "rssi", "rate", "sig_mode", "mcs", "bandwidth", "smoothing", "not_sounding",
"aggregation", "stbc", "fec_coding", "sgi", "noise_floor", "ampdu_cnt", "channel", "secondary_channel",
"local_timestamp", "ant", "sig_len", "rx_state", "real_time_set", "real_timestamp", "len", "CSI_DATA"]
def __init__(self, csv_line: list):
self.type = csv_line[0]
self.role = csv_line[1]
self.mac = csv_line[2]
self.rssi = csv_line[3]
self.rate = csv_line[4]
self.sig_mode = csv_line[5]
self.mcs = csv_line[6]
self.bandwidth = 20 if csv_line[7] == "0" else 40
self.smoothing = csv_line[8]
self.not_sounding = csv_line[9]
self.aggregation = csv_line[10]
self.stbc = csv_line[11]
self.fec_coding = csv_line[12]
self.sgi = csv_line[13]
self.noise_floor = csv_line[14]
self.ampdu_cnt = csv_line[15]
self.channel = csv_line[16]
self.secondary_channel = csv_line[17]
self.local_timestamp = csv_line[18]
self.ant = csv_line[19]
self.sig_len = csv_line[20]
self.rx_state = csv_line[21]
self.real_time_set = csv_line[22]
self.real_timestamp = csv_line[23]
self.len = csv_line[24]
string_data = csv_line[25]
self.csi_matrix = ESP32CSIFrame.parse_matrix(string_data)
@staticmethod
def parse_matrix(string_data, bandwidth=20):
array_string = string_data.replace(" ", ", ")
array_string_asarray = ast.literal_eval(array_string)
if bandwidth == 20 and len(array_string_asarray) < 128:
ESP32CSIFrame.fill_missing(array_string_asarray, 128)
elif bandwidth == 40 and len(array_string_asarray) < 256:
ESP32CSIFrame.fill_missing(array_string_asarray, 256)
int8_matrix = np.array(array_string_asarray)
int8_matrix = int8_matrix.reshape(-1, 2)
complex_matrix = int8_matrix.astype(np.float32).view(np.complex64)
return complex_matrix
# Seems some CSI lines are missing a value.
# Very rare, I assume weird dropped behaviour.
# Probably not the best way to fill the gap.
@staticmethod
def fill_missing(array, expected_length):
remainder = expected_length - len(array)
for _ in range(remainder):
array.append(0)
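# Hedged usage sketch (the payload below is made up; real frames carry 128 or 256
# values): parse_matrix() turns the space-separated CSI string from the serial log
# into interleaved int8 pairs viewed as complex numbers, zero-filling short payloads
# to 128 values, i.e. 64 complex subcarrier entries at 20 MHz bandwidth.
if __name__ == "__main__":
    csi = ESP32CSIFrame.parse_matrix("[1 2 3 4]")
    print(csi.shape)  # (64, 1), dtype complex64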
|
[
"ast.literal_eval",
"numpy.array"
] |
[((1808, 1838), 'ast.literal_eval', 'ast.literal_eval', (['array_string'], {}), '(array_string)\n', (1824, 1838), False, 'import ast\n'), ((2125, 2155), 'numpy.array', 'np.array', (['array_string_asarray'], {}), '(array_string_asarray)\n', (2133, 2155), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
debug = True # enable trace
def trace(x):
global debug
if debug: print(x)
trace("loading...")
from itertools import combinations, combinations_with_replacement
from glob import glob
from math import *
import operator
from functools import reduce  # reduce() is not a builtin in Python 3
from os.path import basename
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.linear_model
import sklearn.feature_selection
import datetime
def prec_from_pathname(path):
    if '2k' in path: return 0.002
    elif '5k' in path: return 0.005
    else: raise AssertionError('Unknown field strength: %s' % path)
# ['x', 'y', 'z', 'xx', 'xy', 'xz', 'yy', ...]
def combinatrial_vars(vars_str='xyz', length=3):
term_list = []
for l in range(length):
term_list.extend([''.join(v) for v in combinations_with_replacement(list(vars_str), 1 + l)])
return term_list
# product :: a#* => [a] -> a
def product(xs):
return reduce(operator.mul, xs, 1) # foldl in Haskell
# (XYZ, "xx") -> XX
def term(dataframe, vars_str):
return product(map(lambda x: dataframe[x], list(vars_str)))
# (f(X), Y) -> (max deviation, max%, avg dev, avg%)
def deviation_stat(fX, Y, prec=0.005):
dev = np.abs(fX - Y)
(max_dev, avg_dev) = (dev.max(axis=0), dev.mean(axis=0))
(max_pct, avg_pct) = (max_dev / prec * 100, avg_dev / prec * 100)
return (max_dev, max_pct, avg_dev, avg_pct)
# IO Df
def load_samples(path, cylindrical_axis=True, absolute_axis=True, genvars=[]):
sample_cols = ['x', 'y', 'z', 'Bx', 'By', 'Bz']
df = pd.read_csv(path, sep=' ', names=sample_cols)
if cylindrical_axis:
df['r'] = np.sqrt(df.x**2 + df.y**2)
df['p'] = np.arctan2(df.y, df.x)
df['Bt'] = np.sqrt(df.Bx**2 + df.By**2)
df['Bpsi'] = np.arctan2(df.By, df.Bx) - np.arctan2(df.y, df.x)
df['Br'] = df.Bt * np.cos(df.Bpsi)
df['Bp'] = df.Bt * np.sin(df.Bpsi)
if absolute_axis:
df['X'] = np.abs(df.x)
df['Y'] = np.abs(df.y)
df['Z'] = np.abs(df.z)
for var in genvars:
df[var] = term(df, var)
return df
def choose(vars, df1, df2):
X1 = df1.loc[:, vars].as_matrix()
X2 = df2.loc[:, vars].as_matrix()
return (X1, X2)
# IO ()
def run_analysis_for_all_fields():
sample_set = glob("dat_z22/*2k*.sample.dat")
test_set = glob("dat_z22/*2k*.test.dat")
#print(sample_set, test_set)
assert(len(sample_set) == len(test_set) and len(sample_set) > 0)
result = pd.DataFrame()
for i, sample_file in enumerate(sample_set):
trace("run_analysis('%s', '%s')" % (sample_file, test_set[i]))
df = run_analysis(sample_file, test_set[i])
result = result.append(df, ignore_index=True)
write_header(result)
def run_analysis(sample_file = 'dat_z22/tpc2k-z0-q2.sample.dat',
test_file = 'dat_z22/tpc2k-z0-q2.test.dat'):
global precision, df, test, lr, la, xvars_full, xvars, yvars, X, Y, Xtest, Ytest, ana_result
precision = prec_from_pathname(sample_file)
assert(precision == prec_from_pathname(test_file))
xvars_full = combinatrial_vars('xyz', 3)[3:] # variables except x, y, z upto 3 dims
trace("reading training samples... " + sample_file)
df = load_samples(sample_file, genvars=xvars_full)
trace("reading test samples..." + test_file)
test = load_samples(test_file, genvars=xvars_full)
trace("linear regression fit...")
lr = sklearn.linear_model.LinearRegression()
#ri = sklearn.linear_model.RidgeCV()
#la = sklearn.linear_model.LassoCV()
fs = sklearn.feature_selection.RFE(lr, 1, verbose=0)
#xvars = ['x','y','z','xx','yy','zz','xy','yz','xz','xzz','yzz']
#xvars = ["xx", "yy", "zz", 'x', 'y', 'z', 'xzz', 'yzz']
#xvars = ['xxxr', 'xrrX', 'zzrX', 'p', 'xyrr', 'xzzr', 'xrrY', 'xzrX', 'xxxz', 'xzzr']
#xvars=['x', 'xzz', 'xyz', 'yz', 'yy', 'zz', 'xy', 'xx', 'z', 'y', 'xz', 'yzz']
yvars = ['Bx', 'By', 'Bz']
#yvars = ['Bz']
(Y, Ytest) = choose(yvars, df, test)
#(Y, Ytest) = (df['Bz'], test['Bz'])
xvars = combinatrial_vars('xyz', 3) # use all terms upto 3rd power
(X, Xtest) = choose(xvars, df, test)
for y in yvars:
fs.fit(X, df[y])
res = pd.DataFrame({ "term": xvars, "rank": fs.ranking_ })
trace(y)
trace(res.sort_values(by = "rank"))
#xvars=list(res.sort_values(by="rank")[:26]['term'])
lr.fit(X, Y)
trace(', '.join(yvars) + " = 1 + " + ' + '.join(xvars))
test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision)
#for i in range(len(yvars)):
# arr = [lr.intercept_[i]] + lr.coef_[i]
# arr = [ str(x) for x in arr ]
# print(yvars[i] + " = { " + ', '.join(arr) + " }")
# print("deviation stat [test]: max %.2e (%.1f%%) avg %.2e (%.1f%%)" %
# ( test_dev[0][i], test_dev[1][i], test_dev[2][i], test_dev[3][i] ))
(sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest))
trace("linear regression R^2 [train data]: %.8f" % sample_score)
trace("linear regression R^2 [test data] : %.8f" % test_score)
return pd.DataFrame(
{ "xvars": [xvars],
"yvars": [yvars],
"max_dev": [test_dev[0]],
"max%": [test_dev[1]],
"avg_dev": [test_dev[2]],
"avg%": [test_dev[3]],
"sample_score": [sample_score],
"score": [test_score],
"coeffs": [lr.coef_],
"intercept": [lr.intercept_],
"sample_file": [sample_file],
"test_file": [test_file],
"precision": [precision],
"volume_id": [volume_id_from_path(sample_file)]
})
def volume_id_from_path(path):
return basename(path)\
.replace('.sample.dat', '')\
.replace('-', '_')
def get_location_by_volume_id(id):
if 'its' in id: r_bin = 0
if 'tpc' in id: r_bin = 1
if 'tof' in id: r_bin = 2
if 'tofext' in id: r_bin = 3
if 'cal' in id: r_bin = 4
z_bin = int(id.split('_')[1][1:]) # "tofext2k_z0_q4" -> 0
if 'q1' in id: quadrant = 0
if 'q2' in id: quadrant = 1
if 'q3' in id: quadrant = 2
if 'q4' in id: quadrant = 3
return r_bin, z_bin, quadrant
def write_header(result):
#result.to_csv("magfield_params.csv")
#result.to_html("magfield_params.html")
print("# This file was generated from sysid.py at " + str(datetime.datetime.today()))
print("# " + ', '.join(result.iloc[0].yvars) + " = 1 + " + ' + '.join(result.iloc[0].xvars))
print("# barrel r: 0 < its < 80 < tpc < 250 < tof < 400 < tofext < 423 < cal < 500")
print("# barrel z: -550 < z < 550")
print("# phi: 0 < q1 < 0.5pi < q2 < pi < q3 < 1.5pi < q4 < 2pi")
print("# header: Rbin Zbin Quadrant Nval_per_compoment(=20)")
print("# data: Nval_per_compoment x floats")
#print("# R^2: coefficient of determination in multiple linear regression. [0,1]")
print("")
for index, row in result.iterrows():
#print("// ** %s - R^2 %s" % (row.volume_id, row.score))
print("#" + row.volume_id)
r_bin, z_bin, quadrant = get_location_by_volume_id(row.volume_id)
print("%s %s %s 20" % (r_bin, z_bin, quadrant))
for i, yvar in enumerate(row.yvars):
name = row.volume_id #+ '_' + yvar.lower()
print("# precision: tgt %.2e max %.2e (%.1f%%) avg %.2e (%.1f%%)" %
(row['precision'], row['max_dev'][i], row['max%'][i], row['avg_dev'][i], row['avg%'][i]))
coef = [row['intercept'][i]] + list(row['coeffs'][i])
arr = [ "%.5e" % x for x in coef ]
body = ' '.join(arr)
#decl = "const double[] %s = { %s };\n" % (name, body)
#print(decl)
print(body)
print("")
#write_header(run_analysis())
run_analysis_for_all_fields()
#for i in range(10):
# for xvars in combinations(xvars_full, i+1):
#(X, Xtest) = choose(xvars, df, test)
#lr.fit(X, Y)
#ri.fit(X, Y)
#la.fit(X, Y)
#fs.fit(X, Y)
#print xvars
#(sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest))
#print("linear R^2[sample] %.8f" % sample_score)
#print("linear R^2[test] %.8f" % test_score)
#(sample_score2, test_score2) = (la.score(X, Y), la.score(Xtest, Ytest))
#print("lasso R^2[sample] %.8f" % sample_score2)
#print("lasso R^2[test] %.8f" % test_score2)
#print(la.coef_)
#for i in range(len(yvars)):
# print(yvars[i])
# print(pd.DataFrame({"Name": xvars, "Params": lr.coef_[i]}).sort_values(by='Params'))
# print("+ %e" % lr.intercept_[i])
#sample_dev = deviation_stat(lr.predict(X), Y, prec=precision)
#test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision)
#test_dev2 = deviation_stat(la.predict(Xtest), Ytest, prec=precision)
#print("[sample] max %.2e (%.1f%%) avg %.2e (%.1f%%)" % sample_dev)
#print("[test] max %.2e (%.1f%%) avg %.2e (%.1f%%)" % test_dev )
#print("lasso [test] max %.2e (%.1f%%) avg %.2e (%.1f%%)" % test_dev2 )
|
[
"pandas.DataFrame",
"numpy.abs",
"numpy.arctan2",
"datetime.datetime.today",
"os.path.basename",
"pandas.read_csv",
"numpy.sin",
"numpy.cos",
"glob.glob",
"numpy.sqrt"
] |
[((1147, 1161), 'numpy.abs', 'np.abs', (['(fX - Y)'], {}), '(fX - Y)\n', (1153, 1161), True, 'import numpy as np\n'), ((1475, 1520), 'pandas.read_csv', 'pd.read_csv', (['path'], {'sep': '""" """', 'names': 'sample_cols'}), "(path, sep=' ', names=sample_cols)\n", (1486, 1520), True, 'import pandas as pd\n'), ((2149, 2180), 'glob.glob', 'glob', (['"""dat_z22/*2k*.sample.dat"""'], {}), "('dat_z22/*2k*.sample.dat')\n", (2153, 2180), False, 'from glob import glob\n'), ((2195, 2224), 'glob.glob', 'glob', (['"""dat_z22/*2k*.test.dat"""'], {}), "('dat_z22/*2k*.test.dat')\n", (2199, 2224), False, 'from glob import glob\n'), ((2332, 2346), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2344, 2346), True, 'import pandas as pd\n'), ((1559, 1589), 'numpy.sqrt', 'np.sqrt', (['(df.x ** 2 + df.y ** 2)'], {}), '(df.x ** 2 + df.y ** 2)\n', (1566, 1589), True, 'import numpy as np\n'), ((1601, 1623), 'numpy.arctan2', 'np.arctan2', (['df.y', 'df.x'], {}), '(df.y, df.x)\n', (1611, 1623), True, 'import numpy as np\n'), ((1639, 1671), 'numpy.sqrt', 'np.sqrt', (['(df.Bx ** 2 + df.By ** 2)'], {}), '(df.Bx ** 2 + df.By ** 2)\n', (1646, 1671), True, 'import numpy as np\n'), ((1846, 1858), 'numpy.abs', 'np.abs', (['df.x'], {}), '(df.x)\n', (1852, 1858), True, 'import numpy as np\n'), ((1874, 1886), 'numpy.abs', 'np.abs', (['df.y'], {}), '(df.y)\n', (1880, 1886), True, 'import numpy as np\n'), ((1902, 1914), 'numpy.abs', 'np.abs', (['df.z'], {}), '(df.z)\n', (1908, 1914), True, 'import numpy as np\n'), ((3965, 4015), 'pandas.DataFrame', 'pd.DataFrame', (["{'term': xvars, 'rank': fs.ranking_}"], {}), "({'term': xvars, 'rank': fs.ranking_})\n", (3977, 4015), True, 'import pandas as pd\n'), ((1683, 1707), 'numpy.arctan2', 'np.arctan2', (['df.By', 'df.Bx'], {}), '(df.By, df.Bx)\n', (1693, 1707), True, 'import numpy as np\n'), ((1710, 1732), 'numpy.arctan2', 'np.arctan2', (['df.y', 'df.x'], {}), '(df.y, df.x)\n', (1720, 1732), True, 'import numpy as np\n'), ((1756, 1771), 'numpy.cos', 'np.cos', (['df.Bpsi'], {}), '(df.Bpsi)\n', (1762, 1771), True, 'import numpy as np\n'), ((1795, 1810), 'numpy.sin', 'np.sin', (['df.Bpsi'], {}), '(df.Bpsi)\n', (1801, 1810), True, 'import numpy as np\n'), ((5916, 5941), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (5939, 5941), False, 'import datetime\n'), ((5279, 5293), 'os.path.basename', 'basename', (['path'], {}), '(path)\n', (5287, 5293), False, 'from os.path import basename\n')]
|
import torch.nn as nn
import torch
import numpy as np
import cv2 as cv
def calculate_EPR(model): # TODO: try computing the effective receptive field by loading pretrained weights
for module in model.modules():
try:
nn.init.constant_(module.weight, 0.05)
nn.init.zeros_(module.bias)
nn.init.zeros_(module.running_mean)
nn.init.ones_(module.running_var)
except Exception as e:
pass
if type(module) is nn.BatchNorm2d:
module.eval()
input = torch.ones(1, 3, 640, 640, requires_grad= True)
model.zero_grad()
features = model(input)
for i in range(len(features)):
# if i != len(features)-1:
# continue
x = features[i]
#g_x = torch.zeros(size=[1, 1, x.shape[2], x.shape[3]])
g_x = torch.zeros_like(x)
h, w = g_x.shape[2]//2, g_x.shape[3]//2
g_x[:, :, h, w] = 1
x.backward(g_x, retain_graph = True)
# x = torch.mean(x, 1, keepdim=True)
# fake_fp = x * g_x[0, 0, ...]
# fake_loss = torch.mean(fake_fp)
# fake_loss.backward(retain_graph=True)
show(input, i)
model.zero_grad()
input.grad.data.zero_()
cv.waitKey(2000)
cv.waitKey(0)
def cal_rf_wh(grad_input):
binary_map: np.ndarray = (grad_input[:, :] > 0.0)
x_cs: np.ndarray = binary_map.sum(-1) >= 1
y_cs: np.ndarray = binary_map.sum(0) >= 1
width = x_cs.sum()
height = y_cs.sum()
return (width, height)
def show(input, i):
grad_input = np.abs(input.grad.data.numpy())
grad_input = grad_input / np.max(grad_input)
grad_input = grad_input.mean(0).mean(0)
    # effective receptive field: 0.75 - 0.85
#grad_input = np.where(grad_input > 0.85,1,0)
#grad_input_ = np.where(grad_input > 0.75, 1, grad_input)
# effient_values = grad_input > 0.0
# samll_effient_values = grad_input <= 0.2
# grad_input[np.logical_and(effient_values, samll_effient_values)] = 0.1
#grad_input = grad_input * 100
width, height = cal_rf_wh(grad_input)
print("width:", width, "height:", height)
grad_input_ERF = np.where(grad_input>0.01, 1, 0)
width, height = cal_rf_wh(grad_input_ERF)
print("ERF_width:", width, "ERF_height:", height)
np.expand_dims(grad_input, axis=2).repeat(3, axis=2)
grad_input = (grad_input * 255).astype(np.uint8)
cv.imshow("receip_field"+str(i), grad_input)
#cv.imwrite("./receip_field"+str(i)+".png", grad_input)
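# Hedged usage sketch (the backbone below is a hypothetical stand-in, not from the
# original project): calculate_EPR() expects a module whose forward() returns a list
# of feature maps, so any detector backbone/neck following that convention can be passed.
if __name__ == "__main__":
    class ToyBackbone(nn.Module):
        def __init__(self):
            super().__init__()
            self.body = nn.Sequential(
                nn.Conv2d(3, 8, 3, stride=2, padding=1),
                nn.BatchNorm2d(8),
                nn.ReLU(),
                nn.Conv2d(8, 8, 3, stride=2, padding=1),
            )
        def forward(self, x):
            # return a list so calculate_EPR() can iterate over the feature maps
            return [self.body(x)]
    calculate_EPR(ToyBackbone())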
|
[
"torch.ones",
"torch.zeros_like",
"cv2.waitKey",
"numpy.expand_dims",
"torch.nn.init.zeros_",
"numpy.max",
"numpy.where",
"torch.nn.init.constant_",
"torch.nn.init.ones_"
] |
[((408, 454), 'torch.ones', 'torch.ones', (['(1)', '(3)', '(640)', '(640)'], {'requires_grad': '(True)'}), '(1, 3, 640, 640, requires_grad=True)\n', (418, 454), False, 'import torch\n'), ((1017, 1030), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (1027, 1030), True, 'import cv2 as cv\n'), ((1837, 1870), 'numpy.where', 'np.where', (['(grad_input > 0.01)', '(1)', '(0)'], {}), '(grad_input > 0.01, 1, 0)\n', (1845, 1870), True, 'import numpy as np\n'), ((660, 679), 'torch.zeros_like', 'torch.zeros_like', (['x'], {}), '(x)\n', (676, 679), False, 'import torch\n'), ((999, 1015), 'cv2.waitKey', 'cv.waitKey', (['(2000)'], {}), '(2000)\n', (1009, 1015), True, 'import cv2 as cv\n'), ((1358, 1376), 'numpy.max', 'np.max', (['grad_input'], {}), '(grad_input)\n', (1364, 1376), True, 'import numpy as np\n'), ((164, 202), 'torch.nn.init.constant_', 'nn.init.constant_', (['module.weight', '(0.05)'], {}), '(module.weight, 0.05)\n', (181, 202), True, 'import torch.nn as nn\n'), ((206, 233), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['module.bias'], {}), '(module.bias)\n', (220, 233), True, 'import torch.nn as nn\n'), ((237, 272), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['module.running_mean'], {}), '(module.running_mean)\n', (251, 272), True, 'import torch.nn as nn\n'), ((276, 309), 'torch.nn.init.ones_', 'nn.init.ones_', (['module.running_var'], {}), '(module.running_var)\n', (289, 309), True, 'import torch.nn as nn\n'), ((1967, 2001), 'numpy.expand_dims', 'np.expand_dims', (['grad_input'], {'axis': '(2)'}), '(grad_input, axis=2)\n', (1981, 2001), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import numpy as np
import numpy.polynomial.polynomial as nppol
class Metawalk:
def __init__(self,
time_intervals=None,
nodes=None,
):
"""
        A basic constructor for a ``Metawalk`` object
        :param time_intervals: A list of couples (pairs) of floats giving the time interval of each traversed meta-edge
        :param nodes: A list of nodes (first node = source; last node = destination)
"""
self.time_intervals = time_intervals
self.nodes = nodes
def add_link(self, l, t):
self.time_intervals.append(t)
self.nodes.append(l)
def length(self):
return len(self.time_intervals)
def duration(self):
return self.time_intervals[-1][1] - self.time_intervals[0][0]
def clone(self):
return Metawalk(self.time_intervals[:],self.nodes[:])
def __hash__(self):
m = tuple(self.nodes)
n = tuple(self.time_intervals)
return hash((m,n))
def __str__(self):
s = ""
for i in range(0,self.length()):
s += " "
s += str(self.nodes[i])
s += " "
s += str(self.time_intervals[i])
s += " "
s += str(self.nodes[i+1])
s += " | volume = "
s += str(self.volume())
return s
def __repr__(self):
return self.__str__()
def __eq__(self, m):
if m == None:
return False
if m.length() != self.length():
return False
if (m.nodes == self.nodes) and (m.time_intervals == self.time_intervals):
return True
return False
def is_instantenous(self):
        # we check from the last interval because the algorithm that uses this method adds new links to the end of the metawalk
b = True
if len(self.time_intervals) == 1:
return True
x = self.time_intervals[-1]
for i in range(-2,-len(self.time_intervals)-1,-1):
if self.time_intervals[i] != x:
return False
return True
def update_following_last(self,b):
#sometimes when adding a metaedge the metawalk has to be cut at some points because some paths are no longer valid.
if b == 0:
#last edge added ends at same time but starts before
self.time_intervals[-1][0] = self.time_intervals[-2][0]
else:
end = self.time_intervals[-1][1]
# last edge starts at same time but ends before
for i in range(-2,-len(self.time_intervals)-1,-1):
if self.time_intervals[i][1] > end:
self.time_intervals[i][1] = end
def volume(self):
"""Normally the link are either exactly the same or disjoint, need to check for inclusion, exclusion of intervals """
time_intervals = self.time_intervals[:]
time_intervals.append([-1,-1])
res = [0 for i in range(len(time_intervals)+ 1)]
last_x,last_y = time_intervals[0]
b = True
if len(time_intervals)==1:
last_x,last_y = time_intervals[0]
if last_x != last_y:
b = False
res[1] = np.around((last_y - last_x), decimals=2)
else:
if last_x == last_y:
degree = 0
else:
degree = 1
for i in range(1,len(time_intervals)):
if last_x != last_y:
b = False
x,y = time_intervals[i]
            # it should be enough to check one bound (no overlap between links in fragmented link streams), but maybe it is worth generalising it to work in all cases later on; update: false, e.g. [1,2],[1,1]
if x == last_x and y == last_y and degree > 0:
degree += 1
else:
res[degree] += np.around((last_y - last_x)/np.math.factorial(degree), decimals=2)
if x != y:
degree = 1
last_x = x
last_y = y
if b == True:
res[0] = 1
res = [np.around(e,decimals=2) for e in res]
return nppol.Polynomial(res)
def passes_through(self,t,v):
if v in self.nodes:
indice = self.nodes.index(v)
else:
return False
if indice == 0:
if t < self.time_intervals[0][0]:
return True
else:
return False
elif indice == len(self.nodes) -1:
if t >= self.time_intervals[-1][1]:
return True
else:
return False
else:
if t >= self.time_intervals[indice-1][1] and t < self.time_intervals[indice][0]:
return True
else:
return False
def passes_through_whole_interval(self,v,t1,t2):
return False
def passes_through_somewhere_interval(self,v,t1,t2):
#t1 included, but t2 not
return False
def add_interval_betweenness(self,t_max,interval_size):
res = []
for i in range(0,len(self.time_intervals)-1):
left_bound = self.time_intervals[i][1]
right_bound = self.time_intervals[i+1][0]
nb_interval_contributes_to = (left_bound - right_bound) // interval_size
fst_interval_left_bound = left_bound // interval_size
for j in range(1,nb_interval_contributes_to+1):
res.append((self.nodes[i+1], fst_interval_left_bound, fst_interval_left_bound + j * interval_size ))
fst_interval_left_bound = fst_interval_left_bound + j * interval_size
return res
def fastest_meta_walk(self):
if self.time_intervals[0] == self.time_intervals[-1]:
return self.clone()
else:
nodes = self.nodes[:]
time_intervals = self.time_intervals[:]
time_intervals[0] = (time_intervals[0][1],time_intervals[0][1])
time_intervals[-1] = (time_intervals[-1][0],time_intervals[-1][0])
for i in range(1,len(time_intervals)):
if time_intervals[i][0] < time_intervals[0][0]:
time_intervals[i] = (time_intervals[0][0],time_intervals[i][1])
if time_intervals[i][1] > time_intervals[-1][1]:
time_intervals[i] = (time_intervals[i][0],time_intervals[-1][1])
return Metawalk(time_intervals,nodes)
def first_time(self):
return self.time_intervals[0][0]
def last_departure(self):
return self.time_intervals[0][1]
def first_arrival(self):
return self.time_intervals[-1][0]
def first_node(self):
return self.nodes[0]
def last_node(self):
return self.nodes[-1]
def plot(self, S, color="#18036f",
markersize=10, dag=False, fig=None):
"""
Draw a path on the ``StreamGraph`` object *S*
:param S:
:param color:
:param markersize:
:param dag:
:param fig:
:return:
"""
if fig is None:
fig, ax = plt.subplots()
else:
ax = plt.gca()
if dag:
dag = S.condensation_dag()
dag.plot(node_to_label=S.node_to_label, ax=ax)
else:
S.plot(ax=ax)
# Plot Source
id_source = S.nodes.index(self.nodes[0])
plt.plot([self.time_intervals[0]], [id_source], color=color,
marker='o', alpha=0.8, markersize=markersize)
# Plot Destination
id_destination = S.nodes.index(self.nodes[-1])
plt.plot([self.time_intervals[-1]], [id_destination], color=color,
marker='o', alpha=0.8, markersize=markersize)
# Plot Path
for i in range(self.length()):
l = self.nodes[i]
l2 = self.nodes[i+1]
t = self.time_intervals[i][0]
t2 = self.time_intervals[i][1]
id1 = S.nodes.index(l)
id2 = S.nodes.index(l2)
idmax = max(id1, id2)
idmin = min(id1, id2)
# verts = [
# (idmin, t), # left, bottom
# (idmax, t), # left, top
# (idmax, t2), # right, top
# (idmin, t2), # right, bottom
# ]
plt.vlines(t, ymin=idmin, ymax=idmax, linewidth=6, alpha=0.8, color=color)
plt.vlines(t2, ymin=idmin, ymax=idmax, linewidth=6, alpha=0.8, color=color)
if i != self.length() - 1:
plt.hlines(id2, xmin=t, xmax=t2,
linewidth=4, alpha=0.8, color=color)
plt.hlines(id1, xmin=t, xmax=t2,
linewidth=4, alpha=0.8, color=color)
# Plot marker
# if t != self.times[i + 1]:
# plt.plot([t], [id2], color=color,
# marker='>', alpha=0.8, markersize=markersize)
# if i != 0 and (t, id1) != (self.times[0], id_source) != (self.times[-1], id_destination):
# # Plot marker
# if id1 == idmin:
# plt.plot([t], [id1], color=color,
# marker='^', alpha=0.8, markersize=markersize)
# else:
# plt.plot([t], [id1], color=color,
# marker='v', alpha=0.8, markersize=markersize)
plt.tight_layout()
return fig
def check_coherence(self, S):
for i in range(self.length()):
l = (self.nodes[i],self.nodes[i+1])
inter = self.time_intervals[i]
l_ = (self.nodes[i+1],self.nodes[i]) # Inverse the order of the interval
if l not in S.links and l_ not in S.links:
raise ValueError("Link : " + str(l) + " does not exists in the Stream Graph !")
else:
t = inter[0]
t2 = inter[1]
if l in S.links:
id_link = S.links.index(l)
else:
id_link = S.links.index(l_)
is_present = False
for lt0, lt1 in zip(S.link_presence[id_link][::2], S.link_presence[id_link][1::2]):
if (lt0 <= t <= lt1) and (lt0 <= t2 <= lt1) and (t <= t2):
is_present = True
if not is_present:
raise ValueError("Link : " + str(l) + " does not exists at time " + str(t) + " !")
print("Check Path Coherence ok !")
return
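if __name__ == "__main__":
    # Hedged usage sketch (the walk below is made up for illustration): a metawalk
    # u -[1,2]-> v -[3,4]-> w, i.e. two meta-edges joined at node v.
    mw = Metawalk(time_intervals=[[1, 2], [3, 4]], nodes=['u', 'v', 'w'])
    print(mw.length())    # 2 meta-edges
    print(mw.duration())  # 4 - 1 = 3
    print(mw)             # string form includes the volume polynomial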
|
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.plot",
"numpy.polynomial.polynomial.Polynomial",
"matplotlib.pyplot.vlines",
"numpy.around",
"numpy.math.factorial",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.hlines",
"matplotlib.pyplot.subplots"
] |
[((4201, 4222), 'numpy.polynomial.polynomial.Polynomial', 'nppol.Polynomial', (['res'], {}), '(res)\n', (4217, 4222), True, 'import numpy.polynomial.polynomial as nppol\n'), ((7457, 7567), 'matplotlib.pyplot.plot', 'plt.plot', (['[self.time_intervals[0]]', '[id_source]'], {'color': 'color', 'marker': '"""o"""', 'alpha': '(0.8)', 'markersize': 'markersize'}), "([self.time_intervals[0]], [id_source], color=color, marker='o',\n alpha=0.8, markersize=markersize)\n", (7465, 7567), True, 'import matplotlib.pyplot as plt\n'), ((7671, 7788), 'matplotlib.pyplot.plot', 'plt.plot', (['[self.time_intervals[-1]]', '[id_destination]'], {'color': 'color', 'marker': '"""o"""', 'alpha': '(0.8)', 'markersize': 'markersize'}), "([self.time_intervals[-1]], [id_destination], color=color, marker=\n 'o', alpha=0.8, markersize=markersize)\n", (7679, 7788), True, 'import matplotlib.pyplot as plt\n'), ((9491, 9509), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9507, 9509), True, 'import matplotlib.pyplot as plt\n'), ((4148, 4172), 'numpy.around', 'np.around', (['e'], {'decimals': '(2)'}), '(e, decimals=2)\n', (4157, 4172), True, 'import numpy as np\n'), ((7166, 7180), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (7178, 7180), True, 'import matplotlib.pyplot as plt\n'), ((7212, 7221), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7219, 7221), True, 'import matplotlib.pyplot as plt\n'), ((8386, 8460), 'matplotlib.pyplot.vlines', 'plt.vlines', (['t'], {'ymin': 'idmin', 'ymax': 'idmax', 'linewidth': '(6)', 'alpha': '(0.8)', 'color': 'color'}), '(t, ymin=idmin, ymax=idmax, linewidth=6, alpha=0.8, color=color)\n', (8396, 8460), True, 'import matplotlib.pyplot as plt\n'), ((8473, 8548), 'matplotlib.pyplot.vlines', 'plt.vlines', (['t2'], {'ymin': 'idmin', 'ymax': 'idmax', 'linewidth': '(6)', 'alpha': '(0.8)', 'color': 'color'}), '(t2, ymin=idmin, ymax=idmax, linewidth=6, alpha=0.8, color=color)\n', (8483, 8548), True, 'import matplotlib.pyplot as plt\n'), ((3219, 3257), 'numpy.around', 'np.around', (['(last_y - last_x)'], {'decimals': '(2)'}), '(last_y - last_x, decimals=2)\n', (3228, 3257), True, 'import numpy as np\n'), ((8604, 8673), 'matplotlib.pyplot.hlines', 'plt.hlines', (['id2'], {'xmin': 't', 'xmax': 't2', 'linewidth': '(4)', 'alpha': '(0.8)', 'color': 'color'}), '(id2, xmin=t, xmax=t2, linewidth=4, alpha=0.8, color=color)\n', (8614, 8673), True, 'import matplotlib.pyplot as plt\n'), ((8717, 8786), 'matplotlib.pyplot.hlines', 'plt.hlines', (['id1'], {'xmin': 't', 'xmax': 't2', 'linewidth': '(4)', 'alpha': '(0.8)', 'color': 'color'}), '(id1, xmin=t, xmax=t2, linewidth=4, alpha=0.8, color=color)\n', (8727, 8786), True, 'import matplotlib.pyplot as plt\n'), ((3920, 3945), 'numpy.math.factorial', 'np.math.factorial', (['degree'], {}), '(degree)\n', (3937, 3945), True, 'import numpy as np\n')]
|
from setuptools import setup, find_packages
from Cython.Distutils.extension import Extension
from Cython.Build import cythonize, build_ext
import numpy
import os
from glob import glob
"""
ext_modules = [Extension("traj_dist.cydist.basic_geographical", ["traj_dist/cydist/basic_geographical.pyx"]),
Extension("traj_dist.cydist.basic_euclidean", ["traj_dist/cydist/basic_euclidean.pyx"]),
Extension("traj_dist.cydist.sspd", ["traj_dist/cydist/sspd.pyx"]),
Extension("traj_dist.cydist.dtw", ["traj_dist/cydist/dtw.pyx"]),
Extension("traj_dist.cydist.lcss", ["traj_dist/cydist/lcss.pyx"]),
Extension("traj_dist.cydist.hausdorff", ["traj_dist/cydist/hausdorff.pyx"]),
Extension("traj_dist.cydist.discret_frechet", ["traj_dist/cydist/discret_frechet.pyx"]),
Extension("traj_dist.cydist.frechet", ["traj_dist/cydist/frechet.pyx"]),
Extension("traj_dist.cydist.segment_distance", ["traj_dist/cydist/segment_distance.pyx"]),
Extension("traj_dist.cydist.sowd", ["traj_dist/cydist/sowd.pyx"]),
Extension("traj_dist.cydist.erp", ["traj_dist/cydist/erp.pyx"]),
Extension("traj_dist.cydist.edr", ["traj_dist/cydist/edr.pyx"])]
"""
sources = glob('traj_dist/cydist/*.pyx')
extensions = [
Extension(filename.split('.')[0].replace(os.path.sep, '.'),
sources=[filename],
)
for filename in sources]
setup(
name="trajectory_distance_py3",
version="1.0.1",
author="<NAME>",
author_email="<EMAIL>",
cmdclass={'build_ext': build_ext},
# ext_modules=ext_modules,
ext_modules=extensions,
include_dirs=[numpy.get_include()],
install_requires=["numpy>=1.14.0", "cython>=0.27.3", "shapely>=1.6.3", "geohash2>=1.1", 'pandas>=0.20.3',
'scipy>=0.19.1'],
description="Distance to compare 2D-trajectories in Cython",
packages=find_packages()
)
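# Typical build/installation commands (assumption: run from the repository root,
# with Cython and a C compiler available):
#   python setup.py build_ext --inplace   # compile the .pyx extensions in place
#   pip install .                          # or build and install the package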
|
[
"numpy.get_include",
"setuptools.find_packages",
"glob.glob"
] |
[((1294, 1324), 'glob.glob', 'glob', (['"""traj_dist/cydist/*.pyx"""'], {}), "('traj_dist/cydist/*.pyx')\n", (1298, 1324), False, 'from glob import glob\n'), ((1950, 1965), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1963, 1965), False, 'from setuptools import setup, find_packages\n'), ((1700, 1719), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (1717, 1719), False, 'import numpy\n')]
|
import numpy as np
from numba import guvectorize
from pygama.dsp.errors import DSPFatal
@guvectorize(["void(float32[:], float32, float32, float32, float32[:])",
"void(float64[:], float64, float64, float64, float64[:])"],
"(n),(),(),()->()", nopython=True, cache=True)
def time_point_thresh(w_in, a_threshold, t_start, walk_forward, t_out):
"""
Find the index where the waveform value crosses the threshold,
walking either forward or backward from the starting index.
Parameters
----------
w_in : array-like
The input waveform
a_threshold : float
The threshold value
t_start : int
The starting index
walk_forward: int
The backward (0) or forward (1) search direction
t_out : float
The index where the waveform value crosses the threshold
Processing Chain Example
------------------------
"tp_0": {
"function": "time_point_thresh",
"module": "pygama.dsp.processors",
"args": ["wf_atrap", "bl_std", "tp_start", 0, "tp_0"],
"unit": "ns",
"prereqs": ["wf_atrap", "bl_std", "tp_start"]
}
"""
t_out[0] = np.nan
if np.isnan(w_in).any() or np.isnan(a_threshold) or np.isnan(t_start) or np.isnan(walk_forward):
return
if np.floor(t_start) != t_start:
raise DSPFatal('The starting index must be an integer')
if np.floor(walk_forward) != walk_forward:
raise DSPFatal('The search direction must be an integer')
if int(t_start) < 0 or int(t_start) >= len(w_in):
raise DSPFatal('The starting index is out of range')
if int(walk_forward) == 1:
for i in range(int(t_start), len(w_in) - 1, 1):
if w_in[i] <= a_threshold < w_in[i+1]:
t_out[0] = i
return
else:
for i in range(int(t_start), 1, -1):
if w_in[i-1] < a_threshold <= w_in[i]:
t_out[0] = i
return
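# Minimal usage sketch of the gufunc outside a processing chain (assumption:
# it is called directly on a NumPy array and the output is allocated for us):
#   w = np.array([0., 1., 2., 3., 4.])
#   tp = time_point_thresh(w, 2.5, 0, 1)   # walk forward from index 0
#   # tp == 2.0: index of the last sample at or below the threshold before crossing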
|
[
"numpy.floor",
"pygama.dsp.errors.DSPFatal",
"numba.guvectorize",
"numpy.isnan"
] |
[((90, 276), 'numba.guvectorize', 'guvectorize', (["['void(float32[:], float32, float32, float32, float32[:])',\n 'void(float64[:], float64, float64, float64, float64[:])']", '"""(n),(),(),()->()"""'], {'nopython': '(True)', 'cache': '(True)'}), "(['void(float32[:], float32, float32, float32, float32[:])',\n 'void(float64[:], float64, float64, float64, float64[:])'],\n '(n),(),(),()->()', nopython=True, cache=True)\n", (101, 276), False, 'from numba import guvectorize\n'), ((1277, 1298), 'numpy.isnan', 'np.isnan', (['a_threshold'], {}), '(a_threshold)\n', (1285, 1298), True, 'import numpy as np\n'), ((1302, 1319), 'numpy.isnan', 'np.isnan', (['t_start'], {}), '(t_start)\n', (1310, 1319), True, 'import numpy as np\n'), ((1323, 1345), 'numpy.isnan', 'np.isnan', (['walk_forward'], {}), '(walk_forward)\n', (1331, 1345), True, 'import numpy as np\n'), ((1374, 1391), 'numpy.floor', 'np.floor', (['t_start'], {}), '(t_start)\n', (1382, 1391), True, 'import numpy as np\n'), ((1418, 1467), 'pygama.dsp.errors.DSPFatal', 'DSPFatal', (['"""The starting index must be an integer"""'], {}), "('The starting index must be an integer')\n", (1426, 1467), False, 'from pygama.dsp.errors import DSPFatal\n'), ((1476, 1498), 'numpy.floor', 'np.floor', (['walk_forward'], {}), '(walk_forward)\n', (1484, 1498), True, 'import numpy as np\n'), ((1530, 1581), 'pygama.dsp.errors.DSPFatal', 'DSPFatal', (['"""The search direction must be an integer"""'], {}), "('The search direction must be an integer')\n", (1538, 1581), False, 'from pygama.dsp.errors import DSPFatal\n'), ((1651, 1697), 'pygama.dsp.errors.DSPFatal', 'DSPFatal', (['"""The starting index is out of range"""'], {}), "('The starting index is out of range')\n", (1659, 1697), False, 'from pygama.dsp.errors import DSPFatal\n'), ((1253, 1267), 'numpy.isnan', 'np.isnan', (['w_in'], {}), '(w_in)\n', (1261, 1267), True, 'import numpy as np\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Redundant misc. functions to be eventually removed from AC_tools.
"""
import os
import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
from pandas import DataFrame
# time
import time
import datetime as datetime
# math
from math import radians, sin, cos, asin, sqrt, pi, atan2
def get_arr_edge_indices(arr, res='4x5', extra_points_point_on_edge=None,
verbose=True, debug=False):
"""
    Find the points in a lon, lat (2D) grid where the value differs from the
    neighbouring box (e.g. the edge of a region)
"""
if verbose:
print(('get_arr_edge_indices for arr of shape: ', arr.shape))
# initialise variables
lon_c, lat_c, NIU = get_latlonalt4res(res=res, centre=True)
lon_e, lat_e, NIU = get_latlonalt4res(res=res, centre=False)
lon_diff = lon_e[-5]-lon_e[-6]
lat_diff = lat_e[-5]-lat_e[-6]
nn, n, = 0, 0
last_lat_box = arr[nn, n]
coords = []
last_lon_box = arr[nn, n]
need_lon_outer_edge, need_lat_outer_edge = False, False
if debug:
print((lon_e, lat_e))
# ---- Loop X dimension ( lon )
for nn, lon_ in enumerate(lon_c):
# Loop Y dimension ( lat ) and store edges
for n, lat_ in enumerate(lat_c):
if debug:
print((arr[nn, n], last_lat_box, last_lon_box,
arr[nn, n] == last_lat_box, arr[nn, n] == last_lon_box))
if arr[nn, n] != last_lat_box:
                # If 1st lat, select bottom of box
point_lon = lon_e[nn]+lon_diff/2
if need_lat_outer_edge:
point_lat = lat_e[n+1]
else:
point_lat = lat_e[n]
need_lat_outer_edge = True
need_lat_outer_edge = False
                # Add mid point to coordinates list
if isinstance(extra_points_point_on_edge, type(None)):
mid_point = [point_lon, point_lat]
coords += [mid_point]
# Add given number of points along edge
else:
coords += [[lon_e[nn]+(lon_diff*i), point_lat] for i in
np.linspace(0, 1, extra_points_point_on_edge,
endpoint=True)]
            # Temporarily save the previous box's value
last_lat_box = arr[nn, n]
# ---- Loop Y dimension ( lat )
for n, lat_ in enumerate(lat_c):
if debug:
print((arr[nn, n], last_lat_box, last_lon_box,
arr[nn, n] == last_lat_box, arr[nn, n] == last_lon_box))
# Loop X dimension ( lon ) and store edges
for nn, lon_ in enumerate(lon_c):
            # If the value changes, add the edge point to the list
if arr[nn, n] != last_lon_box:
point_lat = lat_e[n]+lat_diff/2
# Make sure we select the edge lon
if need_lon_outer_edge:
point_lon = lon_e[nn+1]
else:
point_lon = lon_e[nn]
need_lon_outer_edge = True
need_lon_outer_edge = False
# Add mid point to coordinates list
if isinstance(extra_points_point_on_edge, type(None)):
mid_point = [point_lon, point_lat]
coords += [mid_point]
# Add given number of points along edge
else:
coords += [[point_lon, lat_e[n]+(lat_diff*i)] for i in
np.linspace(0, 1, extra_points_point_on_edge,
endpoint=True)]
            # Temporarily save the previous box's value
last_lon_box = arr[nn, n]
return coords
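# Minimal usage sketch (assumption: on the '4x5' grid get_latlonalt4res returns a
# 72 (lon) x 46 (lat) grid, and `mask` flags a region whose edge we want):
#   mask = np.zeros((72, 46))
#   mask[20:30, 15:25] = 1
#   edge_coords = get_arr_edge_indices(mask, res='4x5')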
def split_data_by_days(data=None, dates=None, day_list=None,
verbose=False, debug=False):
"""
Takes a list of datetimes and data and returns a list of data and
the bins ( days )
"""
if verbose:
print('split_data_by_days called')
# Create DataFrame of Data and dates
df = DataFrame(data, index=dates, columns=['data'])
# Add list of dates ( just year, month, day ) <= this is mappable, update?
df['days'] = [datetime.datetime(*i.timetuple()[:3]) for i in dates]
if debug:
print(df)
# Get list of unique days
if isinstance(day_list, type(None)):
day_list = sorted(set(df['days'].values))
# Loop unique days and select data on these days
data4days = []
for day in day_list:
print((day, df[df['days'] == day]))
data4days += [df['data'][df['days'] == day]]
# Just return the values ( i.e. not pandas array )
data4days = [i.values.astype(float) for i in data4days]
print([type(i) for i in data4days])
# print data4days[0]
# sys.exit()
if debug:
print(('returning data for {} days, with lengths: '.format(
len(day_list)), [len(i) for i in data4days]))
# Return as list of days (datetimes) + list of data for each day
return data4days, day_list
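# Minimal usage sketch (assumption: `dates` are datetime objects aligned with `data`):
#   import datetime as dt
#   dates = [dt.datetime(2020, 1, 1) + dt.timedelta(hours=6 * i) for i in range(8)]
#   data4days, days = split_data_by_days(data=list(range(8)), dates=dates)
#   # -> one numpy array of values per unique calendar day (here 2 days of 4 values)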
def obs2grid(glon=None, glat=None, galt=None, nest='high res global',
sites=None, debug=False):
"""
    Get model grid indices for observation sites, i.e. values that have a
    given lat, lon and alt
Notes
-------
- Function flagged for removal
"""
if isinstance(glon, type(None)):
glon, glat, galt = get_latlonalt4res(nest=nest, centre=False,
debug=debug)
# Assume use of known CAST sites... unless others given.
if isinstance(sites, type(None)):
loc_dict = get_loc(rtn_dict=True)
sites = list(loc_dict.keys())
# Pull out site location indicies
indices_list = []
for site in sites:
lon, lat, alt = loc_dict[site]
vars = get_xy(lon, lat, glon, glat)
indices_list += [vars]
return indices_list
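# Usage sketch (assumption: the get_loc()/get_xy() helpers used above are importable;
# with sites=None the default list of known CAST sites is used):
#   indices_list = obs2grid(nest='high res global')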
|
[
"pandas.DataFrame",
"numpy.linspace"
] |
[((4158, 4204), 'pandas.DataFrame', 'DataFrame', (['data'], {'index': 'dates', 'columns': "['data']"}), "(data, index=dates, columns=['data'])\n", (4167, 4204), False, 'from pandas import DataFrame\n'), ((2254, 2314), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'extra_points_point_on_edge'], {'endpoint': '(True)'}), '(0, 1, extra_points_point_on_edge, endpoint=True)\n', (2265, 2314), True, 'import numpy as np\n'), ((3607, 3667), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'extra_points_point_on_edge'], {'endpoint': '(True)'}), '(0, 1, extra_points_point_on_edge, endpoint=True)\n', (3618, 3667), True, 'import numpy as np\n')]
|
import numpy as np
import magpie
# check cartesian
def test_get_xedges():
xedges = magpie.grids.get_xedges(1., 2)
xedges = np.round(xedges, decimals=2)
assert len(xedges) == 3, "Length of xedges is incorrect."
assert xedges[-1] - xedges[0] == 1., "xedges range is incorrect."
xedges = magpie.grids.get_xedges(1., 2, xmin=-1.)
xedges = np.round(xedges, decimals=2)
assert xedges[0]==-1. and xedges[1]==-0.5 and xedges[-1]==0., "xedges with xmin are not as expected."
assert xedges[-1] - xedges[0] == 1., "xedges range is incorrect."
def test_xedges2mid():
xedges = magpie.grids.get_xedges(1., 10)
xmid = magpie.grids.xedges2mid(xedges)
xmid = np.round(xmid, decimals=2)
assert len(xedges) == len(xmid) + 1, "Length of xmid is incorrect."
assert xmid[0] == 0.05 and xmid[1] == 0.15 and xmid[5] == 0.55, "xmid is not as expected."
def test_xmid2edges():
xedges = magpie.grids.get_xedges(1., 10)
xmid = magpie.grids.xedges2mid(xedges)
xedges2 = magpie.grids.xmid2edges(xmid)
    assert np.round(np.sum(xedges-xedges2), decimals=2) == 0., "Conversion from xmid to xedges is not consistent with input xedges."
def test_grid1d():
xmid = magpie.grids.grid1d(10., 10)
assert np.round(xmid[0], decimals=4) == 0.5 and np.round(xmid[7], decimals=4) == 7.5, "grid1d unexpected results."
xmid = magpie.grids.grid1d(10., 10, xmin=10)
assert np.round(xmid[0], decimals=4) == 10.5 and np.round(xmid[7], decimals=4) == 17.5, "grid1d unexpected results."
xmid, xedges = magpie.grids.grid1d(10., 10, return_edges=True)
assert len(xmid)+1 == len(xedges), "Length of xmid and xedges is not as expected."
assert np.round(xedges[0], decimals=4) == 0. and np.round(xedges[7], decimals=4) == 7., "grid1d unexpected results."
assert np.round(xmid[0], decimals=4) == 0.5 and np.round(xmid[7], decimals=4) == 7.5, "grid1d unexpected results."
def test_grid2d():
x2d, y2d = magpie.grids.grid2d(10, 10)
assert np.shape(x2d) == (10, 10), "shape is not as expected."
assert np.shape(y2d) == (10, 10), "shape is not as expected."
x2d, y2d, xmid, ymid = magpie.grids.grid2d(10, 10, return1d=True)
assert np.round(xmid[0], decimals=4) == 0.5 and np.round(xmid[7], decimals=4) == 7.5, "grid2d unexpected results."
assert np.round(np.sum(np.unique(x2d.flatten())-xmid), decimals=4) == 0., "xmid is inconsistent with x2d."
assert np.round(ymid[0], decimals=4) == 0.5 and np.round(ymid[7], decimals=4) == 7.5, "grid2d unexpected results."
assert np.round(np.sum(np.unique(y2d.flatten())-ymid), decimals=4) == 0., "ymid is inconsistent with y2d."
x2d, y2d, xmid, ymid = magpie.grids.grid2d(10, 10, mins=[10., 20.], return1d=True)
assert np.round(xmid[0], decimals=4) == 10.5 and np.round(xmid[7], decimals=4) == 17.5, "grid2d unexpected results."
assert np.round(np.sum(np.unique(x2d.flatten())-xmid), decimals=4) == 0., "xmid is inconsistent with x2d."
assert np.round(ymid[0], decimals=4) == 20.5 and np.round(ymid[7], decimals=4) == 27.5, "grid2d unexpected results."
assert np.round(np.sum(np.unique(y2d.flatten())-ymid), decimals=4) == 0., "ymid is inconsistent with y2d."
x2d, y2d = magpie.grids.grid2d(10, [10, 20])
assert np.shape(x2d) == (10, 20), "shape is not as expected."
assert np.shape(y2d) == (10, 20), "shape is not as expected."
x2d, y2d, xmid, ymid = magpie.grids.grid2d([10, 20], [10, 20], return1d=True)
assert np.round(xmid[0], decimals=4) == 0.5 and np.round(xmid[7], decimals=4) == 7.5, "grid2d unexpected results."
assert np.round(np.sum(np.unique(x2d.flatten())-xmid), decimals=4) == 0., "xmid is inconsistent with x2d."
assert np.round(ymid[0], decimals=4) == 0.5 and np.round(ymid[7], decimals=4) == 7.5, "grid2d unexpected results."
assert np.round(np.sum(np.unique(y2d.flatten())-ymid), decimals=4) == 0., "ymid is inconsistent with y2d."
x2d, y2d, xmid, ymid = magpie.grids.grid2d([10, 20], [10, 20], mins=[10., 20.], return1d=True)
assert np.round(xmid[0], decimals=4) == 10.5 and np.round(xmid[7], decimals=4) == 17.5, "grid2d unexpected results."
assert np.round(np.sum(np.unique(x2d.flatten())-xmid), decimals=4) == 0., "xmid is inconsistent with x2d."
assert np.round(ymid[0], decimals=4) == 20.5 and np.round(ymid[7], decimals=4) == 27.5, "grid2d unexpected results."
assert np.round(np.sum(np.unique(y2d.flatten())-ymid), decimals=4) == 0., "ymid is inconsistent with y2d."
def test_grid3d():
x3d, y3d, z3d = magpie.grids.grid3d(10, 10)
assert np.shape(x3d) == (10, 10, 10), "shape is not as expected."
assert np.shape(y3d) == (10, 10, 10), "shape is not as expected."
assert np.shape(z3d) == (10, 10, 10), "shape is not as expected."
x3d, y3d, z3d, xmid, ymid, zmid = magpie.grids.grid3d(10, 10, return1d=True)
assert np.round(xmid[0], decimals=4) == 0.5 and np.round(xmid[7], decimals=4) == 7.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(x3d.flatten())-xmid), decimals=4) == 0., "xmid is inconsistent with x3d."
assert np.round(ymid[0], decimals=4) == 0.5 and np.round(ymid[7], decimals=4) == 7.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(y3d.flatten())-ymid), decimals=4) == 0., "ymid is inconsistent with y3d."
assert np.round(zmid[0], decimals=4) == 0.5 and np.round(zmid[7], decimals=4) == 7.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(z3d.flatten())-zmid), decimals=4) == 0., "zmid is inconsistent with z3d."
x3d, y3d, z3d, xmid, ymid, zmid = magpie.grids.grid3d(10, 10, mins=[10., 20., 30.], return1d=True)
assert np.round(xmid[0], decimals=4) == 10.5 and np.round(xmid[7], decimals=4) == 17.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(x3d.flatten())-xmid), decimals=4) == 0., "xmid is inconsistent with x3d."
assert np.round(ymid[0], decimals=4) == 20.5 and np.round(ymid[7], decimals=4) == 27.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(y3d.flatten())-ymid), decimals=4) == 0., "ymid is inconsistent with y3d."
assert np.round(zmid[0], decimals=4) == 30.5 and np.round(zmid[7], decimals=4) == 37.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(z3d.flatten())-zmid), decimals=4) == 0., "zmid is inconsistent with z3d."
x3d, y3d, z3d = magpie.grids.grid3d(10, [10, 20, 30])
assert np.shape(x3d) == (10, 20, 30), "shape is not as expected."
assert np.shape(y3d) == (10, 20, 30), "shape is not as expected."
assert np.shape(z3d) == (10, 20, 30), "shape is not as expected."
x3d, y3d, z3d, xmid, ymid, zmid = magpie.grids.grid3d([10, 20, 30], [10, 20, 30], return1d=True)
assert np.round(xmid[0], decimals=4) == 0.5 and np.round(xmid[7], decimals=4) == 7.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(x3d.flatten())-xmid), decimals=4) == 0., "xmid is inconsistent with x3d."
assert np.round(ymid[0], decimals=4) == 0.5 and np.round(ymid[7], decimals=4) == 7.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(y3d.flatten())-ymid), decimals=4) == 0., "ymid is inconsistent with y3d."
assert np.round(zmid[0], decimals=4) == 0.5 and np.round(zmid[7], decimals=4) == 7.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(z3d.flatten())-zmid), decimals=4) == 0., "zmid is inconsistent with z3d."
x3d, y3d, z3d, xmid, ymid, zmid = magpie.grids.grid3d([10, 20, 30], [10, 20, 30], mins=[10., 20., 30], return1d=True)
assert np.round(xmid[0], decimals=4) == 10.5 and np.round(xmid[7], decimals=4) == 17.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(x3d.flatten())-xmid), decimals=4) == 0., "xmid is inconsistent with x3d."
assert np.round(ymid[0], decimals=4) == 20.5 and np.round(ymid[7], decimals=4) == 27.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(y3d.flatten())-ymid), decimals=4) == 0., "ymid is inconsistent with y3d."
assert np.round(zmid[0], decimals=4) == 30.5 and np.round(zmid[7], decimals=4) == 37.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(z3d.flatten())-zmid), decimals=4) == 0., "zmid is inconsistent with z3d."
# check polar
def test_polargrid():
r2d, p2d = magpie.grids.polargrid(10, 20)
assert np.shape(r2d) == (10, 20), "shape is not as expected."
assert np.shape(p2d) == (10, 20), "shape is not as expected."
r2d, p2d, rmid, pmid = magpie.grids.polargrid(10, 20, return1d=True)
assert np.round(rmid[0], decimals=4) == 0.05 and np.round(rmid[7], decimals=4) == 0.75, "polargrid unexpected results."
assert np.round(np.sum(np.unique(r2d.flatten())-rmid), decimals=4) == 0., "rmid is inconsistent with r2d."
assert np.round(pmid[0], decimals=4) == np.round(np.pi/20, decimals=4) and np.round(pmid[7], decimals=4) == np.round(15*np.pi/20, decimals=4), "polargrid unexpected results."
assert np.round(np.sum(np.unique(p2d.flatten())-pmid), decimals=4) == 0., "pmid is inconsistent with p2d."
r2d, p2d, rmid, pmid = magpie.grids.polargrid(10, 10, rmin=10., rmax=20., phimin=np.pi/2., phimax=np.pi, return1d=True)
assert np.round(rmid[0], decimals=4) == 10.5 and np.round(rmid[7], decimals=4) == 17.5, "polargrid unexpected results."
assert np.round(np.sum(np.unique(r2d.flatten())-rmid), decimals=4) == 0., "rmid is inconsistent with r2d."
assert np.round(pmid[0], decimals=4) == np.round((np.pi/2.)/20 + np.pi/2., decimals=4) \
and np.round(pmid[7], decimals=4) == np.round(15*(np.pi/2.)/20 + np.pi/2., decimals=4), "polargrid unexpected results."
assert np.round(np.sum(np.unique(p2d.flatten())-pmid), decimals=4) == 0., "pmid is inconsistent with p2d."
def test_polarEA():
r, p = magpie.grids.polarEA_grid(10)
npix = magpie.grids.polarEA_npix(10)
assert len(r) == len(p), "PolarEA grid size for r and p are not the same."
assert len(r) == npix, "Length of polarEA grid does not match expectations."
r, p = magpie.grids.polarEA_grid(6, base_nphi=3)
npix = magpie.grids.polarEA_npix(6, base_nphi=3)
assert len(r) == len(p), "PolarEA grid size for r and p are not the same."
assert len(r) == npix, "Length of polarEA grid does not match expectations."
r, p = magpie.grids.polarEA_grid(10, base_nphi=3)
npix = magpie.grids.polarEA_npix(10, base_nphi=3)
assert len(r) == len(p), "PolarEA grid size for r and p are not the same."
assert len(r) == npix, "Length of polarEA grid does not match expectations."
assert r[3*4**2] == 0.45, "r values are incorrect."
assert r[3*7**2] == 0.75, "r values are incorrect."
assert np.round(p[3*4**2], decimals=4) == np.round(np.pi/(3*(2*4+1)), decimals=4), "p values are incorrect."
assert np.round(p[3*7**2 + 7], decimals=4) == np.round(15*np.pi/(3*(2*7+1)), decimals=4), "p values are incorrect."
area = magpie.grids.polarEA_area(10, rmax=10., base_nphi=4)
assert(np.round(area, decimals=4) == np.round(np.pi/4., decimals=4)), "area calculation is incorrect."
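# These asserts double as regression tests; they can be collected and run with
# pytest (assumption: this file lives in magpie's test suite), e.g.: python -m pytest -q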
|
[
"magpie.grids.polargrid",
"numpy.sum",
"magpie.grids.grid1d",
"magpie.grids.grid3d",
"magpie.grids.xmid2edges",
"magpie.grids.get_xedges",
"magpie.grids.polarEA_grid",
"numpy.shape",
"magpie.grids.polarEA_npix",
"magpie.grids.polarEA_area",
"magpie.grids.xedges2mid",
"numpy.round",
"magpie.grids.grid2d"
] |
[((90, 121), 'magpie.grids.get_xedges', 'magpie.grids.get_xedges', (['(1.0)', '(2)'], {}), '(1.0, 2)\n', (113, 121), False, 'import magpie\n'), ((134, 162), 'numpy.round', 'np.round', (['xedges'], {'decimals': '(2)'}), '(xedges, decimals=2)\n', (142, 162), True, 'import numpy as np\n'), ((308, 350), 'magpie.grids.get_xedges', 'magpie.grids.get_xedges', (['(1.0)', '(2)'], {'xmin': '(-1.0)'}), '(1.0, 2, xmin=-1.0)\n', (331, 350), False, 'import magpie\n'), ((362, 390), 'numpy.round', 'np.round', (['xedges'], {'decimals': '(2)'}), '(xedges, decimals=2)\n', (370, 390), True, 'import numpy as np\n'), ((604, 636), 'magpie.grids.get_xedges', 'magpie.grids.get_xedges', (['(1.0)', '(10)'], {}), '(1.0, 10)\n', (627, 636), False, 'import magpie\n'), ((647, 678), 'magpie.grids.xedges2mid', 'magpie.grids.xedges2mid', (['xedges'], {}), '(xedges)\n', (670, 678), False, 'import magpie\n'), ((690, 716), 'numpy.round', 'np.round', (['xmid'], {'decimals': '(2)'}), '(xmid, decimals=2)\n', (698, 716), True, 'import numpy as np\n'), ((921, 953), 'magpie.grids.get_xedges', 'magpie.grids.get_xedges', (['(1.0)', '(10)'], {}), '(1.0, 10)\n', (944, 953), False, 'import magpie\n'), ((964, 995), 'magpie.grids.xedges2mid', 'magpie.grids.xedges2mid', (['xedges'], {}), '(xedges)\n', (987, 995), False, 'import magpie\n'), ((1010, 1039), 'magpie.grids.xmid2edges', 'magpie.grids.xmid2edges', (['xmid'], {}), '(xmid)\n', (1033, 1039), False, 'import magpie\n'), ((1198, 1227), 'magpie.grids.grid1d', 'magpie.grids.grid1d', (['(10.0)', '(10)'], {}), '(10.0, 10)\n', (1217, 1227), False, 'import magpie\n'), ((1357, 1395), 'magpie.grids.grid1d', 'magpie.grids.grid1d', (['(10.0)', '(10)'], {'xmin': '(10)'}), '(10.0, 10, xmin=10)\n', (1376, 1395), False, 'import magpie\n'), ((1535, 1583), 'magpie.grids.grid1d', 'magpie.grids.grid1d', (['(10.0)', '(10)'], {'return_edges': '(True)'}), '(10.0, 10, return_edges=True)\n', (1554, 1583), False, 'import magpie\n'), ((1945, 1972), 'magpie.grids.grid2d', 'magpie.grids.grid2d', (['(10)', '(10)'], {}), '(10, 10)\n', (1964, 1972), False, 'import magpie\n'), ((2132, 2174), 'magpie.grids.grid2d', 'magpie.grids.grid2d', (['(10)', '(10)'], {'return1d': '(True)'}), '(10, 10, return1d=True)\n', (2151, 2174), False, 'import magpie\n'), ((2662, 2723), 'magpie.grids.grid2d', 'magpie.grids.grid2d', (['(10)', '(10)'], {'mins': '[10.0, 20.0]', 'return1d': '(True)'}), '(10, 10, mins=[10.0, 20.0], return1d=True)\n', (2681, 2723), False, 'import magpie\n'), ((3201, 3234), 'magpie.grids.grid2d', 'magpie.grids.grid2d', (['(10)', '[10, 20]'], {}), '(10, [10, 20])\n', (3220, 3234), False, 'import magpie\n'), ((3394, 3448), 'magpie.grids.grid2d', 'magpie.grids.grid2d', (['[10, 20]', '[10, 20]'], {'return1d': '(True)'}), '([10, 20], [10, 20], return1d=True)\n', (3413, 3448), False, 'import magpie\n'), ((3936, 4009), 'magpie.grids.grid2d', 'magpie.grids.grid2d', (['[10, 20]', '[10, 20]'], {'mins': '[10.0, 20.0]', 'return1d': '(True)'}), '([10, 20], [10, 20], mins=[10.0, 20.0], return1d=True)\n', (3955, 4009), False, 'import magpie\n'), ((4513, 4540), 'magpie.grids.grid3d', 'magpie.grids.grid3d', (['(10)', '(10)'], {}), '(10, 10)\n', (4532, 4540), False, 'import magpie\n'), ((4789, 4831), 'magpie.grids.grid3d', 'magpie.grids.grid3d', (['(10)', '(10)'], {'return1d': '(True)'}), '(10, 10, return1d=True)\n', (4808, 4831), False, 'import magpie\n'), ((5560, 5627), 'magpie.grids.grid3d', 'magpie.grids.grid3d', (['(10)', '(10)'], {'mins': '[10.0, 20.0, 30.0]', 'return1d': '(True)'}), '(10, 10, mins=[10.0, 20.0, 30.0], 
return1d=True)\n', (5579, 5627), False, 'import magpie\n'), ((6341, 6378), 'magpie.grids.grid3d', 'magpie.grids.grid3d', (['(10)', '[10, 20, 30]'], {}), '(10, [10, 20, 30])\n', (6360, 6378), False, 'import magpie\n'), ((6627, 6689), 'magpie.grids.grid3d', 'magpie.grids.grid3d', (['[10, 20, 30]', '[10, 20, 30]'], {'return1d': '(True)'}), '([10, 20, 30], [10, 20, 30], return1d=True)\n', (6646, 6689), False, 'import magpie\n'), ((7418, 7507), 'magpie.grids.grid3d', 'magpie.grids.grid3d', (['[10, 20, 30]', '[10, 20, 30]'], {'mins': '[10.0, 20.0, 30]', 'return1d': '(True)'}), '([10, 20, 30], [10, 20, 30], mins=[10.0, 20.0, 30],\n return1d=True)\n', (7437, 7507), False, 'import magpie\n'), ((8252, 8282), 'magpie.grids.polargrid', 'magpie.grids.polargrid', (['(10)', '(20)'], {}), '(10, 20)\n', (8274, 8282), False, 'import magpie\n'), ((8442, 8487), 'magpie.grids.polargrid', 'magpie.grids.polargrid', (['(10)', '(20)'], {'return1d': '(True)'}), '(10, 20, return1d=True)\n', (8464, 8487), False, 'import magpie\n'), ((9040, 9145), 'magpie.grids.polargrid', 'magpie.grids.polargrid', (['(10)', '(10)'], {'rmin': '(10.0)', 'rmax': '(20.0)', 'phimin': '(np.pi / 2.0)', 'phimax': 'np.pi', 'return1d': '(True)'}), '(10, 10, rmin=10.0, rmax=20.0, phimin=np.pi / 2.0,\n phimax=np.pi, return1d=True)\n', (9062, 9145), False, 'import magpie\n'), ((9743, 9773), 'magpie.grids.polargrid', 'magpie.grids.polargrid', (['(10)', '(20)'], {}), '(10, 20)\n', (9765, 9773), False, 'import magpie\n'), ((9933, 9978), 'magpie.grids.polargrid', 'magpie.grids.polargrid', (['(10)', '(20)'], {'return1d': '(True)'}), '(10, 20, return1d=True)\n', (9955, 9978), False, 'import magpie\n'), ((10531, 10636), 'magpie.grids.polargrid', 'magpie.grids.polargrid', (['(10)', '(10)'], {'rmin': '(10.0)', 'rmax': '(20.0)', 'phimin': '(np.pi / 2.0)', 'phimax': 'np.pi', 'return1d': '(True)'}), '(10, 10, rmin=10.0, rmax=20.0, phimin=np.pi / 2.0,\n phimax=np.pi, return1d=True)\n', (10553, 10636), False, 'import magpie\n'), ((11227, 11256), 'magpie.grids.polarEA_grid', 'magpie.grids.polarEA_grid', (['(10)'], {}), '(10)\n', (11252, 11256), False, 'import magpie\n'), ((11268, 11297), 'magpie.grids.polarEA_npix', 'magpie.grids.polarEA_npix', (['(10)'], {}), '(10)\n', (11293, 11297), False, 'import magpie\n'), ((11469, 11510), 'magpie.grids.polarEA_grid', 'magpie.grids.polarEA_grid', (['(6)'], {'base_nphi': '(3)'}), '(6, base_nphi=3)\n', (11494, 11510), False, 'import magpie\n'), ((11522, 11563), 'magpie.grids.polarEA_npix', 'magpie.grids.polarEA_npix', (['(6)'], {'base_nphi': '(3)'}), '(6, base_nphi=3)\n', (11547, 11563), False, 'import magpie\n'), ((11735, 11777), 'magpie.grids.polarEA_grid', 'magpie.grids.polarEA_grid', (['(10)'], {'base_nphi': '(3)'}), '(10, base_nphi=3)\n', (11760, 11777), False, 'import magpie\n'), ((11789, 11831), 'magpie.grids.polarEA_npix', 'magpie.grids.polarEA_npix', (['(10)'], {'base_nphi': '(3)'}), '(10, base_nphi=3)\n', (11814, 11831), False, 'import magpie\n'), ((12348, 12401), 'magpie.grids.polarEA_area', 'magpie.grids.polarEA_area', (['(10)'], {'rmax': '(10.0)', 'base_nphi': '(4)'}), '(10, rmax=10.0, base_nphi=4)\n', (12373, 12401), False, 'import magpie\n'), ((1060, 1084), 'numpy.sum', 'np.sum', (['(xedges - xedges2)'], {}), '(xedges - xedges2)\n', (1066, 1084), True, 'import numpy as np\n'), ((1984, 1997), 'numpy.shape', 'np.shape', (['x2d'], {}), '(x2d)\n', (1992, 1997), True, 'import numpy as np\n'), ((2050, 2063), 'numpy.shape', 'np.shape', (['y2d'], {}), '(y2d)\n', (2058, 2063), True, 'import numpy as np\n'), ((3246, 
3259), 'numpy.shape', 'np.shape', (['x2d'], {}), '(x2d)\n', (3254, 3259), True, 'import numpy as np\n'), ((3312, 3325), 'numpy.shape', 'np.shape', (['y2d'], {}), '(y2d)\n', (3320, 3325), True, 'import numpy as np\n'), ((4552, 4565), 'numpy.shape', 'np.shape', (['x3d'], {}), '(x3d)\n', (4560, 4565), True, 'import numpy as np\n'), ((4622, 4635), 'numpy.shape', 'np.shape', (['y3d'], {}), '(y3d)\n', (4630, 4635), True, 'import numpy as np\n'), ((4692, 4705), 'numpy.shape', 'np.shape', (['z3d'], {}), '(z3d)\n', (4700, 4705), True, 'import numpy as np\n'), ((6390, 6403), 'numpy.shape', 'np.shape', (['x3d'], {}), '(x3d)\n', (6398, 6403), True, 'import numpy as np\n'), ((6460, 6473), 'numpy.shape', 'np.shape', (['y3d'], {}), '(y3d)\n', (6468, 6473), True, 'import numpy as np\n'), ((6530, 6543), 'numpy.shape', 'np.shape', (['z3d'], {}), '(z3d)\n', (6538, 6543), True, 'import numpy as np\n'), ((8294, 8307), 'numpy.shape', 'np.shape', (['r2d'], {}), '(r2d)\n', (8302, 8307), True, 'import numpy as np\n'), ((8360, 8373), 'numpy.shape', 'np.shape', (['p2d'], {}), '(p2d)\n', (8368, 8373), True, 'import numpy as np\n'), ((9785, 9798), 'numpy.shape', 'np.shape', (['r2d'], {}), '(r2d)\n', (9793, 9798), True, 'import numpy as np\n'), ((9851, 9864), 'numpy.shape', 'np.shape', (['p2d'], {}), '(p2d)\n', (9859, 9864), True, 'import numpy as np\n'), ((12115, 12150), 'numpy.round', 'np.round', (['p[3 * 4 ** 2]'], {'decimals': '(4)'}), '(p[3 * 4 ** 2], decimals=4)\n', (12123, 12150), True, 'import numpy as np\n'), ((12150, 12197), 'numpy.round', 'np.round', (['(np.pi / (3 * (2 * 4 + 1)))'], {'decimals': '(4)'}), '(np.pi / (3 * (2 * 4 + 1)), decimals=4)\n', (12158, 12197), True, 'import numpy as np\n'), ((12228, 12267), 'numpy.round', 'np.round', (['p[3 * 7 ** 2 + 7]'], {'decimals': '(4)'}), '(p[3 * 7 ** 2 + 7], decimals=4)\n', (12236, 12267), True, 'import numpy as np\n'), ((12267, 12319), 'numpy.round', 'np.round', (['(15 * np.pi / (3 * (2 * 7 + 1)))'], {'decimals': '(4)'}), '(15 * np.pi / (3 * (2 * 7 + 1)), decimals=4)\n', (12275, 12319), True, 'import numpy as np\n'), ((12412, 12438), 'numpy.round', 'np.round', (['area'], {'decimals': '(4)'}), '(area, decimals=4)\n', (12420, 12438), True, 'import numpy as np\n'), ((12442, 12475), 'numpy.round', 'np.round', (['(np.pi / 4.0)'], {'decimals': '(4)'}), '(np.pi / 4.0, decimals=4)\n', (12450, 12475), True, 'import numpy as np\n'), ((1238, 1267), 'numpy.round', 'np.round', (['xmid[0]'], {'decimals': '(4)'}), '(xmid[0], decimals=4)\n', (1246, 1267), True, 'import numpy as np\n'), ((1279, 1308), 'numpy.round', 'np.round', (['xmid[7]'], {'decimals': '(4)'}), '(xmid[7], decimals=4)\n', (1287, 1308), True, 'import numpy as np\n'), ((1406, 1435), 'numpy.round', 'np.round', (['xmid[0]'], {'decimals': '(4)'}), '(xmid[0], decimals=4)\n', (1414, 1435), True, 'import numpy as np\n'), ((1448, 1477), 'numpy.round', 'np.round', (['xmid[7]'], {'decimals': '(4)'}), '(xmid[7], decimals=4)\n', (1456, 1477), True, 'import numpy as np\n'), ((1681, 1712), 'numpy.round', 'np.round', (['xedges[0]'], {'decimals': '(4)'}), '(xedges[0], decimals=4)\n', (1689, 1712), True, 'import numpy as np\n'), ((1723, 1754), 'numpy.round', 'np.round', (['xedges[7]'], {'decimals': '(4)'}), '(xedges[7], decimals=4)\n', (1731, 1754), True, 'import numpy as np\n'), ((1802, 1831), 'numpy.round', 'np.round', (['xmid[0]'], {'decimals': '(4)'}), '(xmid[0], decimals=4)\n', (1810, 1831), True, 'import numpy as np\n'), ((1843, 1872), 'numpy.round', 'np.round', (['xmid[7]'], {'decimals': '(4)'}), '(xmid[7], 
decimals=4)\n', (1851, 1872), True, 'import numpy as np\n'), ((2186, 2215), 'numpy.round', 'np.round', (['xmid[0]'], {'decimals': '(4)'}), '(xmid[0], decimals=4)\n', (2194, 2215), True, 'import numpy as np\n'), ((2227, 2256), 'numpy.round', 'np.round', (['xmid[7]'], {'decimals': '(4)'}), '(xmid[7], decimals=4)\n', (2235, 2256), True, 'import numpy as np\n'), ((2416, 2445), 'numpy.round', 'np.round', (['ymid[0]'], {'decimals': '(4)'}), '(ymid[0], decimals=4)\n', (2424, 2445), True, 'import numpy as np\n'), ((2457, 2486), 'numpy.round', 'np.round', (['ymid[7]'], {'decimals': '(4)'}), '(ymid[7], decimals=4)\n', (2465, 2486), True, 'import numpy as np\n'), ((2733, 2762), 'numpy.round', 'np.round', (['xmid[0]'], {'decimals': '(4)'}), '(xmid[0], decimals=4)\n', (2741, 2762), True, 'import numpy as np\n'), ((2775, 2804), 'numpy.round', 'np.round', (['xmid[7]'], {'decimals': '(4)'}), '(xmid[7], decimals=4)\n', (2783, 2804), True, 'import numpy as np\n'), ((2965, 2994), 'numpy.round', 'np.round', (['ymid[0]'], {'decimals': '(4)'}), '(ymid[0], decimals=4)\n', (2973, 2994), True, 'import numpy as np\n'), ((3007, 3036), 'numpy.round', 'np.round', (['ymid[7]'], {'decimals': '(4)'}), '(ymid[7], decimals=4)\n', (3015, 3036), True, 'import numpy as np\n'), ((3460, 3489), 'numpy.round', 'np.round', (['xmid[0]'], {'decimals': '(4)'}), '(xmid[0], decimals=4)\n', (3468, 3489), True, 'import numpy as np\n'), ((3501, 3530), 'numpy.round', 'np.round', (['xmid[7]'], {'decimals': '(4)'}), '(xmid[7], decimals=4)\n', (3509, 3530), True, 'import numpy as np\n'), ((3690, 3719), 'numpy.round', 'np.round', (['ymid[0]'], {'decimals': '(4)'}), '(ymid[0], decimals=4)\n', (3698, 3719), True, 'import numpy as np\n'), ((3731, 3760), 'numpy.round', 'np.round', (['ymid[7]'], {'decimals': '(4)'}), '(ymid[7], decimals=4)\n', (3739, 3760), True, 'import numpy as np\n'), ((4019, 4048), 'numpy.round', 'np.round', (['xmid[0]'], {'decimals': '(4)'}), '(xmid[0], decimals=4)\n', (4027, 4048), True, 'import numpy as np\n'), ((4061, 4090), 'numpy.round', 'np.round', (['xmid[7]'], {'decimals': '(4)'}), '(xmid[7], decimals=4)\n', (4069, 4090), True, 'import numpy as np\n'), ((4251, 4280), 'numpy.round', 'np.round', (['ymid[0]'], {'decimals': '(4)'}), '(ymid[0], decimals=4)\n', (4259, 4280), True, 'import numpy as np\n'), ((4293, 4322), 'numpy.round', 'np.round', (['ymid[7]'], {'decimals': '(4)'}), '(ymid[7], decimals=4)\n', (4301, 4322), True, 'import numpy as np\n'), ((4843, 4872), 'numpy.round', 'np.round', (['xmid[0]'], {'decimals': '(4)'}), '(xmid[0], decimals=4)\n', (4851, 4872), True, 'import numpy as np\n'), ((4884, 4913), 'numpy.round', 'np.round', (['xmid[7]'], {'decimals': '(4)'}), '(xmid[7], decimals=4)\n', (4892, 4913), True, 'import numpy as np\n'), ((5073, 5102), 'numpy.round', 'np.round', (['ymid[0]'], {'decimals': '(4)'}), '(ymid[0], decimals=4)\n', (5081, 5102), True, 'import numpy as np\n'), ((5114, 5143), 'numpy.round', 'np.round', (['ymid[7]'], {'decimals': '(4)'}), '(ymid[7], decimals=4)\n', (5122, 5143), True, 'import numpy as np\n'), ((5303, 5332), 'numpy.round', 'np.round', (['zmid[0]'], {'decimals': '(4)'}), '(zmid[0], decimals=4)\n', (5311, 5332), True, 'import numpy as np\n'), ((5344, 5373), 'numpy.round', 'np.round', (['zmid[7]'], {'decimals': '(4)'}), '(zmid[7], decimals=4)\n', (5352, 5373), True, 'import numpy as np\n'), ((5636, 5665), 'numpy.round', 'np.round', (['xmid[0]'], {'decimals': '(4)'}), '(xmid[0], decimals=4)\n', (5644, 5665), True, 'import numpy as np\n'), ((5678, 5707), 'numpy.round', 'np.round', 
(['xmid[7]'], {'decimals': '(4)'}), '(xmid[7], decimals=4)\n', (5686, 5707), True, 'import numpy as np\n'), ((5868, 5897), 'numpy.round', 'np.round', (['ymid[0]'], {'decimals': '(4)'}), '(ymid[0], decimals=4)\n', (5876, 5897), True, 'import numpy as np\n'), ((5910, 5939), 'numpy.round', 'np.round', (['ymid[7]'], {'decimals': '(4)'}), '(ymid[7], decimals=4)\n', (5918, 5939), True, 'import numpy as np\n'), ((6100, 6129), 'numpy.round', 'np.round', (['zmid[0]'], {'decimals': '(4)'}), '(zmid[0], decimals=4)\n', (6108, 6129), True, 'import numpy as np\n'), ((6142, 6171), 'numpy.round', 'np.round', (['zmid[7]'], {'decimals': '(4)'}), '(zmid[7], decimals=4)\n', (6150, 6171), True, 'import numpy as np\n'), ((6701, 6730), 'numpy.round', 'np.round', (['xmid[0]'], {'decimals': '(4)'}), '(xmid[0], decimals=4)\n', (6709, 6730), True, 'import numpy as np\n'), ((6742, 6771), 'numpy.round', 'np.round', (['xmid[7]'], {'decimals': '(4)'}), '(xmid[7], decimals=4)\n', (6750, 6771), True, 'import numpy as np\n'), ((6931, 6960), 'numpy.round', 'np.round', (['ymid[0]'], {'decimals': '(4)'}), '(ymid[0], decimals=4)\n', (6939, 6960), True, 'import numpy as np\n'), ((6972, 7001), 'numpy.round', 'np.round', (['ymid[7]'], {'decimals': '(4)'}), '(ymid[7], decimals=4)\n', (6980, 7001), True, 'import numpy as np\n'), ((7161, 7190), 'numpy.round', 'np.round', (['zmid[0]'], {'decimals': '(4)'}), '(zmid[0], decimals=4)\n', (7169, 7190), True, 'import numpy as np\n'), ((7202, 7231), 'numpy.round', 'np.round', (['zmid[7]'], {'decimals': '(4)'}), '(zmid[7], decimals=4)\n', (7210, 7231), True, 'import numpy as np\n'), ((7513, 7542), 'numpy.round', 'np.round', (['xmid[0]'], {'decimals': '(4)'}), '(xmid[0], decimals=4)\n', (7521, 7542), True, 'import numpy as np\n'), ((7555, 7584), 'numpy.round', 'np.round', (['xmid[7]'], {'decimals': '(4)'}), '(xmid[7], decimals=4)\n', (7563, 7584), True, 'import numpy as np\n'), ((7745, 7774), 'numpy.round', 'np.round', (['ymid[0]'], {'decimals': '(4)'}), '(ymid[0], decimals=4)\n', (7753, 7774), True, 'import numpy as np\n'), ((7787, 7816), 'numpy.round', 'np.round', (['ymid[7]'], {'decimals': '(4)'}), '(ymid[7], decimals=4)\n', (7795, 7816), True, 'import numpy as np\n'), ((7977, 8006), 'numpy.round', 'np.round', (['zmid[0]'], {'decimals': '(4)'}), '(zmid[0], decimals=4)\n', (7985, 8006), True, 'import numpy as np\n'), ((8019, 8048), 'numpy.round', 'np.round', (['zmid[7]'], {'decimals': '(4)'}), '(zmid[7], decimals=4)\n', (8027, 8048), True, 'import numpy as np\n'), ((8499, 8528), 'numpy.round', 'np.round', (['rmid[0]'], {'decimals': '(4)'}), '(rmid[0], decimals=4)\n', (8507, 8528), True, 'import numpy as np\n'), ((8541, 8570), 'numpy.round', 'np.round', (['rmid[7]'], {'decimals': '(4)'}), '(rmid[7], decimals=4)\n', (8549, 8570), True, 'import numpy as np\n'), ((8734, 8763), 'numpy.round', 'np.round', (['pmid[0]'], {'decimals': '(4)'}), '(pmid[0], decimals=4)\n', (8742, 8763), True, 'import numpy as np\n'), ((8767, 8799), 'numpy.round', 'np.round', (['(np.pi / 20)'], {'decimals': '(4)'}), '(np.pi / 20, decimals=4)\n', (8775, 8799), True, 'import numpy as np\n'), ((8802, 8831), 'numpy.round', 'np.round', (['pmid[7]'], {'decimals': '(4)'}), '(pmid[7], decimals=4)\n', (8810, 8831), True, 'import numpy as np\n'), ((8835, 8872), 'numpy.round', 'np.round', (['(15 * np.pi / 20)'], {'decimals': '(4)'}), '(15 * np.pi / 20, decimals=4)\n', (8843, 8872), True, 'import numpy as np\n'), ((9148, 9177), 'numpy.round', 'np.round', (['rmid[0]'], {'decimals': '(4)'}), '(rmid[0], decimals=4)\n', (9156, 9177), 
True, 'import numpy as np\n'), ((9190, 9219), 'numpy.round', 'np.round', (['rmid[7]'], {'decimals': '(4)'}), '(rmid[7], decimals=4)\n', (9198, 9219), True, 'import numpy as np\n'), ((9383, 9412), 'numpy.round', 'np.round', (['pmid[0]'], {'decimals': '(4)'}), '(pmid[0], decimals=4)\n', (9391, 9412), True, 'import numpy as np\n'), ((9416, 9468), 'numpy.round', 'np.round', (['(np.pi / 2.0 / 20 + np.pi / 2.0)'], {'decimals': '(4)'}), '(np.pi / 2.0 / 20 + np.pi / 2.0, decimals=4)\n', (9424, 9468), True, 'import numpy as np\n'), ((9477, 9506), 'numpy.round', 'np.round', (['pmid[7]'], {'decimals': '(4)'}), '(pmid[7], decimals=4)\n', (9485, 9506), True, 'import numpy as np\n'), ((9510, 9569), 'numpy.round', 'np.round', (['(15 * (np.pi / 2.0) / 20 + np.pi / 2.0)'], {'decimals': '(4)'}), '(15 * (np.pi / 2.0) / 20 + np.pi / 2.0, decimals=4)\n', (9518, 9569), True, 'import numpy as np\n'), ((9990, 10019), 'numpy.round', 'np.round', (['rmid[0]'], {'decimals': '(4)'}), '(rmid[0], decimals=4)\n', (9998, 10019), True, 'import numpy as np\n'), ((10032, 10061), 'numpy.round', 'np.round', (['rmid[7]'], {'decimals': '(4)'}), '(rmid[7], decimals=4)\n', (10040, 10061), True, 'import numpy as np\n'), ((10225, 10254), 'numpy.round', 'np.round', (['pmid[0]'], {'decimals': '(4)'}), '(pmid[0], decimals=4)\n', (10233, 10254), True, 'import numpy as np\n'), ((10258, 10290), 'numpy.round', 'np.round', (['(np.pi / 20)'], {'decimals': '(4)'}), '(np.pi / 20, decimals=4)\n', (10266, 10290), True, 'import numpy as np\n'), ((10293, 10322), 'numpy.round', 'np.round', (['pmid[7]'], {'decimals': '(4)'}), '(pmid[7], decimals=4)\n', (10301, 10322), True, 'import numpy as np\n'), ((10326, 10363), 'numpy.round', 'np.round', (['(15 * np.pi / 20)'], {'decimals': '(4)'}), '(15 * np.pi / 20, decimals=4)\n', (10334, 10363), True, 'import numpy as np\n'), ((10639, 10668), 'numpy.round', 'np.round', (['rmid[0]'], {'decimals': '(4)'}), '(rmid[0], decimals=4)\n', (10647, 10668), True, 'import numpy as np\n'), ((10681, 10710), 'numpy.round', 'np.round', (['rmid[7]'], {'decimals': '(4)'}), '(rmid[7], decimals=4)\n', (10689, 10710), True, 'import numpy as np\n'), ((10874, 10903), 'numpy.round', 'np.round', (['pmid[0]'], {'decimals': '(4)'}), '(pmid[0], decimals=4)\n', (10882, 10903), True, 'import numpy as np\n'), ((10907, 10959), 'numpy.round', 'np.round', (['(np.pi / 2.0 / 20 + np.pi / 2.0)'], {'decimals': '(4)'}), '(np.pi / 2.0 / 20 + np.pi / 2.0, decimals=4)\n', (10915, 10959), True, 'import numpy as np\n'), ((10968, 10997), 'numpy.round', 'np.round', (['pmid[7]'], {'decimals': '(4)'}), '(pmid[7], decimals=4)\n', (10976, 10997), True, 'import numpy as np\n'), ((11001, 11060), 'numpy.round', 'np.round', (['(15 * (np.pi / 2.0) / 20 + np.pi / 2.0)'], {'decimals': '(4)'}), '(15 * (np.pi / 2.0) / 20 + np.pi / 2.0, decimals=4)\n', (11009, 11060), True, 'import numpy as np\n')]
|
import sys, os, random
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.linear_model import SGDClassifier
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.metrics import *
# Find the current file's directory path.
try:
dirpath = os.path.dirname(__file__)
except Exception as inst:
dirpath = ''
pass
f_name1 = os.path.join(dirpath,"../datasets/breast-cancer.npz")
f_name2 = os.path.join(dirpath,"../datasets/diabetes.npz")
f_name3 = os.path.join(dirpath,"../datasets/digit.npz")
f_name4 = os.path.join(dirpath,"../datasets/iris.npz")
f_name5 = os.path.join(dirpath,"../datasets/wine.npz")
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
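# Usage sketch (assumption: `scores` is a 2-D array of cross-validation scores):
#   norm = MidpointNormalize(vmin=scores.min(), vmax=scores.max(), midpoint=scores.mean())
#   plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot, norm=norm)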
class ClassModels:
def __init__(self):
self.name = ''
self.grid = ''
self.param_grid = ''
self.cv = StratifiedShuffleSplit(n_splits=5, test_size=0.2, random_state=42)
self.scoring = 'neg_log_loss' #'accuracy', 'f1', 'precision', 'recall', 'roc_auc'
def trainModel(self, cname):
if (cname == "Logistic Regression"):
self.trainLogisticRegression()
elif (cname == "Linear SVM"):
self.trainLinearSVM()
elif (cname == "RBF SVM"):
self.trainRBFSVM()
elif (cname == "Neural Nets"):
self.trainNeuralNets()
else:
print("Please put existing classifier names")
pass
# run CV according to params for each classifier
def trainLogisticRegression(self):
# TODO: try different scoring rule such as Accuracy (default), F1-measure, AUC
loss_range = ['log']
penalty_range = ['l2','l1','none']
alpha_range = np.geomspace(1.e-07, 1.e+05, num=13) # 13 params
self.param_grid = dict(loss=loss_range, penalty=penalty_range, alpha=alpha_range, max_iter=[1000], tol=[1e-3])
self.grid = GridSearchCV(SGDClassifier(), param_grid=self.param_grid, cv=self.cv,
n_jobs=-1)
pass
def trainLinearSVM(self):
kernel_range = ['linear']
C_range = np.geomspace(1.e-07, 1.e+05, num=13) # 13 params :
self.param_grid = dict(kernel=kernel_range, C=C_range)
self.grid = GridSearchCV(SVC(), param_grid=self.param_grid, cv=self.cv,
n_jobs=-1)
pass
def trainRBFSVM(self):
# params C / gamma
kernel_range = ['rbf']
C_range = np.geomspace(1.e-07, 1.e+05, num=13) # 13 params :
gamma_range = np.array([0.001,0.005,0.01,0.05,0.1,0.5,1,2,3]) # 9 params
self.param_grid = dict(kernel=kernel_range, gamma=gamma_range, C=C_range)
self.grid = GridSearchCV(SVC(), param_grid=self.param_grid, cv=self.cv,
n_jobs=-1)
pass
def trainNeuralNets(self):
# early stopping default False, Momentum default 0.9
hidden_layer_sizes_range = np.array([1,2,3,4,5,6,7,8,9,10,16,32]) # 12 params
activation_range = ['logistic']
solver_range = ['sgd']
learning_rate_init_range = np.array([1.0e-04,1.0e-03,1.0e-02,1.0e-01]) # 4 params
self.param_grid = dict(hidden_layer_sizes=hidden_layer_sizes_range,
activation=activation_range,solver=solver_range,
learning_rate_init=learning_rate_init_range,
max_iter=[1000])
self.grid = GridSearchCV(MLPClassifier(), param_grid=self.param_grid, cv=self.cv,
n_jobs=-1)
pass
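# Minimal usage sketch (assumption: the GridSearchCV object built in model.grid is
# fitted by the caller; X_train/y_train are standardized numpy arrays):
#   model = ClassModels()
#   model.trainModel("RBF SVM")         # builds model.grid (an unfitted GridSearchCV)
#   model.grid.fit(X_train, y_train)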
class Report:
def __init__(self):
pass
# Loss + Accuracy (training + test)
# auc + confusion matrix
# cpu computation time
def showResult(self, model, predicted_test, target_test, predicted_train, target_train):
print("The best parameters are %s with a score of %0.3f"
% (model.grid.best_params_, model.grid.best_score_))
print("The Train Log Loss %0.3f Zero one loss %f"
% (log_loss(target_train, predicted_train), zero_one_loss(target_train, predicted_train)))
print("The test Log Loss %0.3f Zero one loss %f"
% (log_loss(target_test, predicted_test), zero_one_loss(target_test, predicted_test)))
print("The train Accuracy %0.3f"
% (accuracy_score(target_train, predicted_train)))
print("The test Accuracy %0.3f"
% (accuracy_score(target_test, predicted_test) ))
print("The test AUC of %0.3f"
% (roc_auc_score(target_test, predicted_test) ))
print("The mean training time of %f"
% (np.mean(model.grid.cv_results_['mean_fit_time'], axis=0)) )
print("The mean test time of %f"
% (np.mean(model.grid.cv_results_['mean_score_time'], axis=0)) )
# confusion matrix
print("confusion matrix / precision recall scores")
print ( confusion_matrix(target_test, predicted_test) )
print ( classification_report(target_test, predicted_test) )
pass
def showPlot(self, model, clfname):
if (clfname == "Logistic Regression"):
self.showLogisticRegression(model, clfname)
elif (clfname == "Linear SVM"):
self.showLinearSVM(model, clfname)
elif (clfname == "RBF SVM"):
self.showRBFSVM(model, clfname)
elif (clfname == "Neural Nets"):
self.showNeuralNets(model, clfname)
else:
print("Please put existing classifier names")
pass
def showLogisticRegression(self, model, clfname):
penalty_range = model.param_grid['penalty']
alpha_range = model.param_grid['alpha'] # 13 params
scores = np.array(model.grid.cv_results_['mean_test_score'])
min_score = scores.min()
max_score = scores.max()
mean_score = np.mean(scores, axis=0)
scores = scores.reshape(len(alpha_range),len(penalty_range))
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=min_score, vmax=max_score, midpoint=mean_score))
plt.xlabel('penalty')
plt.ylabel('alpha (regularization)')
plt.colorbar()
plt.xticks(np.arange(len(penalty_range)), penalty_range, rotation=45)
plt.yticks(np.arange(len(alpha_range)), alpha_range)
plt.title('Validation accuracy')
# plt.show()
pass
def showLinearSVM(self, model, clfname):
C_range = model.param_grid['C']
scores = np.array(model.grid.cv_results_['mean_test_score'])
min_score = scores.min()
max_score = scores.max()
mean_score = np.mean(scores, axis=0)
scores = scores.reshape(len(C_range),1)
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=min_score, vmax=max_score, midpoint=mean_score))
plt.ylabel('C')
plt.colorbar()
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
# plt.show()
pass
def showRBFSVM(self, model, clfname):
C_range = model.param_grid['C']
gamma_range = model.param_grid['gamma']
# scores = model.grid.cv_results_['mean_test_score'].reshape(len(C_range), len(gamma_range))
scores = np.array(model.grid.cv_results_['mean_test_score'])
min_score = scores.min()
max_score = scores.max()
mean_score = np.mean(scores, axis=0)
scores = scores.reshape(len(C_range), len(gamma_range))
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
# plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
# norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=min_score,vmax=max_score, midpoint=mean_score))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
# plt.show()
pass
def showNeuralNets(self, model, clfname):
hidden_layer_sizes_range = model.param_grid['hidden_layer_sizes']
learning_rate_init_range = model.param_grid['learning_rate_init']
scores = np.array(model.grid.cv_results_['mean_test_score'])
min_score = scores.min()
max_score = scores.max()
mean_score = np.mean(scores, axis=0)
scores = scores.reshape(len(learning_rate_init_range), len(hidden_layer_sizes_range))
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=min_score,vmax=max_score, midpoint=mean_score))
plt.xlabel('hidden_layer_sizes')
plt.ylabel('learning_rate_init')
plt.colorbar()
plt.xticks(np.arange(len(hidden_layer_sizes_range)), hidden_layer_sizes_range, rotation=45)
plt.yticks(np.arange(len(learning_rate_init_range)), learning_rate_init_range)
plt.title('Validation accuracy')
# plt.show()
pass
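# Usage sketch (assumption: `model.grid` has already been fitted, so predictions
# and CV results are available for reporting):
#   report = Report()
#   report.showResult(model, model.grid.predict(test_X), test_y,
#                     model.grid.predict(train_X), train_y)
#   report.showPlot(model, "RBF SVM")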
def plotLROverTime(data_x, loss_y, acc_y, idx):
# Set the style globally
# Alternatives include bmh, fivethirtyeight, ggplot,
# dark_background, seaborn-deep, etc
plt.style.use('ggplot')
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Ubuntu'
plt.rcParams['font.monospace'] = 'Ubuntu Mono'
plt.rcParams['font.size'] = 10
plt.rcParams['axes.labelsize'] = 10
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['axes.titlesize'] = 10
plt.rcParams['xtick.labelsize'] = 8
plt.rcParams['ytick.labelsize'] = 8
plt.rcParams['legend.fontsize'] = 10
plt.rcParams['figure.titlesize'] = 12
# Set an aspect ratio
width, height = plt.figaspect(1.68)
fig = plt.figure(figsize=(width, height), dpi=400)
plt.plot(data_x, loss_y, linewidth=0.5, linestyle=':', marker='o',
markersize=2, label='loss')
plt.plot(data_x, acc_y, linewidth=0.5, linestyle='--', marker='v',
markersize=2, label='accuracy')
plt.xlabel('Data Points')
plt.ylabel('Score')
# Axes alteration to put zero values inside the figure Axes
# Avoids axis white lines cutting through zero values - fivethirtyeight style
xmin, xmax, ymin, ymax = plt.axis()
plt.axis([xmin - 0.1, xmax + 0.1, ymin, ymax])
plt.title('LR performance over time', fontstyle='italic')
plt.legend(loc='best', numpoints=1, fancybox=True)
# Space plots a bit
plt.subplots_adjust(hspace=0.25, wspace=0.40)
plt.savefig('./LR_overtime_'+str(idx)+'.png', bbox_inches='tight')
pass
def batches(l, n):
for i in range(0, len(l), n):
yield l[i:i+n]
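# For example, batches(range(5), 2) yields range(0, 2), range(2, 4), range(4, 5);
# runLROverTime below uses these index slices to feed mini-batches to SGDClassifier.partial_fit.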
def runLROverTime(train_X, train_y, test_X, test_y, idx):
clf = SGDClassifier(loss='log') # shuffle=True is useless here
shuffledRange = range(train_X.shape[0])
n_iter = 10
data_point = 0
f_loss = open('./LR_overtime_loss_'+str(idx)+'.txt', 'w')
f_acc = open('./LR_overtime_acc_'+str(idx)+'.txt', 'w')
data_x = []
loss_y = []
acc_y = []
# temp_loss = zero_one_loss(train_y, clf.predict(train_X))
# temp_acc = accuracy_score(train_y, clf.predict(train_X))
# f_loss.write("data_point= " + str(data_point) + " zero_one_loss= " + str(temp_loss) + " \n")
# f_acc.write("data_point= " + str(data_point) + " accuracy= " + str(temp_acc) + " \n")
# data_x.append(data_point)
# loss_y.append(temp_loss)
# acc_y.append(temp_acc)
for n in range(n_iter):
shuffledRange = list(shuffledRange)
random.shuffle(shuffledRange)
shuffledX = [train_X[i] for i in shuffledRange]
shuffledY = [train_y[i] for i in shuffledRange]
for batch in batches(range(len(shuffledX)), 10):
clf.partial_fit(shuffledX[batch[0]:batch[-1] + 1], shuffledY[batch[0]:batch[-1] + 1],
classes=np.unique(train_y))
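            # classes must be supplied to partial_fit (at least on the first call) because
            # the classifier cannot infer the full label set from a single mini-batch.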
data_point += len(batch)
temp_loss = zero_one_loss(train_y, clf.predict(train_X))
temp_acc = accuracy_score(train_y, clf.predict(train_X))
f_loss.write("data_point= " + str(data_point) + " zero_one_loss= " + str(temp_loss) + " \n")
f_acc.write("data_point= " + str(data_point) + " accuracy= " + str(temp_acc) + " \n")
data_x.append(data_point)
loss_y.append(temp_loss)
acc_y.append(temp_acc)
f_loss.write("\n===== End of Training / Test Set Results =====\n")
f_loss.write("data_point= %d , zero_one_loss= %f\n" % (data_point, zero_one_loss(test_y, clf.predict(test_X))))
f_acc.write("\n===== End of Training / Test Set Results =====\n")
f_acc.write("data_point= %d , accuracy= %f\n" % (data_point, accuracy_score(test_y, clf.predict(test_X))))
f_loss.close()
f_acc.close()
plotLROverTime(data_x, loss_y, acc_y, idx)
pass
class RunEval:
def __init__(self):
self.dnames = [f_name1, f_name2, f_name3, f_name4, f_name5]
self.train_X = []
self.train_y = []
self.test_X = []
self.test_y = []
def run(self):
report = Report()
for idx, dname in enumerate(self.dnames):
# load data
if len(sys.argv) > 1 and int(sys.argv[1]) != idx:
continue
data = np.load(dname)
self.train_y = data['train_Y']
self.test_y = data['test_Y']
# standardize data (mean=0, std=1)
self.train_X = StandardScaler().fit_transform(data['train_X'])
self.test_X = StandardScaler().fit_transform(data['test_X'])
print ("shape of data set ", self.train_X.shape, self.train_y.shape, self.test_X.shape, self.test_y.shape)
if len(sys.argv) > 2 and int(sys.argv[2]) == 1:
runLROverTime(self.train_X, self.train_y, self.test_X, self.test_y, idx)
continue
clfnames = ["Logistic Regression", "Linear SVM", "RBF SVM", "Neural Nets"]
# clfnames = ["RBF SVM"]
# clfnames = ["Linear SVM"]
for idx2, clfname in enumerate(clfnames):
print("===== %s " %(dname))
print("===== %s" %(clfname))
# (1) train model with CV model = ClassModels()
model = ClassModels()
model.trainModel(clfname)
model.grid.fit(self.train_X, self.train_y)
# (2) show results
predicted_test = model.grid.predict(self.test_X)
predicted_train = model.grid.predict(self.train_X)
# Loss + Accuracy (training + test)
# auc + confusion matrix
# cpu computation time
report.showResult(model, predicted_test, self.test_y, predicted_train, self.train_y)
report.showPlot(model, clfname)
plt.savefig('./'+clfname+'_'+str(idx)+'.png', bbox_inches = 'tight')
if __name__ == '__main__':
eval = RunEval()
eval.run()
exit()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.figaspect",
"numpy.load",
"sklearn.preprocessing.StandardScaler",
"random.shuffle",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"numpy.mean",
"sklearn.neural_network.MLPClassifier",
"sklearn.svm.SVC",
"numpy.interp",
"os.path.join",
"numpy.unique",
"sklearn.linear_model.SGDClassifier",
"numpy.geomspace",
"os.path.dirname",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.ylabel",
"matplotlib.colors.Normalize.__init__",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axis",
"sklearn.model_selection.StratifiedShuffleSplit",
"numpy.array",
"matplotlib.pyplot.xlabel"
] |
[((1137, 1191), 'os.path.join', 'os.path.join', (['dirpath', '"""../datasets/breast-cancer.npz"""'], {}), "(dirpath, '../datasets/breast-cancer.npz')\n", (1149, 1191), False, 'import sys, os, random\n'), ((1202, 1251), 'os.path.join', 'os.path.join', (['dirpath', '"""../datasets/diabetes.npz"""'], {}), "(dirpath, '../datasets/diabetes.npz')\n", (1214, 1251), False, 'import sys, os, random\n'), ((1262, 1308), 'os.path.join', 'os.path.join', (['dirpath', '"""../datasets/digit.npz"""'], {}), "(dirpath, '../datasets/digit.npz')\n", (1274, 1308), False, 'import sys, os, random\n'), ((1319, 1364), 'os.path.join', 'os.path.join', (['dirpath', '"""../datasets/iris.npz"""'], {}), "(dirpath, '../datasets/iris.npz')\n", (1331, 1364), False, 'import sys, os, random\n'), ((1375, 1420), 'os.path.join', 'os.path.join', (['dirpath', '"""../datasets/wine.npz"""'], {}), "(dirpath, '../datasets/wine.npz')\n", (1387, 1420), False, 'import sys, os, random\n'), ((1045, 1070), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1060, 1070), False, 'import sys, os, random\n'), ((11083, 11106), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (11096, 11106), True, 'import matplotlib.pyplot as plt\n'), ((11629, 11648), 'matplotlib.pyplot.figaspect', 'plt.figaspect', (['(1.68)'], {}), '(1.68)\n', (11642, 11648), True, 'import matplotlib.pyplot as plt\n'), ((11660, 11704), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(width, height)', 'dpi': '(400)'}), '(figsize=(width, height), dpi=400)\n', (11670, 11704), True, 'import matplotlib.pyplot as plt\n'), ((11712, 11810), 'matplotlib.pyplot.plot', 'plt.plot', (['data_x', 'loss_y'], {'linewidth': '(0.5)', 'linestyle': '""":"""', 'marker': '"""o"""', 'markersize': '(2)', 'label': '"""loss"""'}), "(data_x, loss_y, linewidth=0.5, linestyle=':', marker='o',\n markersize=2, label='loss')\n", (11720, 11810), True, 'import matplotlib.pyplot as plt\n'), ((11826, 11928), 'matplotlib.pyplot.plot', 'plt.plot', (['data_x', 'acc_y'], {'linewidth': '(0.5)', 'linestyle': '"""--"""', 'marker': '"""v"""', 'markersize': '(2)', 'label': '"""accuracy"""'}), "(data_x, acc_y, linewidth=0.5, linestyle='--', marker='v',\n markersize=2, label='accuracy')\n", (11834, 11928), True, 'import matplotlib.pyplot as plt\n'), ((11944, 11969), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Data Points"""'], {}), "('Data Points')\n", (11954, 11969), True, 'import matplotlib.pyplot as plt\n'), ((11975, 11994), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Score"""'], {}), "('Score')\n", (11985, 11994), True, 'import matplotlib.pyplot as plt\n'), ((12175, 12185), 'matplotlib.pyplot.axis', 'plt.axis', ([], {}), '()\n', (12183, 12185), True, 'import matplotlib.pyplot as plt\n'), ((12191, 12237), 'matplotlib.pyplot.axis', 'plt.axis', (['[xmin - 0.1, xmax + 0.1, ymin, ymax]'], {}), '([xmin - 0.1, xmax + 0.1, ymin, ymax])\n', (12199, 12237), True, 'import matplotlib.pyplot as plt\n'), ((12243, 12300), 'matplotlib.pyplot.title', 'plt.title', (['"""LR performance over time"""'], {'fontstyle': '"""italic"""'}), "('LR performance over time', fontstyle='italic')\n", (12252, 12300), True, 'import matplotlib.pyplot as plt\n'), ((12306, 12356), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'numpoints': '(1)', 'fancybox': '(True)'}), "(loc='best', numpoints=1, fancybox=True)\n", (12316, 12356), True, 'import matplotlib.pyplot as plt\n'), ((12389, 12433), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], 
{'hspace': '(0.25)', 'wspace': '(0.4)'}), '(hspace=0.25, wspace=0.4)\n', (12408, 12433), True, 'import matplotlib.pyplot as plt\n'), ((12672, 12697), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'loss': '"""log"""'}), "(loss='log')\n", (12685, 12697), False, 'from sklearn.linear_model import SGDClassifier\n'), ((1675, 1717), 'matplotlib.colors.Normalize.__init__', 'Normalize.__init__', (['self', 'vmin', 'vmax', 'clip'], {}), '(self, vmin, vmax, clip)\n', (1693, 1717), False, 'from matplotlib.colors import Normalize\n'), ((2037, 2103), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {'n_splits': '(5)', 'test_size': '(0.2)', 'random_state': '(42)'}), '(n_splits=5, test_size=0.2, random_state=42)\n', (2059, 2103), False, 'from sklearn.model_selection import StratifiedShuffleSplit\n'), ((2908, 2945), 'numpy.geomspace', 'np.geomspace', (['(1e-07)', '(100000.0)'], {'num': '(13)'}), '(1e-07, 100000.0, num=13)\n', (2920, 2945), True, 'import numpy as np\n'), ((3294, 3331), 'numpy.geomspace', 'np.geomspace', (['(1e-07)', '(100000.0)'], {'num': '(13)'}), '(1e-07, 100000.0, num=13)\n', (3306, 3331), True, 'import numpy as np\n'), ((3646, 3683), 'numpy.geomspace', 'np.geomspace', (['(1e-07)', '(100000.0)'], {'num': '(13)'}), '(1e-07, 100000.0, num=13)\n', (3658, 3683), True, 'import numpy as np\n'), ((3721, 3776), 'numpy.array', 'np.array', (['[0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 2, 3]'], {}), '([0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 2, 3])\n', (3729, 3776), True, 'import numpy as np\n'), ((4123, 4172), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16, 32]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16, 32])\n', (4131, 4172), True, 'import numpy as np\n'), ((4283, 4319), 'numpy.array', 'np.array', (['[0.0001, 0.001, 0.01, 0.1]'], {}), '([0.0001, 0.001, 0.01, 0.1])\n', (4291, 4319), True, 'import numpy as np\n'), ((6959, 7010), 'numpy.array', 'np.array', (["model.grid.cv_results_['mean_test_score']"], {}), "(model.grid.cv_results_['mean_test_score'])\n", (6967, 7010), True, 'import numpy as np\n'), ((7101, 7124), 'numpy.mean', 'np.mean', (['scores'], {'axis': '(0)'}), '(scores, axis=0)\n', (7108, 7124), True, 'import numpy as np\n'), ((7206, 7232), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (7216, 7232), True, 'import matplotlib.pyplot as plt\n'), ((7242, 7306), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.2)', 'right': '(0.95)', 'bottom': '(0.15)', 'top': '(0.95)'}), '(left=0.2, right=0.95, bottom=0.15, top=0.95)\n', (7261, 7306), True, 'import matplotlib.pyplot as plt\n'), ((7482, 7503), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""penalty"""'], {}), "('penalty')\n", (7492, 7503), True, 'import matplotlib.pyplot as plt\n'), ((7513, 7549), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""alpha (regularization)"""'], {}), "('alpha (regularization)')\n", (7523, 7549), True, 'import matplotlib.pyplot as plt\n'), ((7559, 7573), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (7571, 7573), True, 'import matplotlib.pyplot as plt\n'), ((7724, 7756), 'matplotlib.pyplot.title', 'plt.title', (['"""Validation accuracy"""'], {}), "('Validation accuracy')\n", (7733, 7756), True, 'import matplotlib.pyplot as plt\n'), ((7900, 7951), 'numpy.array', 'np.array', (["model.grid.cv_results_['mean_test_score']"], {}), "(model.grid.cv_results_['mean_test_score'])\n", (7908, 7951), True, 'import numpy as np\n'), ((8042, 8065), 'numpy.mean', 'np.mean', 
(['scores'], {'axis': '(0)'}), '(scores, axis=0)\n', (8049, 8065), True, 'import numpy as np\n'), ((8126, 8152), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (8136, 8152), True, 'import matplotlib.pyplot as plt\n'), ((8162, 8226), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.2)', 'right': '(0.95)', 'bottom': '(0.15)', 'top': '(0.95)'}), '(left=0.2, right=0.95, bottom=0.15, top=0.95)\n', (8181, 8226), True, 'import matplotlib.pyplot as plt\n'), ((8402, 8417), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""C"""'], {}), "('C')\n", (8412, 8417), True, 'import matplotlib.pyplot as plt\n'), ((8427, 8441), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (8439, 8441), True, 'import matplotlib.pyplot as plt\n'), ((8505, 8537), 'matplotlib.pyplot.title', 'plt.title', (['"""Validation accuracy"""'], {}), "('Validation accuracy')\n", (8514, 8537), True, 'import matplotlib.pyplot as plt\n'), ((8829, 8880), 'numpy.array', 'np.array', (["model.grid.cv_results_['mean_test_score']"], {}), "(model.grid.cv_results_['mean_test_score'])\n", (8837, 8880), True, 'import numpy as np\n'), ((8971, 8994), 'numpy.mean', 'np.mean', (['scores'], {'axis': '(0)'}), '(scores, axis=0)\n', (8978, 8994), True, 'import numpy as np\n'), ((9071, 9097), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (9081, 9097), True, 'import matplotlib.pyplot as plt\n'), ((9107, 9171), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.2)', 'right': '(0.95)', 'bottom': '(0.15)', 'top': '(0.95)'}), '(left=0.2, right=0.95, bottom=0.15, top=0.95)\n', (9126, 9171), True, 'import matplotlib.pyplot as plt\n'), ((9489, 9508), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""gamma"""'], {}), "('gamma')\n", (9499, 9508), True, 'import matplotlib.pyplot as plt\n'), ((9518, 9533), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""C"""'], {}), "('C')\n", (9528, 9533), True, 'import matplotlib.pyplot as plt\n'), ((9543, 9557), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (9555, 9557), True, 'import matplotlib.pyplot as plt\n'), ((9696, 9728), 'matplotlib.pyplot.title', 'plt.title', (['"""Validation accuracy"""'], {}), "('Validation accuracy')\n", (9705, 9728), True, 'import matplotlib.pyplot as plt\n'), ((9984, 10035), 'numpy.array', 'np.array', (["model.grid.cv_results_['mean_test_score']"], {}), "(model.grid.cv_results_['mean_test_score'])\n", (9992, 10035), True, 'import numpy as np\n'), ((10126, 10149), 'numpy.mean', 'np.mean', (['scores'], {'axis': '(0)'}), '(scores, axis=0)\n', (10133, 10149), True, 'import numpy as np\n'), ((10256, 10282), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (10266, 10282), True, 'import matplotlib.pyplot as plt\n'), ((10292, 10356), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.2)', 'right': '(0.95)', 'bottom': '(0.15)', 'top': '(0.95)'}), '(left=0.2, right=0.95, bottom=0.15, top=0.95)\n', (10311, 10356), True, 'import matplotlib.pyplot as plt\n'), ((10531, 10563), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""hidden_layer_sizes"""'], {}), "('hidden_layer_sizes')\n", (10541, 10563), True, 'import matplotlib.pyplot as plt\n'), ((10573, 10605), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""learning_rate_init"""'], {}), "('learning_rate_init')\n", (10583, 10605), True, 'import matplotlib.pyplot as plt\n'), ((10615, 10629), 'matplotlib.pyplot.colorbar', 
'plt.colorbar', ([], {}), '()\n', (10627, 10629), True, 'import matplotlib.pyplot as plt\n'), ((10828, 10860), 'matplotlib.pyplot.title', 'plt.title', (['"""Validation accuracy"""'], {}), "('Validation accuracy')\n", (10837, 10860), True, 'import matplotlib.pyplot as plt\n'), ((13485, 13514), 'random.shuffle', 'random.shuffle', (['shuffledRange'], {}), '(shuffledRange)\n', (13499, 13514), False, 'import sys, os, random\n'), ((1865, 1887), 'numpy.interp', 'np.interp', (['value', 'x', 'y'], {}), '(value, x, y)\n', (1874, 1887), True, 'import numpy as np\n'), ((3112, 3127), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {}), '()\n', (3125, 3127), False, 'from sklearn.linear_model import SGDClassifier\n'), ((3444, 3449), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (3447, 3449), False, 'from sklearn.svm import SVC\n'), ((3898, 3903), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (3901, 3903), False, 'from sklearn.svm import SVC\n'), ((4656, 4671), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {}), '()\n', (4669, 4671), False, 'from sklearn.neural_network import MLPClassifier\n'), ((15267, 15281), 'numpy.load', 'np.load', (['dname'], {}), '(dname)\n', (15274, 15281), True, 'import numpy as np\n'), ((5846, 5902), 'numpy.mean', 'np.mean', (["model.grid.cv_results_['mean_fit_time']"], {'axis': '(0)'}), "(model.grid.cv_results_['mean_fit_time'], axis=0)\n", (5853, 5902), True, 'import numpy as np\n'), ((5966, 6024), 'numpy.mean', 'np.mean', (["model.grid.cv_results_['mean_score_time']"], {'axis': '(0)'}), "(model.grid.cv_results_['mean_score_time'], axis=0)\n", (5973, 6024), True, 'import numpy as np\n'), ((13824, 13842), 'numpy.unique', 'np.unique', (['train_y'], {}), '(train_y)\n', (13833, 13842), True, 'import numpy as np\n'), ((15444, 15460), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (15458, 15460), False, 'from sklearn.preprocessing import StandardScaler\n'), ((15519, 15535), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (15533, 15535), False, 'from sklearn.preprocessing import StandardScaler\n')]
|
import gzip
import sys
import argparse
import re
import logging
import numpy as np
import pandas as p
from itertools import product, tee
from collections import Counter, OrderedDict
from Bio import SeqIO
def generate_feature_mapping(kmer_len):
BASE_COMPLEMENT = {"A":"T","T":"A","G":"C","C":"G"}
kmer_hash = {}
counter = 0
for kmer in product("ATGC",repeat=kmer_len):
if kmer not in kmer_hash:
kmer_hash[kmer] = counter
rev_compl = tuple([BASE_COMPLEMENT[x] for x in reversed(kmer)])
kmer_hash[rev_compl] = counter
counter += 1
return kmer_hash, counter
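# Example: with kmer_len=1 this maps ('A',) and its reverse complement ('T',) to index 0
# and ('G',)/('C',) to index 1, so every kmer shares a feature column with its reverse complement.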
def window(seq,n):
els = tee(seq,n)
for i,el in enumerate(els):
for _ in range(i):
next(el, None)
return zip(*els)
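# Example: list(window("ATGC", 2)) -> [('A', 'T'), ('T', 'G'), ('G', 'C')]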
def _calculate_composition(read_file, kmer_len, length_threshold=25):
#Generate kmer dictionary
feature_mapping, nr_features = generate_feature_mapping(kmer_len)
    composition = np.zeros(nr_features, dtype=int)
    start_composition = np.zeros(nr_features, dtype=int)
with gzip.open(read_file, "rt") as handle:
for seq in SeqIO.parse(handle,"fastq"):
seq_len = len(seq)
if seq_len<= length_threshold:
continue
str_seq = str(seq.seq)
# Create a list containing all kmers, translated to integers
kmers = [
feature_mapping[kmer_tuple]
for kmer_tuple
in window(str_seq.upper(), kmer_len)
if kmer_tuple in feature_mapping
]
# numpy.bincount returns an array of size = max + 1
# so we add the max value and remove it afterwards
# numpy.bincount was found to be much more efficient than
# counting manually or using collections.Counter
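            # e.g. np.bincount(np.array([0, 2, 2, nr_features - 1])) has exactly
            # nr_features entries; the extra count in the last bin is removed just below.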
kmers.append(nr_features - 1)
composition_v = np.bincount(np.array(kmers))
composition_v[-1] -= 1
            # Accumulate this read's kmer counts into the running composition
composition += composition_v
failStart = 0
if seq_len >= kmer_len:
startKmer = str_seq[0:kmer_len].upper()
startKmerT = tuple(startKmer)
if startKmerT in feature_mapping:
start_composition[feature_mapping[startKmerT]]+=1
else:
failStart+=1
return feature_mapping, composition, start_composition, failStart
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument("read_file", help="gzipped fastq read file")
parser.add_argument("kmer_length", help="kmer length assumed overlap")
parser.add_argument("outFileStub", help="stub for output files")
args = parser.parse_args()
#import ipdb; ipdb.set_trace()
(feature_mapping, composition, start_composition,failStart) = _calculate_composition(args.read_file, int(args.kmer_length))
print(str(failStart))
for k in sorted(feature_mapping, key=feature_mapping.get):
kidx = feature_mapping[k]
print("".join(k) + "," + str(kidx) + "," + str(composition[kidx]) + "," + str(start_composition[kidx]) )
if __name__ == "__main__":
main(sys.argv[1:])
|
[
"gzip.open",
"argparse.ArgumentParser",
"Bio.SeqIO.parse",
"numpy.zeros",
"numpy.array",
"itertools.product",
"itertools.tee"
] |
[((355, 387), 'itertools.product', 'product', (['"""ATGC"""'], {'repeat': 'kmer_len'}), "('ATGC', repeat=kmer_len)\n", (362, 387), False, 'from itertools import product, tee\n'), ((664, 675), 'itertools.tee', 'tee', (['seq', 'n'], {}), '(seq, n)\n', (667, 675), False, 'from itertools import product, tee\n'), ((971, 1006), 'numpy.zeros', 'np.zeros', (['nr_features'], {'dtype': 'np.int'}), '(nr_features, dtype=np.int)\n', (979, 1006), True, 'import numpy as np\n'), ((1030, 1065), 'numpy.zeros', 'np.zeros', (['nr_features'], {'dtype': 'np.int'}), '(nr_features, dtype=np.int)\n', (1038, 1065), True, 'import numpy as np\n'), ((2557, 2582), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2580, 2582), False, 'import argparse\n'), ((1079, 1105), 'gzip.open', 'gzip.open', (['read_file', '"""rt"""'], {}), "(read_file, 'rt')\n", (1088, 1105), False, 'import gzip\n'), ((1137, 1165), 'Bio.SeqIO.parse', 'SeqIO.parse', (['handle', '"""fastq"""'], {}), "(handle, 'fastq')\n", (1148, 1165), False, 'from Bio import SeqIO\n'), ((1951, 1966), 'numpy.array', 'np.array', (['kmers'], {}), '(kmers)\n', (1959, 1966), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
'''
<NAME>
1. a. Frequentist confidence intervals do not respect the physical limitations imposed on a system, e.g. the non-negativity of a mass.
b. Typically, that the probability to be found outside the interval on both sides of the distribution is 16% (or (100-CL)/2 %).
      Often constructed from the likelihood function, finding where the log-likelihood falls by 1/2 from its maximum.
c. We need a prior PDF to construct the posterior PDF for \mu_t.
   d. 1/\mu_t. He justifies this prior as being invariant under a change of variables to any power of \mu_t.
e. Bayesian methods fail to be objective: they must be injected with a prior PDF to construct the posterior from the likelihood function.
Classical intervals fail to consider physical limitations on the measured parameter.
Classical limits also handle systematics in a counterintuitive way, such that a bad calibration leads to a tighter confidence interval.
      It seems that generally people use classical statistics except when it produces results that 'seem' wrong, in which case they fall back on Bayesian methods.
f. As Cousins did, perform classical analysis on the mean and statistical error and use a Bayesian analysis of the detector sensitivity.
2. I repeated this entire problem for a quadratic plot. The files ending in "_quad.pdf" are from the second iteration with a quadratic dataset.
a. The data are shown in blue, the linear fit in red, and the quadratic fit in blue.
b. The symmetry of the histogram reflects unbiased estimators.
c. The functional form is:
1/(2^{df/2}\Gamma(df/2)) x^{df/2-1}e^{-x/2}
The single parameter, df, is the number of degrees of freedom in the fit. Since we have 15 data points, this is either 12 or 13.
For the linear fit, we have two free parameters so df=13; for the quadratic fit with three free parameters, df=12.
We expected the reduced chi square to be around 1, and this is the result for both fits.
* For comparison I give a normalized reduced Chi2 distribution for df=12 and df=13. Overlaying them was not obviously easy, but comparing by-eye they are identical.
      I plotted reduced chi squares throughout because of their goodness-of-fit usefulness, but the conversion between the two statistics is simple.
d. In the case of the linear data, the fit gets worse. It is difficult to predict what happens here: if we are lucky enough that we can fit
some noise to the new x^2 degree of freedom, the X^2 will lower. However, the ndf has reduced by 1, so if there is overall no noise we can
fit away, then the reduced chi square will rise.
In the case of the quadratic data, the linear fit is abysmal and the quadratic fit is around 1. This is also expected.
3. a. I sampled the distribution using the cdf; for reference I included both the plot of the distribution and the cdf.
b. Transforming error bars for log data is not entirely trivial because applying the logarithm literally yields asymmetric error bars.
Instead, I transformed to first-order (d/dx log x), using \sigma_{D,log}=\sigma_D/D
c. It takes a rather long time to run this with a large number of statistics (maybe I am doing something very inefficient).
From running the experiment 500 times, I can say that poor sampling of the tails of the distribution leads to underestimation: that is,
we can see a bias in the distribution that favors the left side. I verified this by reducing the number of samples taken
from the distribution by a factor of 10 and re-running, giving bins that are much less well-populated. I attached outputs for both cases.
Rather than wrestle with masking or reassigning garbage datasets post-log, I discarded all results for which the fit failed.
'''
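# Worked example (illustrative numbers, not taken from the assignment data) of the
# first-order error propagation used in 3b: since d(log D)/dD = 1/D, an absolute error
# sigma_D on a bin content D becomes sigma_D / D on log(D). With Poisson-style errors
# sigma_D = sqrt(D):
#   D = 100 -> sigma_D = 10 -> sigma_logD = 10/100 = 0.1
#   D = 25  -> sigma_D = 5  -> sigma_logD = 5/25   = 0.2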
import random
import math
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stat
#MeasurementTaker smears a true value with Gaussian noise of the given resolution.
#theExponential (below) maps uniform random reals onto the exponential distribution
#using a cumulative lookup table of the PDF.
class MeasurementTaker:
def __init__(self, resolution):
self.theResolution=resolution
def GeneratePointWithResolution(self, val):
point=np.random.normal(loc=val,scale=self.theResolution)
return point
class theLine:
def __init__(self, degree):
self.quadcoeff=1
self.degree=degree
self.m=2
self.b=6
self.res=2
self.X=np.linspace(1,15, 15)
self.Y=[]
self.x=0
self.residuals=0
self.ChiSquare=0
if self.degree == 1:
self.BuildLine()
else:
self.BuildLineQuadratic()
self.FitLine()
def BuildLine(self):
measurer = MeasurementTaker(2)
for i, entry in enumerate(self.X):
self.Y.append(measurer.GeneratePointWithResolution(self.m*entry+self.b))
def BuildLineQuadratic(self):
measurer = MeasurementTaker(2)
for i, entry in enumerate(self.X):
self.Y.append(measurer.GeneratePointWithResolution(self.quadcoeff*entry**2+self.m*entry+self.b))
def FitLine(self):
self.coeffs = np.polyfit(self.X, self.Y, 1)
self.ChiSquare=np.sum((((self.coeffs[0]*self.X+self.coeffs[1])-self.Y)/self.res) ** 2)
self.quadcoeffs=np.polyfit(self.X, self.Y,2)
self.ChiSquareQuad=np.sum((((self.quadcoeffs[0]*self.X**2+self.quadcoeffs[1]*self.X+self.quadcoeffs[2])-self.Y)/self.res)**2)
def PlotLine(self, title):
plt.errorbar(self.X,self.Y,xerr=0,yerr=2)
plt.plot(self.X,self.quadcoeffs[0]*self.X**2+self.quadcoeffs[1]*self.X+self.quadcoeffs[2])
plt.plot(self.X,self.coeffs[0]*self.X+self.coeffs[1])
plt.xlabel("x")
plt.ylabel("y")
plt.title("The Line")
plt.savefig(title)
plt.clf()
class theExponential:
lookup_x=[]
lookup_y=[]
cdf=[]
maxcdf=0
def GenerateSample(self):
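        # Inverse-transform sampling against the unnormalised cumulative sum: draw a
        # uniform value between cdf[0] and cdf[-1], locate the first bin whose cumulative
        # value exceeds it, and record the corresponding lookup_x entry as the sample.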
randomNumber = random.uniform(theExponential.cdf[0],theExponential.maxcdf)
index=-1
if randomNumber < theExponential.cdf[0]:
index=0
else:
for i in range(0,len(theExponential.cdf)-1):
if randomNumber > theExponential.cdf[i] and randomNumber < theExponential.cdf[i+1]:
index=i+1
if index != -1:
self.samples.append(theExponential.lookup_x[index])
def GenerateNSamples(self, numSamples):
for i in range(0, numSamples):
self.GenerateSample()
def AnalyzeDistro(self, index):
y,binEdges = np.histogram(self.samples,bins=10)
bincenters = 0.5*(binEdges[1:]+binEdges[:-1])
menStd = np.sqrt(y)
width = 0.20
plt.clf()
if index == 1:
plt.bar(bincenters, y, width=width, yerr=menStd, ecolor='g')
plt.xlabel("Value")
plt.ylabel("Entries")
plt.title(str(len(self.samples))+" exponential samples")
plt.savefig("3b_exp_samples.png")
plt.clf()
self.logsamples=np.log(y)
logcoeffs = np.polyfit(bincenters, self.logsamples, 1)
if index == 1:
plt.bar(bincenters,self.logsamples,width=width, yerr=menStd/y, ecolor='g')
plt.xlabel("Value")
plt.ylabel("log Entries")
plt.title(str(len(self.samples))+" exponential samples")
theFitX=np.linspace(0,5,1000)
theFitY=theFitX*logcoeffs[0]+logcoeffs[1]
plt.plot(theFitX,theFitY)
plt.savefig("3b_exp_samples_log.png")
plt.clf()
return -1*logcoeffs[0]
def __init__(self, nSamples):
self.samples=[]
self.logbins=[]
self.GenerateNSamples(nSamples)
theExponential.lookup_x=np.linspace(0, 5, 10000)
theExponential.lookup_y=np.exp(-theExponential.lookup_x)
runningAverage=0
for val in theExponential.lookup_y:
runningAverage=runningAverage+val
theExponential.cdf.append(runningAverage)
theExponential.maxcdf=theExponential.cdf[len(theExponential.cdf)-1]
plt.clf()
print("Running...")
plt.plot(theExponential.lookup_x, theExponential.lookup_y)
plt.xlabel("x")
plt.ylabel("$e^{-x}$")
plt.title("Exponential distribution")
plt.savefig("3_exponential_dist.png")
plt.clf()
plt.plot(theExponential.lookup_x, theExponential.cdf)
plt.xlabel("x")
plt.ylabel("cdf")
plt.title("Exponential cdf")
plt.savefig("3_exponential_cdf.png")
plt.clf()
for i in range(0,2):
fileEnding=0
degree=i+1
if i == 0:
fileEnding=".png"
else:
fileEnding="_quad.png"
Lines=[]
slopes=[]
intercepts=[]
quads=[]
chisqs=[]
chisqquads=[]
for j in range(0,1000):
line = theLine(degree)
Lines.append(line)
if j == 1:
line.PlotLine("2a_line"+fileEnding)
if i == 0:
slopes.append(line.coeffs[0])
intercepts.append(line.coeffs[1])
else:
quads.append(line.quadcoeffs[0])
slopes.append(line.quadcoeffs[1])
intercepts.append(line.quadcoeffs[2])
chisqs.append(line.ChiSquare/13)
chisqquads.append(line.ChiSquareQuad/12)
plt.hist(slopes, bins=100)
plt.xlabel("m")
plt.ylabel("Entries")
plt.title("Slopes histogram")
plt.savefig("2b_slopes"+fileEnding)
plt.clf()
plt.hist(intercepts, bins=100)
plt.xlabel("b")
plt.ylabel("Entries")
plt.title("Intercepts histogram")
plt.savefig("2b_intercepts"+fileEnding)
plt.clf()
if i == 1:
        plt.hist(quads, bins=100)
plt.xlabel("a (quadratic coefficient)")
plt.ylabel("Entries")
plt.title("Quadratic coefficient histogram")
plt.savefig("2b_quads"+fileEnding)
plt.clf()
plt.hist(chisqs, bins=100)
plt.xlabel("X^2 / ndf")
plt.ylabel("Entries")
plt.title("Chi-square of linear fit")
plt.savefig("2c_chisq"+fileEnding)
plt.clf()
plt.hist(chisqquads, bins=100)
plt.xlabel("X^2 / ndf")
plt.ylabel("Entries")
plt.title("Chi-square of quadratic fit")
plt.savefig("2d_chisq2"+fileEnding)
plt.clf()
theNdf=0
if i ==1:
theNdf=12
else:
theNdf=13
chispace=np.linspace(0,theNdf*3,1000)
chidist=stat.chi2(theNdf,1)
plt.plot(chispace/theNdf, chidist.pdf(chispace))
plt.xlabel("X^2")
plt.ylabel("P")
plt.title("Chi-square distribution (ndf ="+str(theNdf)+")")
plt.savefig("2d_chisq2pdf"+fileEnding)
plt.clf()
Taus=[]
for i in range(0,500):
if i % 100 == 0:
print(i)
exp = theExponential(500)
result=exp.AnalyzeDistro(i)
if math.isnan(result) == False:
Taus.append(result)
print(Taus)
plt.hist(Taus, bins=20)
plt.xlabel("Tau")
plt.ylabel("Entries")
plt.title("Estimated Tau")
plt.savefig("3c_tau_hist_500samples.png")
Taus=[]
for i in range(0,500):
if i % 100 == 0:
print(i)
exp = theExponential(50)
result=exp.AnalyzeDistro(i)
if math.isnan(result) == False:
Taus.append(result)
print(Taus)
plt.hist(Taus, bins=20)
plt.xlabel("Tau")
plt.ylabel("Entries")
plt.title("Estimated Tau")
plt.savefig("3c_tau_hist_50samples.png")
|
[
"matplotlib.pyplot.title",
"numpy.sum",
"matplotlib.pyplot.clf",
"numpy.polyfit",
"matplotlib.pyplot.bar",
"numpy.histogram",
"numpy.exp",
"numpy.random.normal",
"numpy.linspace",
"matplotlib.pyplot.errorbar",
"math.isnan",
"scipy.stats.chi2",
"matplotlib.pyplot.ylabel",
"numpy.log",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.hist",
"random.uniform",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((8154, 8178), 'numpy.linspace', 'np.linspace', (['(0)', '(5)', '(10000)'], {}), '(0, 5, 10000)\n', (8165, 8178), True, 'import numpy as np\n'), ((8204, 8236), 'numpy.exp', 'np.exp', (['(-theExponential.lookup_x)'], {}), '(-theExponential.lookup_x)\n', (8210, 8236), True, 'import numpy as np\n'), ((8448, 8457), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8455, 8457), True, 'import matplotlib.pyplot as plt\n'), ((8482, 8540), 'matplotlib.pyplot.plot', 'plt.plot', (['theExponential.lookup_x', 'theExponential.lookup_y'], {}), '(theExponential.lookup_x, theExponential.lookup_y)\n', (8490, 8540), True, 'import matplotlib.pyplot as plt\n'), ((8542, 8557), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (8552, 8557), True, 'import matplotlib.pyplot as plt\n'), ((8559, 8581), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$e^{-x}$"""'], {}), "('$e^{-x}$')\n", (8569, 8581), True, 'import matplotlib.pyplot as plt\n'), ((8583, 8620), 'matplotlib.pyplot.title', 'plt.title', (['"""Exponential distribution"""'], {}), "('Exponential distribution')\n", (8592, 8620), True, 'import matplotlib.pyplot as plt\n'), ((8622, 8659), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""3_exponential_dist.png"""'], {}), "('3_exponential_dist.png')\n", (8633, 8659), True, 'import matplotlib.pyplot as plt\n'), ((8661, 8670), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8668, 8670), True, 'import matplotlib.pyplot as plt\n'), ((8674, 8727), 'matplotlib.pyplot.plot', 'plt.plot', (['theExponential.lookup_x', 'theExponential.cdf'], {}), '(theExponential.lookup_x, theExponential.cdf)\n', (8682, 8727), True, 'import matplotlib.pyplot as plt\n'), ((8729, 8744), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (8739, 8744), True, 'import matplotlib.pyplot as plt\n'), ((8746, 8763), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""cdf"""'], {}), "('cdf')\n", (8756, 8763), True, 'import matplotlib.pyplot as plt\n'), ((8766, 8794), 'matplotlib.pyplot.title', 'plt.title', (['"""Exponential cdf"""'], {}), "('Exponential cdf')\n", (8775, 8794), True, 'import matplotlib.pyplot as plt\n'), ((8796, 8832), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""3_exponential_cdf.png"""'], {}), "('3_exponential_cdf.png')\n", (8807, 8832), True, 'import matplotlib.pyplot as plt\n'), ((8834, 8843), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8841, 8843), True, 'import matplotlib.pyplot as plt\n'), ((11260, 11283), 'matplotlib.pyplot.hist', 'plt.hist', (['Taus'], {'bins': '(20)'}), '(Taus, bins=20)\n', (11268, 11283), True, 'import matplotlib.pyplot as plt\n'), ((11285, 11302), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Tau"""'], {}), "('Tau')\n", (11295, 11302), True, 'import matplotlib.pyplot as plt\n'), ((11304, 11325), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Entries"""'], {}), "('Entries')\n", (11314, 11325), True, 'import matplotlib.pyplot as plt\n'), ((11327, 11353), 'matplotlib.pyplot.title', 'plt.title', (['"""Estimated Tau"""'], {}), "('Estimated Tau')\n", (11336, 11353), True, 'import matplotlib.pyplot as plt\n'), ((11355, 11396), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""3c_tau_hist_500samples.png"""'], {}), "('3c_tau_hist_500samples.png')\n", (11366, 11396), True, 'import matplotlib.pyplot as plt\n'), ((11615, 11638), 'matplotlib.pyplot.hist', 'plt.hist', (['Taus'], {'bins': '(20)'}), '(Taus, bins=20)\n', (11623, 11638), True, 'import matplotlib.pyplot as plt\n'), ((11640, 11657), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Tau"""'], 
{}), "('Tau')\n", (11650, 11657), True, 'import matplotlib.pyplot as plt\n'), ((11659, 11680), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Entries"""'], {}), "('Entries')\n", (11669, 11680), True, 'import matplotlib.pyplot as plt\n'), ((11682, 11708), 'matplotlib.pyplot.title', 'plt.title', (['"""Estimated Tau"""'], {}), "('Estimated Tau')\n", (11691, 11708), True, 'import matplotlib.pyplot as plt\n'), ((11710, 11750), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""3c_tau_hist_50samples.png"""'], {}), "('3c_tau_hist_50samples.png')\n", (11721, 11750), True, 'import matplotlib.pyplot as plt\n'), ((9619, 9645), 'matplotlib.pyplot.hist', 'plt.hist', (['slopes'], {'bins': '(100)'}), '(slopes, bins=100)\n', (9627, 9645), True, 'import matplotlib.pyplot as plt\n'), ((9651, 9666), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""m"""'], {}), "('m')\n", (9661, 9666), True, 'import matplotlib.pyplot as plt\n'), ((9672, 9693), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Entries"""'], {}), "('Entries')\n", (9682, 9693), True, 'import matplotlib.pyplot as plt\n'), ((9699, 9728), 'matplotlib.pyplot.title', 'plt.title', (['"""Slopes histogram"""'], {}), "('Slopes histogram')\n", (9708, 9728), True, 'import matplotlib.pyplot as plt\n'), ((9734, 9771), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('2b_slopes' + fileEnding)"], {}), "('2b_slopes' + fileEnding)\n", (9745, 9771), True, 'import matplotlib.pyplot as plt\n'), ((9775, 9784), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (9782, 9784), True, 'import matplotlib.pyplot as plt\n'), ((9796, 9826), 'matplotlib.pyplot.hist', 'plt.hist', (['intercepts'], {'bins': '(100)'}), '(intercepts, bins=100)\n', (9804, 9826), True, 'import matplotlib.pyplot as plt\n'), ((9832, 9847), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""b"""'], {}), "('b')\n", (9842, 9847), True, 'import matplotlib.pyplot as plt\n'), ((9853, 9874), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Entries"""'], {}), "('Entries')\n", (9863, 9874), True, 'import matplotlib.pyplot as plt\n'), ((9880, 9913), 'matplotlib.pyplot.title', 'plt.title', (['"""Intercepts histogram"""'], {}), "('Intercepts histogram')\n", (9889, 9913), True, 'import matplotlib.pyplot as plt\n'), ((9919, 9960), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('2b_intercepts' + fileEnding)"], {}), "('2b_intercepts' + fileEnding)\n", (9930, 9960), True, 'import matplotlib.pyplot as plt\n'), ((9964, 9973), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (9971, 9973), True, 'import matplotlib.pyplot as plt\n'), ((10268, 10294), 'matplotlib.pyplot.hist', 'plt.hist', (['chisqs'], {'bins': '(100)'}), '(chisqs, bins=100)\n', (10276, 10294), True, 'import matplotlib.pyplot as plt\n'), ((10300, 10323), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X^2 / ndf"""'], {}), "('X^2 / ndf')\n", (10310, 10323), True, 'import matplotlib.pyplot as plt\n'), ((10329, 10350), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Entries"""'], {}), "('Entries')\n", (10339, 10350), True, 'import matplotlib.pyplot as plt\n'), ((10356, 10393), 'matplotlib.pyplot.title', 'plt.title', (['"""Chi-square of linear fit"""'], {}), "('Chi-square of linear fit')\n", (10365, 10393), True, 'import matplotlib.pyplot as plt\n'), ((10399, 10435), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('2c_chisq' + fileEnding)"], {}), "('2c_chisq' + fileEnding)\n", (10410, 10435), True, 'import matplotlib.pyplot as plt\n'), ((10439, 10448), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (10446, 10448), True, 'import matplotlib.pyplot as 
plt\n'), ((10466, 10496), 'matplotlib.pyplot.hist', 'plt.hist', (['chisqquads'], {'bins': '(100)'}), '(chisqquads, bins=100)\n', (10474, 10496), True, 'import matplotlib.pyplot as plt\n'), ((10502, 10525), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X^2 / ndf"""'], {}), "('X^2 / ndf')\n", (10512, 10525), True, 'import matplotlib.pyplot as plt\n'), ((10531, 10552), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Entries"""'], {}), "('Entries')\n", (10541, 10552), True, 'import matplotlib.pyplot as plt\n'), ((10558, 10598), 'matplotlib.pyplot.title', 'plt.title', (['"""Chi-square of quadratic fit"""'], {}), "('Chi-square of quadratic fit')\n", (10567, 10598), True, 'import matplotlib.pyplot as plt\n'), ((10604, 10641), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('2d_chisq2' + fileEnding)"], {}), "('2d_chisq2' + fileEnding)\n", (10615, 10641), True, 'import matplotlib.pyplot as plt\n'), ((10645, 10654), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (10652, 10654), True, 'import matplotlib.pyplot as plt\n'), ((10757, 10789), 'numpy.linspace', 'np.linspace', (['(0)', '(theNdf * 3)', '(1000)'], {}), '(0, theNdf * 3, 1000)\n', (10768, 10789), True, 'import numpy as np\n'), ((10799, 10819), 'scipy.stats.chi2', 'stat.chi2', (['theNdf', '(1)'], {}), '(theNdf, 1)\n', (10808, 10819), True, 'import scipy.stats as stat\n'), ((10878, 10895), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X^2"""'], {}), "('X^2')\n", (10888, 10895), True, 'import matplotlib.pyplot as plt\n'), ((10901, 10916), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""P"""'], {}), "('P')\n", (10911, 10916), True, 'import matplotlib.pyplot as plt\n'), ((10987, 11027), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('2d_chisq2pdf' + fileEnding)"], {}), "('2d_chisq2pdf' + fileEnding)\n", (10998, 11027), True, 'import matplotlib.pyplot as plt\n'), ((11031, 11040), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (11038, 11040), True, 'import matplotlib.pyplot as plt\n'), ((4403, 4454), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'val', 'scale': 'self.theResolution'}), '(loc=val, scale=self.theResolution)\n', (4419, 4454), True, 'import numpy as np\n'), ((4659, 4681), 'numpy.linspace', 'np.linspace', (['(1)', '(15)', '(15)'], {}), '(1, 15, 15)\n', (4670, 4681), True, 'import numpy as np\n'), ((5400, 5429), 'numpy.polyfit', 'np.polyfit', (['self.X', 'self.Y', '(1)'], {}), '(self.X, self.Y, 1)\n', (5410, 5429), True, 'import numpy as np\n'), ((5454, 5531), 'numpy.sum', 'np.sum', (['(((self.coeffs[0] * self.X + self.coeffs[1] - self.Y) / self.res) ** 2)'], {}), '(((self.coeffs[0] * self.X + self.coeffs[1] - self.Y) / self.res) ** 2)\n', (5460, 5531), True, 'import numpy as np\n'), ((5551, 5580), 'numpy.polyfit', 'np.polyfit', (['self.X', 'self.Y', '(2)'], {}), '(self.X, self.Y, 2)\n', (5561, 5580), True, 'import numpy as np\n'), ((5608, 5732), 'numpy.sum', 'np.sum', (['(((self.quadcoeffs[0] * self.X ** 2 + self.quadcoeffs[1] * self.X + self.\n quadcoeffs[2] - self.Y) / self.res) ** 2)'], {}), '(((self.quadcoeffs[0] * self.X ** 2 + self.quadcoeffs[1] * self.X +\n self.quadcoeffs[2] - self.Y) / self.res) ** 2)\n', (5614, 5732), True, 'import numpy as np\n'), ((5766, 5810), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['self.X', 'self.Y'], {'xerr': '(0)', 'yerr': '(2)'}), '(self.X, self.Y, xerr=0, yerr=2)\n', (5778, 5810), True, 'import matplotlib.pyplot as plt\n'), ((5817, 5922), 'matplotlib.pyplot.plot', 'plt.plot', (['self.X', '(self.quadcoeffs[0] * self.X ** 2 + self.quadcoeffs[1] * self.X + self.\n 
quadcoeffs[2])'], {}), '(self.X, self.quadcoeffs[0] * self.X ** 2 + self.quadcoeffs[1] *\n self.X + self.quadcoeffs[2])\n', (5825, 5922), True, 'import matplotlib.pyplot as plt\n'), ((5917, 5975), 'matplotlib.pyplot.plot', 'plt.plot', (['self.X', '(self.coeffs[0] * self.X + self.coeffs[1])'], {}), '(self.X, self.coeffs[0] * self.X + self.coeffs[1])\n', (5925, 5975), True, 'import matplotlib.pyplot as plt\n'), ((5980, 5995), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (5990, 5995), True, 'import matplotlib.pyplot as plt\n'), ((6005, 6020), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (6015, 6020), True, 'import matplotlib.pyplot as plt\n'), ((6030, 6051), 'matplotlib.pyplot.title', 'plt.title', (['"""The Line"""'], {}), "('The Line')\n", (6039, 6051), True, 'import matplotlib.pyplot as plt\n'), ((6061, 6079), 'matplotlib.pyplot.savefig', 'plt.savefig', (['title'], {}), '(title)\n', (6072, 6079), True, 'import matplotlib.pyplot as plt\n'), ((6089, 6098), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6096, 6098), True, 'import matplotlib.pyplot as plt\n'), ((6241, 6301), 'random.uniform', 'random.uniform', (['theExponential.cdf[0]', 'theExponential.maxcdf'], {}), '(theExponential.cdf[0], theExponential.maxcdf)\n', (6255, 6301), False, 'import random\n'), ((6895, 6930), 'numpy.histogram', 'np.histogram', (['self.samples'], {'bins': '(10)'}), '(self.samples, bins=10)\n', (6907, 6930), True, 'import numpy as np\n'), ((7007, 7017), 'numpy.sqrt', 'np.sqrt', (['y'], {}), '(y)\n', (7014, 7017), True, 'import numpy as np\n'), ((7054, 7063), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7061, 7063), True, 'import matplotlib.pyplot as plt\n'), ((7400, 7409), 'numpy.log', 'np.log', (['y'], {}), '(y)\n', (7406, 7409), True, 'import numpy as np\n'), ((7431, 7473), 'numpy.polyfit', 'np.polyfit', (['bincenters', 'self.logsamples', '(1)'], {}), '(bincenters, self.logsamples, 1)\n', (7441, 7473), True, 'import numpy as np\n'), ((10009, 10039), 'matplotlib.pyplot.hist', 'plt.hist', (['intercepts'], {'bins': '(100)'}), '(intercepts, bins=100)\n', (10017, 10039), True, 'import matplotlib.pyplot as plt\n'), ((10053, 10092), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""a (quadratic coefficient)"""'], {}), "('a (quadratic coefficient)')\n", (10063, 10092), True, 'import matplotlib.pyplot as plt\n'), ((10106, 10127), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Entries"""'], {}), "('Entries')\n", (10116, 10127), True, 'import matplotlib.pyplot as plt\n'), ((10141, 10185), 'matplotlib.pyplot.title', 'plt.title', (['"""Quadratic coefficient histogram"""'], {}), "('Quadratic coefficient histogram')\n", (10150, 10185), True, 'import matplotlib.pyplot as plt\n'), ((10199, 10235), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('2b_quads' + fileEnding)"], {}), "('2b_quads' + fileEnding)\n", (10210, 10235), True, 'import matplotlib.pyplot as plt\n'), ((10247, 10256), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (10254, 10256), True, 'import matplotlib.pyplot as plt\n'), ((11188, 11206), 'math.isnan', 'math.isnan', (['result'], {}), '(result)\n', (11198, 11206), False, 'import math\n'), ((11543, 11561), 'math.isnan', 'math.isnan', (['result'], {}), '(result)\n', (11553, 11561), False, 'import math\n'), ((7106, 7166), 'matplotlib.pyplot.bar', 'plt.bar', (['bincenters', 'y'], {'width': 'width', 'yerr': 'menStd', 'ecolor': '"""g"""'}), "(bincenters, y, width=width, yerr=menStd, ecolor='g')\n", (7113, 7166), True, 'import matplotlib.pyplot 
as plt\n'), ((7180, 7199), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Value"""'], {}), "('Value')\n", (7190, 7199), True, 'import matplotlib.pyplot as plt\n'), ((7213, 7234), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Entries"""'], {}), "('Entries')\n", (7223, 7234), True, 'import matplotlib.pyplot as plt\n'), ((7318, 7351), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""3b_exp_samples.png"""'], {}), "('3b_exp_samples.png')\n", (7329, 7351), True, 'import matplotlib.pyplot as plt\n'), ((7365, 7374), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7372, 7374), True, 'import matplotlib.pyplot as plt\n'), ((7511, 7589), 'matplotlib.pyplot.bar', 'plt.bar', (['bincenters', 'self.logsamples'], {'width': 'width', 'yerr': '(menStd / y)', 'ecolor': '"""g"""'}), "(bincenters, self.logsamples, width=width, yerr=menStd / y, ecolor='g')\n", (7518, 7589), True, 'import matplotlib.pyplot as plt\n'), ((7599, 7618), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Value"""'], {}), "('Value')\n", (7609, 7618), True, 'import matplotlib.pyplot as plt\n'), ((7632, 7657), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""log Entries"""'], {}), "('log Entries')\n", (7642, 7657), True, 'import matplotlib.pyplot as plt\n'), ((7749, 7772), 'numpy.linspace', 'np.linspace', (['(0)', '(5)', '(1000)'], {}), '(0, 5, 1000)\n', (7760, 7772), True, 'import numpy as np\n'), ((7839, 7865), 'matplotlib.pyplot.plot', 'plt.plot', (['theFitX', 'theFitY'], {}), '(theFitX, theFitY)\n', (7847, 7865), True, 'import matplotlib.pyplot as plt\n'), ((7878, 7915), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""3b_exp_samples_log.png"""'], {}), "('3b_exp_samples_log.png')\n", (7889, 7915), True, 'import matplotlib.pyplot as plt\n'), ((7929, 7938), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7936, 7938), True, 'import matplotlib.pyplot as plt\n')]
|
# Copyright 2019 The DMLab2D Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dmlab2d.dmlab2d."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from dm_env import test_utils
import numpy as np
import dmlab2d
from dmlab2d import runfiles_helper
class Dmlab2dDmEnvTest(test_utils.EnvironmentTestMixin, absltest.TestCase):
def make_object_under_test(self):
lab2d = dmlab2d.Lab2d(runfiles_helper.find(),
{'levelName': 'examples/level_api'})
return dmlab2d.Environment(lab2d, lab2d.observation_names(), 0)
class Dmlab2DTest(absltest.TestCase):
def _create_env(self, extra_settings=None):
settings = extra_settings.copy() if extra_settings else {}
settings['levelName'] = 'examples/level_api'
return dmlab2d.Lab2d(runfiles_helper.find(), settings)
def test_lab2d_environment_name(self):
self.assertEqual(self._create_env().name(), 'dmlab2d')
def test_lab2d_observation_names(self):
env = self._create_env()
self.assertEqual(env.observation_names(),
['VIEW' + str(i) for i in range(1, 6)])
def test_lab2d_observation_spec(self):
env = self._create_env()
self.assertEqual(
env.observation_spec('VIEW1'), {
'dtype': np.dtype('uint8'),
'shape': (1,)
})
self.assertEqual(
env.observation_spec('VIEW2'), {
'dtype': np.dtype('double'),
'shape': (2,)
})
self.assertEqual(
env.observation_spec('VIEW3'), {
'dtype': np.dtype('int32'),
'shape': (3,)
})
self.assertEqual(
env.observation_spec('VIEW4'), {
'dtype': np.dtype('int64'),
'shape': (4,)
})
# Text is stored in objects.
self.assertEqual(
env.observation_spec('VIEW5'), {
'dtype': np.dtype('O'),
'shape': ()
})
def test_lab2d_action_spec(self):
env = self._create_env()
self.assertEqual(env.action_discrete_names(), ['REWARD_ACT'])
self.assertEqual(
env.action_discrete_spec('REWARD_ACT'), {
'min': 0,
'max': 4
})
self.assertEqual(env.action_continuous_names(), ['OBSERVATION_ACT'])
self.assertEqual(
env.action_continuous_spec('OBSERVATION_ACT'), {
'min': -5,
'max': 5
})
self.assertEqual(env.action_text_names(), ['LOG_EVENT'])
def test_lab2d_start_environment(self):
env = self._create_env()
env.start(episode=0, seed=0)
def test_lab2d_events_start(self):
env = self._create_env()
env.start(episode=0, seed=0)
events = env.events()
self.assertLen(events, 1)
event_name, observations = events[0]
self.assertEqual(event_name, 'start')
self.assertLen(observations, 1)
np.testing.assert_array_equal(observations[0], [1, 2, 3])
def test_lab2d_events_cleared_after_advance_not_read(self):
env = self._create_env()
env.start(episode=0, seed=0)
self.assertLen(env.events(), 1)
self.assertLen(env.events(), 1)
env.advance()
self.assertEmpty(env.events())
def test_lab2d_observe(self):
env = self._create_env()
env.start(episode=0, seed=0)
np.testing.assert_array_equal(env.observation('VIEW1'), [1])
np.testing.assert_array_equal(env.observation('VIEW2'), [1, 2])
np.testing.assert_array_equal(env.observation('VIEW3'), [1, 2, 3])
np.testing.assert_array_equal(env.observation('VIEW4'), [1, 2, 3, 4])
self.assertEqual(env.observation('VIEW5'), b'')
def test_lab2d_ten_steps_terminate_environment(self):
env = self._create_env()
env.start(episode=0, seed=0)
for _ in range(9):
self.assertEqual(env.advance()[0], dmlab2d.RUNNING)
self.assertEqual(env.advance()[0], dmlab2d.TERMINATED)
def test_lab2d_settings_environment(self):
env = self._create_env({'steps': '5'})
env.start(episode=0, seed=0)
for _ in range(4):
self.assertEqual(env.advance()[0], dmlab2d.RUNNING)
self.assertEqual(env.advance()[0], dmlab2d.TERMINATED)
def test_lab2d_properties_environment(self):
env = self._create_env({'steps': '5'})
properties = env.list_property('')
self.assertLen(properties, 1)
self.assertEqual(properties[0],
('steps', dmlab2d.PropertyAttribute.READABLE_WRITABLE))
self.assertEqual(env.read_property('steps'), '5')
env.write_property('steps', '3')
self.assertEqual(env.read_property('steps'), '3')
env.start(episode=0, seed=0)
for _ in range(2):
self.assertEqual(env.advance()[0], dmlab2d.RUNNING)
self.assertEqual(env.advance()[0], dmlab2d.TERMINATED)
def test_lab2d_act_discrete(self):
env = self._create_env({'steps': '5'})
env.start(episode=0, seed=0)
env.act_discrete(np.array([2], np.dtype('int32')))
_, reward = env.advance()
self.assertEqual(reward, 2)
def test_lab2d_act_continuous(self):
env = self._create_env({'steps': '5'})
env.start(episode=0, seed=0)
np.testing.assert_array_equal(env.observation('VIEW3'), [1, 2, 3])
env.act_continuous([10])
env.advance()
np.testing.assert_array_equal(env.observation('VIEW3'), [11, 12, 13])
def test_lab2d_act_text(self):
env = self._create_env({'steps': '5'})
env.start(episode=0, seed=0)
view = env.observation('VIEW5')
self.assertEqual(view, b'')
env.act_text(['Hello'])
env.advance()
view = env.observation('VIEW5')
self.assertEqual(view, b'Hello')
def test_lab2d_invalid_setting(self):
with self.assertRaises(ValueError):
self._create_env({'missing': '5'})
def test_lab2d_bad_action_spec_name(self):
env = self._create_env()
with self.assertRaises(KeyError):
env.action_discrete_spec('bad_key')
with self.assertRaises(KeyError):
env.action_continuous_spec('bad_key')
def test_lab2d_bad_observation_spec_name(self):
env = self._create_env()
with self.assertRaises(KeyError):
env.observation_spec('bad_key')
def test_lab2d_observe_before_start(self):
env = self._create_env()
with self.assertRaises(RuntimeError):
env.observation('VIEW1')
def test_lab2d_act_before_start(self):
env = self._create_env()
with self.assertRaises(RuntimeError):
env.act_discrete([0])
with self.assertRaises(RuntimeError):
env.act_continuous([0])
with self.assertRaises(RuntimeError):
env.act_text([''])
def test_lab2d_act_bad_shape(self):
env = self._create_env()
env.start(0, 0)
with self.assertRaises(ValueError):
env.act_discrete([0, 1])
with self.assertRaises(ValueError):
env.act_continuous([0, 1])
def test_lab2d_advance_after_episode_ends(self):
env = self._create_env({'steps': '2'})
env.start(0, 0)
self.assertEqual(env.advance()[0], dmlab2d.RUNNING)
self.assertEqual(env.advance()[0], dmlab2d.TERMINATED)
with self.assertRaises(RuntimeError):
env.advance()
def test_lab2d_missing_properties(self):
env = self._create_env({'steps': '5'})
with self.assertRaises(KeyError):
env.list_property('missing')
with self.assertRaises(KeyError):
env.read_property('missing')
with self.assertRaises(KeyError):
env.write_property('missing', '10')
def test_lab2d_invalid_ops_properties(self):
env = self._create_env({'steps': '5'})
with self.assertRaises(ValueError):
env.list_property('steps')
with self.assertRaises(ValueError):
env.write_property('steps', 'mouse')
if __name__ == '__main__':
absltest.main()
|
[
"dmlab2d.runfiles_helper.find",
"absl.testing.absltest.main",
"numpy.dtype",
"numpy.testing.assert_array_equal"
] |
[((8160, 8175), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (8173, 8175), False, 'from absl.testing import absltest\n'), ((3413, 3470), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['observations[0]', '[1, 2, 3]'], {}), '(observations[0], [1, 2, 3])\n', (3442, 3470), True, 'import numpy as np\n'), ((1006, 1028), 'dmlab2d.runfiles_helper.find', 'runfiles_helper.find', ([], {}), '()\n', (1026, 1028), False, 'from dmlab2d import runfiles_helper\n'), ((1385, 1407), 'dmlab2d.runfiles_helper.find', 'runfiles_helper.find', ([], {}), '()\n', (1405, 1407), False, 'from dmlab2d import runfiles_helper\n'), ((1855, 1872), 'numpy.dtype', 'np.dtype', (['"""uint8"""'], {}), "('uint8')\n", (1863, 1872), True, 'import numpy as np\n'), ((1996, 2014), 'numpy.dtype', 'np.dtype', (['"""double"""'], {}), "('double')\n", (2004, 2014), True, 'import numpy as np\n'), ((2138, 2155), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (2146, 2155), True, 'import numpy as np\n'), ((2279, 2296), 'numpy.dtype', 'np.dtype', (['"""int64"""'], {}), "('int64')\n", (2287, 2296), True, 'import numpy as np\n'), ((2453, 2466), 'numpy.dtype', 'np.dtype', (['"""O"""'], {}), "('O')\n", (2461, 2466), True, 'import numpy as np\n'), ((5411, 5428), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (5419, 5428), True, 'import numpy as np\n')]
|
import numpy as np
import random
import numbers
import cv2
from PIL import Image
import wpcv
from wpcv.utils.ops import pil_ops, polygon_ops
from wpcv.utils.data_aug.base import Compose, Zip
from wpcv.utils.data_aug import img_aug
class ToPILImage(object):
def __init__(self):
self.to = img_aug.ToPILImage()
def __call__(self, img, *args):
if len(args):
return (self.to(img), *args)
else:
return self.to(img)
class BboxesToPoints(object):
def __call__(self, img, bboxes):
points = np.array(bboxes).reshape((-1, 2, 2))
return img, points
class PointsToBboxes(object):
def __call__(self, img, points):
bboxes = np.array(points).reshape((-1, 4))
return img, bboxes
class Reshape(object):
def __init__(self, shape):
self.target_shape = shape
def __call__(self, x):
return np.array(x).reshape(self.target_shape)
class Limitsize(object):
def __init__(self, maxsize):
limit = maxsize
if isinstance(limit, (tuple, list, set,)):
mw, mh = limit
else:
mw = mh = limit
self.size = (mw, mh)
def __call__(self, img, points):
mw, mh = self.size
w, h = img.size
rw = w / mw
rh = h / mh
r = max(rw, rh)
if r > 1:
nw, nh = int(w / r), int(h / r)
img = pil_ops.resize(img, (nw, nh))
points = polygon_ops.scale(points, 1 / r)
return img, points
class Scale(object):
def __init__(self, scales):
if isinstance(scales, (tuple, list)):
scaleX, scaleY = scales
else:
scaleX = scaleY = scales
self.scaleX, self.scaleY = scaleX, scaleY
def __call__(self, img, points):
scaleX, scaleY = self.scaleX, self.scaleY
img = pil_ops.scale(img, (scaleX, scaleY))
points = polygon_ops.scale(points, (scaleX, scaleY))
return img, points
class Resize(object):
def __init__(self, size, keep_ratio=False, fillcolor='black'):
self.size = size
self.keep_ratio = keep_ratio
self.fillcolor = fillcolor
def __call__(self, img, points):
w, h = img.size
tw, th = self.size
if not self.keep_ratio:
scaleX, scaleY = tw / w, th / h
img = pil_ops.resize(img, self.size)
points = polygon_ops.scale(points, (scaleX, scaleY))
else:
if self.fillcolor == 'random':
fillcolor = tuple(np.random.choice(range(256), size=3))
else:
fillcolor = self.fillcolor
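# keep_ratio branch below: the image is shrunk by a common factor r and padded with
# fillcolor to reach (tw, th); the points are scaled by the same 1/r and then shifted
# by the padding offsets (dw, dh) so they stay aligned with the letterboxed image.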
img = pil_ops.resize_keep_ratio(img, self.size, fillcolor=fillcolor)
rx = w / tw
ry = h / th
r = max(rx, ry)
nw = w / r
nh = h / r
dw = (tw - nw) // 2
dh = (th - nh) // 2
points = polygon_ops.scale(points, 1 / r)
points = polygon_ops.translate(points, (dw, dh))
return img, points
class RandomHorizontalFlip(object):
def __init__(self, p=0.5):
self.p = p
def __call__(self, img, points):
imw, imh = img.size
if random.random() < self.p:
img = pil_ops.hflip(img)
points = [polygon_ops.hflip(pnts, imw) for pnts in points]
return img, points
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class RandomVerticalFlip(object):
def __init__(self, p=0.5):
self.p = p
def __call__(self, img, points):
imw, imh = img.size
if random.random() < self.p:
img = pil_ops.vflip(img)
points = [polygon_ops.vflip(pnts, imh) for pnts in points]
return img, points
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class RandomTranslate(object):
def __init__(self, max_offset=None, fillcolor='black'):
if max_offset is not None and len(max_offset) == 2:
mx, my = max_offset
max_offset = [-mx, -my, mx, my]
self.max_offset = max_offset
self.fillcolor = fillcolor
def __call__(self, img, points):
if self.fillcolor == 'random':
fillcolor = tuple(np.random.choice(range(256), size=3))
else:
fillcolor = self.fillcolor
rang = polygon_ops.get_translate_range(points, img.size)
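# rang is presumably the feasible offset box [left, top, right, bottom] within which
# every polygon remains inside the image; it is further clipped to max_offset below.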
if self.max_offset:
def limit_box(box, limits=None):
if limits is None: return box
if len(limits) == 2:
ml, mt = 0, 0
mr, mb = limits
else:
assert len(limits) == 4
ml, mt, mr, mb = limits
l, t, r, b = box
l = max(ml, l)
t = max(mt, t)
r = min(mr, r)
b = min(mb, b)
if l > r:
return None
if t > b: return None
return [l, t, r, b]
rang = limit_box(rang, self.max_offset)
if rang is None:
return img, points
ofx = random.randint(rang[0], rang[2])
ofy = random.randint(rang[1], rang[3])
img = pil_ops.translate(img, offset=(ofx, ofy), fillcolor=fillcolor)
points = [polygon_ops.translate(pnts, (ofx, ofy)) for pnts in points]
return img, points
class RandomRotate(object):
def __init__(self, degree, expand=True, fillcolor='black'):
self.degree = degree if not isinstance(degree, numbers.Number) else [-degree, degree]
self.expand = expand
self.fillcolor = fillcolor
def __call__(self, img, points):
if self.fillcolor == 'random':
fillcolor = tuple(np.random.choice(range(256), size=3))
else:
fillcolor = self.fillcolor
degree = random.random() * (self.degree[1] - self.degree[0]) + self.degree[0]
w, h = img.size
img = pil_ops.rotate(img, degree, expand=self.expand, fillcolor=fillcolor)
points = [polygon_ops.rotate(pnts, degree, (w // 2, h // 2), img_size=(w, h), expand=self.expand) for pnts in
points]
return img, points
class RandomShearX(object):
def __init__(self, degree):
self.degree = degree if not isinstance(degree, numbers.Number) else [-degree, degree]
def __call__(self, img, points):
degree = random.random() * (self.degree[1] - self.degree[0]) + self.degree[0]
w, h = img.size
img = pil_ops.shear_x(img, degree)
points = [polygon_ops.shear_x(pnts, degree, img_size=(w, h), expand=True) for pnts in points]
return img, points
class RandomShearY(object):
def __init__(self, degree):
self.degree = degree if not isinstance(degree, numbers.Number) else [-degree, degree]
def __call__(self, img, points):
degree = random.random() * (self.degree[1] - self.degree[0]) + self.degree[0]
w, h = img.size
img = pil_ops.shear_y(img, degree)
points = [polygon_ops.shear_y(pnts, degree, img_size=(w, h), expand=True) for pnts in points]
return img, points
class RandomShear(object):
def __init__(self, xdegree, ydegree=None, fillcolor='black'):
def get_param(param, default=None):
if param is None: return default
return param if not isinstance(param, numbers.Number) else [-param, param]
self.xdegree = get_param(xdegree)
self.ydegree = get_param(ydegree)
self.fillcolor = fillcolor
def __call__(self, img, points):
if self.xdegree:
if self.fillcolor == 'random':
fillcolor = tuple(np.random.choice(range(256), size=3))
else:
fillcolor = self.fillcolor
degree = random.random() * (self.xdegree[1] - self.xdegree[0]) + self.xdegree[0]
w, h = img.size
img = pil_ops.shear_x(img, degree, fillcolor=fillcolor)
points = [polygon_ops.shear_x(pnts, degree, img_size=(w, h), expand=True) for pnts in points]
if self.ydegree:
if self.fillcolor == 'random':
fillcolor = tuple(np.random.choice(range(256), size=3))
else:
fillcolor = self.fillcolor
degree = random.random() * (self.ydegree[1] - self.ydegree[0]) + self.ydegree[0]
w, h = img.size
img = pil_ops.shear_y(img, degree, fillcolor=fillcolor)
points = [polygon_ops.shear_y(pnts, degree, img_size=(w, h), expand=True) for pnts in points]
return img, points
# class RandomPerspective:
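# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative addition, not part of the original file).
# Assumption: Compose, imported above from wpcv.utils.data_aug.base, simply calls
# each transform in order and forwards the (img, points) pair between them.
# example_pipeline = Compose([
#     BboxesToPoints(),                     # (img, (N,4) bboxes) -> (img, (N,2,2) points)
#     Limitsize(1024),                      # cap the longer image side at 1024 px
#     RandomHorizontalFlip(p=0.5),
#     RandomRotate(10, fillcolor='random'),
#     Resize((512, 512), keep_ratio=True),
#     PointsToBboxes(),                     # back to (img, (N,4) bboxes)
# ])
# aug_img, aug_boxes = example_pipeline(pil_image, boxes)  # pil_image/boxes are user inputs
# ---------------------------------------------------------------------------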
|
[
"wpcv.utils.ops.pil_ops.vflip",
"wpcv.utils.ops.polygon_ops.get_translate_range",
"wpcv.utils.ops.pil_ops.resize_keep_ratio",
"wpcv.utils.ops.polygon_ops.scale",
"random.randint",
"wpcv.utils.ops.pil_ops.scale",
"wpcv.utils.ops.polygon_ops.translate",
"wpcv.utils.data_aug.img_aug.ToPILImage",
"wpcv.utils.ops.pil_ops.shear_y",
"wpcv.utils.ops.pil_ops.shear_x",
"wpcv.utils.ops.pil_ops.resize",
"wpcv.utils.ops.polygon_ops.shear_x",
"random.random",
"wpcv.utils.ops.pil_ops.hflip",
"wpcv.utils.ops.pil_ops.translate",
"wpcv.utils.ops.polygon_ops.vflip",
"wpcv.utils.ops.polygon_ops.rotate",
"wpcv.utils.ops.pil_ops.rotate",
"wpcv.utils.ops.polygon_ops.shear_y",
"numpy.array",
"wpcv.utils.ops.polygon_ops.hflip"
] |
[((301, 321), 'wpcv.utils.data_aug.img_aug.ToPILImage', 'img_aug.ToPILImage', ([], {}), '()\n', (319, 321), False, 'from wpcv.utils.data_aug import img_aug\n'), ((1855, 1891), 'wpcv.utils.ops.pil_ops.scale', 'pil_ops.scale', (['img', '(scaleX, scaleY)'], {}), '(img, (scaleX, scaleY))\n', (1868, 1891), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((1909, 1952), 'wpcv.utils.ops.polygon_ops.scale', 'polygon_ops.scale', (['points', '(scaleX, scaleY)'], {}), '(points, (scaleX, scaleY))\n', (1926, 1952), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((4390, 4439), 'wpcv.utils.ops.polygon_ops.get_translate_range', 'polygon_ops.get_translate_range', (['points', 'img.size'], {}), '(points, img.size)\n', (4421, 4439), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((5196, 5228), 'random.randint', 'random.randint', (['rang[0]', 'rang[2]'], {}), '(rang[0], rang[2])\n', (5210, 5228), False, 'import random\n'), ((5243, 5275), 'random.randint', 'random.randint', (['rang[1]', 'rang[3]'], {}), '(rang[1], rang[3])\n', (5257, 5275), False, 'import random\n'), ((5290, 5352), 'wpcv.utils.ops.pil_ops.translate', 'pil_ops.translate', (['img'], {'offset': '(ofx, ofy)', 'fillcolor': 'fillcolor'}), '(img, offset=(ofx, ofy), fillcolor=fillcolor)\n', (5307, 5352), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((6032, 6100), 'wpcv.utils.ops.pil_ops.rotate', 'pil_ops.rotate', (['img', 'degree'], {'expand': 'self.expand', 'fillcolor': 'fillcolor'}), '(img, degree, expand=self.expand, fillcolor=fillcolor)\n', (6046, 6100), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((6590, 6618), 'wpcv.utils.ops.pil_ops.shear_x', 'pil_ops.shear_x', (['img', 'degree'], {}), '(img, degree)\n', (6605, 6618), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((7066, 7094), 'wpcv.utils.ops.pil_ops.shear_y', 'pil_ops.shear_y', (['img', 'degree'], {}), '(img, degree)\n', (7081, 7094), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((1404, 1433), 'wpcv.utils.ops.pil_ops.resize', 'pil_ops.resize', (['img', '(nw, nh)'], {}), '(img, (nw, nh))\n', (1418, 1433), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((1455, 1487), 'wpcv.utils.ops.polygon_ops.scale', 'polygon_ops.scale', (['points', '(1 / r)'], {}), '(points, 1 / r)\n', (1472, 1487), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((2351, 2381), 'wpcv.utils.ops.pil_ops.resize', 'pil_ops.resize', (['img', 'self.size'], {}), '(img, self.size)\n', (2365, 2381), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((2403, 2446), 'wpcv.utils.ops.polygon_ops.scale', 'polygon_ops.scale', (['points', '(scaleX, scaleY)'], {}), '(points, (scaleX, scaleY))\n', (2420, 2446), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((2655, 2717), 'wpcv.utils.ops.pil_ops.resize_keep_ratio', 'pil_ops.resize_keep_ratio', (['img', 'self.size'], {'fillcolor': 'fillcolor'}), '(img, self.size, fillcolor=fillcolor)\n', (2680, 2717), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((2925, 2957), 'wpcv.utils.ops.polygon_ops.scale', 'polygon_ops.scale', (['points', '(1 / r)'], {}), '(points, 1 / r)\n', (2942, 2957), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((2979, 3018), 'wpcv.utils.ops.polygon_ops.translate', 'polygon_ops.translate', (['points', '(dw, dh)'], {}), '(points, (dw, dh))\n', (3000, 3018), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((3211, 3226), 'random.random', 'random.random', ([], {}), '()\n', (3224, 3226), 
False, 'import random\n'), ((3255, 3273), 'wpcv.utils.ops.pil_ops.hflip', 'pil_ops.hflip', (['img'], {}), '(img)\n', (3268, 3273), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((3625, 3640), 'random.random', 'random.random', ([], {}), '()\n', (3638, 3640), False, 'import random\n'), ((3669, 3687), 'wpcv.utils.ops.pil_ops.vflip', 'pil_ops.vflip', (['img'], {}), '(img)\n', (3682, 3687), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((5371, 5410), 'wpcv.utils.ops.polygon_ops.translate', 'polygon_ops.translate', (['pnts', '(ofx, ofy)'], {}), '(pnts, (ofx, ofy))\n', (5392, 5410), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((6119, 6211), 'wpcv.utils.ops.polygon_ops.rotate', 'polygon_ops.rotate', (['pnts', 'degree', '(w // 2, h // 2)'], {'img_size': '(w, h)', 'expand': 'self.expand'}), '(pnts, degree, (w // 2, h // 2), img_size=(w, h), expand=\n self.expand)\n', (6137, 6211), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((6637, 6700), 'wpcv.utils.ops.polygon_ops.shear_x', 'polygon_ops.shear_x', (['pnts', 'degree'], {'img_size': '(w, h)', 'expand': '(True)'}), '(pnts, degree, img_size=(w, h), expand=True)\n', (6656, 6700), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((7113, 7176), 'wpcv.utils.ops.polygon_ops.shear_y', 'polygon_ops.shear_y', (['pnts', 'degree'], {'img_size': '(w, h)', 'expand': '(True)'}), '(pnts, degree, img_size=(w, h), expand=True)\n', (7132, 7176), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((7994, 8043), 'wpcv.utils.ops.pil_ops.shear_x', 'pil_ops.shear_x', (['img', 'degree'], {'fillcolor': 'fillcolor'}), '(img, degree, fillcolor=fillcolor)\n', (8009, 8043), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((8490, 8539), 'wpcv.utils.ops.pil_ops.shear_y', 'pil_ops.shear_y', (['img', 'degree'], {'fillcolor': 'fillcolor'}), '(img, degree, fillcolor=fillcolor)\n', (8505, 8539), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((554, 570), 'numpy.array', 'np.array', (['bboxes'], {}), '(bboxes)\n', (562, 570), True, 'import numpy as np\n'), ((704, 720), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (712, 720), True, 'import numpy as np\n'), ((898, 909), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (906, 909), True, 'import numpy as np\n'), ((3296, 3324), 'wpcv.utils.ops.polygon_ops.hflip', 'polygon_ops.hflip', (['pnts', 'imw'], {}), '(pnts, imw)\n', (3313, 3324), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((3710, 3738), 'wpcv.utils.ops.polygon_ops.vflip', 'polygon_ops.vflip', (['pnts', 'imh'], {}), '(pnts, imh)\n', (3727, 3738), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((5925, 5940), 'random.random', 'random.random', ([], {}), '()\n', (5938, 5940), False, 'import random\n'), ((6483, 6498), 'random.random', 'random.random', ([], {}), '()\n', (6496, 6498), False, 'import random\n'), ((6959, 6974), 'random.random', 'random.random', ([], {}), '()\n', (6972, 6974), False, 'import random\n'), ((8066, 8129), 'wpcv.utils.ops.polygon_ops.shear_x', 'polygon_ops.shear_x', (['pnts', 'degree'], {'img_size': '(w, h)', 'expand': '(True)'}), '(pnts, degree, img_size=(w, h), expand=True)\n', (8085, 8129), False, 'from wpcv.utils.ops import pil_ops, polygon_ops\n'), ((8562, 8625), 'wpcv.utils.ops.polygon_ops.shear_y', 'polygon_ops.shear_y', (['pnts', 'degree'], {'img_size': '(w, h)', 'expand': '(True)'}), '(pnts, degree, img_size=(w, h), expand=True)\n', (8581, 8625), False, 'from wpcv.utils.ops import pil_ops, 
polygon_ops\n'), ((7876, 7891), 'random.random', 'random.random', ([], {}), '()\n', (7889, 7891), False, 'import random\n'), ((8372, 8387), 'random.random', 'random.random', ([], {}), '()\n', (8385, 8387), False, 'import random\n')]
|
#
# INF 552 Homework 3
# Part 2: Fast Map
# Group Members: <NAME> (zhan198), <NAME> (minyihua), <NAME> (jeffyjac)
# Date: 2/27/2018
# Programming Language: Python 3.6
#
import numpy as np
import matplotlib.pyplot as plt
DIMENSION = 2
DATA_SIZE = 10
# WORDS = ["acting", "activist", "compute", "coward","forward","interaction","activity","odor","order","international"]
WORDS = []
data_file_name = "fastmap-data.txt"
words_file_name = 'fastmap-wordlist.txt'
table = np.zeros(shape=(DATA_SIZE, DATA_SIZE))
cood = np.zeros(shape=(DATA_SIZE, DIMENSION))
pivot = []
def main():
readFile(data_file_name)
print("\nOriginal table:")
readWords(words_file_name)
print(WORDS)
printTable()
for i in range(DIMENSION):
print("\n\nThe {i}st cood: ".format(i=i+1))
pickLongestPair()
calculateCoordinate(i)
print("\nUpdate table: ")
updateTable(i)
printTable()
plotResult()
def readFile(filename):
with open(filename, "r") as file:
print("Original input:")
for line in file:
line_array = line.split()
print(line_array)
table[int(line_array[0]) - 1][int(line_array[1]) - 1] = \
table[int(line_array[1]) - 1][int(line_array[0]) - 1] = float(line_array[2])
def readWords(filename):
global WORDS
with open(filename) as file:
WORDS = file.read().splitlines()
def printTable():
for row in table:
print(row)
def pickLongestPair():
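# Pivot selection: take the pair with the largest distance in the current table as the
# projection axis (classic FastMap refines this choice iteratively; the global maximum
# used here is a simpler but valid variant).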
max = np.amax(table)
indices = list(zip(*np.where(table == max)))
print("The longest distance pair is {pair}".format(pair = indices[0]))
print("Pivot is piont {piv}".format(piv = indices[0][0]))
pivot.append(indices[0])
def calculateCoordinate(dimen):
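# FastMap projection onto the line through pivots a and b (law of cosines):
#   x_i = (d(a,i)^2 + d(a,b)^2 - d(i,b)^2) / (2 * d(a,b))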
a = pivot[dimen][0]
b = pivot[dimen][1]
print("The coordinate table")
for i in range(len(table)):
cood[i][dimen] = (np.power(table[a][i],2) + np.power(table[a][b],2) - np.power(table[i][b],2))/ (2 * table[a][b])
print ("{i}\t({x}, {y})".format(i=i, x= round(cood[i][0], 3),y=round(cood[i][1], 3)))
def updateTable(dimen):
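# Residual distances in the hyperplane orthogonal to the current pivot axis:
#   d'(i,j)^2 = d(i,j)^2 - (x_i - x_j)^2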
for i in range(0, DATA_SIZE):
for j in range(0, DATA_SIZE):
table[i][j] = np.sqrt(np.power(table[i][j],2) - np.power((cood[i][dimen] - cood[j][dimen]),2))
def plotResult():
x = cood[:, 0]
y = cood[:, 1]
fig, ax = plt.subplots()
ax.set_xlabel('X axis')
ax.set_ylabel('Y axis')
ax.scatter(x, y)
plt.scatter(x, y, color="red", s=30)
plt.title("Fast Map Result")
for i, txt in enumerate(WORDS):
ax.annotate(txt, (x[i], y[i]))
plt.show()
main()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.scatter",
"numpy.power",
"numpy.zeros",
"numpy.amax",
"numpy.where",
"matplotlib.pyplot.subplots"
] |
[((470, 508), 'numpy.zeros', 'np.zeros', ([], {'shape': '(DATA_SIZE, DATA_SIZE)'}), '(shape=(DATA_SIZE, DATA_SIZE))\n', (478, 508), True, 'import numpy as np\n'), ((516, 554), 'numpy.zeros', 'np.zeros', ([], {'shape': '(DATA_SIZE, DIMENSION)'}), '(shape=(DATA_SIZE, DIMENSION))\n', (524, 554), True, 'import numpy as np\n'), ((1503, 1517), 'numpy.amax', 'np.amax', (['table'], {}), '(table)\n', (1510, 1517), True, 'import numpy as np\n'), ((2371, 2385), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2383, 2385), True, 'import matplotlib.pyplot as plt\n'), ((2467, 2503), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'color': '"""red"""', 's': '(30)'}), "(x, y, color='red', s=30)\n", (2478, 2503), True, 'import matplotlib.pyplot as plt\n'), ((2508, 2536), 'matplotlib.pyplot.title', 'plt.title', (['"""Fast Map Result"""'], {}), "('Fast Map Result')\n", (2517, 2536), True, 'import matplotlib.pyplot as plt\n'), ((2617, 2627), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2625, 2627), True, 'import matplotlib.pyplot as plt\n'), ((1542, 1564), 'numpy.where', 'np.where', (['(table == max)'], {}), '(table == max)\n', (1550, 1564), True, 'import numpy as np\n'), ((1958, 1982), 'numpy.power', 'np.power', (['table[i][b]', '(2)'], {}), '(table[i][b], 2)\n', (1966, 1982), True, 'import numpy as np\n'), ((1906, 1930), 'numpy.power', 'np.power', (['table[a][i]', '(2)'], {}), '(table[a][i], 2)\n', (1914, 1930), True, 'import numpy as np\n'), ((1932, 1956), 'numpy.power', 'np.power', (['table[a][b]', '(2)'], {}), '(table[a][b], 2)\n', (1940, 1956), True, 'import numpy as np\n'), ((2227, 2251), 'numpy.power', 'np.power', (['table[i][j]', '(2)'], {}), '(table[i][j], 2)\n', (2235, 2251), True, 'import numpy as np\n'), ((2253, 2297), 'numpy.power', 'np.power', (['(cood[i][dimen] - cood[j][dimen])', '(2)'], {}), '(cood[i][dimen] - cood[j][dimen], 2)\n', (2261, 2297), True, 'import numpy as np\n')]
|
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import queue
from insomnia.utils import empty_torch_queue
from insomnia.explores.gaussian_noise import GaussianActionNoise
from insomnia.numeric_models import d4pg
from insomnia.numeric_models.misc import l2_projection
class LearnerD4PG(object):
"""Policy and value network update routine. """
def __init__(self, policy_net, target_policy_net, learner_w_queue,
alpha, beta, input_dims, n_actions, fc1_dims, fc2_dims, name, v_min, v_max, n_atoms=51):
self.v_min = v_min
self.v_max = v_max
self.num_atoms = n_atoms
self.num_train_steps = 10000
self.batch_size = 256
self.tau = 0.001
self.gamma = 0.998
self.prioritized_replay = 0
self.learner_w_queue = learner_w_queue
self.delta_z = (self.v_max - self.v_min) / (self.num_atoms - 1)
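# delta_z is the spacing between adjacent atoms of the fixed categorical support
# over [v_min, v_max] used by the distributional critic.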
self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Noise process
self.noise = GaussianActionNoise(mu=np.zeros(n_actions))
# Value and policy nets
self.value_net = d4pg.CriticNetwork(beta, input_dims, fc1_dims, fc2_dims, n_actions, name,
self.v_min, self.v_max, self.num_atoms)
self.policy_net = policy_net
self.target_value_net = d4pg.CriticNetwork(beta, input_dims, fc1_dims, fc2_dims, n_actions, name,
self.v_min, self.v_max, self.num_atoms)
self.target_policy_net = target_policy_net
for target_param, param in zip(self.target_value_net.parameters(), self.value_net.parameters()):
target_param.data.copy_(param.data)
for target_param, param in zip(self.target_policy_net.parameters(), self.policy_net.parameters()):
target_param.data.copy_(param.data)
self.value_optimizer = optim.Adam(self.value_net.parameters(), lr=beta)
self.policy_optimizer = optim.Adam(self.policy_net.parameters(), lr=alpha)
self.value_criterion = nn.BCELoss(reduction='none')
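# reduction='none' keeps the per-atom loss so that a per-sample TD error can be
# derived below and fed back as priorities when prioritized replay is enabled.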
def _update_step(self, batch, replay_priority_queue, update_step):
update_time = time.time()
state, action, reward, next_state, done, gamma, weights, inds = batch
state = np.asarray(state)
action = np.asarray(action)
reward = np.asarray(reward)
next_state = np.asarray(next_state)
done = np.asarray(done)
weights = np.asarray(weights)
inds = np.asarray(inds).flatten()
state = torch.from_numpy(state).float().to(self.device)
next_state = torch.from_numpy(next_state).float().to(self.device)
action = torch.from_numpy(action).float().to(self.device)
reward = torch.from_numpy(reward).float().to(self.device)
done = torch.from_numpy(done).float().to(self.device)
# ------- Update critic -------
# Predict next actions with target policy network
next_action = self.target_policy_net(next_state)
# Predict Z distribution with target value network
target_value = self.target_value_net.get_probs(next_state, next_action.detach())
# Get projected distribution
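# Categorical Bellman backup: the target atoms reward + gamma^N * z (gamma ** 5 here,
# which appears to hard-code a 5-step return) are redistributed onto the fixed
# support [v_min, v_max] via the l2 projection.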
target_z_projected = l2_projection._l2_project(next_distr_v=target_value,
rewards_v=reward,
dones_mask_t=done,
gamma=self.gamma ** 5,
n_atoms=self.num_atoms,
v_min=self.v_min,
v_max=self.v_max,
delta_z=self.delta_z)
target_z_projected = torch.from_numpy(target_z_projected).float().to(self.device)
critic_value = self.value_net.get_probs(state, action)
critic_value = critic_value.to(self.device)
value_loss = self.value_criterion(critic_value, target_z_projected)
value_loss = value_loss.mean(axis=1)
# Update priorities in buffer
td_error = value_loss.cpu().detach().numpy().flatten()
priority_epsilon = 1e-4
if self.prioritized_replay:
weights_update = np.abs(td_error) + priority_epsilon
replay_priority_queue.put((inds, weights_update))
value_loss = value_loss * torch.tensor(weights).float().to(self.device)
# Update step
value_loss = value_loss.mean()
self.value_optimizer.zero_grad()
value_loss.backward()
self.value_optimizer.step()
# -------- Update actor -----------
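# Actor loss: the critic's atom probabilities for (s, pi(s)) are weighted by the atom
# values z_atoms to form E[Z(s, pi(s))]; its negative mean is minimized below.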
policy_loss = self.value_net.get_probs(state, self.policy_net(state))
policy_loss = policy_loss * torch.from_numpy(self.value_net.z_atoms).float().to(self.device)
policy_loss = torch.sum(policy_loss, dim=1)
policy_loss = -policy_loss.mean()
self.policy_optimizer.zero_grad()
policy_loss.backward()
self.policy_optimizer.step()
for target_param, param in zip(self.target_value_net.parameters(), self.value_net.parameters()):
target_param.data.copy_(
target_param.data * (1.0 - self.tau) + param.data * self.tau
)
for target_param, param in zip(self.target_policy_net.parameters(), self.policy_net.parameters()):
target_param.data.copy_(
target_param.data * (1.0 - self.tau) + param.data * self.tau
)
# Send updated learner to the queue
if update_step.value % 100 == 0:
try:
params = [p.data.cpu().detach().numpy() for p in self.policy_net.parameters()]
self.learner_w_queue.put(params)
except:
pass
def run(self, training_on, batch_queue, replay_priority_queue, update_step):
while update_step.value < self.num_train_steps:
try:
batch = batch_queue.get_nowait()
except queue.Empty:
continue
self._update_step(batch, replay_priority_queue, update_step)
update_step.value += 1
if update_step.value % 1000 == 0:
print("Training step ", update_step.value)
training_on.value = 0
empty_torch_queue(self.learner_w_queue)
empty_torch_queue(replay_priority_queue)
print("Exit learner.")
|
[
"torch.from_numpy",
"numpy.abs",
"torch.nn.BCELoss",
"insomnia.utils.empty_torch_queue",
"insomnia.numeric_models.d4pg.CriticNetwork",
"numpy.asarray",
"numpy.zeros",
"time.time",
"torch.cuda.is_available",
"torch.sum",
"torch.tensor",
"insomnia.numeric_models.misc.l2_projection._l2_project"
] |
[((1153, 1270), 'insomnia.numeric_models.d4pg.CriticNetwork', 'd4pg.CriticNetwork', (['beta', 'input_dims', 'fc1_dims', 'fc2_dims', 'n_actions', 'name', 'self.v_min', 'self.v_max', 'self.num_atoms'], {}), '(beta, input_dims, fc1_dims, fc2_dims, n_actions, name,\n self.v_min, self.v_max, self.num_atoms)\n', (1171, 1270), False, 'from insomnia.numeric_models import d4pg\n'), ((1380, 1497), 'insomnia.numeric_models.d4pg.CriticNetwork', 'd4pg.CriticNetwork', (['beta', 'input_dims', 'fc1_dims', 'fc2_dims', 'n_actions', 'name', 'self.v_min', 'self.v_max', 'self.num_atoms'], {}), '(beta, input_dims, fc1_dims, fc2_dims, n_actions, name,\n self.v_min, self.v_max, self.num_atoms)\n', (1398, 1497), False, 'from insomnia.numeric_models import d4pg\n'), ((2102, 2130), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (2112, 2130), True, 'import torch.nn as nn\n'), ((2225, 2236), 'time.time', 'time.time', ([], {}), '()\n', (2234, 2236), False, 'import time\n'), ((2333, 2350), 'numpy.asarray', 'np.asarray', (['state'], {}), '(state)\n', (2343, 2350), True, 'import numpy as np\n'), ((2368, 2386), 'numpy.asarray', 'np.asarray', (['action'], {}), '(action)\n', (2378, 2386), True, 'import numpy as np\n'), ((2404, 2422), 'numpy.asarray', 'np.asarray', (['reward'], {}), '(reward)\n', (2414, 2422), True, 'import numpy as np\n'), ((2444, 2466), 'numpy.asarray', 'np.asarray', (['next_state'], {}), '(next_state)\n', (2454, 2466), True, 'import numpy as np\n'), ((2482, 2498), 'numpy.asarray', 'np.asarray', (['done'], {}), '(done)\n', (2492, 2498), True, 'import numpy as np\n'), ((2517, 2536), 'numpy.asarray', 'np.asarray', (['weights'], {}), '(weights)\n', (2527, 2536), True, 'import numpy as np\n'), ((3285, 3488), 'insomnia.numeric_models.misc.l2_projection._l2_project', 'l2_projection._l2_project', ([], {'next_distr_v': 'target_value', 'rewards_v': 'reward', 'dones_mask_t': 'done', 'gamma': '(self.gamma ** 5)', 'n_atoms': 'self.num_atoms', 'v_min': 'self.v_min', 'v_max': 'self.v_max', 'delta_z': 'self.delta_z'}), '(next_distr_v=target_value, rewards_v=reward,\n dones_mask_t=done, gamma=self.gamma ** 5, n_atoms=self.num_atoms, v_min\n =self.v_min, v_max=self.v_max, delta_z=self.delta_z)\n', (3310, 3488), False, 'from insomnia.numeric_models.misc import l2_projection\n'), ((4892, 4921), 'torch.sum', 'torch.sum', (['policy_loss'], {'dim': '(1)'}), '(policy_loss, dim=1)\n', (4901, 4921), False, 'import torch\n'), ((6349, 6388), 'insomnia.utils.empty_torch_queue', 'empty_torch_queue', (['self.learner_w_queue'], {}), '(self.learner_w_queue)\n', (6366, 6388), False, 'from insomnia.utils import empty_torch_queue\n'), ((6397, 6437), 'insomnia.utils.empty_torch_queue', 'empty_torch_queue', (['replay_priority_queue'], {}), '(replay_priority_queue)\n', (6414, 6437), False, 'from insomnia.utils import empty_torch_queue\n'), ((968, 993), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (991, 993), False, 'import torch\n'), ((1074, 1093), 'numpy.zeros', 'np.zeros', (['n_actions'], {}), '(n_actions)\n', (1082, 1093), True, 'import numpy as np\n'), ((2552, 2568), 'numpy.asarray', 'np.asarray', (['inds'], {}), '(inds)\n', (2562, 2568), True, 'import numpy as np\n'), ((4294, 4310), 'numpy.abs', 'np.abs', (['td_error'], {}), '(td_error)\n', (4300, 4310), True, 'import numpy as np\n'), ((2596, 2619), 'torch.from_numpy', 'torch.from_numpy', (['state'], {}), '(state)\n', (2612, 2619), False, 'import torch\n'), ((2665, 2693), 'torch.from_numpy', 'torch.from_numpy', 
(['next_state'], {}), '(next_state)\n', (2681, 2693), False, 'import torch\n'), ((2735, 2759), 'torch.from_numpy', 'torch.from_numpy', (['action'], {}), '(action)\n', (2751, 2759), False, 'import torch\n'), ((2801, 2825), 'torch.from_numpy', 'torch.from_numpy', (['reward'], {}), '(reward)\n', (2817, 2825), False, 'import torch\n'), ((2865, 2887), 'torch.from_numpy', 'torch.from_numpy', (['done'], {}), '(done)\n', (2881, 2887), False, 'import torch\n'), ((3796, 3832), 'torch.from_numpy', 'torch.from_numpy', (['target_z_projected'], {}), '(target_z_projected)\n', (3812, 3832), False, 'import torch\n'), ((4805, 4845), 'torch.from_numpy', 'torch.from_numpy', (['self.value_net.z_atoms'], {}), '(self.value_net.z_atoms)\n', (4821, 4845), False, 'import torch\n'), ((4430, 4451), 'torch.tensor', 'torch.tensor', (['weights'], {}), '(weights)\n', (4442, 4451), False, 'import torch\n')]
|