code | apis | extract_api
---|---|---
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from hypothesis import assume, given
import hypothesis.strategies as st
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class DropoutTest(hu.HypothesisTestCase):
@given(X=hu.tensor(),
in_place=st.booleans(),
ratio=st.floats(0, 0.999),
**mu.gcs)
def test_dropout_is_test(self, X, in_place, ratio, gc, dc):
"""Test with is_test=True for a deterministic reference impl."""
op = core.CreateOperator('Dropout', ['X'],
['X' if in_place else 'Y'],
ratio=ratio, is_test=True)
self.assertDeviceChecks(dc, op, [X], [0])
# No sense in checking gradients for test phase
def reference_dropout_test(x):
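# With is_test=True the Dropout op is a pass-through: the reference returns the
# input unchanged together with an all-ones mask.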
return x, np.ones(x.shape, dtype=np.bool_)
self.assertReferenceChecks(
gc, op, [X], reference_dropout_test,
# The 'mask' output may be uninitialized
outputs_to_check=[0])
@given(X=hu.tensor(),
in_place=st.booleans(),
output_mask=st.booleans(),
**mu.gcs)
@unittest.skipIf(True, "Skip due to different rand seed.")
def test_dropout_ratio0(self, X, in_place, output_mask, gc, dc):
"""Test with ratio=0 for a deterministic reference impl."""
is_test = not output_mask
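# With ratio=0 nothing is dropped in either mode, so the output must equal the
# input; the 'mask' output is only produced when the op runs in training mode.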
op = core.CreateOperator('Dropout', ['X'],
['X' if in_place else 'Y'] +
(['mask'] if output_mask else []),
ratio=0.0, is_test=is_test)
self.assertDeviceChecks(dc, op, [X], [0])
def reference_dropout_ratio0(x):
return (x,) if is_test else (x, np.ones(x.shape, dtype=np.bool_))
self.assertReferenceChecks(
gc, op, [X], reference_dropout_ratio0, outputs_to_check=[0])
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"unittest.skipIf",
"caffe2.python.hypothesis_test_util.tensor",
"numpy.ones",
"hypothesis.strategies.booleans",
"caffe2.python.core.CreateOperator",
"hypothesis.strategies.floats"
] |
[((430, 495), 'unittest.skipIf', 'unittest.skipIf', (['(not workspace.C.use_mkldnn)', '"""No MKLDNN support."""'], {}), "(not workspace.C.use_mkldnn, 'No MKLDNN support.')\n", (445, 495), False, 'import unittest\n'), ((1467, 1524), 'unittest.skipIf', 'unittest.skipIf', (['(True)', '"""Skip duo to different rand seed."""'], {}), "(True, 'Skip duo to different rand seed.')\n", (1482, 1524), False, 'import unittest\n'), ((2249, 2264), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2262, 2264), False, 'import unittest\n'), ((809, 906), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""Dropout"""', "['X']", "['X' if in_place else 'Y']"], {'ratio': 'ratio', 'is_test': '(True)'}), "('Dropout', ['X'], ['X' if in_place else 'Y'], ratio=\n ratio, is_test=True)\n", (828, 906), False, 'from caffe2.python import core, workspace\n'), ((1709, 1843), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""Dropout"""', "['X']", "(['X' if in_place else 'Y'] + (['mask'] if output_mask else []))"], {'ratio': '(0.0)', 'is_test': 'is_test'}), "('Dropout', ['X'], ['X' if in_place else 'Y'] + (['mask'\n ] if output_mask else []), ratio=0.0, is_test=is_test)\n", (1728, 1843), False, 'from caffe2.python import core, workspace\n'), ((552, 563), 'caffe2.python.hypothesis_test_util.tensor', 'hu.tensor', ([], {}), '()\n', (561, 563), True, 'import caffe2.python.hypothesis_test_util as hu\n'), ((585, 598), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (596, 598), True, 'import hypothesis.strategies as st\n'), ((617, 636), 'hypothesis.strategies.floats', 'st.floats', (['(0)', '(0.999)'], {}), '(0, 0.999)\n', (626, 636), True, 'import hypothesis.strategies as st\n'), ((1355, 1366), 'caffe2.python.hypothesis_test_util.tensor', 'hu.tensor', ([], {}), '()\n', (1364, 1366), True, 'import caffe2.python.hypothesis_test_util as hu\n'), ((1388, 1401), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (1399, 1401), True, 'import hypothesis.strategies as st\n'), ((1426, 1439), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (1437, 1439), True, 'import hypothesis.strategies as st\n'), ((1137, 1168), 'numpy.ones', 'np.ones', (['x.shape'], {'dtype': 'np.bool'}), '(x.shape, dtype=np.bool)\n', (1144, 1168), True, 'import numpy as np\n'), ((2075, 2106), 'numpy.ones', 'np.ones', (['x.shape'], {'dtype': 'np.bool'}), '(x.shape, dtype=np.bool)\n', (2082, 2106), True, 'import numpy as np\n')]
|
# Python imports
import unittest
import numpy as np
import os
import shutil
import xarray as xr
import pytest
import oggm
from scipy import optimize as optimization
salem = pytest.importorskip('salem')
gpd = pytest.importorskip('geopandas')
# Locals
import oggm.cfg as cfg
from oggm import tasks, utils, workflow
from oggm.workflow import execute_entity_task
from oggm.tests.funcs import get_test_dir
from oggm.utils import get_demo_file
from oggm.core import gis, centerlines
from oggm.core.massbalance import ConstantMassBalance
pytestmark = pytest.mark.test_env("benchmark")
do_plot = False
class TestSouthGlacier(unittest.TestCase):
# Test case obtained from ITMIX
# Data available at:
# oggm-sample-data/tree/master/benchmarks/south_glacier
#
# Citation:
#
# <NAME>., <NAME>, <NAME>, and <NAME> (2011). Present
# dynamics and future prognosis of a slowly surging glacier.
# The Cryosphere, 5, 299-313. DOI: 10.5194/tc-5-299-2011, 2011.
def setUp(self):
# test directory
self.testdir = os.path.join(get_test_dir(), 'tmp')
if not os.path.exists(self.testdir):
os.makedirs(self.testdir)
self.clean_dir()
# Init
cfg.initialize()
cfg.PARAMS['use_multiprocessing'] = False
cfg.PARAMS['use_intersects'] = False
cfg.PATHS['working_dir'] = self.testdir
cfg.PATHS['dem_file'] = get_demo_file('dem_SouthGlacier.tif')
cfg.PARAMS['border'] = 10
self.tf = get_demo_file('cru_ts4.01.1901.2016.SouthGlacier.tmp.dat.nc')
self.pf = get_demo_file('cru_ts4.01.1901.2016.SouthGlacier.pre.dat.nc')
def tearDown(self):
self.rm_dir()
def rm_dir(self):
shutil.rmtree(self.testdir)
def clean_dir(self):
shutil.rmtree(self.testdir)
os.makedirs(self.testdir)
def get_ref_data(self, gdir):
# Reference data
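# Read the ice-thickness point measurements, map each point to the glacier grid
# (nearest neighbour) and average all points that fall into the same grid cell.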
df = salem.read_shapefile(get_demo_file('IceThick_SouthGlacier.shp'))
coords = np.array([p.xy for p in df.geometry]).squeeze()
df['lon'] = coords[:, 0]
df['lat'] = coords[:, 1]
df = df[['lon', 'lat', 'thick']]
ii, jj = gdir.grid.transform(df['lon'], df['lat'], crs=salem.wgs84,
nearest=True)
df['i'] = ii
df['j'] = jj
df['ij'] = ['{:04d}_{:04d}'.format(i, j) for i, j in zip(ii, jj)]
return df.groupby('ij').mean()
def test_mb(self):
# This is a function to produce the MB function needed by Anna
# Download the RGI file for the run
# Make a new dataframe of those
rgidf = gpd.read_file(get_demo_file('SouthGlacier.shp'))
# Go - initialize working directories
gdirs = workflow.init_glacier_directories(rgidf)
# Preprocessing tasks
task_list = [
tasks.define_glacier_region,
tasks.glacier_masks,
tasks.compute_centerlines,
tasks.initialize_flowlines,
tasks.catchment_area,
tasks.catchment_intersections,
tasks.catchment_width_geom,
tasks.catchment_width_correction,
]
for task in task_list:
execute_entity_task(task, gdirs)
execute_entity_task(tasks.process_cru_data, gdirs,
tmp_file=self.tf,
pre_file=self.pf)
execute_entity_task(tasks.local_t_star, gdirs)
execute_entity_task(tasks.mu_star_calibration, gdirs)
mbref = salem.GeoTiff(get_demo_file('mb_SouthGlacier.tif'))
demref = salem.GeoTiff(get_demo_file('dem_SouthGlacier.tif'))
mbref = mbref.get_vardata()
mbref[mbref == -9999] = np.NaN
demref = demref.get_vardata()[np.isfinite(mbref)]
mbref = mbref[np.isfinite(mbref)] * 1000
# compute the bias to make it 0 SMB on the 2D DEM
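# get_annual_mb returns ice thickness change in m of ice per second; multiplying
# by SEC_IN_YEAR and the ice density converts it to kg m-2 yr-1, i.e. mm w.e. yr-1.
# The model is then re-created with bias=np.average(mymb) so that the average
# modelled SMB over the DEM is ~0, matching the shifted observations.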
rho = cfg.PARAMS['ice_density']
mbmod = ConstantMassBalance(gdirs[0], bias=0)
mymb = mbmod.get_annual_mb(demref) * cfg.SEC_IN_YEAR * rho
mbmod = ConstantMassBalance(gdirs[0], bias=np.average(mymb))
mymb = mbmod.get_annual_mb(demref) * cfg.SEC_IN_YEAR * rho
np.testing.assert_allclose(np.average(mymb), 0., atol=1e-3)
# Same for ref
mbref = mbref - np.average(mbref)
np.testing.assert_allclose(np.average(mbref), 0., atol=1e-3)
# Fit poly
p = np.polyfit(demref, mbref, deg=2)
poly = np.poly1d(p)
myfit = poly(demref)
np.testing.assert_allclose(np.average(myfit), 0., atol=1e-3)
if do_plot:
import matplotlib.pyplot as plt
plt.scatter(mbref, demref, s=5,
label='Obs (2007-2012), shifted to Avg(SMB) = 0')
plt.scatter(mymb, demref, s=5, label='OGGM MB at t*')
plt.scatter(myfit, demref, s=5, label='Polyfit', c='C3')
plt.xlabel('MB (mm w.e yr-1)')
plt.ylabel('Altitude (m)')
plt.legend()
plt.show()
def test_inversion_attributes(self):
# Download the RGI file for the run
# Make a new dataframe of those
rgidf = gpd.read_file(get_demo_file('SouthGlacier.shp'))
# Go - initialize working directories
gdirs = workflow.init_glacier_directories(rgidf)
# Preprocessing tasks
task_list = [
tasks.define_glacier_region,
tasks.glacier_masks,
tasks.compute_centerlines,
tasks.initialize_flowlines,
tasks.catchment_area,
tasks.catchment_intersections,
tasks.catchment_width_geom,
tasks.catchment_width_correction,
]
for task in task_list:
execute_entity_task(task, gdirs)
execute_entity_task(tasks.process_cru_data, gdirs,
tmp_file=self.tf,
pre_file=self.pf)
execute_entity_task(tasks.local_t_star, gdirs)
execute_entity_task(tasks.mu_star_calibration, gdirs)
# Tested tasks
task_list = [
tasks.gridded_attributes,
tasks.gridded_mb_attributes,
]
for task in task_list:
execute_entity_task(task, gdirs)
# Check certain things
gdir = gdirs[0]
with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
# The max catchment area should be area of glacier
assert (ds['catchment_area'].max() ==
ds['glacier_mask'].sum() * gdir.grid.dx**2)
assert (ds['catchment_area_on_catch'].max() ==
ds['glacier_mask'].sum() * gdir.grid.dx**2)
# In the lowest parts of the glaciers the data should be equivalent
ds_low = ds.isel(y=ds.y < 6741500)
np.testing.assert_allclose(ds_low['lin_mb_above_z'],
ds_low['lin_mb_above_z_on_catch'])
np.testing.assert_allclose(ds_low['oggm_mb_above_z'],
ds_low['oggm_mb_above_z_on_catch'])
# Build some loose tests based on correlation
df = self.get_ref_data(gdir)
vns = ['topo',
'slope',
'aspect',
'slope_factor',
'dis_from_border',
'catchment_area',
'catchment_area_on_catch',
'lin_mb_above_z',
'lin_mb_above_z_on_catch',
'oggm_mb_above_z',
'oggm_mb_above_z_on_catch',
]
with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
for vn in vns:
df[vn] = ds[vn].isel(x=('z', df['i']), y=('z', df['j']))
# Loose tests based on correlations
cf = df.corr()
assert cf.loc['slope', 'slope_factor'] < -0.9
assert cf.loc['slope', 'thick'] < -0.4
assert cf.loc['dis_from_border', 'thick'] > 0.2
assert cf.loc['oggm_mb_above_z', 'thick'] > 0.5
assert cf.loc['lin_mb_above_z', 'thick'] > 0.5
assert cf.loc['lin_mb_above_z', 'oggm_mb_above_z'] > 0.95
def test_inversion(self):
# Download the RGI file for the run
# Make a new dataframe of those
rgidf = gpd.read_file(get_demo_file('SouthGlacier.shp'))
# Go - initialize working directories
gdirs = workflow.init_glacier_directories(rgidf)
# Preprocessing tasks
task_list = [
tasks.define_glacier_region,
tasks.glacier_masks,
tasks.compute_centerlines,
tasks.initialize_flowlines,
tasks.catchment_area,
tasks.catchment_intersections,
tasks.catchment_width_geom,
tasks.catchment_width_correction,
]
for task in task_list:
execute_entity_task(task, gdirs)
execute_entity_task(tasks.process_cru_data, gdirs,
tmp_file=self.tf,
pre_file=self.pf)
execute_entity_task(tasks.local_t_star, gdirs)
execute_entity_task(tasks.mu_star_calibration, gdirs)
# Inversion tasks
execute_entity_task(tasks.prepare_for_inversion, gdirs)
# We use the default parameters for this run
execute_entity_task(tasks.mass_conservation_inversion, gdirs)
execute_entity_task(tasks.distribute_thickness_per_altitude, gdirs,
varname_suffix='_alt')
execute_entity_task(tasks.distribute_thickness_interp, gdirs,
varname_suffix='_int')
# Reference data
gdir = gdirs[0]
df = self.get_ref_data(gdir)
with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
v = ds.distributed_thickness_alt
df['oggm_alt'] = v.isel(x=('z', df['i']), y=('z', df['j']))
v = ds.distributed_thickness_int
df['oggm_int'] = v.isel(x=('z', df['i']), y=('z', df['j']))
ds['ref'] = xr.zeros_like(ds.distributed_thickness_int) * np.NaN
ds['ref'].data[df['j'], df['i']] = df['thick']
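# Root-mean-square deviation (in m) between modelled and observed thickness at
# the measurement points, for both thickness-distribution variants.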
rmsd_int = ((df.oggm_int - df.thick) ** 2).mean() ** .5
rmsd_alt = ((df.oggm_alt - df.thick) ** 2).mean() ** .5
assert rmsd_int < 85
assert rmsd_alt < 85
dfm = df.mean()
np.testing.assert_allclose(dfm.thick, dfm.oggm_int, 50)
np.testing.assert_allclose(dfm.thick, dfm.oggm_alt, 50)
if do_plot:
import matplotlib.pyplot as plt
df.plot(kind='scatter', x='oggm_int', y='thick')
plt.axis('equal')
df.plot(kind='scatter', x='oggm_alt', y='thick')
plt.axis('equal')
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(12, 3))
ds.ref.plot(ax=ax1)
ds.distributed_thickness_int.plot(ax=ax2)
ds.distributed_thickness_alt.plot(ax=ax3)
plt.tight_layout()
plt.show()
def test_optimize_inversion(self):
# Download the RGI file for the run
# Make a new dataframe of those
rgidf = gpd.read_file(get_demo_file('SouthGlacier.shp'))
# Go - initialize working directories
gdirs = workflow.init_glacier_directories(rgidf)
# Preprocessing tasks
task_list = [
tasks.define_glacier_region,
tasks.glacier_masks,
tasks.compute_centerlines,
tasks.initialize_flowlines,
tasks.catchment_area,
tasks.catchment_intersections,
tasks.catchment_width_geom,
tasks.catchment_width_correction,
]
for task in task_list:
execute_entity_task(task, gdirs)
execute_entity_task(tasks.process_cru_data, gdirs,
tmp_file=self.tf,
pre_file=self.pf)
execute_entity_task(tasks.local_t_star, gdirs)
execute_entity_task(tasks.mu_star_calibration, gdirs)
# Reference data
gdir = gdirs[0]
df = self.get_ref_data(gdir)
# Inversion tasks
execute_entity_task(tasks.prepare_for_inversion, gdirs)
glen_a = cfg.PARAMS['inversion_glen_a']
fs = cfg.PARAMS['inversion_fs']
def to_optimize(x):
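# x holds multiplicative factors for Glen's creep parameter A and the sliding
# parameter fs; the score is the mean absolute mismatch between modelled and
# observed thickness at the reference points.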
tasks.mass_conservation_inversion(gdir,
glen_a=glen_a * x[0],
fs=fs * x[1])
tasks.distribute_thickness_per_altitude(gdir)
with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
thick = ds.distributed_thickness.isel(x=('z', df['i']),
y=('z', df['j']))
out = (np.abs(thick - df.thick)).mean()
return out
opti = optimization.minimize(to_optimize, [1., 1.],
bounds=((0.01, 10), (0.01, 10)),
tol=0.1)
# Check results and save.
execute_entity_task(tasks.mass_conservation_inversion, gdirs,
glen_a=glen_a*opti['x'][0],
fs=0)
execute_entity_task(tasks.distribute_thickness_per_altitude, gdirs)
with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
df['oggm'] = ds.distributed_thickness.isel(x=('z', df['i']),
y=('z', df['j']))
ds['ref'] = xr.zeros_like(ds.distributed_thickness) * np.NaN
ds['ref'].data[df['j'], df['i']] = df['thick']
rmsd = ((df.oggm - df.thick) ** 2).mean() ** .5
assert rmsd < 30
dfm = df.mean()
np.testing.assert_allclose(dfm.thick, dfm.oggm, 10)
if do_plot:
import matplotlib.pyplot as plt
df.plot(kind='scatter', x='oggm', y='thick')
plt.axis('equal')
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 3))
ds.ref.plot(ax=ax1)
ds.distributed_thickness.plot(ax=ax2)
plt.tight_layout()
plt.show()
def test_workflow(self):
# This is a check that the inversion workflow works fine
# Download the RGI file for the run
# Make a new dataframe of those
rgidf = gpd.read_file(get_demo_file('SouthGlacier.shp'))
# Go - initialize working directories
gdirs = workflow.init_glacier_directories(rgidf)
# Preprocessing tasks
task_list = [
tasks.define_glacier_region,
tasks.glacier_masks,
tasks.compute_centerlines,
tasks.initialize_flowlines,
tasks.catchment_area,
tasks.catchment_intersections,
tasks.catchment_width_geom,
tasks.catchment_width_correction,
tasks.compute_downstream_line,
tasks.compute_downstream_bedshape,
]
for task in task_list:
execute_entity_task(task, gdirs)
execute_entity_task(tasks.process_cru_data, gdirs,
tmp_file=self.tf,
pre_file=self.pf)
execute_entity_task(tasks.local_t_star, gdirs)
execute_entity_task(tasks.mu_star_calibration, gdirs)
# Inversion tasks
execute_entity_task(tasks.prepare_for_inversion, gdirs)
# We use the default parameters for this run
execute_entity_task(tasks.mass_conservation_inversion, gdirs)
execute_entity_task(tasks.filter_inversion_output, gdirs)
df = utils.compile_glacier_statistics(gdirs)
assert df.inv_thickness_m[0] < 100
if do_plot:
import matplotlib.pyplot as plt
from oggm.graphics import plot_inversion
plot_inversion(gdirs)
plt.show()
class TestCoxeGlacier(unittest.TestCase):
# Test case for a tidewater glacier
def setUp(self):
# test directory
self.testdir = os.path.join(get_test_dir(), 'tmp')
if not os.path.exists(self.testdir):
os.makedirs(self.testdir)
self.clean_dir()
self.rgi_file = get_demo_file('rgi_RGI50-01.10299.shp')
# Init
cfg.initialize()
cfg.PARAMS['use_intersects'] = False
cfg.PATHS['dem_file'] = get_demo_file('dem_RGI50-01.10299.tif')
cfg.PARAMS['border'] = 40
def tearDown(self):
self.rm_dir()
def rm_dir(self):
shutil.rmtree(self.testdir)
def clean_dir(self):
shutil.rmtree(self.testdir)
os.makedirs(self.testdir)
def test_set_width(self):
entity = gpd.read_file(self.rgi_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
gis.glacier_masks(gdir)
centerlines.compute_centerlines(gdir)
centerlines.initialize_flowlines(gdir)
centerlines.compute_downstream_line(gdir)
centerlines.compute_downstream_bedshape(gdir)
centerlines.catchment_area(gdir)
centerlines.catchment_intersections(gdir)
centerlines.catchment_width_geom(gdir)
centerlines.catchment_width_correction(gdir)
# Test that the area and the area-altitude distribution are fine
with utils.ncDataset(gdir.get_filepath('gridded_data')) as nc:
mask = nc.variables['glacier_mask'][:]
topo = nc.variables['topo_smoothed'][:]
rhgt = topo[np.where(mask)][:]
fls = gdir.read_pickle('inversion_flowlines')
hgt, widths = gdir.get_inversion_flowline_hw()
bs = 100
bins = np.arange(utils.nicenumber(np.min(hgt), bs, lower=True),
utils.nicenumber(np.max(hgt), bs) + 1,
bs)
h1, b = np.histogram(hgt, weights=widths, density=True, bins=bins)
h2, b = np.histogram(rhgt, density=True, bins=bins)
h1 = h1 / np.sum(h1)
h2 = h2 / np.sum(h2)
assert utils.rmsd(h1, h2) < 0.02 # less than 2% error
new_area = np.sum(widths * fls[-1].dx * gdir.grid.dx)
np.testing.assert_allclose(new_area, gdir.rgi_area_m2)
centerlines.terminus_width_correction(gdir, new_width=714)
fls = gdir.read_pickle('inversion_flowlines')
hgt, widths = gdir.get_inversion_flowline_hw()
# Check that the width is ok
np.testing.assert_allclose(fls[-1].widths[-1] * gdir.grid.dx, 714)
# Check for area distrib
bins = np.arange(utils.nicenumber(np.min(hgt), bs, lower=True),
utils.nicenumber(np.max(hgt), bs) + 1,
bs)
h1, b = np.histogram(hgt, weights=widths, density=True, bins=bins)
h2, b = np.histogram(rhgt, density=True, bins=bins)
h1 = h1 / np.sum(h1)
h2 = h2 / np.sum(h2)
assert utils.rmsd(h1, h2) < 0.02 # less than 2% error
new_area = np.sum(widths * fls[-1].dx * gdir.grid.dx)
np.testing.assert_allclose(new_area, gdir.rgi_area_m2)
def test_run(self):
entity = gpd.read_file(self.rgi_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
gis.glacier_masks(gdir)
centerlines.compute_centerlines(gdir)
centerlines.initialize_flowlines(gdir)
centerlines.compute_downstream_line(gdir)
centerlines.compute_downstream_bedshape(gdir)
centerlines.catchment_area(gdir)
centerlines.catchment_intersections(gdir)
centerlines.catchment_width_geom(gdir)
centerlines.catchment_width_correction(gdir)
# Climate tasks -- only data IO and tstar interpolation!
tasks.process_dummy_cru_file(gdir, seed=0)
tasks.local_t_star(gdir)
tasks.mu_star_calibration(gdir)
# Inversion tasks
tasks.find_inversion_calving(gdir)
# Final preparation for the run
tasks.init_present_time_glacier(gdir)
# check that calving happens in the real context as well
tasks.run_constant_climate(gdir, bias=0, nyears=200,
temperature_bias=-0.5)
with xr.open_dataset(gdir.get_filepath('model_diagnostics')) as ds:
assert ds.calving_m3[-1] > 10
|
[
"oggm.tasks.init_present_time_glacier",
"oggm.core.centerlines.compute_centerlines",
"oggm.core.centerlines.catchment_width_correction",
"numpy.sum",
"oggm.cfg.initialize",
"numpy.polyfit",
"numpy.abs",
"oggm.core.centerlines.compute_downstream_bedshape",
"xarray.zeros_like",
"oggm.core.centerlines.initialize_flowlines",
"numpy.histogram",
"oggm.graphics.plot_inversion",
"oggm.core.massbalance.ConstantMassBalance",
"shutil.rmtree",
"oggm.core.centerlines.catchment_width_geom",
"matplotlib.pyplot.tight_layout",
"oggm.tasks.run_constant_climate",
"scipy.optimize.minimize",
"oggm.core.centerlines.compute_downstream_line",
"os.path.exists",
"numpy.isfinite",
"numpy.max",
"numpy.testing.assert_allclose",
"matplotlib.pyplot.subplots",
"oggm.tasks.mu_star_calibration",
"numpy.average",
"matplotlib.pyplot.show",
"oggm.tests.funcs.get_test_dir",
"oggm.core.gis.define_glacier_region",
"matplotlib.pyplot.legend",
"oggm.core.centerlines.catchment_intersections",
"oggm.utils.get_demo_file",
"oggm.core.centerlines.terminus_width_correction",
"oggm.tasks.local_t_star",
"oggm.tasks.find_inversion_calving",
"oggm.core.centerlines.catchment_area",
"numpy.min",
"pytest.mark.test_env",
"matplotlib.pyplot.ylabel",
"numpy.poly1d",
"pytest.importorskip",
"oggm.workflow.execute_entity_task",
"oggm.core.gis.glacier_masks",
"os.makedirs",
"oggm.GlacierDirectory",
"matplotlib.pyplot.scatter",
"oggm.workflow.init_glacier_directories",
"matplotlib.pyplot.axis",
"oggm.tasks.mass_conservation_inversion",
"oggm.tasks.distribute_thickness_per_altitude",
"numpy.where",
"numpy.array",
"oggm.utils.rmsd",
"oggm.tasks.process_dummy_cru_file",
"matplotlib.pyplot.xlabel",
"oggm.utils.compile_glacier_statistics"
] |
[((174, 202), 'pytest.importorskip', 'pytest.importorskip', (['"""salem"""'], {}), "('salem')\n", (193, 202), False, 'import pytest\n'), ((209, 241), 'pytest.importorskip', 'pytest.importorskip', (['"""geopandas"""'], {}), "('geopandas')\n", (228, 241), False, 'import pytest\n'), ((547, 580), 'pytest.mark.test_env', 'pytest.mark.test_env', (['"""benchmark"""'], {}), "('benchmark')\n", (567, 580), False, 'import pytest\n'), ((1221, 1237), 'oggm.cfg.initialize', 'cfg.initialize', ([], {}), '()\n', (1235, 1237), True, 'import oggm.cfg as cfg\n'), ((1413, 1450), 'oggm.utils.get_demo_file', 'get_demo_file', (['"""dem_SouthGlacier.tif"""'], {}), "('dem_SouthGlacier.tif')\n", (1426, 1450), False, 'from oggm.utils import get_demo_file\n'), ((1504, 1565), 'oggm.utils.get_demo_file', 'get_demo_file', (['"""cru_ts4.01.1901.2016.SouthGlacier.tmp.dat.nc"""'], {}), "('cru_ts4.01.1901.2016.SouthGlacier.tmp.dat.nc')\n", (1517, 1565), False, 'from oggm.utils import get_demo_file\n'), ((1584, 1645), 'oggm.utils.get_demo_file', 'get_demo_file', (['"""cru_ts4.01.1901.2016.SouthGlacier.pre.dat.nc"""'], {}), "('cru_ts4.01.1901.2016.SouthGlacier.pre.dat.nc')\n", (1597, 1645), False, 'from oggm.utils import get_demo_file\n'), ((1724, 1751), 'shutil.rmtree', 'shutil.rmtree', (['self.testdir'], {}), '(self.testdir)\n', (1737, 1751), False, 'import shutil\n'), ((1786, 1813), 'shutil.rmtree', 'shutil.rmtree', (['self.testdir'], {}), '(self.testdir)\n', (1799, 1813), False, 'import shutil\n'), ((1822, 1847), 'os.makedirs', 'os.makedirs', (['self.testdir'], {}), '(self.testdir)\n', (1833, 1847), False, 'import os\n'), ((2750, 2790), 'oggm.workflow.init_glacier_directories', 'workflow.init_glacier_directories', (['rgidf'], {}), '(rgidf)\n', (2783, 2790), False, 'from oggm import tasks, utils, workflow\n'), ((3255, 3345), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.process_cru_data', 'gdirs'], {'tmp_file': 'self.tf', 'pre_file': 'self.pf'}), '(tasks.process_cru_data, gdirs, tmp_file=self.tf,\n pre_file=self.pf)\n', (3274, 3345), False, 'from oggm.workflow import execute_entity_task\n'), ((3406, 3452), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.local_t_star', 'gdirs'], {}), '(tasks.local_t_star, gdirs)\n', (3425, 3452), False, 'from oggm.workflow import execute_entity_task\n'), ((3461, 3514), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.mu_star_calibration', 'gdirs'], {}), '(tasks.mu_star_calibration, gdirs)\n', (3480, 3514), False, 'from oggm.workflow import execute_entity_task\n'), ((3952, 3989), 'oggm.core.massbalance.ConstantMassBalance', 'ConstantMassBalance', (['gdirs[0]'], {'bias': '(0)'}), '(gdirs[0], bias=0)\n', (3971, 3989), False, 'from oggm.core.massbalance import ConstantMassBalance\n'), ((4428, 4460), 'numpy.polyfit', 'np.polyfit', (['demref', 'mbref'], {'deg': '(2)'}), '(demref, mbref, deg=2)\n', (4438, 4460), True, 'import numpy as np\n'), ((4476, 4488), 'numpy.poly1d', 'np.poly1d', (['p'], {}), '(p)\n', (4485, 4488), True, 'import numpy as np\n'), ((5290, 5330), 'oggm.workflow.init_glacier_directories', 'workflow.init_glacier_directories', (['rgidf'], {}), '(rgidf)\n', (5323, 5330), False, 'from oggm import tasks, utils, workflow\n'), ((5795, 5885), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.process_cru_data', 'gdirs'], {'tmp_file': 'self.tf', 'pre_file': 'self.pf'}), '(tasks.process_cru_data, gdirs, tmp_file=self.tf,\n pre_file=self.pf)\n', (5814, 5885), False, 'from oggm.workflow import 
execute_entity_task\n'), ((5946, 5992), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.local_t_star', 'gdirs'], {}), '(tasks.local_t_star, gdirs)\n', (5965, 5992), False, 'from oggm.workflow import execute_entity_task\n'), ((6001, 6054), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.mu_star_calibration', 'gdirs'], {}), '(tasks.mu_star_calibration, gdirs)\n', (6020, 6054), False, 'from oggm.workflow import execute_entity_task\n'), ((8393, 8433), 'oggm.workflow.init_glacier_directories', 'workflow.init_glacier_directories', (['rgidf'], {}), '(rgidf)\n', (8426, 8433), False, 'from oggm import tasks, utils, workflow\n'), ((8898, 8988), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.process_cru_data', 'gdirs'], {'tmp_file': 'self.tf', 'pre_file': 'self.pf'}), '(tasks.process_cru_data, gdirs, tmp_file=self.tf,\n pre_file=self.pf)\n', (8917, 8988), False, 'from oggm.workflow import execute_entity_task\n'), ((9049, 9095), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.local_t_star', 'gdirs'], {}), '(tasks.local_t_star, gdirs)\n', (9068, 9095), False, 'from oggm.workflow import execute_entity_task\n'), ((9104, 9157), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.mu_star_calibration', 'gdirs'], {}), '(tasks.mu_star_calibration, gdirs)\n', (9123, 9157), False, 'from oggm.workflow import execute_entity_task\n'), ((9193, 9248), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.prepare_for_inversion', 'gdirs'], {}), '(tasks.prepare_for_inversion, gdirs)\n', (9212, 9248), False, 'from oggm.workflow import execute_entity_task\n'), ((9310, 9371), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.mass_conservation_inversion', 'gdirs'], {}), '(tasks.mass_conservation_inversion, gdirs)\n', (9329, 9371), False, 'from oggm.workflow import execute_entity_task\n'), ((9380, 9474), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.distribute_thickness_per_altitude', 'gdirs'], {'varname_suffix': '"""_alt"""'}), "(tasks.distribute_thickness_per_altitude, gdirs,\n varname_suffix='_alt')\n", (9399, 9474), False, 'from oggm.workflow import execute_entity_task\n'), ((9507, 9595), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.distribute_thickness_interp', 'gdirs'], {'varname_suffix': '"""_int"""'}), "(tasks.distribute_thickness_interp, gdirs,\n varname_suffix='_int')\n", (9526, 9595), False, 'from oggm.workflow import execute_entity_task\n'), ((10371, 10426), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['dfm.thick', 'dfm.oggm_int', '(50)'], {}), '(dfm.thick, dfm.oggm_int, 50)\n', (10397, 10426), True, 'import numpy as np\n'), ((10435, 10490), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['dfm.thick', 'dfm.oggm_alt', '(50)'], {}), '(dfm.thick, dfm.oggm_alt, 50)\n', (10461, 10490), True, 'import numpy as np\n'), ((11254, 11294), 'oggm.workflow.init_glacier_directories', 'workflow.init_glacier_directories', (['rgidf'], {}), '(rgidf)\n', (11287, 11294), False, 'from oggm import tasks, utils, workflow\n'), ((11759, 11849), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.process_cru_data', 'gdirs'], {'tmp_file': 'self.tf', 'pre_file': 'self.pf'}), '(tasks.process_cru_data, gdirs, tmp_file=self.tf,\n pre_file=self.pf)\n', (11778, 11849), False, 'from oggm.workflow import execute_entity_task\n'), ((11910, 11956), 'oggm.workflow.execute_entity_task', 'execute_entity_task', 
(['tasks.local_t_star', 'gdirs'], {}), '(tasks.local_t_star, gdirs)\n', (11929, 11956), False, 'from oggm.workflow import execute_entity_task\n'), ((11965, 12018), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.mu_star_calibration', 'gdirs'], {}), '(tasks.mu_star_calibration, gdirs)\n', (11984, 12018), False, 'from oggm.workflow import execute_entity_task\n'), ((12141, 12196), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.prepare_for_inversion', 'gdirs'], {}), '(tasks.prepare_for_inversion, gdirs)\n', (12160, 12196), False, 'from oggm.workflow import execute_entity_task\n'), ((12867, 12960), 'scipy.optimize.minimize', 'optimization.minimize', (['to_optimize', '[1.0, 1.0]'], {'bounds': '((0.01, 10), (0.01, 10))', 'tol': '(0.1)'}), '(to_optimize, [1.0, 1.0], bounds=((0.01, 10), (0.01, \n 10)), tol=0.1)\n', (12888, 12960), True, 'from scipy import optimize as optimization\n'), ((13070, 13171), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.mass_conservation_inversion', 'gdirs'], {'glen_a': "(glen_a * opti['x'][0])", 'fs': '(0)'}), "(tasks.mass_conservation_inversion, gdirs, glen_a=glen_a *\n opti['x'][0], fs=0)\n", (13089, 13171), False, 'from oggm.workflow import execute_entity_task\n'), ((13230, 13297), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.distribute_thickness_per_altitude', 'gdirs'], {}), '(tasks.distribute_thickness_per_altitude, gdirs)\n', (13249, 13297), False, 'from oggm.workflow import execute_entity_task\n'), ((13763, 13814), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['dfm.thick', 'dfm.oggm', '(10)'], {}), '(dfm.thick, dfm.oggm, 10)\n', (13789, 13814), True, 'import numpy as np\n'), ((14474, 14514), 'oggm.workflow.init_glacier_directories', 'workflow.init_glacier_directories', (['rgidf'], {}), '(rgidf)\n', (14507, 14514), False, 'from oggm import tasks, utils, workflow\n'), ((15069, 15159), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.process_cru_data', 'gdirs'], {'tmp_file': 'self.tf', 'pre_file': 'self.pf'}), '(tasks.process_cru_data, gdirs, tmp_file=self.tf,\n pre_file=self.pf)\n', (15088, 15159), False, 'from oggm.workflow import execute_entity_task\n'), ((15220, 15266), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.local_t_star', 'gdirs'], {}), '(tasks.local_t_star, gdirs)\n', (15239, 15266), False, 'from oggm.workflow import execute_entity_task\n'), ((15275, 15328), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.mu_star_calibration', 'gdirs'], {}), '(tasks.mu_star_calibration, gdirs)\n', (15294, 15328), False, 'from oggm.workflow import execute_entity_task\n'), ((15364, 15419), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.prepare_for_inversion', 'gdirs'], {}), '(tasks.prepare_for_inversion, gdirs)\n', (15383, 15419), False, 'from oggm.workflow import execute_entity_task\n'), ((15481, 15542), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.mass_conservation_inversion', 'gdirs'], {}), '(tasks.mass_conservation_inversion, gdirs)\n', (15500, 15542), False, 'from oggm.workflow import execute_entity_task\n'), ((15551, 15608), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.filter_inversion_output', 'gdirs'], {}), '(tasks.filter_inversion_output, gdirs)\n', (15570, 15608), False, 'from oggm.workflow import execute_entity_task\n'), ((15623, 15662), 'oggm.utils.compile_glacier_statistics', 'utils.compile_glacier_statistics', 
(['gdirs'], {}), '(gdirs)\n', (15655, 15662), False, 'from oggm import tasks, utils, workflow\n'), ((16206, 16245), 'oggm.utils.get_demo_file', 'get_demo_file', (['"""rgi_RGI50-01.10299.shp"""'], {}), "('rgi_RGI50-01.10299.shp')\n", (16219, 16245), False, 'from oggm.utils import get_demo_file\n'), ((16270, 16286), 'oggm.cfg.initialize', 'cfg.initialize', ([], {}), '()\n', (16284, 16286), True, 'import oggm.cfg as cfg\n'), ((16364, 16403), 'oggm.utils.get_demo_file', 'get_demo_file', (['"""dem_RGI50-01.10299.tif"""'], {}), "('dem_RGI50-01.10299.tif')\n", (16377, 16403), False, 'from oggm.utils import get_demo_file\n'), ((16516, 16543), 'shutil.rmtree', 'shutil.rmtree', (['self.testdir'], {}), '(self.testdir)\n', (16529, 16543), False, 'import shutil\n'), ((16578, 16605), 'shutil.rmtree', 'shutil.rmtree', (['self.testdir'], {}), '(self.testdir)\n', (16591, 16605), False, 'import shutil\n'), ((16614, 16639), 'os.makedirs', 'os.makedirs', (['self.testdir'], {}), '(self.testdir)\n', (16625, 16639), False, 'import os\n'), ((16741, 16793), 'oggm.GlacierDirectory', 'oggm.GlacierDirectory', (['entity'], {'base_dir': 'self.testdir'}), '(entity, base_dir=self.testdir)\n', (16762, 16793), False, 'import oggm\n'), ((16802, 16833), 'oggm.core.gis.define_glacier_region', 'gis.define_glacier_region', (['gdir'], {}), '(gdir)\n', (16827, 16833), False, 'from oggm.core import gis, centerlines\n'), ((16842, 16865), 'oggm.core.gis.glacier_masks', 'gis.glacier_masks', (['gdir'], {}), '(gdir)\n', (16859, 16865), False, 'from oggm.core import gis, centerlines\n'), ((16874, 16911), 'oggm.core.centerlines.compute_centerlines', 'centerlines.compute_centerlines', (['gdir'], {}), '(gdir)\n', (16905, 16911), False, 'from oggm.core import gis, centerlines\n'), ((16920, 16958), 'oggm.core.centerlines.initialize_flowlines', 'centerlines.initialize_flowlines', (['gdir'], {}), '(gdir)\n', (16952, 16958), False, 'from oggm.core import gis, centerlines\n'), ((16967, 17008), 'oggm.core.centerlines.compute_downstream_line', 'centerlines.compute_downstream_line', (['gdir'], {}), '(gdir)\n', (17002, 17008), False, 'from oggm.core import gis, centerlines\n'), ((17017, 17062), 'oggm.core.centerlines.compute_downstream_bedshape', 'centerlines.compute_downstream_bedshape', (['gdir'], {}), '(gdir)\n', (17056, 17062), False, 'from oggm.core import gis, centerlines\n'), ((17071, 17103), 'oggm.core.centerlines.catchment_area', 'centerlines.catchment_area', (['gdir'], {}), '(gdir)\n', (17097, 17103), False, 'from oggm.core import gis, centerlines\n'), ((17112, 17153), 'oggm.core.centerlines.catchment_intersections', 'centerlines.catchment_intersections', (['gdir'], {}), '(gdir)\n', (17147, 17153), False, 'from oggm.core import gis, centerlines\n'), ((17162, 17200), 'oggm.core.centerlines.catchment_width_geom', 'centerlines.catchment_width_geom', (['gdir'], {}), '(gdir)\n', (17194, 17200), False, 'from oggm.core import gis, centerlines\n'), ((17209, 17253), 'oggm.core.centerlines.catchment_width_correction', 'centerlines.catchment_width_correction', (['gdir'], {}), '(gdir)\n', (17247, 17253), False, 'from oggm.core import gis, centerlines\n'), ((17833, 17891), 'numpy.histogram', 'np.histogram', (['hgt'], {'weights': 'widths', 'density': '(True)', 'bins': 'bins'}), '(hgt, weights=widths, density=True, bins=bins)\n', (17845, 17891), True, 'import numpy as np\n'), ((17908, 17951), 'numpy.histogram', 'np.histogram', (['rhgt'], {'density': '(True)', 'bins': 'bins'}), '(rhgt, density=True, bins=bins)\n', (17920, 17951), True, 'import numpy as 
np\n'), ((18092, 18134), 'numpy.sum', 'np.sum', (['(widths * fls[-1].dx * gdir.grid.dx)'], {}), '(widths * fls[-1].dx * gdir.grid.dx)\n', (18098, 18134), True, 'import numpy as np\n'), ((18143, 18197), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['new_area', 'gdir.rgi_area_m2'], {}), '(new_area, gdir.rgi_area_m2)\n', (18169, 18197), True, 'import numpy as np\n'), ((18207, 18265), 'oggm.core.centerlines.terminus_width_correction', 'centerlines.terminus_width_correction', (['gdir'], {'new_width': '(714)'}), '(gdir, new_width=714)\n', (18244, 18265), False, 'from oggm.core import gis, centerlines\n'), ((18422, 18488), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(fls[-1].widths[-1] * gdir.grid.dx)', '(714)'], {}), '(fls[-1].widths[-1] * gdir.grid.dx, 714)\n', (18448, 18488), True, 'import numpy as np\n'), ((18704, 18762), 'numpy.histogram', 'np.histogram', (['hgt'], {'weights': 'widths', 'density': '(True)', 'bins': 'bins'}), '(hgt, weights=widths, density=True, bins=bins)\n', (18716, 18762), True, 'import numpy as np\n'), ((18779, 18822), 'numpy.histogram', 'np.histogram', (['rhgt'], {'density': '(True)', 'bins': 'bins'}), '(rhgt, density=True, bins=bins)\n', (18791, 18822), True, 'import numpy as np\n'), ((18963, 19005), 'numpy.sum', 'np.sum', (['(widths * fls[-1].dx * gdir.grid.dx)'], {}), '(widths * fls[-1].dx * gdir.grid.dx)\n', (18969, 19005), True, 'import numpy as np\n'), ((19014, 19068), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['new_area', 'gdir.rgi_area_m2'], {}), '(new_area, gdir.rgi_area_m2)\n', (19040, 19068), True, 'import numpy as np\n'), ((19165, 19217), 'oggm.GlacierDirectory', 'oggm.GlacierDirectory', (['entity'], {'base_dir': 'self.testdir'}), '(entity, base_dir=self.testdir)\n', (19186, 19217), False, 'import oggm\n'), ((19226, 19257), 'oggm.core.gis.define_glacier_region', 'gis.define_glacier_region', (['gdir'], {}), '(gdir)\n', (19251, 19257), False, 'from oggm.core import gis, centerlines\n'), ((19266, 19289), 'oggm.core.gis.glacier_masks', 'gis.glacier_masks', (['gdir'], {}), '(gdir)\n', (19283, 19289), False, 'from oggm.core import gis, centerlines\n'), ((19298, 19335), 'oggm.core.centerlines.compute_centerlines', 'centerlines.compute_centerlines', (['gdir'], {}), '(gdir)\n', (19329, 19335), False, 'from oggm.core import gis, centerlines\n'), ((19344, 19382), 'oggm.core.centerlines.initialize_flowlines', 'centerlines.initialize_flowlines', (['gdir'], {}), '(gdir)\n', (19376, 19382), False, 'from oggm.core import gis, centerlines\n'), ((19391, 19432), 'oggm.core.centerlines.compute_downstream_line', 'centerlines.compute_downstream_line', (['gdir'], {}), '(gdir)\n', (19426, 19432), False, 'from oggm.core import gis, centerlines\n'), ((19441, 19486), 'oggm.core.centerlines.compute_downstream_bedshape', 'centerlines.compute_downstream_bedshape', (['gdir'], {}), '(gdir)\n', (19480, 19486), False, 'from oggm.core import gis, centerlines\n'), ((19495, 19527), 'oggm.core.centerlines.catchment_area', 'centerlines.catchment_area', (['gdir'], {}), '(gdir)\n', (19521, 19527), False, 'from oggm.core import gis, centerlines\n'), ((19536, 19577), 'oggm.core.centerlines.catchment_intersections', 'centerlines.catchment_intersections', (['gdir'], {}), '(gdir)\n', (19571, 19577), False, 'from oggm.core import gis, centerlines\n'), ((19586, 19624), 'oggm.core.centerlines.catchment_width_geom', 'centerlines.catchment_width_geom', (['gdir'], {}), '(gdir)\n', (19618, 19624), False, 'from oggm.core import gis, centerlines\n'), 
((19633, 19677), 'oggm.core.centerlines.catchment_width_correction', 'centerlines.catchment_width_correction', (['gdir'], {}), '(gdir)\n', (19671, 19677), False, 'from oggm.core import gis, centerlines\n'), ((19752, 19794), 'oggm.tasks.process_dummy_cru_file', 'tasks.process_dummy_cru_file', (['gdir'], {'seed': '(0)'}), '(gdir, seed=0)\n', (19780, 19794), False, 'from oggm import tasks, utils, workflow\n'), ((19803, 19827), 'oggm.tasks.local_t_star', 'tasks.local_t_star', (['gdir'], {}), '(gdir)\n', (19821, 19827), False, 'from oggm import tasks, utils, workflow\n'), ((19836, 19867), 'oggm.tasks.mu_star_calibration', 'tasks.mu_star_calibration', (['gdir'], {}), '(gdir)\n', (19861, 19867), False, 'from oggm import tasks, utils, workflow\n'), ((19903, 19937), 'oggm.tasks.find_inversion_calving', 'tasks.find_inversion_calving', (['gdir'], {}), '(gdir)\n', (19931, 19937), False, 'from oggm import tasks, utils, workflow\n'), ((19987, 20024), 'oggm.tasks.init_present_time_glacier', 'tasks.init_present_time_glacier', (['gdir'], {}), '(gdir)\n', (20018, 20024), False, 'from oggm import tasks, utils, workflow\n'), ((20099, 20174), 'oggm.tasks.run_constant_climate', 'tasks.run_constant_climate', (['gdir'], {'bias': '(0)', 'nyears': '(200)', 'temperature_bias': '(-0.5)'}), '(gdir, bias=0, nyears=200, temperature_bias=-0.5)\n', (20125, 20174), False, 'from oggm import tasks, utils, workflow\n'), ((1066, 1080), 'oggm.tests.funcs.get_test_dir', 'get_test_dir', ([], {}), '()\n', (1078, 1080), False, 'from oggm.tests.funcs import get_test_dir\n'), ((1104, 1132), 'os.path.exists', 'os.path.exists', (['self.testdir'], {}), '(self.testdir)\n', (1118, 1132), False, 'import os\n'), ((1146, 1171), 'os.makedirs', 'os.makedirs', (['self.testdir'], {}), '(self.testdir)\n', (1157, 1171), False, 'import os\n'), ((1943, 1985), 'oggm.utils.get_demo_file', 'get_demo_file', (['"""IceThick_SouthGlacier.shp"""'], {}), "('IceThick_SouthGlacier.shp')\n", (1956, 1985), False, 'from oggm.utils import get_demo_file\n'), ((2652, 2685), 'oggm.utils.get_demo_file', 'get_demo_file', (['"""SouthGlacier.shp"""'], {}), "('SouthGlacier.shp')\n", (2665, 2685), False, 'from oggm.utils import get_demo_file\n'), ((3213, 3245), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['task', 'gdirs'], {}), '(task, gdirs)\n', (3232, 3245), False, 'from oggm.workflow import execute_entity_task\n'), ((3546, 3582), 'oggm.utils.get_demo_file', 'get_demo_file', (['"""mb_SouthGlacier.tif"""'], {}), "('mb_SouthGlacier.tif')\n", (3559, 3582), False, 'from oggm.utils import get_demo_file\n'), ((3615, 3652), 'oggm.utils.get_demo_file', 'get_demo_file', (['"""dem_SouthGlacier.tif"""'], {}), "('dem_SouthGlacier.tif')\n", (3628, 3652), False, 'from oggm.utils import get_demo_file\n'), ((3768, 3786), 'numpy.isfinite', 'np.isfinite', (['mbref'], {}), '(mbref)\n', (3779, 3786), True, 'import numpy as np\n'), ((4228, 4244), 'numpy.average', 'np.average', (['mymb'], {}), '(mymb)\n', (4238, 4244), True, 'import numpy as np\n'), ((4309, 4326), 'numpy.average', 'np.average', (['mbref'], {}), '(mbref)\n', (4319, 4326), True, 'import numpy as np\n'), ((4362, 4379), 'numpy.average', 'np.average', (['mbref'], {}), '(mbref)\n', (4372, 4379), True, 'import numpy as np\n'), ((4553, 4570), 'numpy.average', 'np.average', (['myfit'], {}), '(myfit)\n', (4563, 4570), True, 'import numpy as np\n'), ((4664, 4750), 'matplotlib.pyplot.scatter', 'plt.scatter', (['mbref', 'demref'], {'s': '(5)', 'label': '"""Obs (2007-2012), shifted to Avg(SMB) = 0"""'}), "(mbref, demref, 
s=5, label=\n 'Obs (2007-2012), shifted to Avg(SMB) = 0')\n", (4675, 4750), True, 'import matplotlib.pyplot as plt\n'), ((4782, 4835), 'matplotlib.pyplot.scatter', 'plt.scatter', (['mymb', 'demref'], {'s': '(5)', 'label': '"""OGGM MB at t*"""'}), "(mymb, demref, s=5, label='OGGM MB at t*')\n", (4793, 4835), True, 'import matplotlib.pyplot as plt\n'), ((4848, 4904), 'matplotlib.pyplot.scatter', 'plt.scatter', (['myfit', 'demref'], {'s': '(5)', 'label': '"""Polyfit"""', 'c': '"""C3"""'}), "(myfit, demref, s=5, label='Polyfit', c='C3')\n", (4859, 4904), True, 'import matplotlib.pyplot as plt\n'), ((4917, 4947), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""MB (mm w.e yr-1)"""'], {}), "('MB (mm w.e yr-1)')\n", (4927, 4947), True, 'import matplotlib.pyplot as plt\n'), ((4960, 4986), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Altidude (m)"""'], {}), "('Altidude (m)')\n", (4970, 4986), True, 'import matplotlib.pyplot as plt\n'), ((4999, 5011), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5009, 5011), True, 'import matplotlib.pyplot as plt\n'), ((5024, 5034), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5032, 5034), True, 'import matplotlib.pyplot as plt\n'), ((5192, 5225), 'oggm.utils.get_demo_file', 'get_demo_file', (['"""SouthGlacier.shp"""'], {}), "('SouthGlacier.shp')\n", (5205, 5225), False, 'from oggm.utils import get_demo_file\n'), ((5753, 5785), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['task', 'gdirs'], {}), '(task, gdirs)\n', (5772, 5785), False, 'from oggm.workflow import execute_entity_task\n'), ((6233, 6265), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['task', 'gdirs'], {}), '(task, gdirs)\n', (6252, 6265), False, 'from oggm.workflow import execute_entity_task\n'), ((6834, 6926), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (["ds_low['lin_mb_above_z']", "ds_low['lin_mb_above_z_on_catch']"], {}), "(ds_low['lin_mb_above_z'], ds_low[\n 'lin_mb_above_z_on_catch'])\n", (6860, 6926), True, 'import numpy as np\n'), ((6973, 7067), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (["ds_low['oggm_mb_above_z']", "ds_low['oggm_mb_above_z_on_catch']"], {}), "(ds_low['oggm_mb_above_z'], ds_low[\n 'oggm_mb_above_z_on_catch'])\n", (6999, 7067), True, 'import numpy as np\n'), ((8295, 8328), 'oggm.utils.get_demo_file', 'get_demo_file', (['"""SouthGlacier.shp"""'], {}), "('SouthGlacier.shp')\n", (8308, 8328), False, 'from oggm.utils import get_demo_file\n'), ((8856, 8888), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['task', 'gdirs'], {}), '(task, gdirs)\n', (8875, 8888), False, 'from oggm.workflow import execute_entity_task\n'), ((10629, 10646), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (10637, 10646), True, 'import matplotlib.pyplot as plt\n'), ((10720, 10737), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (10728, 10737), True, 'import matplotlib.pyplot as plt\n'), ((10771, 10806), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(12, 3)'}), '(1, 3, figsize=(12, 3))\n', (10783, 10806), True, 'import matplotlib.pyplot as plt\n'), ((10959, 10977), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10975, 10977), True, 'import matplotlib.pyplot as plt\n'), ((10990, 11000), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10998, 11000), True, 'import matplotlib.pyplot as plt\n'), ((11156, 11189), 'oggm.utils.get_demo_file', 'get_demo_file', 
(['"""SouthGlacier.shp"""'], {}), "('SouthGlacier.shp')\n", (11169, 11189), False, 'from oggm.utils import get_demo_file\n'), ((11717, 11749), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['task', 'gdirs'], {}), '(task, gdirs)\n', (11736, 11749), False, 'from oggm.workflow import execute_entity_task\n'), ((12327, 12402), 'oggm.tasks.mass_conservation_inversion', 'tasks.mass_conservation_inversion', (['gdir'], {'glen_a': '(glen_a * x[0])', 'fs': '(fs * x[1])'}), '(gdir, glen_a=glen_a * x[0], fs=fs * x[1])\n', (12360, 12402), False, 'from oggm import tasks, utils, workflow\n'), ((12507, 12552), 'oggm.tasks.distribute_thickness_per_altitude', 'tasks.distribute_thickness_per_altitude', (['gdir'], {}), '(gdir)\n', (12546, 12552), False, 'from oggm import tasks, utils, workflow\n'), ((13948, 13965), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (13956, 13965), True, 'import matplotlib.pyplot as plt\n'), ((13994, 14028), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(8, 3)'}), '(1, 2, figsize=(8, 3))\n', (14006, 14028), True, 'import matplotlib.pyplot as plt\n'), ((14123, 14141), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14139, 14141), True, 'import matplotlib.pyplot as plt\n'), ((14154, 14164), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14162, 14164), True, 'import matplotlib.pyplot as plt\n'), ((14376, 14409), 'oggm.utils.get_demo_file', 'get_demo_file', (['"""SouthGlacier.shp"""'], {}), "('SouthGlacier.shp')\n", (14389, 14409), False, 'from oggm.utils import get_demo_file\n'), ((15027, 15059), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['task', 'gdirs'], {}), '(task, gdirs)\n', (15046, 15059), False, 'from oggm.workflow import execute_entity_task\n'), ((15836, 15857), 'oggm.graphics.plot_inversion', 'plot_inversion', (['gdirs'], {}), '(gdirs)\n', (15850, 15857), False, 'from oggm.graphics import plot_inversion\n'), ((15870, 15880), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15878, 15880), True, 'import matplotlib.pyplot as plt\n'), ((16050, 16064), 'oggm.tests.funcs.get_test_dir', 'get_test_dir', ([], {}), '()\n', (16062, 16064), False, 'from oggm.tests.funcs import get_test_dir\n'), ((16088, 16116), 'os.path.exists', 'os.path.exists', (['self.testdir'], {}), '(self.testdir)\n', (16102, 16116), False, 'import os\n'), ((16130, 16155), 'os.makedirs', 'os.makedirs', (['self.testdir'], {}), '(self.testdir)\n', (16141, 16155), False, 'import os\n'), ((17970, 17980), 'numpy.sum', 'np.sum', (['h1'], {}), '(h1)\n', (17976, 17980), True, 'import numpy as np\n'), ((17999, 18009), 'numpy.sum', 'np.sum', (['h2'], {}), '(h2)\n', (18005, 18009), True, 'import numpy as np\n'), ((18025, 18043), 'oggm.utils.rmsd', 'utils.rmsd', (['h1', 'h2'], {}), '(h1, h2)\n', (18035, 18043), False, 'from oggm import tasks, utils, workflow\n'), ((18841, 18851), 'numpy.sum', 'np.sum', (['h1'], {}), '(h1)\n', (18847, 18851), True, 'import numpy as np\n'), ((18870, 18880), 'numpy.sum', 'np.sum', (['h2'], {}), '(h2)\n', (18876, 18880), True, 'import numpy as np\n'), ((18896, 18914), 'oggm.utils.rmsd', 'utils.rmsd', (['h1', 'h2'], {}), '(h1, h2)\n', (18906, 18914), False, 'from oggm import tasks, utils, workflow\n'), ((2004, 2041), 'numpy.array', 'np.array', (['[p.xy for p in df.geometry]'], {}), '([p.xy for p in df.geometry])\n', (2012, 2041), True, 'import numpy as np\n'), ((3810, 3828), 'numpy.isfinite', 'np.isfinite', (['mbref'], {}), '(mbref)\n', (3821, 3828), True, 'import 
numpy as np\n'), ((4108, 4124), 'numpy.average', 'np.average', (['mymb'], {}), '(mymb)\n', (4118, 4124), True, 'import numpy as np\n'), ((10039, 10082), 'xarray.zeros_like', 'xr.zeros_like', (['ds.distributed_thickness_int'], {}), '(ds.distributed_thickness_int)\n', (10052, 10082), True, 'import xarray as xr\n'), ((13540, 13579), 'xarray.zeros_like', 'xr.zeros_like', (['ds.distributed_thickness'], {}), '(ds.distributed_thickness)\n', (13553, 13579), True, 'import xarray as xr\n'), ((17505, 17519), 'numpy.where', 'np.where', (['mask'], {}), '(mask)\n', (17513, 17519), True, 'import numpy as np\n'), ((17694, 17705), 'numpy.min', 'np.min', (['hgt'], {}), '(hgt)\n', (17700, 17705), True, 'import numpy as np\n'), ((18565, 18576), 'numpy.min', 'np.min', (['hgt'], {}), '(hgt)\n', (18571, 18576), True, 'import numpy as np\n'), ((17766, 17777), 'numpy.max', 'np.max', (['hgt'], {}), '(hgt)\n', (17772, 17777), True, 'import numpy as np\n'), ((18637, 18648), 'numpy.max', 'np.max', (['hgt'], {}), '(hgt)\n', (18643, 18648), True, 'import numpy as np\n'), ((12795, 12819), 'numpy.abs', 'np.abs', (['(thick - df.thick)'], {}), '(thick - df.thick)\n', (12801, 12819), True, 'import numpy as np\n')]
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains training and sampling functions for an autoregressive model."""
import functools
from typing import Any, Callable
from absl import logging
from flax import linen as nn
from flax import struct
from flax.training import common_utils
import jax
import jax.numpy as jnp
import ml_collections
import numpy as np
from autoregressive_diffusion.model import distributions
from autoregressive_diffusion.model.autoregressive_diffusion import ardm_utils
from autoregressive_diffusion.utils import util_fns
def cross_entropy(logits, targets):
"""Compute cross entropy between logits and categorical targets.
Args:
logits: [batch, length, num_classes] float array.
targets: categorical targets [batch, length] int array.
Returns:
Cross-entropy loss per batch element, normalized per dimension and expressed in bits.
"""
if logits.ndim != targets.ndim + 1:
raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %
(str(logits.shape), str(targets.shape)))
vocab_size = logits.shape[-1]
onehot_targets = common_utils.onehot(targets, vocab_size)
loss = -jnp.sum(onehot_targets * nn.log_softmax(logits), axis=-1)
d = np.prod(targets.shape[1:])
loss = util_fns.sum_except_batch(loss) / d / np.log(2)
return loss
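# Shape example (illustrative): logits of shape [2, 8, 256] with targets of
# shape [2, 8] give d = 8, and the returned loss is a length-2 array holding the
# per-example negative log-likelihood in bits per dimension.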
def compute_accuracy(logits, targets):
"""Compute accuracy for logits and categorical targets.
Args:
logits: [batch, length, num_classes] float array.
targets: categorical targets [batch, length] int array.
Returns:
Accuracy per batch element, averaged over dimensions.
"""
if logits.shape[:-1] != targets.shape[:-1]:
raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %
(str(logits.shape), str(targets.shape)))
logits = logits[:, :, None, :] # Insert empty channel axis.
d = float(np.prod(logits.shape[1:-1]))
acc = jnp.equal(jnp.argmax(logits, axis=-1), targets) / d
acc = util_fns.sum_except_batch(acc)
return acc
class ARM(struct.PyTreeNode):
"""Static model object that wraps important model functions."""
config: ml_collections.config_dict.config_dict.ConfigDict
apply_fn: Callable[Ellipsis, Any]
logprob_fn: Callable[Ellipsis, Any]
sample_fn: Callable[Ellipsis, Any]
neural_net: Any
num_steps: int
policy_support: bool = False
num_stages: int = 1
absorbing_state: int = 0
random_order: bool = False
def log_px(self, rng, params, x, train, context=None):
batch_size = x.shape[0]
if self.random_order:
logging.info('Log-likelihood for a random-order ARM XLNet style.')
rng, rng_perm = jax.random.split(rng)
permutations = ardm_utils.get_batch_permutations(rng_perm, batch_size,
self.num_steps)
else:
logging.info('Log-likelihood for a standard ARM.')
permutations = None
net_out = self.apply_fn(
{'params': params}, x, t=None, mask=None, train=train, context=context,
permutations=permutations,
rngs={'dropout': rng} if train else None)
d = float(np.prod(net_out.shape[1:-1]))
log_px_elementwise = util_fns.sum_except_batch(self.logprob_fn(x, net_out))
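# Normalize by the number of dimensions and convert from nats to bits, so the
# reported log-likelihood is in bits per dimension.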
log_px = log_px_elementwise / d / np.log(2)
neg_acc = -compute_accuracy(logits=net_out, targets=x)
t_batch_dummy = jnp.zeros((batch_size,), dtype=jnp.int32)
loss_components_dummy = jnp.zeros((batch_size,))
return log_px, loss_components_dummy, neg_acc, t_batch_dummy
def elbo(self, rng, params, x, train, context=None):
return self.log_px(rng, params, x, train, context)
def sample(self, rng, params, batch_size, context=None):
chain_sharded = self.p_sample(rng, params, batch_size, context)
chain = chain_sharded.reshape(
chain_sharded.shape[0], batch_size, *chain_sharded.shape[3:])
return chain
@functools.partial(jax.pmap, in_axes=(None, None, 0, None, 0),
out_axes=1,
static_broadcasted_argnums=(0, 3), axis_name='batch')
def p_sample(self, rng, params, batch_size, context):
"""Samples from the model, calls sample_step for every timestep."""
rng = jax.random.fold_in(rng, jax.lax.axis_index('batch'))
assert batch_size % jax.local_device_count() == 0
per_device_batch_size = batch_size // jax.local_device_count()
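# Each device samples its own shard of the batch; out_axes=1 places the device
# axis at position 1, and the caller merges it back into the batch axis.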
logging.info('Sampling from model, hope you are patient...')
if self.random_order:
rng, rng_perm = jax.random.split(rng)
orders = ardm_utils.get_batch_permutations(rng_perm,
per_device_batch_size,
self.num_steps)
else:
orders = jnp.arange(0, self.num_steps)[None, :]
orders = jnp.repeat(orders, repeats=per_device_batch_size, axis=0)
chain = []
x = jnp.full((per_device_batch_size, *self.config.data_shape),
fill_value=self.absorbing_state,
dtype=jnp.int32)
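# Sampling starts from a canvas filled with the absorbing state; every scan step
# below fills in one more variable, and the whole chain of intermediate states is
# recorded.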
chain.append(x)
def next_sample_step(x, t):
x = self.sample_step(
jax.random.fold_in(rng, t), x,
t, orders, params, context)
return x, x
ts = jnp.arange(self.num_steps)
_, chain = jax.lax.scan(next_sample_step, init=x, xs=ts)
return chain
def get_naive_policy(self, budget=250):
assert budget <= self.num_steps
# We use budget+1 because a linspace contains the last step.
naive_policy = ardm_utils.integer_linspace(0, self.num_steps, budget+1)
# Last index does not need to be in policy.
naive_policy = naive_policy[:-1]
return naive_policy
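# Editor's illustration (not part of the original module, and assuming
# ardm_utils.integer_linspace behaves like an integer-rounded np.linspace):
# with num_steps=8 and budget=4, integer_linspace(0, 8, 5) gives
# [0, 2, 4, 6, 8]; dropping the final entry leaves the policy [0, 2, 4, 6],
# i.e. four sampling steps that each generate a quarter of the variables in
# parallel.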
def sample_with_naive_policy(self,
rng,
params,
batch_size,
budget=250):
logging.info('Sampling with naive policy.')
naive_policy = self.get_naive_policy(budget)
return self.sample_with_policy(rng, params, batch_size, naive_policy)
def sample_with_policy(self, rng, params, batch_size, policy):
"""Wrapper for p_sample_with_policy that takes care of unsharding."""
logging.info('Sampling from model (quickly)...')
chain_sharded = self.p_sample_with_policy(rng, params, batch_size, policy)
chain = chain_sharded.reshape(
chain_sharded.shape[0], batch_size, *chain_sharded.shape[3:])
return chain
@functools.partial(jax.pmap, in_axes=(None, None, 0, None, None),
out_axes=1,
static_broadcasted_argnums=(0, 3), axis_name='batch')
def p_sample_with_policy(self, rng, params, batch_size, policy):
"""Samples from the model, calls sample_step for every policy step."""
rng = jax.random.fold_in(rng, jax.lax.axis_index('batch'))
assert batch_size % jax.local_device_count() == 0
per_device_batch_size = batch_size // jax.local_device_count()
rng, rng_perm = jax.random.split(rng)
sigmas = ardm_utils.get_batch_permutations(rng_perm, per_device_batch_size,
self.num_steps)
policy_extended = jnp.concatenate(
[policy, jnp.array([self.num_steps], dtype=jnp.int32)], axis=0)
x = jnp.full((per_device_batch_size, *self.config.data_shape),
fill_value=self.absorbing_state,
dtype=jnp.int32)
def next_sample_step(x, idx):
left_t = policy_extended[idx]
right_t = policy_extended[idx + 1]
x = self.sample_step_with_policy(
jax.random.fold_in(rng, idx), x, left_t, right_t, sigmas, params)
return x, x
x, chain = jax.lax.scan(next_sample_step, x, jnp.arange(len(policy)))
return chain
def sample_step_with_policy(self, rng, x, left_t, right_t, sigmas, params):
"""Sampling code for a single step starting at left_t until right_t."""
batch_size = x.shape[0]
left_t = jnp.full(batch_size, fill_value=left_t)
right_t = jnp.full(batch_size, fill_value=right_t)
prev_selection, current_selection = ardm_utils.get_selections_for_sigma_and_range(
sigmas, left_t, right_t, self.config.data_shape)
params_px = self.apply_fn(
{'params': params},
x, left_t, prev_selection, train=False)
new_x = self.sample_fn(rng, params_px)
x = (1 - current_selection) * x + current_selection * new_x
x = jnp.asarray(x, jnp.int32)
return x
def sample_step(self, rng, x, t, sigmas, params, context):
"""Sampling code for a single step t."""
batch_size = x.shape[0]
t_batch = jnp.full(batch_size, fill_value=t)
prev_selection, current_selection = ardm_utils.get_selection_for_sigma_and_t(
sigmas, t_batch, self.config.data_shape)
if self.random_order:
permutations = sigmas
else:
permutations = None
params_px = self.apply_fn(
{'params': params},
x, t_batch, prev_selection, train=False, context=context,
permutations=permutations)
new_x = self.sample_fn(rng, params_px)
x = (1 - current_selection) * x + current_selection * new_x
x = jnp.asarray(x, jnp.int32)
return x
def init_architecture(self, init_rng, tmp_x, tmp_t, context=None):
tmp_mask = None
if context is None:
return self.neural_net.init(init_rng, tmp_x, tmp_t, tmp_mask, train=False)
else:
return self.neural_net.init(init_rng, tmp_x, tmp_t, tmp_mask,
train=False, context=context)
@classmethod
def create(cls, config, get_architecture, random_order):
"""Creates a new instance with `step=0` and initialized `opt_state`."""
required_num_outputs = config.num_classes
num_steps = int(np.prod(config.data_shape))
# We set num_steps=0 since this disables time conditioning, which is not
# necessary for ARMs.
neural_net = get_architecture(
config.num_classes, required_num_outputs, num_steps=0, is_causal=True)
out_dist = distributions.SoftmaxCategorical(config.data_shape[-1],
config.num_classes)
return cls(
config,
apply_fn=neural_net.apply,
logprob_fn=out_dist.log_prob,
sample_fn=out_dist.sample,
neural_net=neural_net,
num_steps=num_steps,
random_order=random_order
)
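# Minimal usage sketch (editor's addition, not part of the original file;
# `config`, `get_network` and `x_batch` are placeholders for objects supplied
# by the surrounding training code):
#
#   model = ARM.create(config, get_architecture=get_network, random_order=False)
#   rng = jax.random.PRNGKey(0)
#   tmp_x = jnp.zeros((1, *config.data_shape), dtype=jnp.int32)
#   variables = model.init_architecture(rng, tmp_x, tmp_t=None)
#   log_px, _, neg_acc, _ = model.log_px(
#       rng, variables['params'], x_batch, train=False)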
|
[
"autoregressive_diffusion.utils.util_fns.sum_except_batch",
"absl.logging.info",
"jax.lax.axis_index",
"autoregressive_diffusion.model.autoregressive_diffusion.ardm_utils.get_batch_permutations",
"autoregressive_diffusion.model.autoregressive_diffusion.ardm_utils.get_selections_for_sigma_and_range",
"jax.numpy.repeat",
"numpy.prod",
"jax.local_device_count",
"jax.random.fold_in",
"jax.numpy.argmax",
"jax.numpy.full",
"autoregressive_diffusion.model.distributions.SoftmaxCategorical",
"functools.partial",
"autoregressive_diffusion.model.autoregressive_diffusion.ardm_utils.integer_linspace",
"jax.lax.scan",
"jax.numpy.asarray",
"jax.numpy.zeros",
"jax.numpy.array",
"flax.linen.log_softmax",
"numpy.log",
"flax.training.common_utils.onehot",
"jax.numpy.arange",
"jax.random.split",
"autoregressive_diffusion.model.autoregressive_diffusion.ardm_utils.get_selection_for_sigma_and_t"
] |
[((1649, 1689), 'flax.training.common_utils.onehot', 'common_utils.onehot', (['targets', 'vocab_size'], {}), '(targets, vocab_size)\n', (1668, 1689), False, 'from flax.training import common_utils\n'), ((1766, 1792), 'numpy.prod', 'np.prod', (['targets.shape[1:]'], {}), '(targets.shape[1:])\n', (1773, 1792), True, 'import numpy as np\n'), ((2517, 2547), 'autoregressive_diffusion.utils.util_fns.sum_except_batch', 'util_fns.sum_except_batch', (['acc'], {}), '(acc)\n', (2542, 2547), False, 'from autoregressive_diffusion.utils import util_fns\n'), ((4418, 4549), 'functools.partial', 'functools.partial', (['jax.pmap'], {'in_axes': '(None, None, 0, None, 0)', 'out_axes': '(1)', 'static_broadcasted_argnums': '(0, 3)', 'axis_name': '"""batch"""'}), "(jax.pmap, in_axes=(None, None, 0, None, 0), out_axes=1,\n static_broadcasted_argnums=(0, 3), axis_name='batch')\n", (4435, 4549), False, 'import functools\n'), ((6930, 7064), 'functools.partial', 'functools.partial', (['jax.pmap'], {'in_axes': '(None, None, 0, None, None)', 'out_axes': '(1)', 'static_broadcasted_argnums': '(0, 3)', 'axis_name': '"""batch"""'}), "(jax.pmap, in_axes=(None, None, 0, None, None), out_axes=1,\n static_broadcasted_argnums=(0, 3), axis_name='batch')\n", (6947, 7064), False, 'import functools\n'), ((1841, 1850), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1847, 1850), True, 'import numpy as np\n'), ((2420, 2447), 'numpy.prod', 'np.prod', (['logits.shape[1:-1]'], {}), '(logits.shape[1:-1])\n', (2427, 2447), True, 'import numpy as np\n'), ((3892, 3933), 'jax.numpy.zeros', 'jnp.zeros', (['(batch_size,)'], {'dtype': 'jnp.int32'}), '((batch_size,), dtype=jnp.int32)\n', (3901, 3933), True, 'import jax.numpy as jnp\n'), ((3962, 3986), 'jax.numpy.zeros', 'jnp.zeros', (['(batch_size,)'], {}), '((batch_size,))\n', (3971, 3986), True, 'import jax.numpy as jnp\n'), ((4905, 4965), 'absl.logging.info', 'logging.info', (['"""Sampling from model, hope you are patient..."""'], {}), "('Sampling from model, hope you are patient...')\n", (4917, 4965), False, 'from absl import logging\n'), ((5394, 5507), 'jax.numpy.full', 'jnp.full', (['(per_device_batch_size, *self.config.data_shape)'], {'fill_value': 'self.absorbing_state', 'dtype': 'jnp.int32'}), '((per_device_batch_size, *self.config.data_shape), fill_value=self.\n absorbing_state, dtype=jnp.int32)\n', (5402, 5507), True, 'import jax.numpy as jnp\n'), ((5725, 5751), 'jax.numpy.arange', 'jnp.arange', (['self.num_steps'], {}), '(self.num_steps)\n', (5735, 5751), True, 'import jax.numpy as jnp\n'), ((5767, 5812), 'jax.lax.scan', 'jax.lax.scan', (['next_sample_step'], {'init': 'x', 'xs': 'ts'}), '(next_sample_step, init=x, xs=ts)\n', (5779, 5812), False, 'import jax\n'), ((5994, 6052), 'autoregressive_diffusion.model.autoregressive_diffusion.ardm_utils.integer_linspace', 'ardm_utils.integer_linspace', (['(0)', 'self.num_steps', '(budget + 1)'], {}), '(0, self.num_steps, budget + 1)\n', (6021, 6052), False, 'from autoregressive_diffusion.model.autoregressive_diffusion import ardm_utils\n'), ((6365, 6408), 'absl.logging.info', 'logging.info', (['"""Sampling with naive policy."""'], {}), "('Sampling with naive policy.')\n", (6377, 6408), False, 'from absl import logging\n'), ((6676, 6724), 'absl.logging.info', 'logging.info', (['"""Sampling from model (quickly)..."""'], {}), "('Sampling from model (quickly)...')\n", (6688, 6724), False, 'from absl import logging\n'), ((7451, 7472), 'jax.random.split', 'jax.random.split', (['rng'], {}), '(rng)\n', (7467, 7472), False, 'import jax\n'), ((7486, 
7573), 'autoregressive_diffusion.model.autoregressive_diffusion.ardm_utils.get_batch_permutations', 'ardm_utils.get_batch_permutations', (['rng_perm', 'per_device_batch_size', 'self.num_steps'], {}), '(rng_perm, per_device_batch_size, self.\n num_steps)\n', (7519, 7573), False, 'from autoregressive_diffusion.model.autoregressive_diffusion import ardm_utils\n'), ((7737, 7850), 'jax.numpy.full', 'jnp.full', (['(per_device_batch_size, *self.config.data_shape)'], {'fill_value': 'self.absorbing_state', 'dtype': 'jnp.int32'}), '((per_device_batch_size, *self.config.data_shape), fill_value=self.\n absorbing_state, dtype=jnp.int32)\n', (7745, 7850), True, 'import jax.numpy as jnp\n'), ((8414, 8453), 'jax.numpy.full', 'jnp.full', (['batch_size'], {'fill_value': 'left_t'}), '(batch_size, fill_value=left_t)\n', (8422, 8453), True, 'import jax.numpy as jnp\n'), ((8468, 8508), 'jax.numpy.full', 'jnp.full', (['batch_size'], {'fill_value': 'right_t'}), '(batch_size, fill_value=right_t)\n', (8476, 8508), True, 'import jax.numpy as jnp\n'), ((8550, 8649), 'autoregressive_diffusion.model.autoregressive_diffusion.ardm_utils.get_selections_for_sigma_and_range', 'ardm_utils.get_selections_for_sigma_and_range', (['sigmas', 'left_t', 'right_t', 'self.config.data_shape'], {}), '(sigmas, left_t, right_t, self\n .config.data_shape)\n', (8595, 8649), False, 'from autoregressive_diffusion.model.autoregressive_diffusion import ardm_utils\n'), ((8878, 8903), 'jax.numpy.asarray', 'jnp.asarray', (['x', 'jnp.int32'], {}), '(x, jnp.int32)\n', (8889, 8903), True, 'import jax.numpy as jnp\n'), ((9066, 9100), 'jax.numpy.full', 'jnp.full', (['batch_size'], {'fill_value': 't'}), '(batch_size, fill_value=t)\n', (9074, 9100), True, 'import jax.numpy as jnp\n'), ((9142, 9228), 'autoregressive_diffusion.model.autoregressive_diffusion.ardm_utils.get_selection_for_sigma_and_t', 'ardm_utils.get_selection_for_sigma_and_t', (['sigmas', 't_batch', 'self.config.data_shape'], {}), '(sigmas, t_batch, self.config.\n data_shape)\n', (9182, 9228), False, 'from autoregressive_diffusion.model.autoregressive_diffusion import ardm_utils\n'), ((9601, 9626), 'jax.numpy.asarray', 'jnp.asarray', (['x', 'jnp.int32'], {}), '(x, jnp.int32)\n', (9612, 9626), True, 'import jax.numpy as jnp\n'), ((10457, 10532), 'autoregressive_diffusion.model.distributions.SoftmaxCategorical', 'distributions.SoftmaxCategorical', (['config.data_shape[-1]', 'config.num_classes'], {}), '(config.data_shape[-1], config.num_classes)\n', (10489, 10532), False, 'from autoregressive_diffusion.model import distributions\n'), ((1803, 1834), 'autoregressive_diffusion.utils.util_fns.sum_except_batch', 'util_fns.sum_except_batch', (['loss'], {}), '(loss)\n', (1828, 1834), False, 'from autoregressive_diffusion.utils import util_fns\n'), ((2467, 2494), 'jax.numpy.argmax', 'jnp.argmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (2477, 2494), True, 'import jax.numpy as jnp\n'), ((3092, 3158), 'absl.logging.info', 'logging.info', (['"""Log-likelihood for a random-order ARM XLNet style."""'], {}), "('Log-likelihood for a random-order ARM XLNet style.')\n", (3104, 3158), False, 'from absl import logging\n'), ((3181, 3202), 'jax.random.split', 'jax.random.split', (['rng'], {}), '(rng)\n', (3197, 3202), False, 'import jax\n'), ((3224, 3295), 'autoregressive_diffusion.model.autoregressive_diffusion.ardm_utils.get_batch_permutations', 'ardm_utils.get_batch_permutations', (['rng_perm', 'batch_size', 'self.num_steps'], {}), '(rng_perm, batch_size, self.num_steps)\n', (3257, 3295), 
False, 'from autoregressive_diffusion.model.autoregressive_diffusion import ardm_utils\n'), ((3367, 3417), 'absl.logging.info', 'logging.info', (['"""Log-likelihood for a standard ARM."""'], {}), "('Log-likelihood for a standard ARM.')\n", (3379, 3417), False, 'from absl import logging\n'), ((3654, 3682), 'numpy.prod', 'np.prod', (['net_out.shape[1:-1]'], {}), '(net_out.shape[1:-1])\n', (3661, 3682), True, 'import numpy as np\n'), ((3802, 3811), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (3808, 3811), True, 'import numpy as np\n'), ((4750, 4777), 'jax.lax.axis_index', 'jax.lax.axis_index', (['"""batch"""'], {}), "('batch')\n", (4768, 4777), False, 'import jax\n'), ((4876, 4900), 'jax.local_device_count', 'jax.local_device_count', ([], {}), '()\n', (4898, 4900), False, 'import jax\n'), ((5015, 5036), 'jax.random.split', 'jax.random.split', (['rng'], {}), '(rng)\n', (5031, 5036), False, 'import jax\n'), ((5052, 5139), 'autoregressive_diffusion.model.autoregressive_diffusion.ardm_utils.get_batch_permutations', 'ardm_utils.get_batch_permutations', (['rng_perm', 'per_device_batch_size', 'self.num_steps'], {}), '(rng_perm, per_device_batch_size, self.\n num_steps)\n', (5085, 5139), False, 'from autoregressive_diffusion.model.autoregressive_diffusion import ardm_utils\n'), ((5312, 5369), 'jax.numpy.repeat', 'jnp.repeat', (['orders'], {'repeats': 'per_device_batch_size', 'axis': '(0)'}), '(orders, repeats=per_device_batch_size, axis=0)\n', (5322, 5369), True, 'import jax.numpy as jnp\n'), ((7279, 7306), 'jax.lax.axis_index', 'jax.lax.axis_index', (['"""batch"""'], {}), "('batch')\n", (7297, 7306), False, 'import jax\n'), ((7405, 7429), 'jax.local_device_count', 'jax.local_device_count', ([], {}), '()\n', (7427, 7429), False, 'import jax\n'), ((10195, 10221), 'numpy.prod', 'np.prod', (['config.data_shape'], {}), '(config.data_shape)\n', (10202, 10221), True, 'import numpy as np\n'), ((1726, 1748), 'flax.linen.log_softmax', 'nn.log_softmax', (['logits'], {}), '(logits)\n', (1740, 1748), True, 'from flax import linen as nn\n'), ((4804, 4828), 'jax.local_device_count', 'jax.local_device_count', ([], {}), '()\n', (4826, 4828), False, 'import jax\n'), ((5258, 5287), 'jax.numpy.arange', 'jnp.arange', (['(0)', 'self.num_steps'], {}), '(0, self.num_steps)\n', (5268, 5287), True, 'import jax.numpy as jnp\n'), ((5628, 5654), 'jax.random.fold_in', 'jax.random.fold_in', (['rng', 't'], {}), '(rng, t)\n', (5646, 5654), False, 'import jax\n'), ((7333, 7357), 'jax.local_device_count', 'jax.local_device_count', ([], {}), '()\n', (7355, 7357), False, 'import jax\n'), ((7673, 7717), 'jax.numpy.array', 'jnp.array', (['[self.num_steps]'], {'dtype': 'jnp.int32'}), '([self.num_steps], dtype=jnp.int32)\n', (7682, 7717), True, 'import jax.numpy as jnp\n'), ((8042, 8070), 'jax.random.fold_in', 'jax.random.fold_in', (['rng', 'idx'], {}), '(rng, idx)\n', (8060, 8070), False, 'import jax\n')]
|
"""
Copyright (c) 2021, Electric Power Research Institute
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of DER-VET nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
MarketServiceUpAndDown.py
This Python class contains methods and attributes that help model market
services that provide service through discharging more OR charging less
relative to the power set points.
"""
from storagevet.ValueStreams.ValueStream import ValueStream
import cvxpy as cvx
import pandas as pd
import numpy as np
import storagevet.Library as Lib
class MarketServiceUpAndDown(ValueStream):
""" A market service that can provide services in the "up" and "down"
directions
"""
def __init__(self, name, full_name, params):
""" Generates the objective function, finds and creates constraints.
Args:
name (str): abbreviated name
full_name (str): the expanded name of the service
params (Dict): input parameters
"""
ValueStream.__init__(self, name, params)
self.full_name = full_name
self.combined_market = params['CombinedMarket']
self.duration = params['duration']
self.energy_growth = params['energyprice_growth']/100
self.eod_avg = params['eod']
self.eou_avg = params['eou']
self.growth = params['growth']/100
self.price_down = params['regd_price']
self.price_up = params['regu_price']
self.price_energy = params['energy_price']
self.variable_names = {'up_ch', 'up_dis', 'down_ch', 'down_dis'}
self.variables_df = pd.DataFrame(columns=self.variable_names)
def grow_drop_data(self, years, frequency, load_growth):
""" Adds data by growing the given data OR drops any extra data that
might have slipped in. Update variable that hold timeseries data
after adding growth data. These method should be called after
add_growth_data and before the optimization is run.
Args:
years (List): list of years for which analysis will occur on
frequency (str): period frequency of the timeseries data
load_growth (float): percent/ decimal value of the growth rate of
loads in this simulation
"""
self.price_energy = Lib.fill_extra_data(self.price_energy, years,
self.energy_growth, frequency)
self.price_energy = Lib.drop_extra_data(self.price_energy, years)
self.price_up = Lib.fill_extra_data(self.price_up, years,
self.growth, frequency)
self.price_up = Lib.drop_extra_data(self.price_up, years)
self.price_down = Lib.fill_extra_data(self.price_down, years,
self.growth, frequency)
self.price_down = Lib.drop_extra_data(self.price_down, years)
def initialize_variables(self, size):
""" Updates the optimization variable attribute with new optimization
variables of size SIZE
Variables added:
up_ch (Variable): A cvxpy variable for freq regulation capacity to
increase charging power
down_ch (Variable): A cvxpy variable for freq regulation capacity to
decrease charging power
up_dis (Variable): A cvxpy variable for freq regulation capacity to
increase discharging power
down_dis (Variable): A cvxpy variable for freq regulation capacity to
decrease discharging power
Args:
size (Int): Length of optimization variables to create
Returns:
Dictionary of optimization variables
"""
self.variables = {
'up_ch': cvx.Variable(shape=size, name=f'{self.name}_up_c'),
'down_ch': cvx.Variable(shape=size, name=f'{self.name}_regd_c'),
'up_dis': cvx.Variable(shape=size, name=f'{self.name}_up_dis'),
'down_dis': cvx.Variable(shape=size, name=f'{self.name}_regd_d')
}
def objective_function(self, mask, load_sum, tot_variable_gen,
generator_out_sum, net_ess_power, annuity_scalar=1):
""" Generates the full objective function, including the optimization
variables.
Args:
mask (DataFrame): A boolean array that is true for indices
corresponding to time_series data included in the subs data set
tot_variable_gen (Expression): the sum of the variable/intermittent
generation sources
load_sum (list, Expression): the sum of load within the system
generator_out_sum (list, Expression): the sum of conventional
generation within the system
net_ess_power (list, Expression): the sum of the net power of all
the ESS in the system. [= charge - discharge]
annuity_scalar (float): a scalar value to be multiplied by any
yearly cost or benefit that helps capture the cost/benefit over
the entire project lifetime (only to be set iff sizing)
Returns:
A dictionary with the portion of the objective function that it
affects, labeled by the expression's key. Default is {}.
"""
# pay for reg down energy, get paid for reg up energy
# paid revenue for capacity to do both
size = sum(mask)
p_regu = cvx.Parameter(size, value=self.price_up.loc[mask].values,
name=f'{self.name}_p_regu')
p_regd = cvx.Parameter(size, value=self.price_down.loc[mask].values,
name=f'{self.name}_p_regd')
p_ene = cvx.Parameter(size, value=self.price_energy.loc[mask].values,
name=f'{self.name}_price')
eou = self.get_energy_option_up(mask)
eod = self.get_energy_option_down(mask)
# REGULATION DOWN: PAYMENT
regdown_disch_payment \
= cvx.sum(self.variables['down_dis'] * -p_regd) * annuity_scalar
regdown_charge_payment \
= cvx.sum(self.variables['down_ch'] * -p_regd) * annuity_scalar
reg_down_tot = regdown_charge_payment + regdown_disch_payment
# REGULATION UP: PAYMENT
regup_disch_payment \
= cvx.sum(self.variables['up_dis'] * -p_regu) * annuity_scalar
regup_charge_payment \
= cvx.sum(self.variables['up_ch'] * -p_regu) * annuity_scalar
reg_up_tot = regup_charge_payment + regup_disch_payment
# REGULATION UP & DOWN: ENERGY SETTLEMENT
regdown_disch_settlement \
= cvx.sum(cvx.multiply(cvx.multiply(self.variables['down_dis'],
p_ene),
eod)) * self.dt * annuity_scalar
regdown_charge_settlement \
= cvx.sum(cvx.multiply(cvx.multiply(self.variables['down_ch'],
p_ene),
eod)) * self.dt * annuity_scalar
e_settlement = regdown_disch_settlement + regdown_charge_settlement
regup_disch_settlement \
= cvx.sum(cvx.multiply(cvx.multiply(self.variables['up_dis'],
-p_ene),
eou)) * self.dt * annuity_scalar
regup_charge_settlement \
= cvx.sum(cvx.multiply(cvx.multiply(self.variables['up_ch'],
-p_ene),
eou)) * self.dt * annuity_scalar
e_settlement += regup_disch_settlement + regup_charge_settlement
return {f'{self.name}_regup_prof': reg_up_tot,
f'{self.name}_regdown_prof': reg_down_tot,
f'{self.name}_energy_settlement': e_settlement}
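# Editor's note on sign conventions (not in the original source): with
# non-negative prices, the capacity payments above enter the minimized
# objective through -p_regu and -p_regd, i.e. as revenue. Per timestep, the
# energy settlement adds roughly dt * eod * price_energy * (down_dis + down_ch)
# as a cost for energy consumed while regulating down, and subtracts
# dt * eou * price_energy * (up_dis + up_ch) for energy delivered while
# regulating up.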
def get_energy_option_up(self, mask):
""" transform the energy option up into a n x 1 vector
Args:
mask:
Returns: a CVXPY vector
"""
return cvx.promote(self.eou_avg, mask.loc[mask].shape)
def get_energy_option_down(self, mask):
""" transform the energy option down into a n x 1 vector
Args:
mask:
Returns: a CVXPY vector
"""
return cvx.promote(self.eod_avg, mask.loc[mask].shape)
def constraints(self, mask, load_sum, tot_variable_gen, generator_out_sum,
net_ess_power, combined_rating):
"""build constraint list method for the optimization engine
Args:
mask (DataFrame): A boolean array that is true for indices
corresponding to time_series data included in the subs data set
tot_variable_gen (Expression): the sum of the variable/intermittent
generation sources
load_sum (list, Expression): the sum of load within the system
generator_out_sum (list, Expression): the sum of conventional
generation within the system
net_ess_power (list, Expression): the sum of the net power of all
the ESS in the system. flow out into the grid is negative
combined_rating (Dictionary): the combined rating of each DER class
type
Returns:
A list of constraints for the optimization variables added to
the system of equations
"""
constraint_list = []
constraint_list += [cvx.NonPos(-self.variables['up_ch'])]
constraint_list += [cvx.NonPos(-self.variables['down_ch'])]
constraint_list += [cvx.NonPos(-self.variables['up_dis'])]
constraint_list += [cvx.NonPos(-self.variables['down_dis'])]
if self.combined_market:
constraint_list += [
cvx.Zero(self.variables['down_dis'] + self.variables['down_ch'] -
self.variables['up_dis'] - self.variables['up_ch'])
]
return constraint_list
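# Editor's note (not in the original source): the NonPos constraints above keep
# all four capacity variables non-negative, and when CombinedMarket is True the
# Zero constraint forces total down capacity (down_ch + down_dis) to equal
# total up capacity (up_ch + up_dis) in every timestep.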
def p_reservation_charge_up(self, mask):
""" the amount of charging power in the up direction (supplying power
up into the grid) that needs to be reserved for this value stream
Args:
mask (DataFrame): A boolean array that is true for indices
corresponding to time_series data included in the subs data set
Returns: CVXPY parameter/variable
"""
return self.variables['up_ch']
def p_reservation_charge_down(self, mask):
""" the amount of charging power in the up direction (pulling power
down from the grid) that needs to be reserved for this value stream
Args:
mask (DataFrame): A boolean array that is true for indices
corresponding to time_series data included in the subs data set
Returns: CVXPY parameter/variable
"""
return self.variables['down_ch']
def p_reservation_discharge_up(self, mask):
""" the amount of charging power in the up direction (supplying power
up into the grid) that needs to be reserved for this value stream
Args:
mask (DataFrame): A boolean array that is true for indices
corresponding to time_series data included in the subs data set
Returns: CVXPY parameter/variable
"""
return self.variables['up_dis']
def p_reservation_discharge_down(self, mask):
""" the amount of charging power in the up direction (pulling power
down from the grid) that needs to be reserved for this value stream
Args:
mask (DataFrame): A boolean array that is true for indices
corresponding to time_series data included in the subs data set
Returns: CVXPY parameter/variable
"""
return self.variables['down_dis']
def uenergy_option_stored(self, mask):
""" the deviation in energy due to changes in charge
Args:
mask (DataFrame): A boolean array that is true for indices
corresponding to time_series data included in the subs data set
Returns:
"""
eou = self.get_energy_option_up(mask)
eod = self.get_energy_option_down(mask)
e_ch_less = cvx.multiply(self.variables['up_ch'], eou) * self.dt
e_ch_more = cvx.multiply(self.variables['down_ch'], eod) * self.dt
return e_ch_less - e_ch_more
def uenergy_option_provided(self, mask):
""" the deviation in energy due to changes in discharge
Args:
mask (DataFrame): A boolean array that is true for indices
corresponding to time_series data included in the subs data set
Returns:
"""
eou = self.get_energy_option_up(mask)
eod = self.get_energy_option_down(mask)
e_dis_less = cvx.multiply(self.variables['down_dis'], eod) * self.dt
e_dis_more = cvx.multiply(self.variables['up_dis'], eou) * self.dt
return e_dis_more - e_dis_less
def worst_case_uenergy_stored(self, mask):
""" the amount of energy, from the current SOE that needs to be
reserved for this value stream to prevent any violations between the
steps in time that are not captured in our timeseries.
Note: stored energy should be positive and provided energy should be
negative
Args:
mask (DataFrame): A boolean array that is true for indices
corresponding to time_series data included in the subs data set
Returns: the amount of extra energy (positive) the systems could end up
storing, relative to the expected state of energy, due to participating
in the down direction of this market service
"""
stored \
= self.variables['down_ch'] * self.duration \
+ self.variables['down_dis'] * self.duration
return stored
def worst_case_uenergy_provided(self, mask):
""" the amount of energy, from the current SOE that needs to be
reserved for this value stream to prevent any violations between the
steps in time that are not captured in our timeseries.
Note: stored energy should be positive and provided energy should be
negative
Args:
mask (DataFrame): A boolean array that is true for indices
corresponding to time_series data included in the subs data set
Returns: the amount of energy (negative) the systems could end up
providing beyond the expected state of energy, due to participating in
the up direction of this market service
"""
provided \
= self.variables['up_ch'] * -self.duration \
+ self.variables['up_dis'] * -self.duration
return provided
def timeseries_report(self):
""" Summaries the optimization results for this Value Stream.
Returns: A timeseries dataframe with user-friendly column headers that
summarize the results pertaining to this instance
"""
report = pd.DataFrame(index=self.price_energy.index)
# GIVEN
report.loc[:, f"{self.name} Up Price ($/kW)"] \
= self.price_up
report.loc[:, f"{self.name} Down Price ($/kW)"] \
= self.price_down
report.loc[:, f"{self.name} Energy Settlement Price ($/kWh)"] = \
self.price_energy
# OPTIMIZATION VARIABLES
report.loc[:, f'{self.full_name} Down (Charging) (kW)'] \
= self.variables_df['down_ch']
report.loc[:, f'{self.full_name} Down (Discharging) (kW)'] \
= self.variables_df['down_dis']
report.loc[:, f'{self.full_name} Up (Charging) (kW)'] \
= self.variables_df['up_ch']
report.loc[:, f'{self.full_name} Up (Discharging) (kW)'] \
= self.variables_df['up_dis']
# CALCULATED EXPRESSIONS (ENERGY THROUGH-PUTS)
e_thru_down_dis = np.multiply(self.eod_avg,
self.variables_df['down_dis']) * self.dt
e_thru_down_ch = np.multiply(self.eod_avg,
self.variables_df['down_ch']) * self.dt
e_thru_up_dis = -np.multiply(self.eou_avg,
self.variables_df['up_dis']) * self.dt
e_thru_up_ch = -np.multiply(self.eou_avg,
self.variables_df['up_ch']) * self.dt
uenergy_down = e_thru_down_dis + e_thru_down_ch
uenergy_up = e_thru_up_dis + e_thru_up_ch
column_start = f"{self.name} Energy Throughput"
report.loc[:, f"{column_start} (kWh)"] = uenergy_down + uenergy_up
report.loc[:, f"{column_start} Up (Charging) (kWh)"] = e_thru_up_ch
report.loc[:, f"{column_start} Up (Discharging) (kWh)"] = e_thru_up_dis
report.loc[:, f"{column_start} Down (Charging) (kWh)"] = e_thru_down_ch
report.loc[:, f"{column_start} Down (Discharging) (kWh)"] \
= e_thru_down_dis
return report
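# Editor's note (not in the original source): in the report above, the "Up"
# energy-throughput columns are non-positive (energy delivered to the grid
# while regulating up) and the "Down" columns are non-negative (energy absorbed
# while regulating down), so the total throughput column is their signed sum.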
def proforma_report(self, opt_years, apply_inflation_rate_func, fill_forward_func, results):
""" Calculates the proforma that corresponds to participation in this value stream
Args:
opt_years (list): list of years the optimization problem ran for
apply_inflation_rate_func:
fill_forward_func:
results (pd.DataFrame): DataFrame with all the optimization variable solutions
Returns: A DataFrame with each year in opt_years as the
index and the corresponding value this stream provided
"""
proforma = super().proforma_report(opt_years, apply_inflation_rate_func,
fill_forward_func, results)
pref = self.full_name
reg_up =\
results.loc[:, f'{pref} Up (Charging) (kW)'] \
+ results.loc[:, f'{pref} Up (Discharging) (kW)']
regulation_up_prof = np.multiply(reg_up, self.price_up)
reg_down = \
results.loc[:, f'{pref} Down (Charging) (kW)'] \
+ results.loc[:, f'{pref} Down (Discharging) (kW)']
regulation_down_prof = np.multiply(reg_down, self.price_down)
energy_throughput \
= results.loc[:, f"{self.name} Energy Throughput (kWh)"]
energy_through_prof = np.multiply(energy_throughput, self.price_energy)
# combine all potential value streams into one df for faster
# splicing into years
fr_results = pd.DataFrame({'E': energy_through_prof,
'RU': regulation_up_prof,
'RD': regulation_down_prof},
index=results.index)
market_results_only = proforma.copy(deep=True)
for year in opt_years:
year_subset = fr_results[fr_results.index.year == year]
yr_pd = pd.Period(year=year, freq='y')
proforma.loc[yr_pd, f'{self.name} Energy Throughput'] \
= -year_subset['E'].sum()
market_results_only.loc[yr_pd, f'{pref} Up'] \
= year_subset['RU'].sum()
market_results_only.loc[yr_pd, f'{pref} Down'] \
= year_subset['RD'].sum()
# forward fill growth columns with inflation at their corresponding growth rates
market_results_only = fill_forward_func(market_results_only, self.growth)
proforma = fill_forward_func(proforma, self.energy_growth)
# concat the two together
proforma = pd.concat([proforma, market_results_only], axis=1)
return proforma
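# Minimal construction sketch (editor's addition; the index, prices and
# parameter values are placeholders, not values from the original project, and
# ValueStream.__init__ may require additional keys such as 'dt'):
#
#   idx = pd.date_range('2020-01-01', periods=4, freq='H')
#   params = {'CombinedMarket': False, 'duration': 0.25,
#             'energyprice_growth': 2, 'eod': 0.3, 'eou': 0.3, 'growth': 2,
#             'regd_price': pd.Series(5.0, index=idx),
#             'regu_price': pd.Series(6.0, index=idx),
#             'energy_price': pd.Series(30.0, index=idx)}
#   fr = MarketServiceUpAndDown('FR', 'Frequency Regulation', params)
#   fr.initialize_variables(size=len(idx))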
|
[
"pandas.DataFrame",
"numpy.multiply",
"cvxpy.Parameter",
"cvxpy.multiply",
"cvxpy.Zero",
"storagevet.Library.drop_extra_data",
"storagevet.ValueStreams.ValueStream.ValueStream.__init__",
"cvxpy.promote",
"cvxpy.NonPos",
"cvxpy.sum",
"storagevet.Library.fill_extra_data",
"pandas.Period",
"cvxpy.Variable",
"pandas.concat"
] |
[((2376, 2416), 'storagevet.ValueStreams.ValueStream.ValueStream.__init__', 'ValueStream.__init__', (['self', 'name', 'params'], {}), '(self, name, params)\n', (2396, 2416), False, 'from storagevet.ValueStreams.ValueStream import ValueStream\n'), ((2974, 3015), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'self.variable_names'}), '(columns=self.variable_names)\n', (2986, 3015), True, 'import pandas as pd\n'), ((3675, 3751), 'storagevet.Library.fill_extra_data', 'Lib.fill_extra_data', (['self.price_energy', 'years', 'self.energy_growth', 'frequency'], {}), '(self.price_energy, years, self.energy_growth, frequency)\n', (3694, 3751), True, 'import storagevet.Library as Lib\n'), ((3828, 3873), 'storagevet.Library.drop_extra_data', 'Lib.drop_extra_data', (['self.price_energy', 'years'], {}), '(self.price_energy, years)\n', (3847, 3873), True, 'import storagevet.Library as Lib\n'), ((3899, 3964), 'storagevet.Library.fill_extra_data', 'Lib.fill_extra_data', (['self.price_up', 'years', 'self.growth', 'frequency'], {}), '(self.price_up, years, self.growth, frequency)\n', (3918, 3964), True, 'import storagevet.Library as Lib\n'), ((4033, 4074), 'storagevet.Library.drop_extra_data', 'Lib.drop_extra_data', (['self.price_up', 'years'], {}), '(self.price_up, years)\n', (4052, 4074), True, 'import storagevet.Library as Lib\n'), ((4102, 4169), 'storagevet.Library.fill_extra_data', 'Lib.fill_extra_data', (['self.price_down', 'years', 'self.growth', 'frequency'], {}), '(self.price_down, years, self.growth, frequency)\n', (4121, 4169), True, 'import storagevet.Library as Lib\n'), ((4242, 4285), 'storagevet.Library.drop_extra_data', 'Lib.drop_extra_data', (['self.price_down', 'years'], {}), '(self.price_down, years)\n', (4261, 4285), True, 'import storagevet.Library as Lib\n'), ((6873, 6963), 'cvxpy.Parameter', 'cvx.Parameter', (['size'], {'value': 'self.price_up.loc[mask].values', 'name': 'f"""{self.name}_p_regu"""'}), "(size, value=self.price_up.loc[mask].values, name=\n f'{self.name}_p_regu')\n", (6886, 6963), True, 'import cvxpy as cvx\n'), ((7007, 7099), 'cvxpy.Parameter', 'cvx.Parameter', (['size'], {'value': 'self.price_down.loc[mask].values', 'name': 'f"""{self.name}_p_regd"""'}), "(size, value=self.price_down.loc[mask].values, name=\n f'{self.name}_p_regd')\n", (7020, 7099), True, 'import cvxpy as cvx\n'), ((7142, 7235), 'cvxpy.Parameter', 'cvx.Parameter', (['size'], {'value': 'self.price_energy.loc[mask].values', 'name': 'f"""{self.name}_price"""'}), "(size, value=self.price_energy.loc[mask].values, name=\n f'{self.name}_price')\n", (7155, 7235), True, 'import cvxpy as cvx\n'), ((9500, 9547), 'cvxpy.promote', 'cvx.promote', (['self.eou_avg', 'mask.loc[mask].shape'], {}), '(self.eou_avg, mask.loc[mask].shape)\n', (9511, 9547), True, 'import cvxpy as cvx\n'), ((9752, 9799), 'cvxpy.promote', 'cvx.promote', (['self.eod_avg', 'mask.loc[mask].shape'], {}), '(self.eod_avg, mask.loc[mask].shape)\n', (9763, 9799), True, 'import cvxpy as cvx\n'), ((16674, 16717), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'self.price_energy.index'}), '(index=self.price_energy.index)\n', (16686, 16717), True, 'import pandas as pd\n'), ((19578, 19612), 'numpy.multiply', 'np.multiply', (['reg_up', 'self.price_up'], {}), '(reg_up, self.price_up)\n', (19589, 19612), True, 'import numpy as np\n'), ((19791, 19829), 'numpy.multiply', 'np.multiply', (['reg_down', 'self.price_down'], {}), '(reg_down, self.price_down)\n', (19802, 19829), True, 'import numpy as np\n'), ((19958, 20007), 'numpy.multiply', 'np.multiply', 
(['energy_throughput', 'self.price_energy'], {}), '(energy_throughput, self.price_energy)\n', (19969, 20007), True, 'import numpy as np\n'), ((20131, 20250), 'pandas.DataFrame', 'pd.DataFrame', (["{'E': energy_through_prof, 'RU': regulation_up_prof, 'RD': regulation_down_prof\n }"], {'index': 'results.index'}), "({'E': energy_through_prof, 'RU': regulation_up_prof, 'RD':\n regulation_down_prof}, index=results.index)\n", (20143, 20250), True, 'import pandas as pd\n'), ((21161, 21211), 'pandas.concat', 'pd.concat', (['[proforma, market_results_only]'], {'axis': '(1)'}), '([proforma, market_results_only], axis=1)\n', (21170, 21211), True, 'import pandas as pd\n'), ((5161, 5211), 'cvxpy.Variable', 'cvx.Variable', ([], {'shape': 'size', 'name': 'f"""{self.name}_up_c"""'}), "(shape=size, name=f'{self.name}_up_c')\n", (5173, 5211), True, 'import cvxpy as cvx\n'), ((5236, 5288), 'cvxpy.Variable', 'cvx.Variable', ([], {'shape': 'size', 'name': 'f"""{self.name}_regd_c"""'}), "(shape=size, name=f'{self.name}_regd_c')\n", (5248, 5288), True, 'import cvxpy as cvx\n'), ((5312, 5364), 'cvxpy.Variable', 'cvx.Variable', ([], {'shape': 'size', 'name': 'f"""{self.name}_up_dis"""'}), "(shape=size, name=f'{self.name}_up_dis')\n", (5324, 5364), True, 'import cvxpy as cvx\n'), ((5390, 5442), 'cvxpy.Variable', 'cvx.Variable', ([], {'shape': 'size', 'name': 'f"""{self.name}_regd_d"""'}), "(shape=size, name=f'{self.name}_regd_d')\n", (5402, 5442), True, 'import cvxpy as cvx\n'), ((7436, 7481), 'cvxpy.sum', 'cvx.sum', (["(self.variables['down_dis'] * -p_regd)"], {}), "(self.variables['down_dis'] * -p_regd)\n", (7443, 7481), True, 'import cvxpy as cvx\n'), ((7546, 7590), 'cvxpy.sum', 'cvx.sum', (["(self.variables['down_ch'] * -p_regd)"], {}), "(self.variables['down_ch'] * -p_regd)\n", (7553, 7590), True, 'import cvxpy as cvx\n'), ((7756, 7799), 'cvxpy.sum', 'cvx.sum', (["(self.variables['up_dis'] * -p_regu)"], {}), "(self.variables['up_dis'] * -p_regu)\n", (7763, 7799), True, 'import cvxpy as cvx\n'), ((7862, 7904), 'cvxpy.sum', 'cvx.sum', (["(self.variables['up_ch'] * -p_regu)"], {}), "(self.variables['up_ch'] * -p_regu)\n", (7869, 7904), True, 'import cvxpy as cvx\n'), ((10927, 10963), 'cvxpy.NonPos', 'cvx.NonPos', (["(-self.variables['up_ch'])"], {}), "(-self.variables['up_ch'])\n", (10937, 10963), True, 'import cvxpy as cvx\n'), ((10993, 11031), 'cvxpy.NonPos', 'cvx.NonPos', (["(-self.variables['down_ch'])"], {}), "(-self.variables['down_ch'])\n", (11003, 11031), True, 'import cvxpy as cvx\n'), ((11061, 11098), 'cvxpy.NonPos', 'cvx.NonPos', (["(-self.variables['up_dis'])"], {}), "(-self.variables['up_dis'])\n", (11071, 11098), True, 'import cvxpy as cvx\n'), ((11128, 11167), 'cvxpy.NonPos', 'cvx.NonPos', (["(-self.variables['down_dis'])"], {}), "(-self.variables['down_dis'])\n", (11138, 11167), True, 'import cvxpy as cvx\n'), ((13708, 13750), 'cvxpy.multiply', 'cvx.multiply', (["self.variables['up_ch']", 'eou'], {}), "(self.variables['up_ch'], eou)\n", (13720, 13750), True, 'import cvxpy as cvx\n'), ((13781, 13825), 'cvxpy.multiply', 'cvx.multiply', (["self.variables['down_ch']", 'eod'], {}), "(self.variables['down_ch'], eod)\n", (13793, 13825), True, 'import cvxpy as cvx\n'), ((14291, 14336), 'cvxpy.multiply', 'cvx.multiply', (["self.variables['down_dis']", 'eod'], {}), "(self.variables['down_dis'], eod)\n", (14303, 14336), True, 'import cvxpy as cvx\n'), ((14368, 14411), 'cvxpy.multiply', 'cvx.multiply', (["self.variables['up_dis']", 'eou'], {}), "(self.variables['up_dis'], eou)\n", (14380, 14411), True, 
'import cvxpy as cvx\n'), ((17562, 17618), 'numpy.multiply', 'np.multiply', (['self.eod_avg', "self.variables_df['down_dis']"], {}), "(self.eod_avg, self.variables_df['down_dis'])\n", (17573, 17618), True, 'import numpy as np\n'), ((17692, 17747), 'numpy.multiply', 'np.multiply', (['self.eod_avg', "self.variables_df['down_ch']"], {}), "(self.eod_avg, self.variables_df['down_ch'])\n", (17703, 17747), True, 'import numpy as np\n'), ((20525, 20555), 'pandas.Period', 'pd.Period', ([], {'year': 'year', 'freq': '"""y"""'}), "(year=year, freq='y')\n", (20534, 20555), True, 'import pandas as pd\n'), ((11251, 11373), 'cvxpy.Zero', 'cvx.Zero', (["(self.variables['down_dis'] + self.variables['down_ch'] - self.variables[\n 'up_dis'] - self.variables['up_ch'])"], {}), "(self.variables['down_dis'] + self.variables['down_ch'] - self.\n variables['up_dis'] - self.variables['up_ch'])\n", (11259, 11373), True, 'import cvxpy as cvx\n'), ((17820, 17874), 'numpy.multiply', 'np.multiply', (['self.eou_avg', "self.variables_df['up_dis']"], {}), "(self.eou_avg, self.variables_df['up_dis'])\n", (17831, 17874), True, 'import numpy as np\n'), ((17946, 17999), 'numpy.multiply', 'np.multiply', (['self.eou_avg', "self.variables_df['up_ch']"], {}), "(self.eou_avg, self.variables_df['up_ch'])\n", (17957, 17999), True, 'import numpy as np\n'), ((8107, 8154), 'cvxpy.multiply', 'cvx.multiply', (["self.variables['down_dis']", 'p_ene'], {}), "(self.variables['down_dis'], p_ene)\n", (8119, 8154), True, 'import cvxpy as cvx\n'), ((8343, 8389), 'cvxpy.multiply', 'cvx.multiply', (["self.variables['down_ch']", 'p_ene'], {}), "(self.variables['down_ch'], p_ene)\n", (8355, 8389), True, 'import cvxpy as cvx\n'), ((8652, 8698), 'cvxpy.multiply', 'cvx.multiply', (["self.variables['up_dis']", '(-p_ene)'], {}), "(self.variables['up_dis'], -p_ene)\n", (8664, 8698), True, 'import cvxpy as cvx\n'), ((8885, 8930), 'cvxpy.multiply', 'cvx.multiply', (["self.variables['up_ch']", '(-p_ene)'], {}), "(self.variables['up_ch'], -p_ene)\n", (8897, 8930), True, 'import cvxpy as cvx\n')]
|
import numpy as np
import tensorflow as tf
# the activation function
def g_1(x):
assert len(x.shape) == 1
rand = tf.random_uniform([x.shape.as_list()[0]], dtype=tf.float32)
t = tf.nn.sigmoid(x) - rand
return 0.5*(1 + t / (tf.abs(t) + 1e-8))
def g_2(x):
return tf.nn.sigmoid(x)
def g(x):
return tf.nn.leaky_relu(x)
def merge(inputs, weights):
assert len(inputs.shape)==1
assert len(weights.shape)==2
inputs = tf.reshape(inputs, [inputs.shape.as_list()[0], 1])
return tf.reshape(tf.matmul(weights, inputs), [weights.shape.as_list()[0]])
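# Editor's worked example (not in the original file): merge() treats `inputs`
# as a column vector, so for weights of shape [2, 3] and an input of length 3
# it returns a length-2 vector, e.g.
# merge(tf.constant([1., 2., 3.]), tf.ones([2, 3])) evaluates to [6., 6.].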
def rand_init(sizes):
assert len(sizes)<=2
if len(sizes)==0:
return np.float32(np.random.rand())
elif len(sizes)==1:
return np.float32(np.random.rand(sizes[0]))
elif len(sizes)==2:
return np.float32(np.random.rand(sizes[0], sizes[1]))
else:
assert False
class RealNN(object):
def __init__(self, feats):
# generate weight variables
self.weights = []
self.biases = []
self.in_dim = feats[0]
self.inputs = tf.placeholder(shape=[self.in_dim], dtype=tf.float32)
self.layers = [self.inputs]
self.before_act = []
self.alpha = 0.0
self.reg = None
self.opt = None
self.loss = None
self.minimizer = None
self.sess = None
for i in range(1, len(feats)):
w = tf.get_variable(initializer=rand_init([feats[i], feats[i-1]]), name='L%dW' % i)
self.weights.append(w)
b = tf.get_variable(initializer=rand_init([feats[i]]), name='L%dB' % i)
self.biases.append(b)
if i==len(feats)-1:
self.layers.append(merge(self.layers[-1], w)+b)
else:
self.before_act.append(merge(self.layers[-1], w)+b)
self.layers.append(g(self.before_act[-1]))
self.out_dim = feats[-1]
self.outputs = self.layers[-1]
self.truth = tf.placeholder(shape=[self.out_dim], dtype=tf.float32)
def train(self, x, y, max_iter):
self.opt = tf.train.GradientDescentOptimizer(learning_rate=1e-2)
self.loss = tf.reduce_mean(tf.abs(self.truth - self.outputs))
self.reg = 0.00
for i in range(len(self.before_act)):
self.reg = self.reg + tf.reduce_mean(tf.maximum(tf.abs(self.before_act[i])-3.0, 0))
self.reg = self.reg / len(self.before_act)
self.minimizer = self.opt.minimize((1-self.alpha)*self.loss + self.alpha*self.reg)
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.global_variables())
_cnt = 0
while _cnt < max_iter:
ind = np.random.randint(0, len(x), [])
_, _loss, _reg, _output = self.sess.run(
[self.minimizer, self.loss, self.reg, self.layers[-1]],
feed_dict={
self.inputs: x[ind],
self.truth: y[ind]
})
print('ITR# %d\t LOSS=%.6f REG=%.6f' % (_cnt, _loss, _reg))
#print(_output)
_cnt += 1
saver.save(self.sess, 'models/model.ckpt')
print('model saved to path: models/....')
def infer(self, x):
return None
def int2bins(x):
x = np.uint8(x)
op = 0b10000000
bins = np.array([0.0] * 8)
for i in range(8):
if op & x == op:
bins[i]=1
else:
bins[i]=0
op = op >> 1
return bins
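# Editor's worked example (not in the original file): int2bins unpacks a byte
# most-significant bit first, e.g. int2bins(5) yields [0, 0, 0, 0, 0, 1, 0, 1]
# and int2bins(255) yields [1, 1, 1, 1, 1, 1, 1, 1].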
def concat(bins_1, bins_2):
return np.concatenate((bins_1, bins_2), axis=0)
def observe(size):
x = np.random.randint(0,256,[size,2])
_x = np.zeros([size, 16], dtype=np.float32)
_y = np.zeros([size, 2], dtype=np.float32)
for i in range(size):
_x[i] = concat(int2bins(x[i,0]), int2bins(x[i,1]))
if x[i,0] > x[i, 1]:
_y[i, 0] = 0
_y[i, 1] = 1
elif x[i, 0] < x[i, 1]:  # strict inequality so the tie case below is reachable
_y[i, 0] = 1
_y[i, 1] = 0
else:
_y[i, 0] = 1
_y[i, 1] = 1
return _x, _y
def check_acc(y, y_i):
_score = 0.0
for i in range(y.shape[0]):
if y[i,0]==y_i[i,0] and y[i,1]==y_i[i,1]:
_score += 1
return _score / y.shape[0]
if __name__ == '__main__':
nn = RealNN([16, 32, 16, 8, 2])
x, y = observe(10)
print(x)
print(y)
nn.train(x, y, 100000)
#x, y = observe(10)
#y_i = nn.infer(x)
#check_acc(y_i, y)
|
[
"numpy.uint8",
"tensorflow.abs",
"tensorflow.global_variables_initializer",
"numpy.zeros",
"tensorflow.Session",
"tensorflow.matmul",
"tensorflow.placeholder",
"numpy.random.randint",
"numpy.array",
"tensorflow.global_variables",
"numpy.random.rand",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.nn.leaky_relu",
"tensorflow.nn.sigmoid",
"numpy.concatenate"
] |
[((284, 300), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['x'], {}), '(x)\n', (297, 300), True, 'import tensorflow as tf\n'), ((324, 343), 'tensorflow.nn.leaky_relu', 'tf.nn.leaky_relu', (['x'], {}), '(x)\n', (340, 343), True, 'import tensorflow as tf\n'), ((3318, 3329), 'numpy.uint8', 'np.uint8', (['x'], {}), '(x)\n', (3326, 3329), True, 'import numpy as np\n'), ((3361, 3380), 'numpy.array', 'np.array', (['([0.0] * 8)'], {}), '([0.0] * 8)\n', (3369, 3380), True, 'import numpy as np\n'), ((3565, 3605), 'numpy.concatenate', 'np.concatenate', (['(bins_1, bins_2)'], {'axis': '(0)'}), '((bins_1, bins_2), axis=0)\n', (3579, 3605), True, 'import numpy as np\n'), ((3635, 3671), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)', '[size, 2]'], {}), '(0, 256, [size, 2])\n', (3652, 3671), True, 'import numpy as np\n'), ((3678, 3716), 'numpy.zeros', 'np.zeros', (['[size, 16]'], {'dtype': 'np.float32'}), '([size, 16], dtype=np.float32)\n', (3686, 3716), True, 'import numpy as np\n'), ((3726, 3763), 'numpy.zeros', 'np.zeros', (['[size, 2]'], {'dtype': 'np.float32'}), '([size, 2], dtype=np.float32)\n', (3734, 3763), True, 'import numpy as np\n'), ((191, 207), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['x'], {}), '(x)\n', (204, 207), True, 'import tensorflow as tf\n'), ((525, 551), 'tensorflow.matmul', 'tf.matmul', (['weights', 'inputs'], {}), '(weights, inputs)\n', (534, 551), True, 'import tensorflow as tf\n'), ((1086, 1139), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[self.in_dim]', 'dtype': 'tf.float32'}), '(shape=[self.in_dim], dtype=tf.float32)\n', (1100, 1139), True, 'import tensorflow as tf\n'), ((1980, 2034), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[self.out_dim]', 'dtype': 'tf.float32'}), '(shape=[self.out_dim], dtype=tf.float32)\n', (1994, 2034), True, 'import tensorflow as tf\n'), ((2096, 2149), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': '(0.01)'}), '(learning_rate=0.01)\n', (2129, 2149), True, 'import tensorflow as tf\n'), ((2548, 2560), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2558, 2560), True, 'import tensorflow as tf\n'), ((680, 696), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (694, 696), True, 'import numpy as np\n'), ((2185, 2218), 'tensorflow.abs', 'tf.abs', (['(self.truth - self.outputs)'], {}), '(self.truth - self.outputs)\n', (2191, 2218), True, 'import tensorflow as tf\n'), ((2583, 2616), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2614, 2616), True, 'import tensorflow as tf\n'), ((2649, 2670), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (2668, 2670), True, 'import tensorflow as tf\n'), ((748, 772), 'numpy.random.rand', 'np.random.rand', (['sizes[0]'], {}), '(sizes[0])\n', (762, 772), True, 'import numpy as np\n'), ((240, 249), 'tensorflow.abs', 'tf.abs', (['t'], {}), '(t)\n', (246, 249), True, 'import tensorflow as tf\n'), ((824, 858), 'numpy.random.rand', 'np.random.rand', (['sizes[0]', 'sizes[1]'], {}), '(sizes[0], sizes[1])\n', (838, 858), True, 'import numpy as np\n'), ((2350, 2376), 'tensorflow.abs', 'tf.abs', (['self.before_act[i]'], {}), '(self.before_act[i])\n', (2356, 2376), True, 'import tensorflow as tf\n')]
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for aqt.jax.quantization."""
import itertools
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
from flax import linen as nn
import jax
from jax import random
import jax.numpy as jnp
import numpy as onp
from aqt.jax import fp_cast
from aqt.jax import get_bounds
from aqt.jax import primitives
from aqt.jax import quant_config
from aqt.jax import quantization
from aqt.jax import test_utils
from aqt.jax.get_bounds import GetBounds
from aqt.jax.quantization import QuantOps
from aqt.jax.quantization import QuantType
from aqt.jax.quantization import SCALE_DTYPE
fp32 = onp.float32
test_utils.configure_jax()
class QuantOpsTest(parameterized.TestCase):
def setUp(self):
super(QuantOpsTest, self).setUp()
quantization.DISABLE_EPSILON_IN_SCALE_FUN_FOR_TESTING = True
@parameterized.named_parameters(
dict(testcase_name='prec_2', bounds=6.0, prec=2),
dict(testcase_name='prec_4', bounds=6.0, prec=4),
dict(testcase_name='prec_8', bounds=6.0, prec=8),
dict(
testcase_name='2_features_prec_8',
bounds=[6., 12.],
prec=8),
)
def test_attributes_create_positive(self, bounds, prec):
bounds = jnp.array(bounds)
relu6 = QuantOps.create_positive(bounds=bounds, prec=prec)
onp.testing.assert_array_equal(relu6._scale, 2**prec / bounds)
self.assertEqual(relu6._symmetric, False)
self.assertEqual(relu6._prec, prec)
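# Editor's worked example (not part of the original tests): with prec=8 and
# bounds=6.0, create_positive uses scale = 2**8 / 6.0, roughly 42.67, so an
# input of 3.0 maps to about 3.0 * 42.67 = 128 on the [0, 2**8 - 1] integer
# grid before dequantization divides the scale back out.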
@parameterized.named_parameters(
dict(testcase_name='prec_2', bounds=6.0, prec=2),
dict(testcase_name='prec_4', bounds=6.0, prec=4),
dict(testcase_name='prec_8', bounds=6.0, prec=8),
dict(
testcase_name='2_features_prec_8',
bounds=[6., 12.],
prec=8),
)
def test_attributes_create_symmetric(self, bounds, prec):
bounds = jnp.array(bounds)
act_signed = QuantOps.create_symmetric(
bounds=bounds, prec=prec, half_shift=False)
onp.testing.assert_array_equal(act_signed._scale,
(2**(prec - 1) - 1) / bounds)
self.assertEqual(act_signed._symmetric, True)
self.assertEqual(act_signed._prec, prec)
@parameterized.named_parameters(
dict(
testcase_name='fp8_143',
weight_range=[2.0, 64.0],
weight_shape=(10, 1),
fp_quant=QuantOps.FloatQuant(
is_scaled=True,
fp_spec=QuantOps.FloatQuant.FloatPrec(
exp_min=-11,
exp_max=4,
sig_bits=3,
),
),
),
dict(
testcase_name='fp8_152',
weight_range=[2.0, 64.0],
weight_shape=(10, 1),
fp_quant=QuantOps.FloatQuant(
is_scaled=True,
fp_spec=QuantOps.FloatQuant.FloatPrec(
exp_min=-23,
exp_max=8,
sig_bits=2,
),
),
),
)
def test_attributes_create_weights_op_fp(
self,
weight_range,
weight_shape,
fp_quant,
):
weights = jnp.array(
fp32(onp.random.uniform(*weight_range, size=weight_shape)))
axis = None if weight_shape[1] == 1 else 0
weights_quant_op = QuantOps.create_weights_ops(
w=weights,
weight_params=QuantOps.WeightParams(
prec=fp_quant, axis=axis, half_shift=False))
max_weight = onp.max(abs(weights), axis=0)
onp.testing.assert_array_equal(
jnp.squeeze(weights_quant_op._scale),
jnp.exp2(-jnp.floor(jnp.log2(max_weight))))
self.assertEqual(weights_quant_op._symmetric, True)
self.assertIs(weights_quant_op._prec, fp_quant)
weights_scaled = (weights * weights_quant_op._scale).astype(weights.dtype)
weights_quant_expected = fp_cast.downcast_sat_ftz(
weights_scaled,
fp_quant.fp_spec.exp_min,
fp_quant.fp_spec.exp_max,
fp_quant.fp_spec.sig_bits,
)
weights_quant_calculated = weights_quant_op.to_quantized(
weights, dtype=SCALE_DTYPE)
onp.testing.assert_array_equal(weights_quant_expected,
weights_quant_calculated)
# Test the lower (23 - fp_quant.fp_spec.sig_bits) bits of the calculated
# quantized weights are zero.
sig_mask = jnp.int32((1 << (23 - fp_quant.fp_spec.sig_bits)) - 1)
onp.testing.assert_array_equal(
weights_quant_calculated.view(jnp.int32) & sig_mask,
jnp.zeros_like(weights))
@parameterized.named_parameters(
dict(
testcase_name='fp_act_symmetric',
act_distribution='symmetric',
use_hparams_bounds=False,
),
# TODO(b/193561347): FP quantization with positive input distribution is
# not supported yet
dict(
testcase_name='fp_act_positive',
act_distribution='positive',
use_hparams_bounds=False,
),
dict(
testcase_name='fp_act_symmetric_hyper_bounds',
act_distribution='symmetric',
use_hparams_bounds=True,
),
dict(
testcase_name='fp_act_positive_hyper_bounds',
act_distribution='positive',
use_hparams_bounds=True,
),
)
def test_attributes_create_acts_op_fp(
self,
act_distribution,
use_hparams_bounds,
):
inputs = jnp.array(fp32(2.0 * onp.random.uniform(0, 1.0, size=(10, 4))))
fp_quant = QuantOps.FloatQuant(
is_scaled=True,
fp_spec=QuantOps.FloatQuant.FloatPrec(
exp_min=-15,
exp_max=15,
sig_bits=2,
),
)
if use_hparams_bounds:
bounds = get_bounds.GetBounds.Hyper(
initial_bound=6.0,
stddev_coeff=1,
absdev_coeff=0,
mix_coeff=1,
reset_stats=True,
ema_coeff=None,
use_cams=False,
granularity=quant_config.QuantGranularity.per_tensor)
else:
bounds = 6.0
hparams = QuantOps.ActHParams(
input_distribution=act_distribution, bounds=bounds, prec=fp_quant,
half_shift=False)
class TestModule(nn.Module):
hparams: QuantOps.ActHParams
@nn.compact
def __call__(self, inputs):
return QuantOps.create_input_ops(
inputs,
hparams=hparams,
get_bounds_params=GetBounds.Params(
update_stats=False,
update_bounds=False))
test_module = TestModule(hparams=hparams)
state = test_module.init(jax.random.PRNGKey(0), inputs=inputs)
act_quant_op = test_module.apply(state, inputs=inputs)
act_scaled = (inputs * act_quant_op._scale).astype(inputs.dtype)
act_quant_expected = fp_cast.downcast_sat_ftz(
act_scaled,
fp_quant.fp_spec.exp_min,
fp_quant.fp_spec.exp_max,
fp_quant.fp_spec.sig_bits,
)
act_quant_calculated = act_quant_op.to_quantized(inputs, dtype=SCALE_DTYPE)
onp.testing.assert_array_equal(act_quant_expected, act_quant_calculated)
@parameterized.named_parameters(
dict(
testcase_name='pos_weight_prec_2',
weight_range=[2.0, 10.0],
weight_shape=(10, 1),
prec=2),
dict(
testcase_name='pos_weight_prec_4',
weight_range=[2.0, 10.0],
weight_shape=(10, 1),
prec=4),
dict(
testcase_name='pos_weight_prec_8',
weight_range=[2.0, 10.0],
weight_shape=(10, 1),
prec=8),
dict(
testcase_name='neg_weight_prec_8',
weight_range=[-12.0, 2.0],
weight_shape=(10, 1),
prec=8),
dict(
testcase_name='neg_weight_2_features_prec_8',
weight_range=[-12.0, 2.0],
weight_shape=(10, 2),
prec=8),
)
def test_attributes_create_weights_ops(self, weight_range, weight_shape,
prec):
weights = jnp.array(
fp32(
onp.random.uniform(
weight_range[0], weight_range[1], size=weight_shape)))
axis = 0 if weight_shape[1] != 1 else None
weights_quant = QuantOps.create_weights_ops(
w=weights,
weight_params=QuantOps.WeightParams(
prec=prec, axis=axis, half_shift=False))
max_weight = onp.max(abs(weights), axis=0)
onp.testing.assert_array_equal(
jnp.squeeze(weights_quant._scale), (2**(prec - 1) - 1) / max_weight)
self.assertEqual(weights_quant._symmetric, True)
self.assertEqual(weights_quant._prec, prec)
@parameterized.named_parameters(
dict(testcase_name='per_layer_quant', axis=None),
dict(testcase_name='per_channel_quant', axis=(0,)))
def test_weight_scale_shape_is_expected(self, axis):
# Tests if scale is as expected for weights quantization.
num_features = 4
expected_scale_shape = (1, 1) if axis is None else (1, num_features)
# Weight Quantization
weights = jnp.array(
fp32(2.0 * onp.random.uniform(0, 1.0, size=(10, num_features))))
_ = QuantOps.create_weights_fake_quant(
w=weights,
weight_params=QuantOps.WeightParams(
prec=8.0,
axis=axis,
expected_scale_shape=expected_scale_shape,
half_shift=False))
def test_inputs_scale_shape_is_expected(self):
# Inputs quantization
inputs = jnp.array(fp32(2.0 * onp.random.uniform(0, 1.0, size=(10, 4))))
bounds = 6.0
expected_inputs_scale_shape = ()
_ = QuantOps.create_inputs_fake_quant(
inputs=inputs,
hparams=QuantOps.ActHParams(
input_distribution=QuantOps.ActHParams.InputDistribution.symmetric,
bounds=bounds,
prec=8.0,
half_shift=False),
get_bounds_params=GetBounds.Params(
update_stats=False,
update_bounds=False,
expected_bounds_shape=expected_inputs_scale_shape))
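# For the positive-distribution ("ReLU-style") quantizer below, the scale is
# 2**prec / bound, so with prec=4 and bound=6.0 an input of 6.2 clips to
# 2**4 - 1 = 15 in the quantized domain and dequantizes to 15 * 6 / 16 = 5.625,
# which is what max_clipped_val computes.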
@parameterized.named_parameters(
dict(testcase_name='prec_2',
prec=2), dict(testcase_name='prec_4', prec=4),
dict(testcase_name='prec_8', prec=8))
def test_positive_activation_quantization_clips_outside_bounds(self, prec):
# Activation values less than 0 get clipped to 0, and values greater than
# upper_bound get clipped to upper_bound
relu6 = QuantOps.create_positive(bounds=6.0, prec=prec)
activation = jnp.array(fp32([-0.5, 6.2, 3.141]))
quantized_activations = relu6.to_quantized(activation, dtype=SCALE_DTYPE)
onp.testing.assert_array_equal(quantized_activations[0:2],
[0.0, 2**prec - 1])
activations = relu6.from_quantized(quantized_activations, dtype=jnp.float32)
max_clipped_val = (2**prec - 1) * (6.0 / 2**prec)
onp.testing.assert_array_equal(activations[0:2], [0.0, max_clipped_val])
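# The symmetric quantizer used next has a per-channel scale of
# (2**(prec - 1) - 1) / bound; values saturate at +/-(2**(prec - 1) - 1) in the
# quantized domain and therefore dequantize to exactly +/-bound (e.g. -7 -> -6.0
# for the first channel, whose bound is 6.0).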
@parameterized.named_parameters(
dict(testcase_name='prec_2', prec=2),
dict(testcase_name='prec_4', prec=4),
dict(testcase_name='prec_8', prec=8)
)
def test_per_feature_dim_unsigned_activation_quantization_clips_outside_bounds(
self, prec):
# Activation values less than -upper_bound get clipped to -upper_bound, and
# values greater than upper_bound get clipped to upper_bound
act_quant = QuantOps.create_symmetric(
bounds=jnp.array([[6.0, 8.0]]), prec=prec, half_shift=False)
activation = jnp.array(fp32([[-7, -8.9], [6.2, 9.4], [0, 0.]]))
quantized_activations = act_quant.to_quantized(
activation, dtype=SCALE_DTYPE)
onp.testing.assert_array_equal(
quantized_activations,
jnp.array([[-2**(prec - 1.0) + 1.0], [2**(prec - 1.0) - 1.0], [0.0]]) *
jnp.array([[1., 1.]]))
activations = act_quant.from_quantized(
quantized_activations, dtype=jnp.float32)
onp.testing.assert_array_equal(activations,
[[-6.0, -8.0], [6.0, 8.], [0, 0.]])
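# Because the symmetric scale is proportional to 1 / bound, multiplying both
# the activations and the bound by the same power of 2 leaves the quantized
# integers unchanged and simply rescales the fake-quant output by that factor.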
@parameterized.named_parameters(
dict(testcase_name='prec_2', prec=2),
dict(testcase_name='prec_4', prec=4),
dict(testcase_name='prec_8', prec=8)
)
def test_scale_invariance_signed_activation_quantization(self, prec):
# Scaling the activations by a power of 2 and the bounds by the same factor
# should scale the output by that same factor.
activations = random.uniform(random.PRNGKey(0), (10, 1))
act_scale = 8.
scaled_activations = activations * act_scale
bounds = 6.
activations = QuantOps.create_inputs_fake_quant(
inputs=activations,
get_bounds_params=GetBounds.Params(
update_stats=False, update_bounds=False),
hparams=QuantOps.ActHParams(
input_distribution=QuantOps.ActHParams.InputDistribution.symmetric,
bounds=bounds,
prec=prec,
half_shift=False))
scaled_activations = QuantOps.create_inputs_fake_quant(
inputs=scaled_activations,
get_bounds_params=GetBounds.Params(
update_stats=False, update_bounds=False),
hparams=QuantOps.ActHParams(
input_distribution=QuantOps.ActHParams.InputDistribution.symmetric,
bounds=bounds * act_scale,
prec=prec,
half_shift=False))
onp.testing.assert_array_equal(activations * act_scale, scaled_activations)
@parameterized.named_parameters(
dict(testcase_name='prec_2', prec=2),
dict(testcase_name='prec_4', prec=4),
dict(testcase_name='prec_8', prec=8)
)
def test_per_feature_dim_scale_invariance_pos_activation_quantization(
self, prec):
# Scaling each channel of the activations by a different power of 2, and the
# upper bound by the same per-channel factors, should scale the corresponding
# channel of the output by the same factor.
activations = random.uniform(random.PRNGKey(0), (3, 4))
act_scale = 2**jnp.arange(4)
scaled_activations = activations * act_scale[jnp.newaxis, :]
upper_bound = 6.0 * jnp.ones((3, 4), jnp.float32)
act_quant_ops = QuantOps.create_positive(bounds=upper_bound, prec=prec)
activations = act_quant_ops.fake_quant(
activations, quantized_type=SCALE_DTYPE)
scaled_act_quant_ops = QuantOps.create_positive(
bounds=upper_bound * act_scale[jnp.newaxis, :], prec=prec)
scaled_activations = scaled_act_quant_ops.fake_quant(
scaled_activations, quantized_type=SCALE_DTYPE)
onp.testing.assert_array_equal(activations * act_scale[jnp.newaxis, :],
scaled_activations)
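# Assuming the positive-distribution path uses the same 2**prec / bound scale
# as QuantOps.create_positive, a power-of-two bound of 2**(prec - 3) makes the
# scale exactly 2**3 = 8, so an integer input x quantizes to the integer 8 * x
# and dequantizes back to x with no error.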
@parameterized.named_parameters(
dict(testcase_name='prec_4', prec=4),
dict(testcase_name='prec_8', prec=8))
def test_int_positive_act_quantization(self, prec):
# Integer activations within upper_bound, where upper_bound == 2^i for some
# i < prec, quantize correctly.
upper_bound = 2**(prec - 3)
activations = random.randint(random.PRNGKey(0), (10, 1), 0, upper_bound)
rescaled_activations = QuantOps.create_inputs_fake_quant(
inputs=activations,
get_bounds_params=GetBounds.Params(
update_stats=False, update_bounds=False),
hparams=QuantOps.ActHParams(
input_distribution=QuantOps.ActHParams.InputDistribution.positive,
bounds=upper_bound,
prec=prec,
half_shift=False))
onp.testing.assert_array_equal(activations, rescaled_activations)
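# Here bounds == 2**(prec - 1) - 1, which makes the symmetric scale exactly 1,
# so integer activations inside the bounds pass through fake quantization
# unchanged.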
@parameterized.named_parameters(
dict(testcase_name='prec_2', prec=2),
dict(testcase_name='prec_4', prec=4),
dict(testcase_name='prec_8', prec=8)
)
def test_int_symmetric_act_quantization(self, prec):
# Integer activations within the bounds, where abs(bounds) == 2^(prec - 1) - 1,
# quantize correctly.
bounds = 2**(prec - 1) - 1
activations = random.randint(random.PRNGKey(0), (10, 1), -bounds, bounds)
rescaled_activations = QuantOps.create_inputs_fake_quant(
inputs=activations,
get_bounds_params=GetBounds.Params(
update_stats=False, update_bounds=False),
hparams=QuantOps.ActHParams(
input_distribution=QuantOps.ActHParams.InputDistribution.symmetric,
bounds=bounds,
prec=prec,
half_shift=False))
onp.testing.assert_array_equal(activations, rescaled_activations)
@parameterized.named_parameters(
dict(testcase_name='prec_4', prec=4),
dict(testcase_name='prec_8', prec=8))
def test_float_weights_quantization(self, prec):
# Tests that quantized and rescaled float weights are close to original
# weights.
weights = jnp.array(fp32(2.0 * onp.random.uniform(0, 1.0, size=(10, 1))))
rescaled_weights = QuantOps.create_weights_fake_quant(
w=weights,
weight_params=QuantOps.WeightParams(
prec=prec, axis=None, half_shift=False))
test_utils.assert_all_close_prec(weights, rescaled_weights, prec=prec)
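# In the full-range test below, max(|w|) equals 2**(prec - 1) - 1 by
# construction, so the weight scale is exactly 1 and the integer weights
# survive the quantize/dequantize round trip bit-exactly.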
@parameterized.named_parameters(
dict(testcase_name='prec_2', prec=2),
dict(testcase_name='prec_4', prec=4),
dict(testcase_name='prec_8', prec=8)
)
def test_full_range_int_weight_quantization(self, prec):
# Integer weights in the full range [-maxmin_signed_int, maxmin_signed_int]
# quantize correctly.
minval = -2**(prec - 1) + 1
maxval = 2**(prec - 1) - 1
weights = random.randint(random.PRNGKey(0), (10, 1), minval, maxval + 1)
weights = jax.ops.index_update(weights, jax.ops.index[0, :], maxval)
weight_quant = QuantOps.create_weights_ops(
w=weights,
weight_params=QuantOps.WeightParams(
prec=prec, axis=None, half_shift=False))
quantized_weights = weight_quant.to_quantized(weights, dtype=SCALE_DTYPE)
onp.testing.assert_array_equal(quantized_weights[0],
(2**(prec - 1.0) - 1.0))
rescaled_weights = weight_quant.from_quantized(
quantized_weights, dtype=jnp.float32)
onp.testing.assert_array_equal(weights, rescaled_weights)
@parameterized.named_parameters(
dict(testcase_name='prec_2', prec=2),
dict(testcase_name='prec_4', prec=4),
dict(testcase_name='prec_8', prec=8))
def test_scale_invariance_weight_quantization(self, prec):
# Scaling the weights by a power of 2 should scale the output by the same factor.
weights = random.uniform(random.PRNGKey(0), (10, 1))
weight_scale = 16
scaled_weights = weights * weight_scale
weights = QuantOps.create_weights_fake_quant(
w=weights,
weight_params=QuantOps.WeightParams(
prec=prec, axis=None, half_shift=False))
scaled_weights = QuantOps.create_weights_fake_quant(
w=scaled_weights,
weight_params=QuantOps.WeightParams(
prec=prec, axis=None, half_shift=False))
onp.testing.assert_array_equal(weights * weight_scale, scaled_weights)
@parameterized.named_parameters(
dict(testcase_name='prec_2', prec=2),
dict(testcase_name='prec_4', prec=4),
dict(testcase_name='prec_8', prec=8)
)
def test_per_feature_dim_scale_invariance_weight_quantization(self, prec):
# Scaling each channel of the weights by a different power of 2 should scale
# the corresponding channel of the output by the same factor.
weights = random.uniform(random.PRNGKey(0), (3, 4))
weight_scale = 2**jnp.arange(4)[jnp.newaxis, :]
scaled_weights = weights * weight_scale
weights = quantization.QuantOps.create_weights_fake_quant(
w=weights,
weight_params=QuantOps.WeightParams(
prec=prec, axis=0, half_shift=False))
scaled_weights = quantization.QuantOps.create_weights_fake_quant(
w=scaled_weights,
weight_params=QuantOps.WeightParams(
prec=prec, axis=0, half_shift=False))
onp.testing.assert_array_equal(weights * weight_scale, scaled_weights)
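# FloatQuant.FloatPrec(exp_min, exp_max, sig_bits) describes a reduced-precision
# floating-point format; is_scaled selects whether a per-tensor scale is applied
# before the downcast (cf. the downcast_sat_ftz tests above). Regardless of the
# prec type, initial_bound == -1 is expected to make fake quantization a no-op.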
@parameterized.named_parameters(
dict(
testcase_name='fp_prec_scaled',
prec=QuantOps.FloatQuant(
is_scaled=True,
fp_spec=QuantOps.FloatQuant.FloatPrec(
exp_min=-11,
exp_max=4,
sig_bits=3,
),
),
),
dict(
testcase_name='fp_prec_unscaled',
prec=QuantOps.FloatQuant(
is_scaled=False,
fp_spec=QuantOps.FloatQuant.FloatPrec(
exp_min=-11,
exp_max=4,
sig_bits=3,
),
),
),
dict(
testcase_name='int_prec',
prec=4.0,
),
)
def test_no_quantization(self, prec):
# If initial_bound==-1 when using GetBounds, then create_inputs_fake_quant
# should be a no-op.
inputs = jnp.array([[.3, 1.4], [-5.2, 4.0]])
bounds = get_bounds.GetBounds.Hyper(
initial_bound=-1,
stddev_coeff=1,
absdev_coeff=0,
mix_coeff=1,
reset_stats=True,
ema_coeff=None,
use_cams=False,
granularity=quant_config.QuantGranularity.per_tensor)
hparams = quantization.QuantOps.ActHParams(
input_distribution='symmetric',
bounds=bounds,
prec=prec,
half_shift=False)
# The call to create_inputs_fake_quant has to occur from within a Flax
# module since it calls GetBounds, which is itself a Flax module.
# Thus we create a wrapper module for testing.
class TestModule(nn.Module):
hparams: quantization.QuantOps.ActHParams
@nn.compact
def __call__(self, inputs):
return quantization.QuantOps.create_inputs_fake_quant(
inputs,
hparams=hparams,
get_bounds_params=GetBounds.Params(
update_stats=True, update_bounds=False))
test_module = TestModule(hparams=hparams)
state = test_module.init(jax.random.PRNGKey(0), inputs=inputs)
inputs_after_fake_quant, _ = test_module.apply(
state, inputs=inputs, mutable=True)
onp.testing.assert_array_equal(inputs, inputs_after_fake_quant)
# TODO(shivaniagrawal): Add tests for auto clip activation quantizations.
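# The AQT tests below compare two execution strategies for the same hparams:
# QuantType.aqt passes already-quantized integer values to lax.dot_general and
# rescales the result afterwards (as the mock-based tests verify), while
# QuantType.fake_quant quantizes and immediately dequantizes in floating point
# before an ordinary dot. At 16-bit precision the two should agree closely.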
class AQTTest(parameterized.TestCase):
def setUp(self):
super().setUp()
key1, key2 = jax.random.split(jax.random.PRNGKey(0), 2)
self.rhs = jax.random.normal(key1, (2, 4)) * 20
self.lhs = jax.random.normal(key2, (3, 2)) * 2 + 3
@parameterized.named_parameters(
dict(
testcase_name='per_layer_act_per_column_weight',
act_bounds=4.0,
weight_prec=16,
weight_axis=(0,),
),
dict(
testcase_name='per_column_act_per_column_weight',
act_bounds=[[3.0, 4.0]],
weight_prec=16,
weight_axis=(0,)),
dict(
testcase_name='per_layer_act_per_layer_weight',
act_bounds=4.0,
weight_prec=16,
weight_axis=None),
dict(
testcase_name='per_column_act_per_layer_weight',
act_bounds=[[3.0, 4.0]],
weight_prec=16,
weight_axis=None),
dict(
testcase_name='per_layer_act_no_weight_quant',
act_bounds=4.0,
weight_prec=None,
weight_axis=None),
dict(
testcase_name='per_column_act_no_weight_quant',
act_bounds=[[3.0, 4.0]],
weight_prec=None,
weight_axis=None),
dict(
testcase_name='no_act_quant_per_column_weight',
act_bounds=None,
weight_prec=16,
weight_axis=(0,)),
dict(
testcase_name='no_act_quant_no_weight_quant',
act_bounds=None,
weight_prec=None,
weight_axis=None),)
def test_quantized_dot_aqt(self, act_bounds, weight_prec, weight_axis):
# With a high enough precision, we expect results from fakequant and AQT to
# be very similar.
weight_params = QuantOps.WeightParams(
prec=weight_prec, axis=weight_axis, half_shift=False)
if act_bounds is None:
act_params = None
else:
act_params = QuantOps.ActHParams(
input_distribution='symmetric',
bounds=jnp.array(act_bounds),
prec=16,
half_shift=False)
def quantized_matmul(quant_type):
return quantization.quantized_dot(
w=self.rhs,
act=self.lhs,
weight_params=weight_params,
act_hparams=act_params,
get_bounds_params=None,
quant_type=quant_type,
prefer_int8_to_int32_dot=True)
aqt_result = quantized_matmul(QuantType.aqt)
fakequant_result = quantized_matmul(QuantType.fake_quant)
onp.testing.assert_allclose(
aqt_result,
fakequant_result,
rtol=1e-2,
err_msg='AQT and fakequant significantly disagree')
@parameterized.named_parameters(
dict(
testcase_name='per_layer_act_per_column_weight',
act_bounds=4.0,
weight_prec=16,
weight_axis=(0,),
),
dict(
testcase_name='per_column_act_per_column_weight',
act_bounds=[[[3.0, 4.0]]],
weight_prec=16,
weight_axis=(0,)),
dict(
testcase_name='per_layer_act_per_layer_weight',
act_bounds=4.0,
weight_prec=16,
weight_axis=None),
dict(
testcase_name='per_column_act_per_layer_weight',
act_bounds=[[[3.0, 4.0]]],
weight_prec=16,
weight_axis=None),
dict(
testcase_name='per_layer_act_no_weight_quant',
act_bounds=4.0,
weight_prec=None,
weight_axis=None),
dict(
testcase_name='per_column_act_no_weight_quant',
act_bounds=[[[3.0, 4.0]]],
weight_prec=None,
weight_axis=None),
dict(
testcase_name='no_act_quant_per_column_weight',
act_bounds=None,
weight_prec=16,
weight_axis=(0,)),
dict(
testcase_name='no_act_quant_no_weight_quant',
act_bounds=None,
weight_prec=None,
weight_axis=None),
)
def test_quantized_dot_general_aqt(self, act_bounds, weight_prec,
weight_axis):
# With a high enough precision, we expect results from fakequant and AQT to
# be very similar.
weight_params = QuantOps.WeightParams(
prec=weight_prec, axis=weight_axis, half_shift=False)
if act_bounds is None:
act_params = None
else:
act_params = QuantOps.ActHParams(
input_distribution='symmetric',
bounds=jnp.array(act_bounds),
prec=16,
half_shift=False)
lhs_ndims_3 = jnp.array(
fp32(2.0 * onp.random.uniform(0, 1.0, size=(4, 3, 2))))
def quantized_matmul(quant_type):
return quantization.quantized_dot_general(
w=self.rhs,
act=lhs_ndims_3,
weight_params=weight_params,
act_hparams=act_params,
get_bounds_params=None,
quant_type=quant_type,
dimension_numbers=(((lhs_ndims_3.ndim - 1,), (0,)), ((), ())),
prefer_int8_to_int32_dot=True)
aqt_result = quantized_matmul(QuantType.aqt)
self.assertEqual(aqt_result.shape, (4, 3, 4))
fakequant_result = quantized_matmul(QuantType.fake_quant)
onp.testing.assert_allclose(
aqt_result,
fakequant_result,
rtol=1e-2,
err_msg='AQT and fakequant significantly disagree')
def assert_is_integer_in_range(self, x, *, prec, distribution):
if distribution == 'symmetric':
x_clipped = primitives.round_and_clip_to_signed_int(
x, prec=prec, dtype=x.dtype, half_shift=False)
elif distribution == 'positive':
x_clipped = primitives.floor_and_clip_to_unsigned_int(
x, prec=prec, dtype=x.dtype, half_shift=False)
else:
raise ValueError(f'Invalid distribution {distribution}')
onp.testing.assert_array_equal(
x, x_clipped,
f'Array cannot be losslessly cast to integer with precision {prec} '
f'and {distribution} distribution.')
@parameterized.parameters(
dict(act_distribution='symmetric', prefer_int8_to_int32_dot=True, prec=4),
dict(act_distribution='symmetric', prefer_int8_to_int32_dot=True, prec=8),
dict(act_distribution='positive', prefer_int8_to_int32_dot=True, prec=4),
dict(act_distribution='positive', prefer_int8_to_int32_dot=True, prec=8),
dict(
act_distribution='symmetric', prefer_int8_to_int32_dot=False, prec=4))
@mock.patch.object(jax.lax, 'dot_general')
def test_lax_dot_has_integer_inputs_in_quantized_dot(self, mock_dot_general,
act_distribution,
prefer_int8_to_int32_dot,
prec):
weight_params = QuantOps.WeightParams(
prec=prec, axis=(0,), half_shift=False)
act_params = QuantOps.ActHParams(
input_distribution=act_distribution,
bounds=jnp.array([[3.0, 1.5]]),
prec=prec,
half_shift=False)
act = self.lhs
if act_distribution == 'positive':
act = jnp.abs(act)
# We need this context manager to stop Jax from trying to compile the arms
# of the `lax.cond` call in `dot_general_aqt`. By default, Jax will always
# try to compile the functions passed to `lax.cond`, even if outside of a
# JITed context. JIT compilation is incompatible with using a mock for the
# call to 'dot_general' because during compilation Jax will expect
# 'dot_general' to return a tracer and will throw an error if it returns a
# mock instead. By explicitly using jax.disable_jit, Jax will not try to
# compile the arms to lax.cond and so using a mock will work fine.
with jax.disable_jit():
quantization.quantized_dot(
w=self.rhs,
act=act,
weight_params=weight_params,
act_hparams=act_params,
get_bounds_params=None,
quant_type=QuantType.aqt,
prefer_int8_to_int32_dot=prefer_int8_to_int32_dot)
act_inputs, weight_inputs = mock_dot_general.call_args[0]
self.assert_is_integer_in_range(
act_inputs, prec=prec, distribution=act_distribution)
self.assert_is_integer_in_range(
weight_inputs, prec=prec, distribution='symmetric')
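# The int8 fast path is skipped for 8-bit positive activations, presumably
# because their quantized range [0, 2**8 - 1] does not fit in a signed int8.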
if prefer_int8_to_int32_dot and not (act_distribution == 'positive' and
prec == 8):
expected_input_dtype = jnp.int8
else:
expected_input_dtype = jnp.float32
self.assertEqual(act_inputs.dtype, expected_input_dtype)
self.assertEqual(weight_inputs.dtype, expected_input_dtype)
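# The dtype tests below check that quantized_dot and
# quantized_dynamic_dot_general return outputs in the caller's input dtype
# (bfloat16 or float32) for both strategies, and that mixing input dtypes
# raises a TypeError.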
@parameterized.parameters(
itertools.product(
(jnp.bfloat16, jnp.float32), (4, None),
(quantization.QuantType.aqt, quantization.QuantType.fake_quant)))
def test_quantized_dot_has_correct_dtype(self, input_dtype, act_prec,
quant_type):
weight_params = QuantOps.WeightParams(prec=4, axis=(0,), half_shift=False)
act_params = QuantOps.ActHParams(
input_distribution='symmetric',
bounds=jnp.array([[3.0, 1.5]]),
prec=act_prec,
half_shift=False)
act = self.lhs.astype(input_dtype)
w = self.rhs.astype(input_dtype)
output = quantization.quantized_dot(
w=w,
act=act,
weight_params=weight_params,
act_hparams=act_params,
get_bounds_params=None,
quant_type=quant_type,
prefer_int8_to_int32_dot=True)
self.assertEqual(output.dtype, input_dtype)
@parameterized.parameters(
dict(quant_type=quantization.QuantType.aqt),
dict(quant_type=quantization.QuantType.fake_quant))
def test_quantized_dot_raises_with_mixed_dtype(self, quant_type):
weight_params = QuantOps.WeightParams(prec=4, axis=(0,), half_shift=False)
act_params = QuantOps.ActHParams(
input_distribution='symmetric',
bounds=jnp.array([[3.0, 1.5]]),
prec=4,
half_shift=False)
act = self.lhs.astype(jnp.bfloat16)
w = self.rhs.astype(jnp.float32)
with self.assertRaises(TypeError):
quantization.quantized_dot(
w=w,
act=act,
weight_params=weight_params,
act_hparams=act_params,
get_bounds_params=None,
quant_type=quant_type,
prefer_int8_to_int32_dot=True)
@parameterized.parameters(
itertools.product(
(jnp.bfloat16, jnp.float32), (4, None),
(quantization.QuantType.aqt, quantization.QuantType.fake_quant)))
def test_dynamic_quantized_dot_general_has_correct_dtype(
self, input_dtype, act_prec, quant_type):
lhs_params = QuantOps.ActHParams(
input_distribution='symmetric',
bounds=2.0,
prec=act_prec,
half_shift=False)
rhs_params = QuantOps.ActHParams(
input_distribution='symmetric',
bounds=1.5,
prec=act_prec,
half_shift=False)
lhs_act = self.lhs.astype(input_dtype)
rhs_act = self.rhs.astype(input_dtype)
output = quantization.quantized_dynamic_dot_general(
lhs_act=lhs_act,
rhs_act=rhs_act,
lhs_act_hparams=lhs_params,
rhs_act_hparams=rhs_params,
lhs_get_bounds_params=None,
rhs_get_bounds_params=None,
dot_dimension_numbers=(((1,), (0,)), ((), ())),
quant_type=quant_type)
self.assertEqual(output.dtype, input_dtype)
def test_dynamic_quantized_dot_general_raises_with_mixed_dtype(self):
lhs_params = QuantOps.ActHParams(
input_distribution='symmetric', bounds=2.0, prec=4, half_shift=False)
rhs_params = QuantOps.ActHParams(
input_distribution='symmetric', bounds=1.5, prec=4, half_shift=False)
lhs_act = self.lhs.astype(jnp.bfloat16)
rhs_act = self.rhs.astype(jnp.float32)
with self.assertRaises(TypeError):
quantization.quantized_dynamic_dot_general(
lhs_act=lhs_act,
rhs_act=rhs_act,
lhs_act_hparams=lhs_params,
rhs_act_hparams=rhs_params,
lhs_get_bounds_params=None,
rhs_get_bounds_params=None,
dot_dimension_numbers=(((1,), (0,)), ((), ())),
quant_type=QuantType.aqt)
@parameterized.parameters(
dict(lhs_prec=16, rhs_prec=16), dict(lhs_prec=None, rhs_prec=16),
dict(lhs_prec=16, rhs_prec=None), dict(lhs_prec=None, rhs_prec=None))
def test_quantized_dynamic_dot_general(self, lhs_prec, rhs_prec):
lhs_bounds = 2.0
rhs_bounds = 1.5
lhs_params = QuantOps.ActHParams(
input_distribution='symmetric',
bounds=lhs_bounds,
prec=lhs_prec,
half_shift=False)
rhs_params = QuantOps.ActHParams(
input_distribution='symmetric',
bounds=rhs_bounds,
prec=rhs_prec,
half_shift=False)
def quantized_matmul(quant_type):
return quantization.quantized_dynamic_dot_general(
lhs_act=self.lhs,
rhs_act=self.rhs,
lhs_act_hparams=lhs_params,
rhs_act_hparams=rhs_params,
lhs_get_bounds_params=None,
rhs_get_bounds_params=None,
dot_dimension_numbers=(((1,), (0,)), ((), ())),
quant_type=quant_type)
aqt_result = quantized_matmul(QuantType.aqt)
fakequant_result = quantized_matmul(QuantType.fake_quant)
onp.testing.assert_allclose(
aqt_result,
fakequant_result,
rtol=1e-2,
err_msg='AQT and fakequant significantly disagree')
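# In the GetBounds test below all statistics coefficients are zero, so the
# bound is expected to stay at initial_bound (10.0 for lhs, 5.0 for rhs);
# expected_out then reconstructs the product from the rounded integer values
# at scales 127 / 10 and 127 / 5.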
def test_quantized_dynamic_dot_general_get_bounds(self):
class TestModule(nn.Module):
@nn.compact
def __call__(self, lhs, rhs):
lhs_get_bounds = GetBounds.Hyper(
initial_bound=10.0,
stddev_coeff=0,
absdev_coeff=0,
mix_coeff=0,
granularity=quant_config.QuantGranularity.per_tensor)
rhs_get_bounds = GetBounds.Hyper(
initial_bound=5.0,
stddev_coeff=0,
absdev_coeff=0,
mix_coeff=0,
granularity=quant_config.QuantGranularity.per_tensor)
lhs_params = QuantOps.ActHParams(
input_distribution='symmetric',
bounds=lhs_get_bounds,
prec=8,
half_shift=False)
rhs_params = QuantOps.ActHParams(
input_distribution='symmetric',
bounds=rhs_get_bounds,
prec=8,
half_shift=False)
lhs_get_bounds_params = get_bounds.GetBounds.Params(
update_stats=True, update_bounds=False, module_name='lhs')
rhs_get_bounds_params = get_bounds.GetBounds.Params(
update_stats=True, update_bounds=False, module_name='rhs')
out = quantization.quantized_dynamic_dot_general(
lhs_act=lhs,
rhs_act=rhs,
lhs_act_hparams=lhs_params,
rhs_act_hparams=rhs_params,
dot_dimension_numbers=(((1,), (0,)), ((), ())),
quant_type=QuantType.aqt,
lhs_get_bounds_params=lhs_get_bounds_params,
rhs_get_bounds_params=rhs_get_bounds_params)
return out
lhs = jnp.array([[2.0]])
rhs = jnp.array([[3.0]])
module = TestModule()
state = module.init(jax.random.PRNGKey(0), lhs, rhs)
out, _ = module.apply(state, lhs, rhs, mutable=True)
lhs_scale = 127.0 / 10.0
rhs_scale = 127.0 / 5.0
expected_out = (round(lhs_scale * 2.0) * round(rhs_scale * 3.0)) / (
lhs_scale * rhs_scale)
onp.testing.assert_allclose(out, [[expected_out]])
@parameterized.parameters(
dict(lhs_distribution='symmetric', rhs_distribution='symmetric'),
dict(lhs_distribution='positive', rhs_distribution='symmetric'),
dict(lhs_distribution='symmetric', rhs_distribution='positive'),
dict(lhs_distribution='positive', rhs_distribution='positive'))
@mock.patch.object(jax.lax, 'dot_general')
def test_lax_dot_has_integer_inputs_in_dynamic_dot_general(
self, mock_dot_general, lhs_distribution, rhs_distribution):
lhs_params = QuantOps.ActHParams(
input_distribution=lhs_distribution,
bounds=2.0,
prec=4,
half_shift=False)
rhs_params = QuantOps.ActHParams(
input_distribution=rhs_distribution,
bounds=1.5,
prec=4,
half_shift=False)
lhs_act = self.lhs
if lhs_distribution == 'positive':
lhs_act = jnp.abs(lhs_act)
rhs_act = self.rhs
if rhs_distribution == 'positive':
rhs_act = jnp.abs(rhs_act)
quantization.quantized_dynamic_dot_general(
lhs_act=lhs_act,
rhs_act=rhs_act,
lhs_act_hparams=lhs_params,
rhs_act_hparams=rhs_params,
lhs_get_bounds_params=None,
rhs_get_bounds_params=None,
dot_dimension_numbers=(((1,), (0,)), ((), ())),
quant_type=QuantType.aqt)
lhs_inputs, rhs_inputs = mock_dot_general.call_args[0]
self.assert_is_integer_in_range(
lhs_inputs, prec=4, distribution=lhs_distribution)
self.assert_is_integer_in_range(
rhs_inputs, prec=4, distribution=rhs_distribution)
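# In the two no-quant tests below, a negative bound (-1.0) disables activation
# quantization. In the first test the single weight -4.99 equals max(|w|), so
# it lands exactly on the clip point and passes through weight fake-quant
# essentially unchanged; the results therefore match the unquantized products.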
def test_quantized_dot_no_quant(self):
act_hparams = QuantOps.ActHParams(
input_distribution='symmetric', bounds=-1.0, prec=4, half_shift=False)
weight_params = QuantOps.WeightParams(prec=4, axis=(0,), half_shift=False)
act = jnp.array([[-5.0]])
w = jnp.array([[-4.99]])
res = quantization.quantized_dot(
w=w,
act=act,
quant_type=quantization.QuantType.aqt,
weight_params=weight_params,
act_hparams=act_hparams,
get_bounds_params=None,
prefer_int8_to_int32_dot=True)
onp.testing.assert_allclose(res, act * w)
def test_quantized_dynamic_dot_general_no_quant(self):
act_hparams = QuantOps.ActHParams(
input_distribution='symmetric', bounds=-1.0, prec=4, half_shift=False)
lhs_act = jnp.array([[-5.0]])
rhs_act = jnp.array([[-4.99]])
res = quantization.quantized_dynamic_dot_general(
lhs_act=lhs_act,
rhs_act=rhs_act,
quant_type=quantization.QuantType.aqt,
lhs_act_hparams=act_hparams,
rhs_act_hparams=act_hparams,
lhs_get_bounds_params=None,
rhs_get_bounds_params=None,
dot_dimension_numbers=(((1,), (0,)), ((), ())))
onp.testing.assert_allclose(res, lhs_act * rhs_act)
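# The remaining test classes use mocks rather than numerics: they patch the
# QuantOps fake-quant constructors and check that quantized_dot and
# quantized_dynamic_dot_general invoke weight/input quantization with the
# expected hyperparameters, or skip the call entirely when a precision is None.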
class QuantizedDotFakeQuantTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.act = jnp.ones((3, 7))
self.weight = jnp.ones((7, 4))
@parameterized.named_parameters(
dict(testcase_name='no_quantization', weight_prec=None, act_prec=None),
dict(testcase_name='weight_only_quant', weight_prec=8., act_prec=None),
dict(testcase_name='act_only_quant', weight_prec=None, act_prec=4),
dict(testcase_name='both_quantized', weight_prec=4, act_prec=8),
dict(
testcase_name='both_quantized_fq_int',
weight_prec=4,
act_prec=8,
strategy=QuantType.fake_quant_with_int),
)
@mock.patch.object(QuantOps, 'create_weights_fake_quant')
@mock.patch.object(QuantOps, 'create_inputs_fake_quant')
def test_quantized_dot_general_should_call_weights_and_inputs_quantization(
self,
mock_act_fq,
mock_w_fq,
weight_prec,
act_prec,
strategy=QuantType.fake_quant):
mock_w_fq.side_effect = lambda inputs, **_: inputs
mock_act_fq.side_effect = lambda inputs, **_: inputs
weight_params = QuantOps.WeightParams(
prec=weight_prec, axis=None, half_shift=False)
act_hparams = QuantOps.ActHParams( # pylint: disable=g-long-ternary
bounds=6.,
prec=act_prec,
input_distribution=QuantOps.ActHParams.InputDistribution.symmetric,
half_shift=False) if act_prec else None
get_bounds_params = GetBounds.Params(
update_stats=False, update_bounds=False)
quantization.quantized_dot(
w=self.weight,
act=self.act,
quant_type=strategy,
weight_params=weight_params,
act_hparams=act_hparams,
get_bounds_params=get_bounds_params,
prefer_int8_to_int32_dot=True)
quantized_type = strategy.to_jax_type()
mock_w_fq.assert_called_with(
mock.ANY,
weight_params=weight_params,
quantized_type=quantized_type,
fake_dependency=mock.ANY)
if act_hparams:
mock_act_fq.assert_called_with(
mock.ANY, hparams=act_hparams, get_bounds_params=get_bounds_params)
else:
mock_act_fq.assert_not_called()
class QuantizedDynamicDotGeneralTest(parameterized.TestCase):
def setUp(self):
super(QuantizedDynamicDotGeneralTest, self).setUp()
self.lhs_act = jnp.ones((4, 2, 3, 7))
self.rhs_act = jnp.ones((3, 7, 5, 6))
self.dimension_numbers = (((2, 3), (0, 1)), ((), ()))
@parameterized.named_parameters(
dict(
testcase_name='no_quantization', lhs_act_prec=None,
rhs_act_prec=None),
dict(testcase_name='lhs_only_quant', lhs_act_prec=8., rhs_act_prec=None),
dict(testcase_name='rhs_only_quant', lhs_act_prec=None, rhs_act_prec=4),
dict(testcase_name='both_quantized', lhs_act_prec=4, rhs_act_prec=8),
dict(
testcase_name='both_quantized_fq_int',
lhs_act_prec=4,
rhs_act_prec=8,
strategy=QuantType.fake_quant_with_int),
)
@mock.patch.object(QuantOps, 'create_inputs_fake_quant')
def test_quantized_dynamic_dot_general_should_call_inputs_quantization(
self,
mock_act_fq,
lhs_act_prec,
rhs_act_prec,
strategy=QuantType.fake_quant):
mock_act_fq.side_effect = lambda inputs, hparams, get_bounds_params: inputs
# pylint: disable=g-long-ternary
lhs_act_hparams = QuantOps.ActHParams(
bounds=6.,
prec=lhs_act_prec,
input_distribution=QuantOps.ActHParams.InputDistribution.symmetric,
half_shift=False) if lhs_act_prec else None
rhs_act_hparams = QuantOps.ActHParams(
bounds=6.,
prec=rhs_act_prec,
input_distribution=QuantOps.ActHParams.InputDistribution.symmetric,
half_shift=False) if rhs_act_prec else None
# pylint: enable=g-long-ternary
get_bounds_params = GetBounds.Params(
update_stats=False, update_bounds=False)
quantization.quantized_dynamic_dot_general(
lhs_act=self.lhs_act,
rhs_act=self.rhs_act,
quant_type=strategy,
dot_dimension_numbers=self.dimension_numbers,
lhs_act_hparams=lhs_act_hparams,
lhs_get_bounds_params=get_bounds_params,
rhs_act_hparams=rhs_act_hparams,
rhs_get_bounds_params=get_bounds_params,
)
calls = []
for prec in [lhs_act_prec, rhs_act_prec]:
if prec is not None:
act_hparams = QuantOps.ActHParams(
bounds=6., prec=prec, input_distribution=mock.ANY, half_shift=False)
calls.append(
mock.call(
mock.ANY,
hparams=act_hparams,
get_bounds_params=get_bounds_params))
self.assertLen(calls, mock_act_fq.call_count)
mock_act_fq.assert_has_calls(calls, any_order=True)
class QuantizedSumTest(parameterized.TestCase):
@parameterized.parameters(
# This roughly corresponds to float32, so we expect no difference vs a
# float32 sum.
dict(exp_min=-2**7, exp_max=2**7, sig_bits=23, expected_result=100.001),
# In this low-precision case, adding .001 to the accumulator has no effect
# after quantization.
dict(exp_min=-2**3, exp_max=2**3, sig_bits=1, expected_result=100.0))
def test_quantized_sum(self, exp_min, exp_max, sig_bits, expected_result):
x = jnp.array([0.001, 100.0])
prec = QuantOps.FloatQuant.FloatPrec(exp_min, exp_max, sig_bits)
x_quantized_sum, x_grad = jax.value_and_grad(quantization.quantized_sum)(
x, axis=0, keepdims=False, prec=prec)
onp.testing.assert_allclose(
x_quantized_sum, onp.array(expected_result), rtol=1e-6)
# This tests that the gradient uses the straight-through estimator.
onp.testing.assert_equal(x_grad, onp.array([1.0, 1.0]))
@parameterized.parameters(
dict(keepdims=True, axis=(0, 1), expected_shape=(1, 1)),
dict(keepdims=False, axis=(0, 1), expected_shape=()),
dict(keepdims=True, axis=(0,), expected_shape=(1, 2)),
dict(keepdims=False, axis=(1,), expected_shape=(3,)))
def test_keepdims_and_axis(self, keepdims, axis, expected_shape):
x = jnp.arange(6).reshape((3, 2)).astype(jnp.float32)
prec = QuantOps.FloatQuant.FloatPrec(-2**7, 2**7, 23)
x_quantized_sum = quantization.quantized_sum(
x, keepdims=keepdims, axis=axis, prec=prec)
self.assertEqual(x_quantized_sum.shape, expected_shape)
if __name__ == '__main__':
absltest.main()
|
[
"absl.testing.absltest.main",
"jax.disable_jit",
"jax.numpy.log2",
"aqt.jax.primitives.round_and_clip_to_signed_int",
"jax.random.PRNGKey",
"aqt.jax.quantization.quantized_dot_general",
"aqt.jax.test_utils.configure_jax",
"aqt.jax.quantization.quantized_dot",
"unittest.mock.patch.object",
"aqt.jax.quantization.QuantOps.create_positive",
"aqt.jax.quantization.quantized_sum",
"jax.numpy.squeeze",
"jax.random.normal",
"jax.numpy.int32",
"aqt.jax.test_utils.assert_all_close_prec",
"aqt.jax.quantization.QuantOps.create_symmetric",
"itertools.product",
"numpy.testing.assert_allclose",
"aqt.jax.quantization.QuantOps.FloatQuant.FloatPrec",
"jax.numpy.abs",
"aqt.jax.get_bounds.GetBounds.Hyper",
"aqt.jax.quantization.QuantOps.WeightParams",
"numpy.testing.assert_array_equal",
"jax.numpy.zeros_like",
"aqt.jax.quantization.quantized_dynamic_dot_general",
"jax.numpy.ones",
"aqt.jax.quantization.QuantOps.ActHParams",
"jax.numpy.array",
"numpy.random.uniform",
"jax.numpy.arange",
"aqt.jax.fp_cast.downcast_sat_ftz",
"numpy.array",
"jax.value_and_grad",
"aqt.jax.get_bounds.GetBounds.Params",
"jax.ops.index_update",
"unittest.mock.call",
"aqt.jax.primitives.floor_and_clip_to_unsigned_int"
] |
[((1250, 1276), 'aqt.jax.test_utils.configure_jax', 'test_utils.configure_jax', ([], {}), '()\n', (1274, 1276), False, 'from aqt.jax import test_utils\n'), ((28850, 28891), 'unittest.mock.patch.object', 'mock.patch.object', (['jax.lax', '"""dot_general"""'], {}), "(jax.lax, 'dot_general')\n", (28867, 28891), False, 'from unittest import mock\n'), ((38225, 38266), 'unittest.mock.patch.object', 'mock.patch.object', (['jax.lax', '"""dot_general"""'], {}), "(jax.lax, 'dot_general')\n", (38242, 38266), False, 'from unittest import mock\n'), ((41382, 41438), 'unittest.mock.patch.object', 'mock.patch.object', (['QuantOps', '"""create_weights_fake_quant"""'], {}), "(QuantOps, 'create_weights_fake_quant')\n", (41399, 41438), False, 'from unittest import mock\n'), ((41442, 41497), 'unittest.mock.patch.object', 'mock.patch.object', (['QuantOps', '"""create_inputs_fake_quant"""'], {}), "(QuantOps, 'create_inputs_fake_quant')\n", (41459, 41497), False, 'from unittest import mock\n'), ((43719, 43774), 'unittest.mock.patch.object', 'mock.patch.object', (['QuantOps', '"""create_inputs_fake_quant"""'], {}), "(QuantOps, 'create_inputs_fake_quant')\n", (43736, 43774), False, 'from unittest import mock\n'), ((47139, 47154), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (47152, 47154), False, 'from absl.testing import absltest\n'), ((1830, 1847), 'jax.numpy.array', 'jnp.array', (['bounds'], {}), '(bounds)\n', (1839, 1847), True, 'import jax.numpy as jnp\n'), ((1860, 1910), 'aqt.jax.quantization.QuantOps.create_positive', 'QuantOps.create_positive', ([], {'bounds': 'bounds', 'prec': 'prec'}), '(bounds=bounds, prec=prec)\n', (1884, 1910), False, 'from aqt.jax.quantization import QuantOps\n'), ((1915, 1979), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['relu6._scale', '(2 ** prec / bounds)'], {}), '(relu6._scale, 2 ** prec / bounds)\n', (1945, 1979), True, 'import numpy as onp\n'), ((2449, 2466), 'jax.numpy.array', 'jnp.array', (['bounds'], {}), '(bounds)\n', (2458, 2466), True, 'import jax.numpy as jnp\n'), ((2484, 2553), 'aqt.jax.quantization.QuantOps.create_symmetric', 'QuantOps.create_symmetric', ([], {'bounds': 'bounds', 'prec': 'prec', 'half_shift': '(False)'}), '(bounds=bounds, prec=prec, half_shift=False)\n', (2509, 2553), False, 'from aqt.jax.quantization import QuantOps\n'), ((2567, 2652), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['act_signed._scale', '((2 ** (prec - 1) - 1) / bounds)'], {}), '(act_signed._scale, (2 ** (prec - 1) - 1) /\n bounds)\n', (2597, 2652), True, 'import numpy as onp\n'), ((4378, 4502), 'aqt.jax.fp_cast.downcast_sat_ftz', 'fp_cast.downcast_sat_ftz', (['weights_scaled', 'fp_quant.fp_spec.exp_min', 'fp_quant.fp_spec.exp_max', 'fp_quant.fp_spec.sig_bits'], {}), '(weights_scaled, fp_quant.fp_spec.exp_min, fp_quant\n .fp_spec.exp_max, fp_quant.fp_spec.sig_bits)\n', (4402, 4502), False, 'from aqt.jax import fp_cast\n'), ((4639, 4724), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['weights_quant_expected', 'weights_quant_calculated'], {}), '(weights_quant_expected, weights_quant_calculated\n )\n', (4669, 4724), True, 'import numpy as onp\n'), ((4881, 4933), 'jax.numpy.int32', 'jnp.int32', (['((1 << 23 - fp_quant.fp_spec.sig_bits) - 1)'], {}), '((1 << 23 - fp_quant.fp_spec.sig_bits) - 1)\n', (4890, 4933), True, 'import jax.numpy as jnp\n'), ((6539, 6647), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': 'act_distribution', 'bounds': 'bounds', 
'prec': 'fp_quant', 'half_shift': '(False)'}), '(input_distribution=act_distribution, bounds=bounds,\n prec=fp_quant, half_shift=False)\n', (6558, 6647), False, 'from aqt.jax.quantization import QuantOps\n'), ((7264, 7384), 'aqt.jax.fp_cast.downcast_sat_ftz', 'fp_cast.downcast_sat_ftz', (['act_scaled', 'fp_quant.fp_spec.exp_min', 'fp_quant.fp_spec.exp_max', 'fp_quant.fp_spec.sig_bits'], {}), '(act_scaled, fp_quant.fp_spec.exp_min, fp_quant.\n fp_spec.exp_max, fp_quant.fp_spec.sig_bits)\n', (7288, 7384), False, 'from aqt.jax import fp_cast\n'), ((7503, 7575), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['act_quant_expected', 'act_quant_calculated'], {}), '(act_quant_expected, act_quant_calculated)\n', (7533, 7575), True, 'import numpy as onp\n'), ((10844, 10891), 'aqt.jax.quantization.QuantOps.create_positive', 'QuantOps.create_positive', ([], {'bounds': '(6.0)', 'prec': 'prec'}), '(bounds=6.0, prec=prec)\n', (10868, 10891), False, 'from aqt.jax.quantization import QuantOps\n'), ((11027, 11112), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['quantized_activations[0:2]', '[0.0, 2 ** prec - 1]'], {}), '(quantized_activations[0:2], [0.0, 2 ** prec - 1]\n )\n', (11057, 11112), True, 'import numpy as onp\n'), ((11280, 11352), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['activations[0:2]', '[0.0, max_clipped_val]'], {}), '(activations[0:2], [0.0, max_clipped_val])\n', (11310, 11352), True, 'import numpy as onp\n'), ((12317, 12403), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['activations', '[[-6.0, -8.0], [6.0, 8.0], [0, 0.0]]'], {}), '(activations, [[-6.0, -8.0], [6.0, 8.0], [0, \n 0.0]])\n', (12347, 12403), True, 'import numpy as onp\n'), ((13722, 13797), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['(activations * act_scale)', 'scaled_activations'], {}), '(activations * act_scale, scaled_activations)\n', (13752, 13797), True, 'import numpy as onp\n'), ((14473, 14528), 'aqt.jax.quantization.QuantOps.create_positive', 'QuantOps.create_positive', ([], {'bounds': 'upper_bound', 'prec': 'prec'}), '(bounds=upper_bound, prec=prec)\n', (14497, 14528), False, 'from aqt.jax.quantization import QuantOps\n'), ((14650, 14737), 'aqt.jax.quantization.QuantOps.create_positive', 'QuantOps.create_positive', ([], {'bounds': '(upper_bound * act_scale[jnp.newaxis, :])', 'prec': 'prec'}), '(bounds=upper_bound * act_scale[jnp.newaxis, :],\n prec=prec)\n', (14674, 14737), False, 'from aqt.jax.quantization import QuantOps\n'), ((14861, 14956), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['(activations * act_scale[jnp.newaxis, :])', 'scaled_activations'], {}), '(activations * act_scale[jnp.newaxis, :],\n scaled_activations)\n', (14891, 14956), True, 'import numpy as onp\n'), ((15777, 15842), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['activations', 'rescaled_activations'], {}), '(activations, rescaled_activations)\n', (15807, 15842), True, 'import numpy as onp\n'), ((16671, 16736), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['activations', 'rescaled_activations'], {}), '(activations, rescaled_activations)\n', (16701, 16736), True, 'import numpy as onp\n'), ((17261, 17331), 'aqt.jax.test_utils.assert_all_close_prec', 'test_utils.assert_all_close_prec', (['weights', 'rescaled_weights'], {'prec': 'prec'}), '(weights, rescaled_weights, prec=prec)\n', (17293, 17331), False, 'from aqt.jax import 
test_utils\n'), ((17819, 17877), 'jax.ops.index_update', 'jax.ops.index_update', (['weights', 'jax.ops.index[0, :]', 'maxval'], {}), '(weights, jax.ops.index[0, :], maxval)\n', (17839, 17877), False, 'import jax\n'), ((18125, 18202), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['quantized_weights[0]', '(2 ** (prec - 1.0) - 1.0)'], {}), '(quantized_weights[0], 2 ** (prec - 1.0) - 1.0)\n', (18155, 18202), True, 'import numpy as onp\n'), ((18340, 18397), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['weights', 'rescaled_weights'], {}), '(weights, rescaled_weights)\n', (18370, 18397), True, 'import numpy as onp\n'), ((19185, 19255), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['(weights * weight_scale)', 'scaled_weights'], {}), '(weights * weight_scale, scaled_weights)\n', (19215, 19255), True, 'import numpy as onp\n'), ((20167, 20237), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['(weights * weight_scale)', 'scaled_weights'], {}), '(weights * weight_scale, scaled_weights)\n', (20197, 20237), True, 'import numpy as onp\n'), ((21119, 21155), 'jax.numpy.array', 'jnp.array', (['[[0.3, 1.4], [-5.2, 4.0]]'], {}), '([[0.3, 1.4], [-5.2, 4.0]])\n', (21128, 21155), True, 'import jax.numpy as jnp\n'), ((21168, 21369), 'aqt.jax.get_bounds.GetBounds.Hyper', 'get_bounds.GetBounds.Hyper', ([], {'initial_bound': '(-1)', 'stddev_coeff': '(1)', 'absdev_coeff': '(0)', 'mix_coeff': '(1)', 'reset_stats': '(True)', 'ema_coeff': 'None', 'use_cams': '(False)', 'granularity': 'quant_config.QuantGranularity.per_tensor'}), '(initial_bound=-1, stddev_coeff=1, absdev_coeff=0,\n mix_coeff=1, reset_stats=True, ema_coeff=None, use_cams=False,\n granularity=quant_config.QuantGranularity.per_tensor)\n', (21194, 21369), False, 'from aqt.jax import get_bounds\n'), ((21441, 21554), 'aqt.jax.quantization.QuantOps.ActHParams', 'quantization.QuantOps.ActHParams', ([], {'input_distribution': '"""symmetric"""', 'bounds': 'bounds', 'prec': 'prec', 'half_shift': '(False)'}), "(input_distribution='symmetric', bounds=\n bounds, prec=prec, half_shift=False)\n", (21473, 21554), False, 'from aqt.jax import quantization\n'), ((22346, 22409), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['inputs', 'inputs_after_fake_quant'], {}), '(inputs, inputs_after_fake_quant)\n', (22376, 22409), True, 'import numpy as onp\n'), ((24220, 24295), 'aqt.jax.quantization.QuantOps.WeightParams', 'QuantOps.WeightParams', ([], {'prec': 'weight_prec', 'axis': 'weight_axis', 'half_shift': '(False)'}), '(prec=weight_prec, axis=weight_axis, half_shift=False)\n', (24241, 24295), False, 'from aqt.jax.quantization import QuantOps\n'), ((24959, 25083), 'numpy.testing.assert_allclose', 'onp.testing.assert_allclose', (['aqt_result', 'fakequant_result'], {'rtol': '(0.01)', 'err_msg': '"""AQT and fakequant significantly disagree"""'}), "(aqt_result, fakequant_result, rtol=0.01,\n err_msg='AQT and fakequant significantly disagree')\n", (24986, 25083), True, 'import numpy as onp\n'), ((26653, 26728), 'aqt.jax.quantization.QuantOps.WeightParams', 'QuantOps.WeightParams', ([], {'prec': 'weight_prec', 'axis': 'weight_axis', 'half_shift': '(False)'}), '(prec=weight_prec, axis=weight_axis, half_shift=False)\n', (26674, 26728), False, 'from aqt.jax.quantization import QuantOps\n'), ((27621, 27745), 'numpy.testing.assert_allclose', 'onp.testing.assert_allclose', (['aqt_result', 'fakequant_result'], {'rtol': '(0.01)', 'err_msg': '"""AQT and fakequant 
significantly disagree"""'}), "(aqt_result, fakequant_result, rtol=0.01,\n err_msg='AQT and fakequant significantly disagree')\n", (27648, 27745), True, 'import numpy as onp\n'), ((28226, 28381), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['x', 'x_clipped', 'f"""Array cannot be losslessly cast to integer with precision {prec} and {distribution} distribution."""'], {}), "(x, x_clipped,\n f'Array cannot be losslessly cast to integer with precision {prec} and {distribution} distribution.'\n )\n", (28256, 28381), True, 'import numpy as onp\n'), ((29207, 29268), 'aqt.jax.quantization.QuantOps.WeightParams', 'QuantOps.WeightParams', ([], {'prec': 'prec', 'axis': '(0,)', 'half_shift': '(False)'}), '(prec=prec, axis=(0,), half_shift=False)\n', (29228, 29268), False, 'from aqt.jax.quantization import QuantOps\n'), ((31378, 31436), 'aqt.jax.quantization.QuantOps.WeightParams', 'QuantOps.WeightParams', ([], {'prec': '(4)', 'axis': '(0,)', 'half_shift': '(False)'}), '(prec=4, axis=(0,), half_shift=False)\n', (31399, 31436), False, 'from aqt.jax.quantization import QuantOps\n'), ((31693, 31872), 'aqt.jax.quantization.quantized_dot', 'quantization.quantized_dot', ([], {'w': 'w', 'act': 'act', 'weight_params': 'weight_params', 'act_hparams': 'act_params', 'get_bounds_params': 'None', 'quant_type': 'quant_type', 'prefer_int8_to_int32_dot': '(True)'}), '(w=w, act=act, weight_params=weight_params,\n act_hparams=act_params, get_bounds_params=None, quant_type=quant_type,\n prefer_int8_to_int32_dot=True)\n', (31719, 31872), False, 'from aqt.jax import quantization\n'), ((31085, 31212), 'itertools.product', 'itertools.product', (['(jnp.bfloat16, jnp.float32)', '(4, None)', '(quantization.QuantType.aqt, quantization.QuantType.fake_quant)'], {}), '((jnp.bfloat16, jnp.float32), (4, None), (quantization.\n QuantType.aqt, quantization.QuantType.fake_quant))\n', (31102, 31212), False, 'import itertools\n'), ((32197, 32255), 'aqt.jax.quantization.QuantOps.WeightParams', 'QuantOps.WeightParams', ([], {'prec': '(4)', 'axis': '(0,)', 'half_shift': '(False)'}), '(prec=4, axis=(0,), half_shift=False)\n', (32218, 32255), False, 'from aqt.jax.quantization import QuantOps\n'), ((33087, 33188), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': '"""symmetric"""', 'bounds': '(2.0)', 'prec': 'act_prec', 'half_shift': '(False)'}), "(input_distribution='symmetric', bounds=2.0, prec=\n act_prec, half_shift=False)\n", (33106, 33188), False, 'from aqt.jax.quantization import QuantOps\n'), ((33234, 33335), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': '"""symmetric"""', 'bounds': '(1.5)', 'prec': 'act_prec', 'half_shift': '(False)'}), "(input_distribution='symmetric', bounds=1.5, prec=\n act_prec, half_shift=False)\n", (33253, 33335), False, 'from aqt.jax.quantization import QuantOps\n'), ((33463, 33734), 'aqt.jax.quantization.quantized_dynamic_dot_general', 'quantization.quantized_dynamic_dot_general', ([], {'lhs_act': 'lhs_act', 'rhs_act': 'rhs_act', 'lhs_act_hparams': 'lhs_params', 'rhs_act_hparams': 'rhs_params', 'lhs_get_bounds_params': 'None', 'rhs_get_bounds_params': 'None', 'dot_dimension_numbers': '(((1,), (0,)), ((), ()))', 'quant_type': 'quant_type'}), '(lhs_act=lhs_act, rhs_act=rhs_act,\n lhs_act_hparams=lhs_params, rhs_act_hparams=rhs_params,\n lhs_get_bounds_params=None, rhs_get_bounds_params=None,\n dot_dimension_numbers=(((1,), (0,)), ((), ())), quant_type=quant_type)\n', (33505, 33734), False, 'from 
aqt.jax import quantization\n'), ((32817, 32944), 'itertools.product', 'itertools.product', (['(jnp.bfloat16, jnp.float32)', '(4, None)', '(quantization.QuantType.aqt, quantization.QuantType.fake_quant)'], {}), '((jnp.bfloat16, jnp.float32), (4, None), (quantization.\n QuantType.aqt, quantization.QuantType.fake_quant))\n', (32834, 32944), False, 'import itertools\n'), ((33926, 34019), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': '"""symmetric"""', 'bounds': '(2.0)', 'prec': '(4)', 'half_shift': '(False)'}), "(input_distribution='symmetric', bounds=2.0, prec=4,\n half_shift=False)\n", (33945, 34019), False, 'from aqt.jax.quantization import QuantOps\n'), ((34042, 34135), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': '"""symmetric"""', 'bounds': '(1.5)', 'prec': '(4)', 'half_shift': '(False)'}), "(input_distribution='symmetric', bounds=1.5, prec=4,\n half_shift=False)\n", (34061, 34135), False, 'from aqt.jax.quantization import QuantOps\n'), ((34922, 35030), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': '"""symmetric"""', 'bounds': 'lhs_bounds', 'prec': 'lhs_prec', 'half_shift': '(False)'}), "(input_distribution='symmetric', bounds=lhs_bounds, prec\n =lhs_prec, half_shift=False)\n", (34941, 35030), False, 'from aqt.jax.quantization import QuantOps\n'), ((35076, 35184), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': '"""symmetric"""', 'bounds': 'rhs_bounds', 'prec': 'rhs_prec', 'half_shift': '(False)'}), "(input_distribution='symmetric', bounds=rhs_bounds, prec\n =rhs_prec, half_shift=False)\n", (35095, 35184), False, 'from aqt.jax.quantization import QuantOps\n'), ((35724, 35848), 'numpy.testing.assert_allclose', 'onp.testing.assert_allclose', (['aqt_result', 'fakequant_result'], {'rtol': '(0.01)', 'err_msg': '"""AQT and fakequant significantly disagree"""'}), "(aqt_result, fakequant_result, rtol=0.01,\n err_msg='AQT and fakequant significantly disagree')\n", (35751, 35848), True, 'import numpy as onp\n'), ((37504, 37522), 'jax.numpy.array', 'jnp.array', (['[[2.0]]'], {}), '([[2.0]])\n', (37513, 37522), True, 'import jax.numpy as jnp\n'), ((37533, 37551), 'jax.numpy.array', 'jnp.array', (['[[3.0]]'], {}), '([[3.0]])\n', (37542, 37551), True, 'import jax.numpy as jnp\n'), ((37857, 37907), 'numpy.testing.assert_allclose', 'onp.testing.assert_allclose', (['out', '[[expected_out]]'], {}), '(out, [[expected_out]])\n', (37884, 37907), True, 'import numpy as onp\n'), ((38413, 38511), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': 'lhs_distribution', 'bounds': '(2.0)', 'prec': '(4)', 'half_shift': '(False)'}), '(input_distribution=lhs_distribution, bounds=2.0, prec=4,\n half_shift=False)\n', (38432, 38511), False, 'from aqt.jax.quantization import QuantOps\n'), ((38558, 38656), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': 'rhs_distribution', 'bounds': '(1.5)', 'prec': '(4)', 'half_shift': '(False)'}), '(input_distribution=rhs_distribution, bounds=1.5, prec=4,\n half_shift=False)\n', (38577, 38656), False, 'from aqt.jax.quantization import QuantOps\n'), ((38880, 39154), 'aqt.jax.quantization.quantized_dynamic_dot_general', 'quantization.quantized_dynamic_dot_general', ([], {'lhs_act': 'lhs_act', 'rhs_act': 'rhs_act', 'lhs_act_hparams': 'lhs_params', 'rhs_act_hparams': 'rhs_params', 'lhs_get_bounds_params': 
'None', 'rhs_get_bounds_params': 'None', 'dot_dimension_numbers': '(((1,), (0,)), ((), ()))', 'quant_type': 'QuantType.aqt'}), '(lhs_act=lhs_act, rhs_act=rhs_act,\n lhs_act_hparams=lhs_params, rhs_act_hparams=rhs_params,\n lhs_get_bounds_params=None, rhs_get_bounds_params=None,\n dot_dimension_numbers=(((1,), (0,)), ((), ())), quant_type=QuantType.aqt)\n', (38922, 39154), False, 'from aqt.jax import quantization\n'), ((39519, 39613), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': '"""symmetric"""', 'bounds': '(-1.0)', 'prec': '(4)', 'half_shift': '(False)'}), "(input_distribution='symmetric', bounds=-1.0, prec=4,\n half_shift=False)\n", (39538, 39613), False, 'from aqt.jax.quantization import QuantOps\n'), ((39639, 39697), 'aqt.jax.quantization.QuantOps.WeightParams', 'QuantOps.WeightParams', ([], {'prec': '(4)', 'axis': '(0,)', 'half_shift': '(False)'}), '(prec=4, axis=(0,), half_shift=False)\n', (39660, 39697), False, 'from aqt.jax.quantization import QuantOps\n'), ((39708, 39727), 'jax.numpy.array', 'jnp.array', (['[[-5.0]]'], {}), '([[-5.0]])\n', (39717, 39727), True, 'import jax.numpy as jnp\n'), ((39736, 39756), 'jax.numpy.array', 'jnp.array', (['[[-4.99]]'], {}), '([[-4.99]])\n', (39745, 39756), True, 'import jax.numpy as jnp\n'), ((39767, 39964), 'aqt.jax.quantization.quantized_dot', 'quantization.quantized_dot', ([], {'w': 'w', 'act': 'act', 'quant_type': 'quantization.QuantType.aqt', 'weight_params': 'weight_params', 'act_hparams': 'act_hparams', 'get_bounds_params': 'None', 'prefer_int8_to_int32_dot': '(True)'}), '(w=w, act=act, quant_type=quantization.QuantType.\n aqt, weight_params=weight_params, act_hparams=act_hparams,\n get_bounds_params=None, prefer_int8_to_int32_dot=True)\n', (39793, 39964), False, 'from aqt.jax import quantization\n'), ((40017, 40058), 'numpy.testing.assert_allclose', 'onp.testing.assert_allclose', (['res', '(act * w)'], {}), '(res, act * w)\n', (40044, 40058), True, 'import numpy as onp\n'), ((40135, 40229), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': '"""symmetric"""', 'bounds': '(-1.0)', 'prec': '(4)', 'half_shift': '(False)'}), "(input_distribution='symmetric', bounds=-1.0, prec=4,\n half_shift=False)\n", (40154, 40229), False, 'from aqt.jax.quantization import QuantOps\n'), ((40249, 40268), 'jax.numpy.array', 'jnp.array', (['[[-5.0]]'], {}), '([[-5.0]])\n', (40258, 40268), True, 'import jax.numpy as jnp\n'), ((40283, 40303), 'jax.numpy.array', 'jnp.array', (['[[-4.99]]'], {}), '([[-4.99]])\n', (40292, 40303), True, 'import jax.numpy as jnp\n'), ((40314, 40603), 'aqt.jax.quantization.quantized_dynamic_dot_general', 'quantization.quantized_dynamic_dot_general', ([], {'lhs_act': 'lhs_act', 'rhs_act': 'rhs_act', 'quant_type': 'quantization.QuantType.aqt', 'lhs_act_hparams': 'act_hparams', 'rhs_act_hparams': 'act_hparams', 'lhs_get_bounds_params': 'None', 'rhs_get_bounds_params': 'None', 'dot_dimension_numbers': '(((1,), (0,)), ((), ()))'}), '(lhs_act=lhs_act, rhs_act=rhs_act,\n quant_type=quantization.QuantType.aqt, lhs_act_hparams=act_hparams,\n rhs_act_hparams=act_hparams, lhs_get_bounds_params=None,\n rhs_get_bounds_params=None, dot_dimension_numbers=(((1,), (0,)), ((), ())))\n', (40356, 40603), False, 'from aqt.jax import quantization\n'), ((40661, 40712), 'numpy.testing.assert_allclose', 'onp.testing.assert_allclose', (['res', '(lhs_act * rhs_act)'], {}), '(res, lhs_act * rhs_act)\n', (40688, 40712), True, 'import numpy as onp\n'), ((40827, 40843), 
'jax.numpy.ones', 'jnp.ones', (['(3, 7)'], {}), '((3, 7))\n', (40835, 40843), True, 'import jax.numpy as jnp\n'), ((40862, 40878), 'jax.numpy.ones', 'jnp.ones', (['(7, 4)'], {}), '((7, 4))\n', (40870, 40878), True, 'import jax.numpy as jnp\n'), ((41830, 41898), 'aqt.jax.quantization.QuantOps.WeightParams', 'QuantOps.WeightParams', ([], {'prec': 'weight_prec', 'axis': 'None', 'half_shift': '(False)'}), '(prec=weight_prec, axis=None, half_shift=False)\n', (41851, 41898), False, 'from aqt.jax.quantization import QuantOps\n'), ((42171, 42228), 'aqt.jax.get_bounds.GetBounds.Params', 'GetBounds.Params', ([], {'update_stats': '(False)', 'update_bounds': '(False)'}), '(update_stats=False, update_bounds=False)\n', (42187, 42228), False, 'from aqt.jax.get_bounds import GetBounds\n'), ((42243, 42450), 'aqt.jax.quantization.quantized_dot', 'quantization.quantized_dot', ([], {'w': 'self.weight', 'act': 'self.act', 'quant_type': 'strategy', 'weight_params': 'weight_params', 'act_hparams': 'act_hparams', 'get_bounds_params': 'get_bounds_params', 'prefer_int8_to_int32_dot': '(True)'}), '(w=self.weight, act=self.act, quant_type=strategy,\n weight_params=weight_params, act_hparams=act_hparams, get_bounds_params\n =get_bounds_params, prefer_int8_to_int32_dot=True)\n', (42269, 42450), False, 'from aqt.jax import quantization\n'), ((43050, 43072), 'jax.numpy.ones', 'jnp.ones', (['(4, 2, 3, 7)'], {}), '((4, 2, 3, 7))\n', (43058, 43072), True, 'import jax.numpy as jnp\n'), ((43092, 43114), 'jax.numpy.ones', 'jnp.ones', (['(3, 7, 5, 6)'], {}), '((3, 7, 5, 6))\n', (43100, 43114), True, 'import jax.numpy as jnp\n'), ((44571, 44628), 'aqt.jax.get_bounds.GetBounds.Params', 'GetBounds.Params', ([], {'update_stats': '(False)', 'update_bounds': '(False)'}), '(update_stats=False, update_bounds=False)\n', (44587, 44628), False, 'from aqt.jax.get_bounds import GetBounds\n'), ((44643, 44963), 'aqt.jax.quantization.quantized_dynamic_dot_general', 'quantization.quantized_dynamic_dot_general', ([], {'lhs_act': 'self.lhs_act', 'rhs_act': 'self.rhs_act', 'quant_type': 'strategy', 'dot_dimension_numbers': 'self.dimension_numbers', 'lhs_act_hparams': 'lhs_act_hparams', 'lhs_get_bounds_params': 'get_bounds_params', 'rhs_act_hparams': 'rhs_act_hparams', 'rhs_get_bounds_params': 'get_bounds_params'}), '(lhs_act=self.lhs_act, rhs_act=\n self.rhs_act, quant_type=strategy, dot_dimension_numbers=self.\n dimension_numbers, lhs_act_hparams=lhs_act_hparams,\n lhs_get_bounds_params=get_bounds_params, rhs_act_hparams=\n rhs_act_hparams, rhs_get_bounds_params=get_bounds_params)\n', (44685, 44963), False, 'from aqt.jax import quantization\n'), ((46037, 46062), 'jax.numpy.array', 'jnp.array', (['[0.001, 100.0]'], {}), '([0.001, 100.0])\n', (46046, 46062), True, 'import jax.numpy as jnp\n'), ((46074, 46131), 'aqt.jax.quantization.QuantOps.FloatQuant.FloatPrec', 'QuantOps.FloatQuant.FloatPrec', (['exp_min', 'exp_max', 'sig_bits'], {}), '(exp_min, exp_max, sig_bits)\n', (46103, 46131), False, 'from aqt.jax.quantization import QuantOps\n'), ((46899, 46949), 'aqt.jax.quantization.QuantOps.FloatQuant.FloatPrec', 'QuantOps.FloatQuant.FloatPrec', (['(-2 ** 7)', '(2 ** 7)', '(23)'], {}), '(-2 ** 7, 2 ** 7, 23)\n', (46928, 46949), False, 'from aqt.jax.quantization import QuantOps\n'), ((46968, 47038), 'aqt.jax.quantization.quantized_sum', 'quantization.quantized_sum', (['x'], {'keepdims': 'keepdims', 'axis': 'axis', 'prec': 'prec'}), '(x, keepdims=keepdims, axis=axis, prec=prec)\n', (46994, 47038), False, 'from aqt.jax import quantization\n'), ((4072, 
4108), 'jax.numpy.squeeze', 'jnp.squeeze', (['weights_quant_op._scale'], {}), '(weights_quant_op._scale)\n', (4083, 4108), True, 'import jax.numpy as jnp\n'), ((5041, 5064), 'jax.numpy.zeros_like', 'jnp.zeros_like', (['weights'], {}), '(weights)\n', (5055, 5064), True, 'import jax.numpy as jnp\n'), ((6219, 6422), 'aqt.jax.get_bounds.GetBounds.Hyper', 'get_bounds.GetBounds.Hyper', ([], {'initial_bound': '(6.0)', 'stddev_coeff': '(1)', 'absdev_coeff': '(0)', 'mix_coeff': '(1)', 'reset_stats': '(True)', 'ema_coeff': 'None', 'use_cams': '(False)', 'granularity': 'quant_config.QuantGranularity.per_tensor'}), '(initial_bound=6.0, stddev_coeff=1, absdev_coeff=\n 0, mix_coeff=1, reset_stats=True, ema_coeff=None, use_cams=False,\n granularity=quant_config.QuantGranularity.per_tensor)\n', (6245, 6422), False, 'from aqt.jax import get_bounds\n'), ((7072, 7093), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (7090, 7093), False, 'import jax\n'), ((8918, 8951), 'jax.numpy.squeeze', 'jnp.squeeze', (['weights_quant._scale'], {}), '(weights_quant._scale)\n', (8929, 8951), True, 'import jax.numpy as jnp\n'), ((12823, 12840), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (12837, 12840), False, 'from jax import random\n'), ((14272, 14289), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (14286, 14289), False, 'from jax import random\n'), ((14318, 14331), 'jax.numpy.arange', 'jnp.arange', (['(4)'], {}), '(4)\n', (14328, 14331), True, 'import jax.numpy as jnp\n'), ((14422, 14451), 'jax.numpy.ones', 'jnp.ones', (['(3, 4)', 'jnp.float32'], {}), '((3, 4), jnp.float32)\n', (14430, 14451), True, 'import jax.numpy as jnp\n'), ((15338, 15355), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (15352, 15355), False, 'from jax import random\n'), ((16235, 16252), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (16249, 16252), False, 'from jax import random\n'), ((17757, 17774), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (17771, 17774), False, 'from jax import random\n'), ((18736, 18753), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (18750, 18753), False, 'from jax import random\n'), ((19669, 19686), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (19683, 19686), False, 'from jax import random\n'), ((22208, 22229), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (22226, 22229), False, 'import jax\n'), ((22601, 22622), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (22619, 22622), False, 'import jax\n'), ((22642, 22673), 'jax.random.normal', 'jax.random.normal', (['key1', '(2, 4)'], {}), '(key1, (2, 4))\n', (22659, 22673), False, 'import jax\n'), ((24588, 24780), 'aqt.jax.quantization.quantized_dot', 'quantization.quantized_dot', ([], {'w': 'self.rhs', 'act': 'self.lhs', 'weight_params': 'weight_params', 'act_hparams': 'act_params', 'get_bounds_params': 'None', 'quant_type': 'quant_type', 'prefer_int8_to_int32_dot': '(True)'}), '(w=self.rhs, act=self.lhs, weight_params=\n weight_params, act_hparams=act_params, get_bounds_params=None,\n quant_type=quant_type, prefer_int8_to_int32_dot=True)\n', (24614, 24780), False, 'from aqt.jax import quantization\n'), ((27115, 27386), 'aqt.jax.quantization.quantized_dot_general', 'quantization.quantized_dot_general', ([], {'w': 'self.rhs', 'act': 'lhs_ndims_3', 'weight_params': 'weight_params', 'act_hparams': 'act_params', 'get_bounds_params': 'None', 'quant_type': 'quant_type', 'dimension_numbers': 
'(((lhs_ndims_3.ndim - 1,), (0,)), ((), ()))', 'prefer_int8_to_int32_dot': '(True)'}), '(w=self.rhs, act=lhs_ndims_3,\n weight_params=weight_params, act_hparams=act_params, get_bounds_params=\n None, quant_type=quant_type, dimension_numbers=(((lhs_ndims_3.ndim - 1,\n ), (0,)), ((), ())), prefer_int8_to_int32_dot=True)\n', (27149, 27386), False, 'from aqt.jax import quantization\n'), ((27896, 27986), 'aqt.jax.primitives.round_and_clip_to_signed_int', 'primitives.round_and_clip_to_signed_int', (['x'], {'prec': 'prec', 'dtype': 'x.dtype', 'half_shift': '(False)'}), '(x, prec=prec, dtype=x.dtype,\n half_shift=False)\n', (27935, 27986), False, 'from aqt.jax import primitives\n'), ((29516, 29528), 'jax.numpy.abs', 'jnp.abs', (['act'], {}), '(act)\n', (29523, 29528), True, 'import jax.numpy as jnp\n'), ((30150, 30167), 'jax.disable_jit', 'jax.disable_jit', ([], {}), '()\n', (30165, 30167), False, 'import jax\n'), ((30175, 30385), 'aqt.jax.quantization.quantized_dot', 'quantization.quantized_dot', ([], {'w': 'self.rhs', 'act': 'act', 'weight_params': 'weight_params', 'act_hparams': 'act_params', 'get_bounds_params': 'None', 'quant_type': 'QuantType.aqt', 'prefer_int8_to_int32_dot': 'prefer_int8_to_int32_dot'}), '(w=self.rhs, act=act, weight_params=weight_params,\n act_hparams=act_params, get_bounds_params=None, quant_type=QuantType.\n aqt, prefer_int8_to_int32_dot=prefer_int8_to_int32_dot)\n', (30201, 30385), False, 'from aqt.jax import quantization\n'), ((32538, 32717), 'aqt.jax.quantization.quantized_dot', 'quantization.quantized_dot', ([], {'w': 'w', 'act': 'act', 'weight_params': 'weight_params', 'act_hparams': 'act_params', 'get_bounds_params': 'None', 'quant_type': 'quant_type', 'prefer_int8_to_int32_dot': '(True)'}), '(w=w, act=act, weight_params=weight_params,\n act_hparams=act_params, get_bounds_params=None, quant_type=quant_type,\n prefer_int8_to_int32_dot=True)\n', (32564, 32717), False, 'from aqt.jax import quantization\n'), ((34273, 34547), 'aqt.jax.quantization.quantized_dynamic_dot_general', 'quantization.quantized_dynamic_dot_general', ([], {'lhs_act': 'lhs_act', 'rhs_act': 'rhs_act', 'lhs_act_hparams': 'lhs_params', 'rhs_act_hparams': 'rhs_params', 'lhs_get_bounds_params': 'None', 'rhs_get_bounds_params': 'None', 'dot_dimension_numbers': '(((1,), (0,)), ((), ()))', 'quant_type': 'QuantType.aqt'}), '(lhs_act=lhs_act, rhs_act=rhs_act,\n lhs_act_hparams=lhs_params, rhs_act_hparams=rhs_params,\n lhs_get_bounds_params=None, rhs_get_bounds_params=None,\n dot_dimension_numbers=(((1,), (0,)), ((), ())), quant_type=QuantType.aqt)\n', (34315, 34547), False, 'from aqt.jax import quantization\n'), ((35265, 35539), 'aqt.jax.quantization.quantized_dynamic_dot_general', 'quantization.quantized_dynamic_dot_general', ([], {'lhs_act': 'self.lhs', 'rhs_act': 'self.rhs', 'lhs_act_hparams': 'lhs_params', 'rhs_act_hparams': 'rhs_params', 'lhs_get_bounds_params': 'None', 'rhs_get_bounds_params': 'None', 'dot_dimension_numbers': '(((1,), (0,)), ((), ()))', 'quant_type': 'quant_type'}), '(lhs_act=self.lhs, rhs_act=self.\n rhs, lhs_act_hparams=lhs_params, rhs_act_hparams=rhs_params,\n lhs_get_bounds_params=None, rhs_get_bounds_params=None,\n dot_dimension_numbers=(((1,), (0,)), ((), ())), quant_type=quant_type)\n', (35307, 35539), False, 'from aqt.jax import quantization\n'), ((37602, 37623), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (37620, 37623), False, 'import jax\n'), ((38764, 38780), 'jax.numpy.abs', 'jnp.abs', (['lhs_act'], {}), '(lhs_act)\n', (38771, 38780), True, 'import 
jax.numpy as jnp\n'), ((38859, 38875), 'jax.numpy.abs', 'jnp.abs', (['rhs_act'], {}), '(rhs_act)\n', (38866, 38875), True, 'import jax.numpy as jnp\n'), ((41926, 42063), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'bounds': '(6.0)', 'prec': 'act_prec', 'input_distribution': 'QuantOps.ActHParams.InputDistribution.symmetric', 'half_shift': '(False)'}), '(bounds=6.0, prec=act_prec, input_distribution=QuantOps.\n ActHParams.InputDistribution.symmetric, half_shift=False)\n', (41945, 42063), False, 'from aqt.jax.quantization import QuantOps\n'), ((44098, 44239), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'bounds': '(6.0)', 'prec': 'lhs_act_prec', 'input_distribution': 'QuantOps.ActHParams.InputDistribution.symmetric', 'half_shift': '(False)'}), '(bounds=6.0, prec=lhs_act_prec, input_distribution=\n QuantOps.ActHParams.InputDistribution.symmetric, half_shift=False)\n', (44117, 44239), False, 'from aqt.jax.quantization import QuantOps\n'), ((44315, 44456), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'bounds': '(6.0)', 'prec': 'rhs_act_prec', 'input_distribution': 'QuantOps.ActHParams.InputDistribution.symmetric', 'half_shift': '(False)'}), '(bounds=6.0, prec=rhs_act_prec, input_distribution=\n QuantOps.ActHParams.InputDistribution.symmetric, half_shift=False)\n', (44334, 44456), False, 'from aqt.jax.quantization import QuantOps\n'), ((46162, 46208), 'jax.value_and_grad', 'jax.value_and_grad', (['quantization.quantized_sum'], {}), '(quantization.quantized_sum)\n', (46180, 46208), False, 'import jax\n'), ((46314, 46340), 'numpy.array', 'onp.array', (['expected_result'], {}), '(expected_result)\n', (46323, 46340), True, 'import numpy as onp\n'), ((46465, 46486), 'numpy.array', 'onp.array', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (46474, 46486), True, 'import numpy as onp\n'), ((3706, 3758), 'numpy.random.uniform', 'onp.random.uniform', (['*weight_range'], {'size': 'weight_shape'}), '(*weight_range, size=weight_shape)\n', (3724, 3758), True, 'import numpy as onp\n'), ((3901, 3966), 'aqt.jax.quantization.QuantOps.WeightParams', 'QuantOps.WeightParams', ([], {'prec': 'fp_quant', 'axis': 'axis', 'half_shift': '(False)'}), '(prec=fp_quant, axis=axis, half_shift=False)\n', (3922, 3966), False, 'from aqt.jax.quantization import QuantOps\n'), ((6056, 6122), 'aqt.jax.quantization.QuantOps.FloatQuant.FloatPrec', 'QuantOps.FloatQuant.FloatPrec', ([], {'exp_min': '(-15)', 'exp_max': '(15)', 'sig_bits': '(2)'}), '(exp_min=-15, exp_max=15, sig_bits=2)\n', (6085, 6122), False, 'from aqt.jax.quantization import QuantOps\n'), ((8523, 8594), 'numpy.random.uniform', 'onp.random.uniform', (['weight_range[0]', 'weight_range[1]'], {'size': 'weight_shape'}), '(weight_range[0], weight_range[1], size=weight_shape)\n', (8541, 8594), True, 'import numpy as onp\n'), ((8751, 8812), 'aqt.jax.quantization.QuantOps.WeightParams', 'QuantOps.WeightParams', ([], {'prec': 'prec', 'axis': 'axis', 'half_shift': '(False)'}), '(prec=prec, axis=axis, half_shift=False)\n', (8772, 8812), False, 'from aqt.jax.quantization import QuantOps\n'), ((9660, 9768), 'aqt.jax.quantization.QuantOps.WeightParams', 'QuantOps.WeightParams', ([], {'prec': '(8.0)', 'axis': 'axis', 'expected_scale_shape': 'expected_scale_shape', 'half_shift': '(False)'}), '(prec=8.0, axis=axis, expected_scale_shape=\n expected_scale_shape, half_shift=False)\n', (9681, 9768), False, 'from aqt.jax.quantization import QuantOps\n'), ((10104, 10239), 'aqt.jax.quantization.QuantOps.ActHParams', 
'QuantOps.ActHParams', ([], {'input_distribution': 'QuantOps.ActHParams.InputDistribution.symmetric', 'bounds': 'bounds', 'prec': '(8.0)', 'half_shift': '(False)'}), '(input_distribution=QuantOps.ActHParams.\n InputDistribution.symmetric, bounds=bounds, prec=8.0, half_shift=False)\n', (10123, 10239), False, 'from aqt.jax.quantization import QuantOps\n'), ((10311, 10423), 'aqt.jax.get_bounds.GetBounds.Params', 'GetBounds.Params', ([], {'update_stats': '(False)', 'update_bounds': '(False)', 'expected_bounds_shape': 'expected_inputs_scale_shape'}), '(update_stats=False, update_bounds=False,\n expected_bounds_shape=expected_inputs_scale_shape)\n', (10327, 10423), False, 'from aqt.jax.get_bounds import GetBounds\n'), ((11828, 11851), 'jax.numpy.array', 'jnp.array', (['[[6.0, 8.0]]'], {}), '([[6.0, 8.0]])\n', (11837, 11851), True, 'import jax.numpy as jnp\n'), ((12116, 12189), 'jax.numpy.array', 'jnp.array', (['[[-2 ** (prec - 1.0) + 1.0], [2 ** (prec - 1.0) - 1.0], [0.0]]'], {}), '([[-2 ** (prec - 1.0) + 1.0], [2 ** (prec - 1.0) - 1.0], [0.0]])\n', (12125, 12189), True, 'import jax.numpy as jnp\n'), ((12196, 12219), 'jax.numpy.array', 'jnp.array', (['[[1.0, 1.0]]'], {}), '([[1.0, 1.0]])\n', (12205, 12219), True, 'import jax.numpy as jnp\n'), ((13044, 13101), 'aqt.jax.get_bounds.GetBounds.Params', 'GetBounds.Params', ([], {'update_stats': '(False)', 'update_bounds': '(False)'}), '(update_stats=False, update_bounds=False)\n', (13060, 13101), False, 'from aqt.jax.get_bounds import GetBounds\n'), ((13132, 13268), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': 'QuantOps.ActHParams.InputDistribution.symmetric', 'bounds': 'bounds', 'prec': 'prec', 'half_shift': '(False)'}), '(input_distribution=QuantOps.ActHParams.\n InputDistribution.symmetric, bounds=bounds, prec=prec, half_shift=False)\n', (13151, 13268), False, 'from aqt.jax.quantization import QuantOps\n'), ((13436, 13493), 'aqt.jax.get_bounds.GetBounds.Params', 'GetBounds.Params', ([], {'update_stats': '(False)', 'update_bounds': '(False)'}), '(update_stats=False, update_bounds=False)\n', (13452, 13493), False, 'from aqt.jax.get_bounds import GetBounds\n'), ((13524, 13676), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': 'QuantOps.ActHParams.InputDistribution.symmetric', 'bounds': '(bounds * act_scale)', 'prec': 'prec', 'half_shift': '(False)'}), '(input_distribution=QuantOps.ActHParams.\n InputDistribution.symmetric, bounds=bounds * act_scale, prec=prec,\n half_shift=False)\n', (13543, 13676), False, 'from aqt.jax.quantization import QuantOps\n'), ((15499, 15556), 'aqt.jax.get_bounds.GetBounds.Params', 'GetBounds.Params', ([], {'update_stats': '(False)', 'update_bounds': '(False)'}), '(update_stats=False, update_bounds=False)\n', (15515, 15556), False, 'from aqt.jax.get_bounds import GetBounds\n'), ((15587, 15732), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': 'QuantOps.ActHParams.InputDistribution.positive', 'bounds': 'upper_bound', 'prec': 'prec', 'half_shift': '(False)'}), '(input_distribution=QuantOps.ActHParams.\n InputDistribution.positive, bounds=upper_bound, prec=prec, half_shift=False\n )\n', (15606, 15732), False, 'from aqt.jax.quantization import QuantOps\n'), ((16396, 16453), 'aqt.jax.get_bounds.GetBounds.Params', 'GetBounds.Params', ([], {'update_stats': '(False)', 'update_bounds': '(False)'}), '(update_stats=False, update_bounds=False)\n', (16412, 16453), False, 'from aqt.jax.get_bounds 
import GetBounds\n'), ((16484, 16620), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': 'QuantOps.ActHParams.InputDistribution.symmetric', 'bounds': 'bounds', 'prec': 'prec', 'half_shift': '(False)'}), '(input_distribution=QuantOps.ActHParams.\n InputDistribution.symmetric, bounds=bounds, prec=prec, half_shift=False)\n', (16503, 16620), False, 'from aqt.jax.quantization import QuantOps\n'), ((17181, 17242), 'aqt.jax.quantization.QuantOps.WeightParams', 'QuantOps.WeightParams', ([], {'prec': 'prec', 'axis': 'None', 'half_shift': '(False)'}), '(prec=prec, axis=None, half_shift=False)\n', (17202, 17242), False, 'from aqt.jax.quantization import QuantOps\n'), ((17967, 18028), 'aqt.jax.quantization.QuantOps.WeightParams', 'QuantOps.WeightParams', ([], {'prec': 'prec', 'axis': 'None', 'half_shift': '(False)'}), '(prec=prec, axis=None, half_shift=False)\n', (17988, 18028), False, 'from aqt.jax.quantization import QuantOps\n'), ((18922, 18983), 'aqt.jax.quantization.QuantOps.WeightParams', 'QuantOps.WeightParams', ([], {'prec': 'prec', 'axis': 'None', 'half_shift': '(False)'}), '(prec=prec, axis=None, half_shift=False)\n', (18943, 18983), False, 'from aqt.jax.quantization import QuantOps\n'), ((19104, 19165), 'aqt.jax.quantization.QuantOps.WeightParams', 'QuantOps.WeightParams', ([], {'prec': 'prec', 'axis': 'None', 'half_shift': '(False)'}), '(prec=prec, axis=None, half_shift=False)\n', (19125, 19165), False, 'from aqt.jax.quantization import QuantOps\n'), ((19718, 19731), 'jax.numpy.arange', 'jnp.arange', (['(4)'], {}), '(4)\n', (19728, 19731), True, 'import jax.numpy as jnp\n'), ((19897, 19955), 'aqt.jax.quantization.QuantOps.WeightParams', 'QuantOps.WeightParams', ([], {'prec': 'prec', 'axis': '(0)', 'half_shift': '(False)'}), '(prec=prec, axis=0, half_shift=False)\n', (19918, 19955), False, 'from aqt.jax.quantization import QuantOps\n'), ((20089, 20147), 'aqt.jax.quantization.QuantOps.WeightParams', 'QuantOps.WeightParams', ([], {'prec': 'prec', 'axis': '(0)', 'half_shift': '(False)'}), '(prec=prec, axis=0, half_shift=False)\n', (20110, 20147), False, 'from aqt.jax.quantization import QuantOps\n'), ((22694, 22725), 'jax.random.normal', 'jax.random.normal', (['key2', '(3, 2)'], {}), '(key2, (3, 2))\n', (22711, 22725), False, 'import jax\n'), ((28049, 28141), 'aqt.jax.primitives.floor_and_clip_to_unsigned_int', 'primitives.floor_and_clip_to_unsigned_int', (['x'], {'prec': 'prec', 'dtype': 'x.dtype', 'half_shift': '(False)'}), '(x, prec=prec, dtype=x.dtype,\n half_shift=False)\n', (28090, 28141), False, 'from aqt.jax import primitives\n'), ((29376, 29399), 'jax.numpy.array', 'jnp.array', (['[[3.0, 1.5]]'], {}), '([[3.0, 1.5]])\n', (29385, 29399), True, 'import jax.numpy as jnp\n'), ((31530, 31553), 'jax.numpy.array', 'jnp.array', (['[[3.0, 1.5]]'], {}), '([[3.0, 1.5]])\n', (31539, 31553), True, 'import jax.numpy as jnp\n'), ((32349, 32372), 'jax.numpy.array', 'jnp.array', (['[[3.0, 1.5]]'], {}), '([[3.0, 1.5]])\n', (32358, 32372), True, 'import jax.numpy as jnp\n'), ((36052, 36190), 'aqt.jax.get_bounds.GetBounds.Hyper', 'GetBounds.Hyper', ([], {'initial_bound': '(10.0)', 'stddev_coeff': '(0)', 'absdev_coeff': '(0)', 'mix_coeff': '(0)', 'granularity': 'quant_config.QuantGranularity.per_tensor'}), '(initial_bound=10.0, stddev_coeff=0, absdev_coeff=0,\n mix_coeff=0, granularity=quant_config.QuantGranularity.per_tensor)\n', (36067, 36190), False, 'from aqt.jax.get_bounds import GetBounds\n'), ((36273, 36410), 'aqt.jax.get_bounds.GetBounds.Hyper', 
'GetBounds.Hyper', ([], {'initial_bound': '(5.0)', 'stddev_coeff': '(0)', 'absdev_coeff': '(0)', 'mix_coeff': '(0)', 'granularity': 'quant_config.QuantGranularity.per_tensor'}), '(initial_bound=5.0, stddev_coeff=0, absdev_coeff=0,\n mix_coeff=0, granularity=quant_config.QuantGranularity.per_tensor)\n', (36288, 36410), False, 'from aqt.jax.get_bounds import GetBounds\n'), ((36489, 36593), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': '"""symmetric"""', 'bounds': 'lhs_get_bounds', 'prec': '(8)', 'half_shift': '(False)'}), "(input_distribution='symmetric', bounds=lhs_get_bounds,\n prec=8, half_shift=False)\n", (36508, 36593), False, 'from aqt.jax.quantization import QuantOps\n'), ((36660, 36764), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': '"""symmetric"""', 'bounds': 'rhs_get_bounds', 'prec': '(8)', 'half_shift': '(False)'}), "(input_distribution='symmetric', bounds=rhs_get_bounds,\n prec=8, half_shift=False)\n", (36679, 36764), False, 'from aqt.jax.quantization import QuantOps\n'), ((36842, 36932), 'aqt.jax.get_bounds.GetBounds.Params', 'get_bounds.GetBounds.Params', ([], {'update_stats': '(True)', 'update_bounds': '(False)', 'module_name': '"""lhs"""'}), "(update_stats=True, update_bounds=False,\n module_name='lhs')\n", (36869, 36932), False, 'from aqt.jax import get_bounds\n'), ((36974, 37064), 'aqt.jax.get_bounds.GetBounds.Params', 'get_bounds.GetBounds.Params', ([], {'update_stats': '(True)', 'update_bounds': '(False)', 'module_name': '"""rhs"""'}), "(update_stats=True, update_bounds=False,\n module_name='rhs')\n", (37001, 37064), False, 'from aqt.jax import get_bounds\n'), ((37088, 37394), 'aqt.jax.quantization.quantized_dynamic_dot_general', 'quantization.quantized_dynamic_dot_general', ([], {'lhs_act': 'lhs', 'rhs_act': 'rhs', 'lhs_act_hparams': 'lhs_params', 'rhs_act_hparams': 'rhs_params', 'dot_dimension_numbers': '(((1,), (0,)), ((), ()))', 'quant_type': 'QuantType.aqt', 'lhs_get_bounds_params': 'lhs_get_bounds_params', 'rhs_get_bounds_params': 'rhs_get_bounds_params'}), '(lhs_act=lhs, rhs_act=rhs,\n lhs_act_hparams=lhs_params, rhs_act_hparams=rhs_params,\n dot_dimension_numbers=(((1,), (0,)), ((), ())), quant_type=QuantType.\n aqt, lhs_get_bounds_params=lhs_get_bounds_params, rhs_get_bounds_params\n =rhs_get_bounds_params)\n', (37130, 37394), False, 'from aqt.jax import quantization\n'), ((45126, 45219), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'bounds': '(6.0)', 'prec': 'prec', 'input_distribution': 'mock.ANY', 'half_shift': '(False)'}), '(bounds=6.0, prec=prec, input_distribution=mock.ANY,\n half_shift=False)\n', (45145, 45219), False, 'from aqt.jax.quantization import QuantOps\n'), ((5937, 5977), 'numpy.random.uniform', 'onp.random.uniform', (['(0)', '(1.0)'], {'size': '(10, 4)'}), '(0, 1.0, size=(10, 4))\n', (5955, 5977), True, 'import numpy as onp\n'), ((9521, 9572), 'numpy.random.uniform', 'onp.random.uniform', (['(0)', '(1.0)'], {'size': '(10, num_features)'}), '(0, 1.0, size=(10, num_features))\n', (9539, 9572), True, 'import numpy as onp\n'), ((9924, 9964), 'numpy.random.uniform', 'onp.random.uniform', (['(0)', '(1.0)'], {'size': '(10, 4)'}), '(0, 1.0, size=(10, 4))\n', (9942, 9964), True, 'import numpy as onp\n'), ((17038, 17078), 'numpy.random.uniform', 'onp.random.uniform', (['(0)', '(1.0)'], {'size': '(10, 1)'}), '(0, 1.0, size=(10, 1))\n', (17056, 17078), True, 'import numpy as onp\n'), ((24466, 24487), 'jax.numpy.array', 'jnp.array', 
(['act_bounds'], {}), '(act_bounds)\n', (24475, 24487), True, 'import jax.numpy as jnp\n'), ((26899, 26920), 'jax.numpy.array', 'jnp.array', (['act_bounds'], {}), '(act_bounds)\n', (26908, 26920), True, 'import jax.numpy as jnp\n'), ((27018, 27060), 'numpy.random.uniform', 'onp.random.uniform', (['(0)', '(1.0)'], {'size': '(4, 3, 2)'}), '(0, 1.0, size=(4, 3, 2))\n', (27036, 27060), True, 'import numpy as onp\n'), ((45262, 45339), 'unittest.mock.call', 'mock.call', (['mock.ANY'], {'hparams': 'act_hparams', 'get_bounds_params': 'get_bounds_params'}), '(mock.ANY, hparams=act_hparams, get_bounds_params=get_bounds_params)\n', (45271, 45339), False, 'from unittest import mock\n'), ((4138, 4158), 'jax.numpy.log2', 'jnp.log2', (['max_weight'], {}), '(max_weight)\n', (4146, 4158), True, 'import jax.numpy as jnp\n'), ((3020, 3085), 'aqt.jax.quantization.QuantOps.FloatQuant.FloatPrec', 'QuantOps.FloatQuant.FloatPrec', ([], {'exp_min': '(-11)', 'exp_max': '(4)', 'sig_bits': '(3)'}), '(exp_min=-11, exp_max=4, sig_bits=3)\n', (3049, 3085), False, 'from aqt.jax.quantization import QuantOps\n'), ((3387, 3452), 'aqt.jax.quantization.QuantOps.FloatQuant.FloatPrec', 'QuantOps.FloatQuant.FloatPrec', ([], {'exp_min': '(-23)', 'exp_max': '(8)', 'sig_bits': '(2)'}), '(exp_min=-23, exp_max=8, sig_bits=2)\n', (3416, 3452), False, 'from aqt.jax.quantization import QuantOps\n'), ((6904, 6961), 'aqt.jax.get_bounds.GetBounds.Params', 'GetBounds.Params', ([], {'update_stats': '(False)', 'update_bounds': '(False)'}), '(update_stats=False, update_bounds=False)\n', (6920, 6961), False, 'from aqt.jax.get_bounds import GetBounds\n'), ((22057, 22113), 'aqt.jax.get_bounds.GetBounds.Params', 'GetBounds.Params', ([], {'update_stats': '(True)', 'update_bounds': '(False)'}), '(update_stats=True, update_bounds=False)\n', (22073, 22113), False, 'from aqt.jax.get_bounds import GetBounds\n'), ((20416, 20481), 'aqt.jax.quantization.QuantOps.FloatQuant.FloatPrec', 'QuantOps.FloatQuant.FloatPrec', ([], {'exp_min': '(-11)', 'exp_max': '(4)', 'sig_bits': '(3)'}), '(exp_min=-11, exp_max=4, sig_bits=3)\n', (20445, 20481), False, 'from aqt.jax.quantization import QuantOps\n'), ((20721, 20786), 'aqt.jax.quantization.QuantOps.FloatQuant.FloatPrec', 'QuantOps.FloatQuant.FloatPrec', ([], {'exp_min': '(-11)', 'exp_max': '(4)', 'sig_bits': '(3)'}), '(exp_min=-11, exp_max=4, sig_bits=3)\n', (20750, 20786), False, 'from aqt.jax.quantization import QuantOps\n'), ((46838, 46851), 'jax.numpy.arange', 'jnp.arange', (['(6)'], {}), '(6)\n', (46848, 46851), True, 'import jax.numpy as jnp\n')]
|
import rNet as rNet
import numpy as np
def generate(net,seed_,num_to_gen):
net.reset()
x=np.zeros((1,1,vocab_size))
x[0,0,seed_]=1
out=index_to_char[seed_]
for t in range(0,num_to_gen):
p=net(x)[0,0,:]
ix = np.random.choice(range(vocab_size), p=p.ravel())
x=np.zeros(x.shape)
x[0,0,ix]=1
out+=index_to_char[ix]
return out
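# Usage (mirrors the sampling call in the training loop below):
#   print(generate(net, np.random.randint(vocab_size), 200))
# feeds a one-hot seed character through the network and samples each next
# character from the softmax output distribution.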
path_='data/tiny_shakespeare.txt'
# path_='data/tiny_nietzsche.txt'
raw=open(path_, 'r').read()
chars = list(set(raw))
chars.sort()
data_size, vocab_size = (len(raw), len(chars))
print('data has %d characters, %d unique.' % (data_size, vocab_size))
char_to_index = {ch: i for i, ch in enumerate(chars)}
index_to_char = {i: ch for i, ch in enumerate(chars)}
time_steps, batch_size, input_size, hidden_size, output_size = (100, 100, vocab_size, 512, vocab_size)
net = rNet.rNet()
net.add(rNet.LSTM([input_size,hidden_size]))
net.add(rNet.FC([hidden_size,output_size],activation=rNet.softmax()))
net.init()
# where to save the model
model_path='model/L'
cost = rNet.softmax_loss()
# settings for RMSprop + momentum
first_moment=[np.zeros_like(l.weights) for l in net.layers]
second_moment=[np.zeros_like(l.weights) for l in net.layers]
momentum=[np.zeros_like(l.weights) for l in net.layers]
smooth_loss = -np.log(1.0/vocab_size)*time_steps * batch_size # loss at iteration 0
cache0=None
count, count_t=(0,0)
epoch=0
text_pointers = np.random.randint(data_size-time_steps-1, size=batch_size)
learning_rate, nu, mom_decay = (1e-3, 0.97, 0.9)
clip_range=(-5,5)
print('Learning rate: %f, nu: %f, mom_decay: %f'%(learning_rate,nu,mom_decay))
print('Clip range: ',clip_range)
while True:
    # reset the state every 100 iterations (10,000 characters per stream) and save the model
if count % 100 == 0:
cache0 = None
print('Cache cleared')
net.save(model_path)
print('Model saved in %s'%(model_path))
for i in range(text_pointers.size):
if text_pointers[i] + time_steps + 1 >= data_size:
text_pointers[i] = 0
batch_in=np.zeros([time_steps, batch_size, vocab_size])
batch_out=np.zeros([time_steps, batch_size], dtype=np.uint8)
for i in range(batch_size):
b_=[char_to_index[c] for c in raw[text_pointers[i]:text_pointers[i] + time_steps + 1]]
batch_in[range(time_steps),i,b_[:-1]]=1
batch_out[:,i]=np.array(b_[1:])
loss,dW, cache0 = net.train_step(batch_in,batch_out, cache0=cache0, cost=cost)
smooth_loss = smooth_loss * 0.999 + loss * 0.001
if count % 10 == 0:
txt = generate(net, np.random.randint(vocab_size), 200)
print('----\n %s \n----' % (txt))
print('epoch: %d, iter %d, smooth loss: %f, loss: %f' % (epoch, count, smooth_loss/(time_steps*batch_size),loss/(time_steps*batch_size)))
# RMSprop + momentum parameter update
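    # Per-layer update rule (a centered-RMSProp-style step with momentum):
    #   v   <- nu*v   + (1-nu)*g^2                        (second moment of the gradient)
    #   m   <- nu*m   + (1-nu)*g                          (first moment of the gradient)
    #   mom <- mom_decay*mom - learning_rate*g/sqrt(v - m^2 + 1e-8)
    #   W   <- W + mom
    # where g is the gradient clipped to clip_range, nu=0.97, mom_decay=0.9, learning_rate=1e-3.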
    for param, dparam, mem, mem1, mom in zip(net.layers, dW, second_moment, first_moment, momentum):
        np.clip(dparam, clip_range[0], clip_range[1], dparam)
        # update the moment buffers in place so they persist across iterations
        # (plain assignment would only rebind the local names, not the stored arrays)
        mem[...] = nu*mem + (1-nu)*dparam*dparam
        mem1[...] = nu*mem1 + (1-nu)*dparam
        mom[...] = mom_decay*mom - learning_rate*dparam/np.sqrt(mem - mem1**2 + 1e-8)
        param.weights += mom
text_pointers += time_steps
count_t+=time_steps
count += 1
if count_t >= data_size:
epoch += 1
count_t=0
text_pointers = np.random.randint(data_size - time_steps - 1, size=batch_size)
|
[
"rNet.rNet",
"numpy.zeros_like",
"numpy.log",
"rNet.LSTM",
"numpy.zeros",
"numpy.clip",
"numpy.random.randint",
"numpy.array",
"rNet.softmax",
"rNet.softmax_loss",
"numpy.sqrt"
] |
[((859, 870), 'rNet.rNet', 'rNet.rNet', ([], {}), '()\n', (868, 870), True, 'import rNet as rNet\n'), ((1052, 1071), 'rNet.softmax_loss', 'rNet.softmax_loss', ([], {}), '()\n', (1069, 1071), True, 'import rNet as rNet\n'), ((1426, 1488), 'numpy.random.randint', 'np.random.randint', (['(data_size - time_steps - 1)'], {'size': 'batch_size'}), '(data_size - time_steps - 1, size=batch_size)\n', (1443, 1488), True, 'import numpy as np\n'), ((98, 126), 'numpy.zeros', 'np.zeros', (['(1, 1, vocab_size)'], {}), '((1, 1, vocab_size))\n', (106, 126), True, 'import numpy as np\n'), ((879, 915), 'rNet.LSTM', 'rNet.LSTM', (['[input_size, hidden_size]'], {}), '([input_size, hidden_size])\n', (888, 915), True, 'import rNet as rNet\n'), ((1121, 1145), 'numpy.zeros_like', 'np.zeros_like', (['l.weights'], {}), '(l.weights)\n', (1134, 1145), True, 'import numpy as np\n'), ((1182, 1206), 'numpy.zeros_like', 'np.zeros_like', (['l.weights'], {}), '(l.weights)\n', (1195, 1206), True, 'import numpy as np\n'), ((1238, 1262), 'numpy.zeros_like', 'np.zeros_like', (['l.weights'], {}), '(l.weights)\n', (1251, 1262), True, 'import numpy as np\n'), ((2055, 2101), 'numpy.zeros', 'np.zeros', (['[time_steps, batch_size, vocab_size]'], {}), '([time_steps, batch_size, vocab_size])\n', (2063, 2101), True, 'import numpy as np\n'), ((2116, 2166), 'numpy.zeros', 'np.zeros', (['[time_steps, batch_size]'], {'dtype': 'np.uint8'}), '([time_steps, batch_size], dtype=np.uint8)\n', (2124, 2166), True, 'import numpy as np\n'), ((303, 320), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (311, 320), True, 'import numpy as np\n'), ((2365, 2381), 'numpy.array', 'np.array', (['b_[1:]'], {}), '(b_[1:])\n', (2373, 2381), True, 'import numpy as np\n'), ((2943, 2996), 'numpy.clip', 'np.clip', (['dparam', 'clip_range[0]', 'clip_range[1]', 'dparam'], {}), '(dparam, clip_range[0], clip_range[1], dparam)\n', (2950, 2996), True, 'import numpy as np\n'), ((3354, 3416), 'numpy.random.randint', 'np.random.randint', (['(data_size - time_steps - 1)'], {'size': 'batch_size'}), '(data_size - time_steps - 1, size=batch_size)\n', (3371, 3416), True, 'import numpy as np\n'), ((969, 983), 'rNet.softmax', 'rNet.softmax', ([], {}), '()\n', (981, 983), True, 'import rNet as rNet\n'), ((1299, 1323), 'numpy.log', 'np.log', (['(1.0 / vocab_size)'], {}), '(1.0 / vocab_size)\n', (1305, 1323), True, 'import numpy as np\n'), ((2570, 2599), 'numpy.random.randint', 'np.random.randint', (['vocab_size'], {}), '(vocab_size)\n', (2587, 2599), True, 'import numpy as np\n'), ((3134, 3166), 'numpy.sqrt', 'np.sqrt', (['(mem - mem1 ** 2 + 1e-08)'], {}), '(mem - mem1 ** 2 + 1e-08)\n', (3141, 3166), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Convert ECoG to NWB.
:Author: <NAME>, <NAME>
Modified by <NAME> on May 30, 2020
"""
from __future__ import print_function
import os
from datetime import datetime
from os import path
from pathlib import Path
import numpy as np
import pandas as pd
from hdmf.backends.hdf5 import H5DataIO
from ndx_ecog import ECoGSubject
from ndx_bipolar_scheme import BipolarSchemeTable, EcephysExt
from pynwb.file import DynamicTableRegion
from pynwb import NWBFile, TimeSeries, get_manager, NWBHDF5IO
from pynwb.ecephys import ElectricalSeries, LFP
import scipy.io as sio
from scipy.io.wavfile import read as wavread
from tqdm import tqdm
from ecogvis.functions.htk_to_nwb.HTK import readHTK
# get_manager must come after dynamic imports
manager = get_manager()
def get_analog(anin_path, num=1):
"""
Load analog data. Try:
1) analog[num].wav
2) ANIN[num].htk
Parameters
----------
    anin_path: str
        path to the folder holding the analog[num].wav / ANIN[num].htk files
    num: int
        analog channel number (1-4)
Returns
-------
fs, data
"""
wav_path = path.join(anin_path, 'analog' + str(num) + '.wav')
if os.path.isfile(wav_path):
rate, data = wavread(wav_path)
return float(rate), np.array(data, dtype=float)
htk_path = path.join(anin_path, 'ANIN' + str(num) + '.htk')
if os.path.isfile(htk_path):
htk_out = readHTK(htk_path, scale_s_rate=True)
return htk_out['sampling_rate'], htk_out['data'].ravel()
print('no analog path found for ' + str(num))
return None, None
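# Example (hypothetical path): load the analog channel recorded as ANIN1
#   fs, mic = get_analog('/path/to/EC000_B01/Analog', num=1)
# returns the sampling rate in Hz and a float array with the waveform, or
# (None, None) if neither analog1.wav nor ANIN1.htk exists in that folder.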
def readhtks(htk_path, elecs=None, use_tqdm=True):
# First fix the order of htk files
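    # HTK files are named Wav<number>.htk; zero-padding the two-character numbers
    # (e.g. '12' -> '102') makes plain string sorting match the intended order.
    # (This assumes the Chang-lab Wav<bank><channel> naming convention.)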
    all_files = np.array([f for f in Path(htk_path).glob('*.htk') if '._' not in str(f)])  # skip '._' sidecar files, as below
numbers = [f.name.split('.')[0].split('Wav')[1] for f in Path(htk_path).glob('*.htk') if '._' not in str(f)]
new_numbers = [n[0] + '0' + n[1] if len(n) == 2 else n for n in numbers]
sorted_index = np.argsort(new_numbers)
sorted_files = all_files[sorted_index]
# Load data from files in correct order
data = []
if use_tqdm:
this_iter = tqdm(sorted_files, desc='reading electrodes')
else:
this_iter = sorted_files
for i in this_iter:
htk = readHTK(i, scale_s_rate=True)
data.append(htk['data'])
data = np.stack(data)
if len(data.shape) == 3:
data = data.transpose([2, 0, 1])
rate = htk['sampling_rate']
return rate, data.squeeze()
def get_bad_elecs(blockpath):
bad_channels_file = os.path.join(blockpath, 'Artifacts', 'badChannels.txt')
# I think bad channels is 1-indexed but I'm not sure
if os.path.isfile(bad_channels_file) and os.stat(
bad_channels_file).st_size:
dat = pd.read_csv(bad_channels_file, header=None, delimiter=' ',
engine='python')
bad_elecs_inds = dat.values.ravel() - 1
bad_elecs_inds = bad_elecs_inds[np.isfinite(bad_elecs_inds)]
else:
bad_elecs_inds = []
return bad_elecs_inds
def elecs_to_electrode_table(nwbfile, elecspath):
"""
Takes an NWB file and the elecs .mat file path, loads the anatomical and
location information for each electrode,
and writes this information to the NWB file.
Parameters:
-----------
nwbfile : object
An NWB file object.
elecspath : str
Path to the TDT_elecs_all.mat file for this subject. First, second,
and third columns of the key 'elecmatrix'
should be x, y, and z coordinates, respectively. For the 'anatomy'
field, second column should be the full electrode label and the
fourth column should be the anatomical location name.
Returns:
--------
nwb_file : object
The edited NWB file with the added electrode information.
"""
# Get anatomical and location information for electrodes.
elec_mat = sio.loadmat(elecspath)
labels = elec_mat['anatomy'][:, 1]
location = elec_mat['anatomy'][:, 3]
x = elec_mat['elecmatrix'][:, 0]
y = elec_mat['elecmatrix'][:, 1]
z = elec_mat['elecmatrix'][:, 2]
# Get MNI warped electrode coordinates.
    warped_file = str(elecspath).split('.')[0] + '_warped.mat'
    if Path(warped_file).is_file():
        elec_mat_warped = sio.loadmat(warped_file)
x_warped = elec_mat_warped['elecmatrix'][:, 0]
y_warped = elec_mat_warped['elecmatrix'][:, 1]
z_warped = elec_mat_warped['elecmatrix'][:, 2]
else:
print('No warped electrode information found...filling with zeros.')
x_warped = np.zeros_like(x)
y_warped = np.zeros_like(y)
z_warped = np.zeros_like(z)
# Define electrode device label names.
group_labels = []
for current_group in labels:
name = current_group[0].rstrip('0123456789')
# Replace 'NaN' for 'null'
if name == 'NaN':
name = 'null'
group_labels.append(name)
# Get the list of unique electrode device label names
unique_group_indexes = np.unique(group_labels, return_index=True)[1]
unique_group_labels = [group_labels[f] for f in sorted(unique_group_indexes)]
# Add additional columns to the electodes table.
nwbfile.add_electrode_column('label', 'label of electrode')
nwbfile.add_electrode_column('bad', 'electrode identified as too noisy')
nwbfile.add_electrode_column('x_warped', 'x warped onto cvs_avg35_inMNI152')
nwbfile.add_electrode_column('y_warped', 'y warped onto cvs_avg35_inMNI152')
nwbfile.add_electrode_column('z_warped', 'z warped onto cvs_avg35_inMNI152')
nwbfile.add_electrode_column('null', 'if not connected to real electrode')
for group_label in unique_group_labels:
# Get region name and device label for the group.
if 'Depth' in group_label:
brain_area = group_label.split('Depth')[0]
elif 'Strip' in group_label:
brain_area = group_label.split('Strip')[0]
elif 'Grid' in group_label:
brain_area = group_label.split('Grid')[0]
elif 'Pole' in group_label:
brain_area = group_label.split('Pole')[0]
elif 'HeschlsGyrus' in group_label:
brain_area = 'HeschlsGyrus'
elif 'null' in group_label:
brain_area = 'null'
else:
brain_area = 'other'
# Create electrode device (same as the group).
device = nwbfile.create_device(group_label)
# Create electrode group with name, description, device object,
# and general location.
electrode_group = nwbfile.create_electrode_group(
name='{} electrodes'.format(group_label),
description='{}'.format(group_label),
device=device,
location=str(brain_area)
)
# Loop through the number of electrodes in this electrode group
elec_nums = np.where(np.array(group_labels) == group_label)[0]
for elec_num in elec_nums:
# Add the electrode information to the table.
elec_location = location[elec_num]
if len(elec_location) == 0:
# If no label is recorded for this electrode, set it to null
elec_location = 'null'
is_null = True
else:
elec_location = elec_location[0]
is_null = False
nwbfile.add_electrode(
id=elec_num,
x=x[elec_num],
y=y[elec_num],
z=z[elec_num],
imp=np.nan,
x_warped=x_warped[elec_num],
y_warped=y_warped[elec_num],
z_warped=z_warped[elec_num],
location=str(elec_location),
filtering='filtering',
group=electrode_group,
label=str(labels[elec_num][0]),
bad=False,
null=is_null,
)
return nwbfile
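# Example (hypothetical path): populate the electrodes table of an in-memory NWBFile
#   nwbfile = elecs_to_electrode_table(nwbfile, Path('/path/to/imaging/elecs/TDT_elecs_all.mat'))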
def chang2nwb(blockpath, out_file_path=None, save_to_file=False, htk_config=None):
"""
Parameters
----------
blockpath: str
out_file_path: None | str
if None, output = [blockpath]/[blockname].nwb
save_to_file : bool
If True, saves to file. If False, just returns nwbfile object
htk_config : dict
        Dictionary containing HTK conversion paths and options. Example:
{
ecephys_path: 'path_to/ecephys_htk_files',
ecephys_type: 'raw', 'preprocessed' or 'high_gamma',
analog_path: 'path_to/analog_htk_files',
anin1: {present: True, name: 'microphone', type: 'acquisition'},
anin2: {present: True, name: 'speaker1', type: 'stimulus'},
anin3: {present: False, name: 'speaker2', type: 'stimulus'},
anin4: {present: False, name: 'custom', type: 'acquisition'},
metadata: metadata,
electrodes_file: electrodes_file,
bipolar_file: bipolar_file
}
Returns
-------
"""
metadata = {}
if htk_config is None:
blockpath = Path(blockpath)
else:
blockpath = Path(htk_config['ecephys_path'])
metadata = htk_config['metadata']
blockname = blockpath.parent.name
subject_id = blockpath.parent.parent.name[2:]
if out_file_path is None:
out_file_path = blockpath.resolve().parent / ''.join(['EC', subject_id, '_', blockname, '.nwb'])
# file paths
ecog_path = blockpath
anin_path = htk_config['analog_path']
bad_time_file = path.join(blockpath, 'Artifacts', 'badTimeSegments.mat')
# Create the NWB file object
nwbfile_dict = {
'session_description': blockname,
'identifier': blockname,
'session_start_time': datetime.now().astimezone(),
'institution': 'University of California, San Francisco',
'lab': 'Chang Lab'
}
if 'NWBFile' in metadata:
nwbfile_dict.update(metadata['NWBFile'])
nwbfile = NWBFile(**nwbfile_dict)
# Read electrophysiology data from HTK files
print('reading htk acquisition...', flush=True)
ecog_rate, data = readhtks(ecog_path)
data = data.squeeze()
print('done', flush=True)
# Get electrodes info from mat file
if htk_config['electrodes_file'] is not None:
nwbfile = elecs_to_electrode_table(
nwbfile=nwbfile,
elecspath=htk_config['electrodes_file'],
)
n_electrodes = nwbfile.electrodes[:].shape[0]
all_elecs = list(range(n_electrodes))
elecs_region = nwbfile.create_electrode_table_region(
region=all_elecs,
description='ECoG electrodes on brain'
)
else:
ecephys_dict = {
'Device': [{'name': 'auto_device'}],
'ElectricalSeries': [{'name': 'ECoG', 'description': 'description'}],
'ElectrodeGroup': [{'name': 'auto_group', 'description': 'auto_group',
'location': 'location', 'device': 'auto_device'}]
}
if 'Ecephys' in metadata:
ecephys_dict.update(metadata['Ecephys'])
# Create devices
for dev in ecephys_dict['Device']:
device = nwbfile.create_device(dev['name'])
# Electrode groups
for el_grp in ecephys_dict['ElectrodeGroup']:
device = nwbfile.devices[el_grp['device']]
electrode_group = nwbfile.create_electrode_group(
name=el_grp['name'],
description=el_grp['description'],
location=el_grp['location'],
device=device
)
# Electrodes table
n_electrodes = data.shape[1]
nwbfile.add_electrode_column('label', 'label of electrode')
nwbfile.add_electrode_column('bad', 'electrode identified as too noisy')
nwbfile.add_electrode_column('x_warped', 'x warped onto cvs_avg35_inMNI152')
nwbfile.add_electrode_column('y_warped', 'y warped onto cvs_avg35_inMNI152')
nwbfile.add_electrode_column('z_warped', 'z warped onto cvs_avg35_inMNI152')
nwbfile.add_electrode_column('null', 'if not connected to real electrode')
bad_elecs_inds = get_bad_elecs(blockpath)
for elec_counter in range(n_electrodes):
bad = elec_counter in bad_elecs_inds
nwbfile.add_electrode(
id=elec_counter,
x=np.nan,
y=np.nan,
z=np.nan,
imp=np.nan,
x_warped=np.nan,
y_warped=np.nan,
z_warped=np.nan,
location='',
filtering='none',
group=electrode_group,
label='',
bad=bad,
null=False,
)
all_elecs = list(range(n_electrodes))
elecs_region = nwbfile.create_electrode_table_region(
region=all_elecs,
description='ECoG electrodes on brain'
)
# Get Bipolar table from file
if htk_config['bipolar_file'] is not None:
df = pd.read_csv(htk_config['bipolar_file'], index_col='id', sep='\t')
# Create bipolar scheme table
bipolar_scheme_table = BipolarSchemeTable(
name='bipolar_scheme_table',
description='desc'
)
# Columns for bipolar scheme - all anodes and cathodes within the same
# bipolar row are considered to have the same group and location
bipolar_scheme_table.add_column(
name='group_name',
description='electrode group name'
)
bipolar_scheme_table.add_column(
name='location',
description='electrode location'
)
# Iterate over anode / cathode rows
for i, r in df.iterrows():
if isinstance(r['anodes'], str):
anodes = [int(a) for a in r['anodes'].split(',')]
else:
anodes = [int(r['anodes'])]
if isinstance(r['cathodes'], str):
cathodes = [int(a) for a in r['cathodes'].split(',')]
else:
cathodes = [int(r['cathodes'])]
bipolar_scheme_table.add_row(
anodes=anodes,
cathodes=cathodes,
group_name=nwbfile.electrodes['group_name'][anodes[0]],
location=nwbfile.electrodes['location'][anodes[0]]
)
bipolar_scheme_table.anodes.table = nwbfile.electrodes
bipolar_scheme_table.cathodes.table = nwbfile.electrodes
# Creates bipolar table region
elecs_region = DynamicTableRegion(
name='electrodes',
data=np.arange(0, df.shape[0]),
description='desc',
table=bipolar_scheme_table
)
ecephys_ext = EcephysExt(name='ecephys_ext')
ecephys_ext.bipolar_scheme_table = bipolar_scheme_table
nwbfile.add_lab_meta_data(ecephys_ext)
# Stores HTK electrophysiology data as raw, preprocessed or high gamma
if htk_config['ecephys_type'] == 'raw':
ecog_es = ElectricalSeries(name='ECoG',
data=H5DataIO(data[:, 0:n_electrodes], compression='gzip'),
electrodes=elecs_region,
rate=ecog_rate,
description='all Wav data')
nwbfile.add_acquisition(ecog_es)
elif htk_config['ecephys_type'] == 'preprocessed':
lfp = LFP()
ecog_es = ElectricalSeries(name='preprocessed',
data=H5DataIO(data[:, 0:n_electrodes], compression='gzip'),
electrodes=elecs_region,
rate=ecog_rate,
description='all Wav data')
lfp.add_electrical_series(ecog_es)
# Creates the ecephys processing module
ecephys_module = nwbfile.create_processing_module(
name='ecephys',
description='preprocessed electrophysiology data'
)
ecephys_module.add_data_interface(lfp)
elif htk_config['ecephys_type'] == 'high_gamma':
ecog_es = ElectricalSeries(name='high_gamma',
data=H5DataIO(data[:, 0:n_electrodes], compression='gzip'),
electrodes=elecs_region,
rate=ecog_rate,
description='all Wav data')
# Creates the ecephys processing module
ecephys_module = nwbfile.create_processing_module(
name='ecephys',
description='preprocessed electrophysiology data'
)
ecephys_module.add_data_interface(ecog_es)
# Add ANIN 1
if htk_config['anin1']['present']:
fs, data = get_analog(anin_path, 1)
ts = TimeSeries(
name=htk_config['anin1']['name'],
data=data,
unit='NA',
rate=fs,
)
if htk_config['anin1']['type'] == 'acquisition':
nwbfile.add_acquisition(ts)
else:
nwbfile.add_stimulus(ts)
print('ANIN1 saved with name "', htk_config['anin1']['name'], '" in ',
htk_config['anin1']['type'])
# Add ANIN 2
if htk_config['anin2']['present']:
fs, data = get_analog(anin_path, 2)
ts = TimeSeries(
name=htk_config['anin2']['name'],
data=data,
unit='NA',
rate=fs,
)
if htk_config['anin2']['type'] == 'acquisition':
nwbfile.add_acquisition(ts)
else:
nwbfile.add_stimulus(ts)
print('ANIN2 saved with name "', htk_config['anin2']['name'], '" in ',
htk_config['anin2']['type'])
# Add ANIN 3
if htk_config['anin3']['present']:
fs, data = get_analog(anin_path, 3)
ts = TimeSeries(
name=htk_config['anin3']['name'],
data=data,
unit='NA',
rate=fs,
)
if htk_config['anin3']['type'] == 'acquisition':
nwbfile.add_acquisition(ts)
else:
nwbfile.add_stimulus(ts)
print('ANIN3 saved with name "', htk_config['anin3']['name'], '" in ',
htk_config['anin3']['type'])
# Add ANIN 4
if htk_config['anin4']['present']:
fs, data = get_analog(anin_path, 4)
ts = TimeSeries(
name=htk_config['anin4']['name'],
data=data,
unit='NA',
rate=fs,
)
if htk_config['anin4']['type'] == 'acquisition':
nwbfile.add_acquisition(ts)
else:
nwbfile.add_stimulus(ts)
print('ANIN4 saved with name "', htk_config['anin4']['name'], '" in ',
htk_config['anin4']['type'])
# Add bad time segments
if os.path.exists(bad_time_file) and os.stat(bad_time_file).st_size:
bad_time = sio.loadmat(bad_time_file)['badTimeSegments']
for row in bad_time:
nwbfile.add_invalid_time_interval(start_time=row[0],
stop_time=row[1],
tags=('ECoG artifact',),
timeseries=ecog_es)
# Subject
subject_dict = {'subject_id': subject_id}
if 'Subject' in metadata:
subject_dict.update(metadata['Subject'])
subject = ECoGSubject(**subject_dict)
nwbfile.subject = subject
if save_to_file:
print('Saving HTK content to NWB file...')
# Export the NWB file
with NWBHDF5IO(str(out_file_path), manager=manager, mode='w') as io:
io.write(nwbfile)
# read check
with NWBHDF5IO(str(out_file_path), manager=manager, mode='r') as io:
io.read()
print('NWB file saved: ', str(out_file_path))
return nwbfile, out_file_path, subject_id, blockname
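# Example driver (hypothetical paths; htk_config keys as documented in the
# chang2nwb docstring above):
#   nwb, out_path, subj, block = chang2nwb(
#       blockpath='/path/to/EC000/EC000_B01',
#       save_to_file=True,
#       htk_config={
#           'ecephys_path': '/path/to/EC000/EC000_B01/RawHTK',
#           'ecephys_type': 'raw',
#           'analog_path': '/path/to/EC000/EC000_B01/Analog',
#           'anin1': {'present': True, 'name': 'microphone', 'type': 'acquisition'},
#           'anin2': {'present': True, 'name': 'speaker1', 'type': 'stimulus'},
#           'anin3': {'present': False, 'name': 'speaker2', 'type': 'stimulus'},
#           'anin4': {'present': False, 'name': 'custom', 'type': 'acquisition'},
#           'metadata': {},
#           'electrodes_file': None,
#           'bipolar_file': None,
#       })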
|
[
"scipy.io.loadmat",
"pandas.read_csv",
"hdmf.backends.hdf5.H5DataIO",
"scipy.io.wavfile.read",
"numpy.argsort",
"os.path.isfile",
"pathlib.Path",
"numpy.arange",
"pynwb.NWBFile",
"os.path.join",
"numpy.unique",
"numpy.zeros_like",
"os.path.exists",
"numpy.isfinite",
"ndx_bipolar_scheme.EcephysExt",
"ndx_ecog.ECoGSubject",
"datetime.datetime.now",
"numpy.stack",
"tqdm.tqdm",
"pynwb.ecephys.LFP",
"os.stat",
"ecogvis.functions.htk_to_nwb.HTK.readHTK",
"pynwb.TimeSeries",
"numpy.array",
"pynwb.get_manager",
"ndx_bipolar_scheme.BipolarSchemeTable"
] |
[((765, 778), 'pynwb.get_manager', 'get_manager', ([], {}), '()\n', (776, 778), False, 'from pynwb import NWBFile, TimeSeries, get_manager, NWBHDF5IO\n'), ((1076, 1100), 'os.path.isfile', 'os.path.isfile', (['wav_path'], {}), '(wav_path)\n', (1090, 1100), False, 'import os\n'), ((1268, 1292), 'os.path.isfile', 'os.path.isfile', (['htk_path'], {}), '(htk_path)\n', (1282, 1292), False, 'import os\n'), ((1855, 1878), 'numpy.argsort', 'np.argsort', (['new_numbers'], {}), '(new_numbers)\n', (1865, 1878), True, 'import numpy as np\n'), ((2218, 2232), 'numpy.stack', 'np.stack', (['data'], {}), '(data)\n', (2226, 2232), True, 'import numpy as np\n'), ((2425, 2480), 'os.path.join', 'os.path.join', (['blockpath', '"""Artifacts"""', '"""badChannels.txt"""'], {}), "(blockpath, 'Artifacts', 'badChannels.txt')\n", (2437, 2480), False, 'import os\n'), ((3800, 3822), 'scipy.io.loadmat', 'sio.loadmat', (['elecspath'], {}), '(elecspath)\n', (3811, 3822), True, 'import scipy.io as sio\n'), ((9414, 9470), 'os.path.join', 'path.join', (['blockpath', '"""Artifacts"""', '"""badTimeSegments.mat"""'], {}), "(blockpath, 'Artifacts', 'badTimeSegments.mat')\n", (9423, 9470), False, 'from os import path\n'), ((9852, 9875), 'pynwb.NWBFile', 'NWBFile', ([], {}), '(**nwbfile_dict)\n', (9859, 9875), False, 'from pynwb import NWBFile, TimeSeries, get_manager, NWBHDF5IO\n'), ((19324, 19351), 'ndx_ecog.ECoGSubject', 'ECoGSubject', ([], {}), '(**subject_dict)\n', (19335, 19351), False, 'from ndx_ecog import ECoGSubject\n'), ((1123, 1140), 'scipy.io.wavfile.read', 'wavread', (['wav_path'], {}), '(wav_path)\n', (1130, 1140), True, 'from scipy.io.wavfile import read as wavread\n'), ((1312, 1348), 'ecogvis.functions.htk_to_nwb.HTK.readHTK', 'readHTK', (['htk_path'], {'scale_s_rate': '(True)'}), '(htk_path, scale_s_rate=True)\n', (1319, 1348), False, 'from ecogvis.functions.htk_to_nwb.HTK import readHTK\n'), ((2017, 2062), 'tqdm.tqdm', 'tqdm', (['sorted_files'], {'desc': '"""reading electrodes"""'}), "(sorted_files, desc='reading electrodes')\n", (2021, 2062), False, 'from tqdm import tqdm\n'), ((2144, 2173), 'ecogvis.functions.htk_to_nwb.HTK.readHTK', 'readHTK', (['i'], {'scale_s_rate': '(True)'}), '(i, scale_s_rate=True)\n', (2151, 2173), False, 'from ecogvis.functions.htk_to_nwb.HTK import readHTK\n'), ((2546, 2579), 'os.path.isfile', 'os.path.isfile', (['bad_channels_file'], {}), '(bad_channels_file)\n', (2560, 2579), False, 'import os\n'), ((2647, 2722), 'pandas.read_csv', 'pd.read_csv', (['bad_channels_file'], {'header': 'None', 'delimiter': '""" """', 'engine': '"""python"""'}), "(bad_channels_file, header=None, delimiter=' ', engine='python')\n", (2658, 2722), True, 'import pandas as pd\n'), ((4484, 4500), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (4497, 4500), True, 'import numpy as np\n'), ((4520, 4536), 'numpy.zeros_like', 'np.zeros_like', (['y'], {}), '(y)\n', (4533, 4536), True, 'import numpy as np\n'), ((4556, 4572), 'numpy.zeros_like', 'np.zeros_like', (['z'], {}), '(z)\n', (4569, 4572), True, 'import numpy as np\n'), ((4932, 4974), 'numpy.unique', 'np.unique', (['group_labels'], {'return_index': '(True)'}), '(group_labels, return_index=True)\n', (4941, 4974), True, 'import numpy as np\n'), ((8963, 8978), 'pathlib.Path', 'Path', (['blockpath'], {}), '(blockpath)\n', (8967, 8978), False, 'from pathlib import Path\n'), ((9009, 9041), 'pathlib.Path', 'Path', (["htk_config['ecephys_path']"], {}), "(htk_config['ecephys_path'])\n", (9013, 9041), False, 'from pathlib import Path\n'), ((12948, 13013), 
'pandas.read_csv', 'pd.read_csv', (["htk_config['bipolar_file']"], {'index_col': '"""id"""', 'sep': '"""\t"""'}), "(htk_config['bipolar_file'], index_col='id', sep='\\t')\n", (12959, 13013), True, 'import pandas as pd\n'), ((13084, 13151), 'ndx_bipolar_scheme.BipolarSchemeTable', 'BipolarSchemeTable', ([], {'name': '"""bipolar_scheme_table"""', 'description': '"""desc"""'}), "(name='bipolar_scheme_table', description='desc')\n", (13102, 13151), False, 'from ndx_bipolar_scheme import BipolarSchemeTable, EcephysExt\n'), ((14681, 14711), 'ndx_bipolar_scheme.EcephysExt', 'EcephysExt', ([], {'name': '"""ecephys_ext"""'}), "(name='ecephys_ext')\n", (14691, 14711), False, 'from ndx_bipolar_scheme import BipolarSchemeTable, EcephysExt\n'), ((16746, 16821), 'pynwb.TimeSeries', 'TimeSeries', ([], {'name': "htk_config['anin1']['name']", 'data': 'data', 'unit': '"""NA"""', 'rate': 'fs'}), "(name=htk_config['anin1']['name'], data=data, unit='NA', rate=fs)\n", (16756, 16821), False, 'from pynwb import NWBFile, TimeSeries, get_manager, NWBHDF5IO\n'), ((17265, 17340), 'pynwb.TimeSeries', 'TimeSeries', ([], {'name': "htk_config['anin2']['name']", 'data': 'data', 'unit': '"""NA"""', 'rate': 'fs'}), "(name=htk_config['anin2']['name'], data=data, unit='NA', rate=fs)\n", (17275, 17340), False, 'from pynwb import NWBFile, TimeSeries, get_manager, NWBHDF5IO\n'), ((17784, 17859), 'pynwb.TimeSeries', 'TimeSeries', ([], {'name': "htk_config['anin3']['name']", 'data': 'data', 'unit': '"""NA"""', 'rate': 'fs'}), "(name=htk_config['anin3']['name'], data=data, unit='NA', rate=fs)\n", (17794, 17859), False, 'from pynwb import NWBFile, TimeSeries, get_manager, NWBHDF5IO\n'), ((18303, 18378), 'pynwb.TimeSeries', 'TimeSeries', ([], {'name': "htk_config['anin4']['name']", 'data': 'data', 'unit': '"""NA"""', 'rate': 'fs'}), "(name=htk_config['anin4']['name'], data=data, unit='NA', rate=fs)\n", (18313, 18378), False, 'from pynwb import NWBFile, TimeSeries, get_manager, NWBHDF5IO\n'), ((18744, 18773), 'os.path.exists', 'os.path.exists', (['bad_time_file'], {}), '(bad_time_file)\n', (18758, 18773), False, 'import os\n'), ((1169, 1196), 'numpy.array', 'np.array', (['data'], {'dtype': 'float'}), '(data, dtype=float)\n', (1177, 1196), True, 'import numpy as np\n'), ((2584, 2610), 'os.stat', 'os.stat', (['bad_channels_file'], {}), '(bad_channels_file)\n', (2591, 2610), False, 'import os\n'), ((2837, 2864), 'numpy.isfinite', 'np.isfinite', (['bad_elecs_inds'], {}), '(bad_elecs_inds)\n', (2848, 2864), True, 'import numpy as np\n'), ((15370, 15375), 'pynwb.ecephys.LFP', 'LFP', ([], {}), '()\n', (15373, 15375), False, 'from pynwb.ecephys import ElectricalSeries, LFP\n'), ((18778, 18800), 'os.stat', 'os.stat', (['bad_time_file'], {}), '(bad_time_file)\n', (18785, 18800), False, 'import os\n'), ((18829, 18855), 'scipy.io.loadmat', 'sio.loadmat', (['bad_time_file'], {}), '(bad_time_file)\n', (18840, 18855), True, 'import scipy.io as sio\n'), ((9631, 9645), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (9643, 9645), False, 'from datetime import datetime\n'), ((14550, 14575), 'numpy.arange', 'np.arange', (['(0)', 'df.shape[0]'], {}), '(0, df.shape[0])\n', (14559, 14575), True, 'import numpy as np\n'), ((15031, 15084), 'hdmf.backends.hdf5.H5DataIO', 'H5DataIO', (['data[:, 0:n_electrodes]'], {'compression': '"""gzip"""'}), "(data[:, 0:n_electrodes], compression='gzip')\n", (15039, 15084), False, 'from hdmf.backends.hdf5 import H5DataIO\n'), ((1707, 1721), 'pathlib.Path', 'Path', (['htk_path'], {}), '(htk_path)\n', (1711, 1721), 
False, 'from pathlib import Path\n'), ((6792, 6814), 'numpy.array', 'np.array', (['group_labels'], {}), '(group_labels)\n', (6800, 6814), True, 'import numpy as np\n'), ((15472, 15525), 'hdmf.backends.hdf5.H5DataIO', 'H5DataIO', (['data[:, 0:n_electrodes]'], {'compression': '"""gzip"""'}), "(data[:, 0:n_electrodes], compression='gzip')\n", (15480, 15525), False, 'from hdmf.backends.hdf5 import H5DataIO\n'), ((1615, 1629), 'pathlib.Path', 'Path', (['htk_path'], {}), '(htk_path)\n', (1619, 1629), False, 'from pathlib import Path\n'), ((16145, 16198), 'hdmf.backends.hdf5.H5DataIO', 'H5DataIO', (['data[:, 0:n_electrodes]'], {'compression': '"""gzip"""'}), "(data[:, 0:n_electrodes], compression='gzip')\n", (16153, 16198), False, 'from hdmf.backends.hdf5 import H5DataIO\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os,time,cv2,scipy.io
import tensorflow as tf
# import tensorflow.contrib.slim as slim
import scipy.misc as sic
# import network as network
import subprocess
import numpy as np
from matplotlib.colors import hsv_to_rgb
# from skimage.measure import compare_ssim, compare_psnr
from glob import glob
def prepare_data(data_path='../data_new/Data_Polar_Clean/crop_npy/'):
train_items, val_items = [], []
folders1 = glob(data_path+'/*')
# print(folders1)
folders2 = []
for folder1 in folders1:
folders2 = folders2 + glob(folder1+'/Indoor/*') + glob(folder1+'/Outdoor/*')
# print(folders2)
folders2.sort()
for folder2 in folders2[1::5] + folders2[2::5]+folders2[3::5]+folders2[4::5]:
folder = folder2
imgs = glob(folder + '/*.npy')
imgs.sort()
# print(folder, len(imgs))
for idx in range(len(imgs)//2):
tmp_M = imgs[2*idx+1]
tmp_R = imgs[2*idx]
train_items.append([tmp_M,tmp_R])
# print(tmp_R, tmp_M)
for folder2 in folders2[::5]:
folder = folder2
imgs = glob(folder + '/*.npy')
imgs.sort()
print(folder, len(imgs))
for idx in range(len(imgs)//2):
tmp_M = imgs[2*idx+1]
tmp_R = imgs[2*idx]
val_items.append([tmp_M,tmp_R])
# print(tmp_R, tmp_M)
return train_items, val_items[::3]
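# prepare_data expects data_path/<group>/{Indoor,Outdoor}/<capture>/ directories of
# .npy files; consecutive files form one (M, R) pair. Every fifth capture folder
# (folders2[::5]) is held out for validation, of which only every third item is
# kept; the remaining folders make up the training set.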
def prepare_final_data(data_path='../data_new/Data_Polar_Clean/crop_npy/'):
train_items,val_items,test_items=[],[],[]
imgs = glob("../data_new/Data_Polar_Clean/MMR_1/train/*npy")
imgs.sort()
for idx in range(len(imgs)//2):
tmp_M = imgs[2*idx+1]
tmp_R = imgs[2*idx]
train_items.append([tmp_M,tmp_R])
# print(tmp_R, tmp_M)
imgs = glob("../data_new/Data_Polar_Clean/MMR_1/test/*npy")
imgs.sort()
for idx in range(len(imgs)//2):
tmp_M = imgs[2*idx+1]
tmp_R = imgs[2*idx]
test_items.append([tmp_M,tmp_R])
# print(tmp_R, tmp_M)
imgs = glob("../data_new/Data_Polar_Clean/MMR_1/val/*npy")
imgs.sort()
for idx in range(len(imgs)//2):
tmp_M = imgs[2*idx+1]
tmp_R = imgs[2*idx]
val_items.append([tmp_M,tmp_R])
# print(tmp_R, tmp_M)
return train_items, val_items, test_items
def prepare_item(item):
M_name, R_name = item
tmp_M = np.load(M_name)
tmp_R = np.load(R_name)
return tmp_M,tmp_R
def light_mask(h, w):
mid_h = h//5 + np.random.randint(h//5*3)
mid_w = w//5 + np.random.randint(w//5*3)
Light_low = 0.1+0.3*np.random.random()
Light_high= Light_low + 1*np.random.random()
row2 = np.concatenate([np.linspace(Light_low,0.8,mid_w),np.linspace(0.8,Light_low, w-mid_w)],axis=0)
mat2 = np.tile(row2[np.newaxis,:],[h,1])
row1 = np.concatenate([np.linspace(Light_low,0.8,mid_h),np.linspace(0.8,Light_low, h-mid_h)],axis=0)
mat1 = np.tile(row1[:,np.newaxis],[1,w])
mat = np.power(mat1*mat2, 2)
# mat = np.power(mat, 1/2.2)
sz = (20 + np.random.randint(20))*2 + 1
mask1=cv2.GaussianBlur(mat,(sz,sz),cv2.BORDER_DEFAULT)
return mask1
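# light_mask builds a smooth, spatially varying illumination map: two 1-D ramps that
# peak at a random interior point are multiplied and squared (values roughly in
# [Light_low**4, 0.8**4]) before a final Gaussian blur with a random odd kernel size.
# Note that cv2.BORDER_DEFAULT is passed positionally to GaussianBlur, so it acts as
# sigmaX (=4) rather than as a border type.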
def shadow_mask(img):
h_orig,w_orig = img.shape[:2]
mask = np.ones((h_orig, w_orig))
    # darken three random rectangles, with a progressively lighter shadow level each time
    for base in (0.2, 0.3, 0.4):
        w_crop = np.random.randint(10, w_orig//3)
        h_crop = np.random.randint(10, h_orig//3)
        try:
            w_offset = np.random.randint(0, w_orig-w_crop-1)
            h_offset = np.random.randint(0, h_orig-h_crop-1)
        except:
            print("Original W %d, desired W %d"%(w_orig,w_crop))
            print("Original H %d, desired H %d"%(h_orig,h_crop))
            print(mask.shape)
        mask[h_offset:h_offset+h_crop-1,w_offset:w_offset+w_crop-1] = base + 0.4*np.random.rand()
    return mask
def prepare_FNF(item):
#---------------Get R&T----------------$
T_name, R_name = item
syn_image1=cv2.imread(T_name,-1)
w=np.random.randint(256, 480)
h=round((w/syn_image1.shape[1])*syn_image1.shape[0])
    t=cv2.resize(np.float32(syn_image1),(w,h),interpolation=cv2.INTER_CUBIC)/255.0
    r=cv2.resize(np.float32(cv2.imread(R_name,-1)),(w,h),interpolation=cv2.INTER_CUBIC)/255.0
# h, w = nf.shape[:2]
alpha = 0.25 + 0.5*np.random.random()
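    # Compose the synthetic no-flash image in linear intensity: gamma-decode the
    # alpha-weighted transmission and reflection layers (power 2.2), add them, and
    # gamma-encode the sum. The pure-flash image is a randomly scaled copy of the
    # transmission layer, later modulated by the lighting and shadow masks.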
gt_r = r
gt_r = (1-alpha)*gt_r
gt_t = alpha * t
nf = np.power(np.power(gt_t,2.2) + np.power(gt_r,2.2), 1/2.2)
pf = (0.5+ 0.7*np.random.random()) * t #
mask1= light_mask(h,w)
mask2= light_mask(h,w)
mask = np.sqrt(mask1*mask2)
shadow = shadow_mask(pf)
if np.random.random() < 0.5:
pf = np.power(np.power(pf,2.2) + 0.5* mask[:,:,np.newaxis],1/2.2)
else:
pf = np.power(np.power(pf,2.2) * mask[:,:,np.newaxis],1/2.2)
pf = pf*shadow[:,:,np.newaxis]
h = h//32 * 32
w = w//32 * 32
return pf[np.newaxis, :h, :w, :], gt_t[np.newaxis, :h, :w, :],gt_r[np.newaxis, :h, :w, :],nf[np.newaxis, :h, :w, :]
# def get_metrics(metrics,out_mask, gt_target,gt_reflection,pred_image_t,pred_image_r):
# metrics["T_ssim"] += compare_ssim(0.5*gt_target[0,:,:,4]*out_mask[0,:,:,0], 0.5*pred_image_t[0,:,:,4]*out_mask[0,:,:,0])
# metrics["T_psnr"] += compare_psnr(0.5*gt_target[0,:,:,4]*out_mask[0,:,:,0], 0.5*pred_image_t[0,:,:,4]*out_mask[0,:,:,0], 1)
# metrics["R_ssim"] += compare_ssim(0.5*gt_reflection[0,:,:,4]*out_mask[0,:,:,0], 0.5*pred_image_r[0,:,:,4]*out_mask[0,:,:,0])
# metrics["R_psnr"] += compare_psnr(0.5*gt_reflection[0,:,:,4]*out_mask[0,:,:,0], 0.5*pred_image_r[0,:,:,4]*out_mask[0,:,:,0], 1)
# return metrics
def save_concat_img(gt_input, gt_target, gt_reflection, pureflash, pred_image_t, pred_image_r, save_path, in_flash=None, is_test=False):
if is_test == True:
sic.imsave(save_path.replace(".jpg", "_0_input_ambient.jpg"), np.uint8(gt_input[0].clip(0,1) * 255.))
sic.imsave(save_path.replace(".jpg", "_5_input_flash.jpg"), np.uint8(in_flash[0].clip(0,1) * 255.))
sic.imsave(save_path.replace(".jpg", "_6_input_pureflash.jpg"), np.uint8(pureflash[0].clip(0,1) * 255.))
sic.imsave(save_path.replace(".jpg", "_1_pred_transmission.jpg"), np.uint8(pred_image_t[0].clip(0,1) * 255.))
sic.imsave(save_path.replace(".jpg", "_2_pred_refletion.jpg"), np.uint8(pred_image_r[0].clip(0,1) * 255.))
sic.imsave(save_path.replace(".jpg", "_3_gt_transmission.jpg"), np.uint8(gt_target[0].clip(0,1) * 255.))
sic.imsave(save_path.replace(".jpg", "_4_gt_reflection.jpg"), np.uint8(gt_reflection[0].clip(0,1) * 255.))
return 0
# out_img1= np.concatenate([gt_input[0], gt_target[0], gt_reflection[0]], axis=1)
h, w = gt_input.shape[1:3]
out_img1 = [gt_input[0], pred_image_t[0], gt_target[0]]
names = ["Input", "Pred", "GT"]
for idx, img in enumerate(out_img1):
cv2.putText(img, names[idx], (w//2-len(names[idx])*10, h-20), cv2.FONT_HERSHEY_PLAIN, 2.0, (255, 255, 255), 2)
out_img1 = np.hstack(out_img1)
# out_img2= np.concatenate([pureflash[0], pred_image_t[0],pred_image_r[0]], axis=1)
out_img2= [pureflash[0], gt_reflection[0], pred_image_r[0]]
# names = ["I_fo", "pred_R_a", "R_a"]
# for idx, img in enumerate(out_img2):
# print(img.shape)
# cv2.putText(img, names[idx], (w//2-len(names[idx])*10, h-20), cv2.FONT_HERSHEY_PLAIN, 2.0, (255, 255, 255), 2)
out_img2 = np.hstack(out_img2)
out_img = np.vstack([out_img1, out_img2])
out_img = np.minimum(np.maximum(out_img,0.0),1.0)*255.0
# cv2.imwrite("result/%s/%04d/val_%06d.jpg"%(task, epoch, id), np.uint8(out_img[::2,::2]))
# cv2.imwrite(save_path, np.uint8(out_img[::2,::2]))
sic.imsave(save_path, np.uint8(out_img[::2,::2]))
return out_img
def save_results(all_loss_test, metrics, id, task,epoch):
result=open("result/%s/score.txt"%task,'a')
result.write("Epc: %03d Loss: %.5f | SSIM: %.3f PSNR: %.2f | SSIM: %.3f PSNR: %.2f \n"%\
(epoch, np.mean(all_loss_test[np.where(all_loss_test)]), metrics["T_ssim"]/(id+1), metrics["T_psnr"]/(id+1), metrics["R_ssim"]/(id+1), metrics["R_psnr"]/(id+1)))
result.close()
def crop_shape(tmp_all, size=32):
h,w = tmp_all.shape[1:3]
h = h // size * size
w = w // size * size
return h, w
def cnts_add_display(epoch, cnts, step,crt, crt_t, st):
cnts["cnt"]+=1
step+=1
cnts["all_r"] += crt
cnts["all_t"] += crt_t
cnt, all_r, all_t = cnts["cnt"],cnts["all_r"],cnts["all_t"]
print("iter: %03d %03d %d || r:%.3f %.3f | t:%.3f %.3f |time:%.2f"%\
(epoch,cnt,step,crt,all_r/cnt,crt_t,all_t/cnt,time.time()-st))
return cnts, step
def save_all_out(output, path_prefix, HSV=0, I14=0,AoLP=0,DoLP=0):
sic.imsave("%s_I.jpg"%path_prefix,np.uint8(np.maximum(np.minimum(output[0,:,:,4]*255.0,255.0),0.0)))
if I14:
sic.imsave("%s_I14.jpg"%path_prefix,np.uint8(np.maximum(np.minimum(np.concatenate([output[0,:,:,i] for i in range(4)],axis=0)*255.0,255.0),0.0)))
if HSV:
sic.imsave("%s_HSV.jpg"%path_prefix,np.uint8(np.maximum(np.minimum(output[0,:,:,-3:]*255.0,255.0),0.0)))
if AoLP:
sic.imsave("%s_AoLP.jpg"%path_prefix,np.uint8(np.maximum(np.minimum(output[0,:,:,6]*255.0,255.0),0.0)))
if DoLP:
sic.imsave("%s_DoLP.jpg"%path_prefix,np.uint8(np.maximum(np.minimum(output[0,:,:,5]*255.0,255.0),0.0)))
def get_input(path, id):
raw_in_name = path + '/in/%04d.png'%id
raw_outR_name=path + '/out/%04d.png'%id
raw_outT_name=path + '/out/%04d.png'%id
temp_input = get_from_raw(raw_in_name, raw=None)
temp_output= np.concatenate([temp_input, temp_input],axis=3)
# temp_output= np.concatenate([get_from_raw(raw_outR_name,raw=None), get_from_raw(raw_outT_name,raw=None)],axis=3)
# temp_output=None
return temp_input, temp_output
def load_data(train_path, test_path, train_num, test_num):
train_in = []
test_in = []
train_out= []
test_out = []
for i in range(train_num):
temp_input, temp_output = get_input(train_path, i+1)
print('Train: ', i, temp_input.shape, temp_output.shape)
train_in.append(temp_input)
train_out.append(temp_output)
for i in range(test_num):
temp_input, temp_output = get_input(test_path, i+1)
print('Test: ', i, temp_input.shape, temp_output.shape)
test_in.append(temp_input)
test_out.append(temp_output)
return train_in, train_out, test_in, test_out
def get_from_raw(raw_name, raw=True):
if raw:
raw_img = read_raw(raw_name)
else:
raw_img = sic.imread(raw_name, mode='L')/255.
# print(np.mean(raw_img))
# print(raw_name, raw_img.shape)
h=raw_img.shape[0]//32*32
w=raw_img.shape[1]//32*32
return raw_split(raw_img[:h,:w])
# return raw2imgs(raw_img[:h,:w])
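# raw2imgs de-interleaves the 2x2 polarization mosaic into its four sub-images by taking
# every other row/column (I1..I4, one per polarizer orientation of the 2x2 super-pixel),
# and forms the total intensity as I = 0.5*(I1+I2+I3+I4), since each pair of orthogonal
# channels ideally sums to the unpolarized intensity.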
def raw2imgs(raw_img):
I3=raw_img[::2,::2]
I2=raw_img[::2,1::2]
I4=raw_img[1::2,::2]
I1=raw_img[1::2,1::2]
# I3=raw_img[::2,::2][::2,::2]
# I2=raw_img[::2,1::2][::2,::2]
# I4=raw_img[1::2,::2][::2,::2]
# I1=raw_img[1::2,1::2][::2,::2]
I = 0.5*(I1 + I3 + I2 + I4)
#print('I1: ', I1[np.isnan(I1)].shape)
##print('I2: ', I1[np.isnan(I2)].shape)
#print('I3: ', I1[np.isnan(I3)].shape)
#print('I4: ', I1[np.isnan(I4)].shape)
#print('I: ', I1[np.isnan(I)].shape, np.max(I), np.mean(I))
return I1, I2, I3, I4, I
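# raw_split expands a single-channel polarization mosaic into the 9-channel stack
# [I1, I2, I3, I4, I, DoLP, AoLP, I_p, I_np] and returns it as a 1 x h x w x 9 array.
# Minimal usage sketch (the file name below is hypothetical):
#   raw = read_raw('capture_0001.raw')        # float32 mosaic normalized to [0, 1]
#   stack = get_from_raw('capture_0001.raw')  # crops to multiples of 32, then calls raw_split
#   print(stack.shape)                        # (1, h, w, 9), h and w at half the mosaic resolution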
def raw_split(raw_img):
I1, I2, I3, I4, I = raw2imgs(raw_img)
AoLP, DoLP=calculate_ADoLP(I1, I2, I3, I4, I)
I_p, I_np=I * DoLP, I*(1-DoLP)
# print('AoLP NaN: ', AoLP[np.isnan(AoLP)].shape)
# print('DoLP NaN: ', DoLP[np.isnan(DoLP)].shape)
# print('I_p NaN: ', I_p[np.isnan(I_p)].shape)
# print('I_np NaN: ', I_np[np.isnan(I_np)].shape)
data = [I1, I2, I3, I4, I, DoLP, AoLP, I_p, I_np]
data_expand = [I[np.newaxis, :,:,np.newaxis] for I in data]
return np.concatenate(data_expand,axis=3)
def pols2infor(raw_img):
I1, I2, I3, I4 = [raw_img[:,:,i] for i in range(4)]
I = (I1+I2+I3+I4)*0.5
AoLP, DoLP=calculate_ADoLP(I1, I2, I3, I4, I)
I_p, I_np=I * DoLP, I*(1-DoLP)
# print('AoLP NaN: ', AoLP[np.isnan(AoLP)].shape)
# print('DoLP NaN: ', DoLP[np.isnan(DoLP)].shape)
# print('I_p NaN: ', I_p[np.isnan(I_p)].shape)
# print('I_np NaN: ', I_np[np.isnan(I_np)].shape)
data = [I1, I2, I3, I4, I, DoLP, AoLP, I_p, I_np]
data_expand = [I[np.newaxis, :,:,np.newaxis] for I in data]
return np.concatenate(data_expand,axis=3)
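# calculate_ADoLP computes the degree and angle of linear polarization from the four
# polarization channels: Q = I1 - I3 and U = I2 - I4 play the role of the linear Stokes
# parameters, DoLP = sqrt(Q^2 + U^2) / I and AoLP = 0.5 * arctan(U / Q). Zero entries of
# Q and I are replaced by a small constant to avoid division by zero, and DoLP is clipped to 1.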
def calculate_ADoLP(I1, I2, I3, I4, I):
Q = I1 - I3
U = I2 - I4
Q[Q == 0] = 0.0001
I[I == 0] = 0.0001
DoLP = np.sqrt(np.square(Q)+np.square(U))/I
AoLP = 0.5*np.arctan(U/Q)
# print(np.min(DoLP), np.max(DoLP))
# AoLP = (AoLP + 0.786)/(2*0.786)
DoLP[DoLP>1] = 1
return AoLP, DoLP
'''
def ad_new(raw):
Q = raw[:,:,:,0:1] - raw[:,:,:,2:3]
U = raw[:,:,:,1:2] - raw[:,:,:,3:4]
Q[Q == 0] = 1e-7
DoLP = np.sqrt(np.square(Q)+np.square(U))/raw[:,:,:,4:5]
AoLP = 0.5*np.arctan(U/Q)
# AoLP = (AoLP + 0.786)/(2*0.786)
return np.concatenate([raw, AoLP, DoLP],axis=3)
'''
def vis_ADoLP(AoLP, DoLP):
hsv = np.concatenate([AoLP[:,:,np.newaxis], DoLP[:,:,np.newaxis], np.ones([AoLP.shape[0], AoLP.shape[1], 1])],axis=2)
rgb = hsv_to_rgb(hsv)
return rgb
def vis_ADI(raw):
AoLP, DoLP, I=raw[:,:,2],raw[:,:,1],raw[:,:,0]
hsv = np.concatenate([AoLP[:,:,np.newaxis], DoLP[:,:,np.newaxis], I[:,:,np.newaxis]],axis=2)
rgb = hsv_to_rgb(hsv)
return rgb
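# read_uint12_12p unpacks the 12-bit packed ("12p") raw format: every 3 bytes encode two
# 12-bit samples. The first sample is byte0 plus the low nibble of byte1 shifted up by 8
# (the & 0xF00 mask), the second is byte2 shifted up by 4 plus the high nibble of byte1.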
def read_uint12_12p(path):
data = np.fromfile(path, dtype=np.uint8).astype("float32")
fst_uint8, mid_uint8, lst_uint8 = np.reshape(data, (data.shape[0] // 3, 3)).astype(np.uint16).T
fst_uint12 = fst_uint8 + (np.bitwise_and((mid_uint8 << 8), 3840))
snd_uint12 = (lst_uint8 << 4) + (mid_uint8 >> 4)
return np.reshape(np.concatenate((fst_uint12[:, None], snd_uint12[:, None]), axis=1), 2 * fst_uint12.shape[0])
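# read_raw guesses the on-disk bit depth from the file size relative to the sensor
# resolution: a ratio of 1 means one 16-bit word per pixel (normalized by 65535 or 4095
# depending on the value range), 0.5 means one byte per pixel (8-bit), and anything else
# is treated as 12-bit packed data decoded with read_uint12_12p.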
def read_raw(path, imageSize = (2048, 2448)):
npimg = np.fromfile(path, dtype=np.uint16).astype("float32")
unit = float(npimg.shape[0])/(imageSize[0]*imageSize[1])
if unit == 1:
if np.max(npimg)>4096:
npimg /= 65535.
else:
npimg /= 4095.
elif unit== 0.5 :
npimg = np.fromfile(path, dtype=np.uint8).astype("float32")
npimg /= 255.
else:
npimg = np.float32(read_uint12_12p(path))/4095
npimg = npimg.reshape(imageSize)
# print("Normalize- Max: %.4lf Min: %.4lf"%(np.max(npimg), np.min(npimg)))
return npimg
def whole_split(net_out):
key = 'I1, I2, I3, I4, I, DoLP, AoLP, I_p, I_np'
key = key.split(', ')
data_dict = {}
for i in range(9):
data_dict[key[i]] = net_out[0,:,:,i]
return data_dict
def pols2difs(raw_img):
I1, I2, I3, I4 = [raw_img[:,:,i] for i in range(4)]
I = (I1+I2+I3+I4)*0.5
AoLP, DoLP=calculate_ADoLP(I1, I2, I3, I4, I)
I_p, I_np=I * DoLP, I*(1-DoLP)
# print('AoLP NaN: ', AoLP[np.isnan(AoLP)].shape)
# print('DoLP NaN: ', DoLP[np.isnan(DoLP)].shape)
# print('I_p NaN: ', I_p[np.isnan(I_p)].shape)
# print('I_np NaN: ', I_np[np.isnan(I_np)].shape)
data = [I1, I2, I3, I4, I, DoLP, AoLP, I_p, I_np, I1-I2, I1-I3, I1-I4, I2-I3, I2-I4, I3-I4]
data_expand = [I[np.newaxis, :,:,np.newaxis] for I in data]
return np.concatenate(data_expand,axis=3)
def mask(img):
h, w = img.shape[0], img.shape[1]
mask = np.zeros([h, w, 1])
x1 = np.random.randint(int(0.75*w))
x2 = x1 + int(0.25*w)+np.random.randint(int(0.75*w - x1))
y1 = np.random.randint(int(0.75*h))
y2 = y1 + int(0.25*h)+np.random.randint(int(0.75*h - y1))
mask[y1:y2, x1:x2, :] = 1  # rows are indexed by the h-based y range, columns by the w-based x range
# print("x1, x2, y1, y2: ", x1, x2, y1, y2)
return mask
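# crop_images takes a random crop whose width is drawn from [a, b) while keeping the
# original aspect ratio, then trims the result to multiples of 32 so it passes cleanly
# through the network's down/up-sampling stages.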
def crop_images(X,a,b,is_sq=False):
h_orig,w_orig = X.shape[1:3]
w_crop = np.random.randint(a, b)
r = w_crop/w_orig
h_crop = np.int(h_orig*r)
try:
w_offset = np.random.randint(0, w_orig-w_crop-1)
h_offset = np.random.randint(0, h_orig-h_crop-1)
except:
print("Original W %d, desired W %d"%(w_orig,w_crop))
print("Original H %d, desired H %d"%(h_orig,h_crop))
out = X[:,h_offset:h_offset+h_crop-1,w_offset:w_offset+w_crop-1,:]
h,w=out.shape[1:3]
h = h//32*32
w = w//32*32
return out[:,:h,:w,:]
def aug_ad(im_in, im_R, im_T):
#Crop
h_orig,w_orig = im_in.shape[1:3]
w_crop = np.random.randint(512, 641)
r = w_crop/w_orig
h_crop = np.int(h_orig*r)
h_crop = h_crop//32*32
w_crop = w_crop//32*32
try:
w_offset = np.random.randint(0, w_orig-w_crop-1)
h_offset = np.random.randint(0, h_orig-h_crop-1)
except:
print("Original W %d, desired W %d"%(w_orig,w_crop))
print("Original H %d, desired H %d"%(h_orig,h_crop))
im_in=im_in[:,h_offset:h_offset+h_crop,w_offset:w_offset+w_crop,:]
im_R = im_R[:,h_offset:h_offset+h_crop,w_offset:w_offset+w_crop,:]
im_T = im_T[:,h_offset:h_offset+h_crop,w_offset:w_offset+w_crop,:]
return ad_new(im_in), ad_new(im_R), ad_new(im_T)
def augmentation(im_in, im_R, im_T):
#Crop
h_orig,w_orig = im_in.shape[1:3]
# w_crop = 641#np.random.randint(640, 801)
# r = w_crop/w_orig
# h_crop = np.int(h_orig*r)
w_crop, h_crop = 512, 512
# h_crop = h_crop//32*32
# w_crop = w_crop//32*32
try:
w_offset = np.random.randint(0, w_orig-w_crop-1)
h_offset = np.random.randint(0, h_orig-h_crop-1)
except:
print("Original W %d, desired W %d"%(w_orig,w_crop))
print("Original H %d, desired H %d"%(h_orig,h_crop))
im_in=im_in[:,h_offset:h_offset+h_crop,w_offset:w_offset+w_crop,:]
im_R = im_R[:,h_offset:h_offset+h_crop,w_offset:w_offset+w_crop,:]
im_T = im_T[:,h_offset:h_offset+h_crop,w_offset:w_offset+w_crop,:]
return im_in, im_R, im_T
def crop_augmentation(im_R, im_T):
#Crop
h_orig,w_orig = im_R.shape[1:3]
h_crop = h_orig//224*224
w_crop = w_orig//224*224
size = min(h_crop,w_crop)
# w_crop = 641#np.random.randint(640, 801)
# r = w_crop/w_orig
# h_crop = np.int(h_orig*r)
if size > 640:
size = 640
w_crop = size
h_crop = size
try:
w_offset = np.random.randint(0, w_orig-w_crop)
h_offset = np.random.randint(0, h_orig-h_crop)
except:
print("Original W %d, desired W %d"%(w_orig,w_crop))
print("Original H %d, desired H %d"%(h_orig,h_crop))
im_R = im_R[:,h_offset:h_offset+h_crop,w_offset:w_offset+w_crop,:]
im_T = im_T[:,h_offset:h_offset+h_crop,w_offset:w_offset+w_crop,:]
return im_R, im_T
def crop_augmentation_list(img_list):
#Crop
h_orig,w_orig = img_list[0].shape[1:3]
h_crop = h_orig * 3 // 4 // 32 * 32
w_crop = w_orig * 3 // 4 // 32 * 32
try:
w_offset = np.random.randint(0, w_orig-w_crop)
h_offset = np.random.randint(0, h_orig-h_crop)
except:
print("Original W %d, desired W %d"%(w_orig,w_crop))
print("Original H %d, desired H %d"%(h_orig,h_crop))
crop_list = [img[:,h_offset:h_offset+h_crop,w_offset:w_offset+w_crop,:] for img in img_list]
return crop_list
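# tf_calculate_ADoLP is the TensorFlow counterpart of calculate_ADoLP used by ADoLP_loss
# below; the 1e-4 offsets and the tf.where guard keep the Stokes division well defined so
# the polarization loss stays finite during training.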
def tf_calculate_ADoLP(I_all):
I1, I2, I3, I4 = I_all[:,:,:,:1], I_all[:,:,:,1:2], I_all[:,:,:,2:3], I_all[:,:,:,3:4]
I = 0.5 * (I1 + I2 + I3 + I4)+1e-4
Q = I1 - I3
U = I2 - I4
zero_mat = tf.zeros(tf.shape(I1), tf.float32)
ones_mat = 1e-4 * tf.ones(tf.shape(I1), tf.float32)
Q = tf.where(tf.equal(Q, zero_mat), ones_mat, Q)
DoLP = tf.divide(tf.sqrt(tf.square(Q)+tf.square(U)), I)
AoLP = 0.5*tf.atan(U/Q)
# AoLP = (AoLP + 0.786)/(2*0.786)
return AoLP, DoLP
def ADoLP_loss(gt, output):
AoLP1, DoLP1 = tf_calculate_ADoLP(gt)
AoLP2, DoLP2 = tf_calculate_ADoLP(output)
AoLP_loss = tf.reduce_mean(tf.abs(AoLP1 - AoLP2))
DoLP_loss = tf.reduce_mean(tf.abs(DoLP1 - DoLP2))
return AoLP_loss + DoLP_loss
def GC_augmentation(im_in):
#Flip
magic = np.random.random()
# print(im_in.shape)
if magic > 0.75:
im_in=im_in[:,::-1,:,:]
elif magic < 0.25:
im_in=im_in[:,:,::-1,:]
#Crop
h_orig,w_orig = im_in.shape[1:3]
h_crop = 224
w_crop = 224
try:
w_offset = np.random.randint(0, w_orig-w_crop-1)
h_offset = np.random.randint(0, h_orig-h_crop-1)
except:
print("Original W %d, desired W %d"%(w_orig,w_crop))
print("Original H %d, desired H %d"%(h_orig,h_crop))
im_in=im_in[:,h_offset:h_offset+h_crop,w_offset:w_offset+w_crop,:]
return im_in
|
[
"cv2.GaussianBlur",
"numpy.load",
"numpy.maximum",
"numpy.ones",
"numpy.random.randint",
"numpy.tile",
"glob.glob",
"tensorflow.abs",
"numpy.power",
"numpy.max",
"numpy.int",
"numpy.reshape",
"numpy.linspace",
"tensorflow.atan",
"tensorflow.equal",
"numpy.uint8",
"numpy.minimum",
"numpy.square",
"numpy.hstack",
"matplotlib.colors.hsv_to_rgb",
"numpy.arctan",
"numpy.concatenate",
"numpy.vstack",
"scipy.misc.imread",
"numpy.fromfile",
"numpy.float32",
"numpy.zeros",
"time.time",
"cv2.imread",
"tensorflow.shape",
"numpy.random.random",
"numpy.where",
"numpy.bitwise_and",
"tensorflow.square",
"numpy.random.rand",
"numpy.sqrt"
] |
[((554, 576), 'glob.glob', 'glob', (["(data_path + '/*')"], {}), "(data_path + '/*')\n", (558, 576), False, 'from glob import glob\n'), ((1701, 1754), 'glob.glob', 'glob', (['"""../data_new/Data_Polar_Clean/MMR_1/train/*npy"""'], {}), "('../data_new/Data_Polar_Clean/MMR_1/train/*npy')\n", (1705, 1754), False, 'from glob import glob\n'), ((1961, 2013), 'glob.glob', 'glob', (['"""../data_new/Data_Polar_Clean/MMR_1/test/*npy"""'], {}), "('../data_new/Data_Polar_Clean/MMR_1/test/*npy')\n", (1965, 2013), False, 'from glob import glob\n'), ((2219, 2270), 'glob.glob', 'glob', (['"""../data_new/Data_Polar_Clean/MMR_1/val/*npy"""'], {}), "('../data_new/Data_Polar_Clean/MMR_1/val/*npy')\n", (2223, 2270), False, 'from glob import glob\n'), ((2577, 2592), 'numpy.load', 'np.load', (['M_name'], {}), '(M_name)\n', (2584, 2592), True, 'import numpy as np\n'), ((2606, 2621), 'numpy.load', 'np.load', (['R_name'], {}), '(R_name)\n', (2613, 2621), True, 'import numpy as np\n'), ((2976, 3012), 'numpy.tile', 'np.tile', (['row2[np.newaxis, :]', '[h, 1]'], {}), '(row2[np.newaxis, :], [h, 1])\n', (2983, 3012), True, 'import numpy as np\n'), ((3128, 3164), 'numpy.tile', 'np.tile', (['row1[:, np.newaxis]', '[1, w]'], {}), '(row1[:, np.newaxis], [1, w])\n', (3135, 3164), True, 'import numpy as np\n'), ((3173, 3197), 'numpy.power', 'np.power', (['(mat1 * mat2)', '(2)'], {}), '(mat1 * mat2, 2)\n', (3181, 3197), True, 'import numpy as np\n'), ((3286, 3337), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['mat', '(sz, sz)', 'cv2.BORDER_DEFAULT'], {}), '(mat, (sz, sz), cv2.BORDER_DEFAULT)\n', (3302, 3337), False, 'import os, time, cv2, scipy.io\n'), ((3481, 3506), 'numpy.ones', 'np.ones', (['(h_orig, w_orig)'], {}), '((h_orig, w_orig))\n', (3488, 3506), True, 'import numpy as np\n'), ((3521, 3555), 'numpy.random.randint', 'np.random.randint', (['(10)', '(w_orig // 3)'], {}), '(10, w_orig // 3)\n', (3538, 3555), True, 'import numpy as np\n'), ((3568, 3602), 'numpy.random.randint', 'np.random.randint', (['(10)', '(h_orig // 3)'], {}), '(10, h_orig // 3)\n', (3585, 3602), True, 'import numpy as np\n'), ((4108, 4142), 'numpy.random.randint', 'np.random.randint', (['(10)', '(w_orig // 3)'], {}), '(10, w_orig // 3)\n', (4125, 4142), True, 'import numpy as np\n'), ((4155, 4189), 'numpy.random.randint', 'np.random.randint', (['(10)', '(h_orig // 3)'], {}), '(10, h_orig // 3)\n', (4172, 4189), True, 'import numpy as np\n'), ((4697, 4731), 'numpy.random.randint', 'np.random.randint', (['(10)', '(w_orig // 3)'], {}), '(10, w_orig // 3)\n', (4714, 4731), True, 'import numpy as np\n'), ((4744, 4778), 'numpy.random.randint', 'np.random.randint', (['(10)', '(h_orig // 3)'], {}), '(10, h_orig // 3)\n', (4761, 4778), True, 'import numpy as np\n'), ((5398, 5420), 'cv2.imread', 'cv2.imread', (['T_name', '(-1)'], {}), '(T_name, -1)\n', (5408, 5420), False, 'import os, time, cv2, scipy.io\n'), ((5427, 5454), 'numpy.random.randint', 'np.random.randint', (['(256)', '(480)'], {}), '(256, 480)\n', (5444, 5454), True, 'import numpy as np\n'), ((5988, 6010), 'numpy.sqrt', 'np.sqrt', (['(mask1 * mask2)'], {}), '(mask1 * mask2)\n', (5995, 6010), True, 'import numpy as np\n'), ((8451, 8470), 'numpy.hstack', 'np.hstack', (['out_img1'], {}), '(out_img1)\n', (8460, 8470), True, 'import numpy as np\n'), ((8885, 8904), 'numpy.hstack', 'np.hstack', (['out_img2'], {}), '(out_img2)\n', (8894, 8904), True, 'import numpy as np\n'), ((8925, 8956), 'numpy.vstack', 'np.vstack', (['[out_img1, out_img2]'], {}), '([out_img1, out_img2])\n', (8934, 8956), True, 'import numpy 
as np\n'), ((11133, 11181), 'numpy.concatenate', 'np.concatenate', (['[temp_input, temp_input]'], {'axis': '(3)'}), '([temp_input, temp_input], axis=3)\n', (11147, 11181), True, 'import numpy as np\n'), ((13488, 13523), 'numpy.concatenate', 'np.concatenate', (['data_expand'], {'axis': '(3)'}), '(data_expand, axis=3)\n', (13502, 13523), True, 'import numpy as np\n'), ((14072, 14107), 'numpy.concatenate', 'np.concatenate', (['data_expand'], {'axis': '(3)'}), '(data_expand, axis=3)\n', (14086, 14107), True, 'import numpy as np\n'), ((14920, 14935), 'matplotlib.colors.hsv_to_rgb', 'hsv_to_rgb', (['hsv'], {}), '(hsv)\n', (14930, 14935), False, 'from matplotlib.colors import hsv_to_rgb\n'), ((15038, 15136), 'numpy.concatenate', 'np.concatenate', (['[AoLP[:, :, np.newaxis], DoLP[:, :, np.newaxis], I[:, :, np.newaxis]]'], {'axis': '(2)'}), '([AoLP[:, :, np.newaxis], DoLP[:, :, np.newaxis], I[:, :, np.\n newaxis]], axis=2)\n', (15052, 15136), True, 'import numpy as np\n'), ((15136, 15151), 'matplotlib.colors.hsv_to_rgb', 'hsv_to_rgb', (['hsv'], {}), '(hsv)\n', (15146, 15151), False, 'from matplotlib.colors import hsv_to_rgb\n'), ((17021, 17056), 'numpy.concatenate', 'np.concatenate', (['data_expand'], {'axis': '(3)'}), '(data_expand, axis=3)\n', (17035, 17056), True, 'import numpy as np\n'), ((17123, 17142), 'numpy.zeros', 'np.zeros', (['[h, w, 1]'], {}), '([h, w, 1])\n', (17131, 17142), True, 'import numpy as np\n'), ((17534, 17557), 'numpy.random.randint', 'np.random.randint', (['a', 'b'], {}), '(a, b)\n', (17551, 17557), True, 'import numpy as np\n'), ((17595, 17613), 'numpy.int', 'np.int', (['(h_orig * r)'], {}), '(h_orig * r)\n', (17601, 17613), True, 'import numpy as np\n'), ((18129, 18156), 'numpy.random.randint', 'np.random.randint', (['(512)', '(641)'], {}), '(512, 641)\n', (18146, 18156), True, 'import numpy as np\n'), ((18194, 18212), 'numpy.int', 'np.int', (['(h_orig * r)'], {}), '(h_orig * r)\n', (18200, 18212), True, 'import numpy as np\n'), ((21811, 21829), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (21827, 21829), True, 'import numpy as np\n'), ((904, 927), 'glob.glob', 'glob', (["(folder + '/*.npy')"], {}), "(folder + '/*.npy')\n", (908, 927), False, 'from glob import glob\n'), ((1255, 1278), 'glob.glob', 'glob', (["(folder + '/*.npy')"], {}), "(folder + '/*.npy')\n", (1259, 1278), False, 'from glob import glob\n'), ((2692, 2721), 'numpy.random.randint', 'np.random.randint', (['(h // 5 * 3)'], {}), '(h // 5 * 3)\n', (2709, 2721), True, 'import numpy as np\n'), ((2738, 2767), 'numpy.random.randint', 'np.random.randint', (['(w // 5 * 3)'], {}), '(w // 5 * 3)\n', (2755, 2767), True, 'import numpy as np\n'), ((3702, 3743), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w_orig - w_crop - 1)'], {}), '(0, w_orig - w_crop - 1)\n', (3719, 3743), True, 'import numpy as np\n'), ((3761, 3802), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h_orig - h_crop - 1)'], {}), '(0, h_orig - h_crop - 1)\n', (3778, 3802), True, 'import numpy as np\n'), ((4289, 4330), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w_orig - w_crop - 1)'], {}), '(0, w_orig - w_crop - 1)\n', (4306, 4330), True, 'import numpy as np\n'), ((4348, 4389), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h_orig - h_crop - 1)'], {}), '(0, h_orig - h_crop - 1)\n', (4365, 4389), True, 'import numpy as np\n'), ((4878, 4919), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w_orig - w_crop - 1)'], {}), '(0, w_orig - w_crop - 1)\n', (4895, 4919), True, 'import numpy as np\n'), 
((4937, 4978), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h_orig - h_crop - 1)'], {}), '(0, h_orig - h_crop - 1)\n', (4954, 4978), True, 'import numpy as np\n'), ((6049, 6067), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (6065, 6067), True, 'import numpy as np\n'), ((9201, 9228), 'numpy.uint8', 'np.uint8', (['out_img[::2, ::2]'], {}), '(out_img[::2, ::2])\n', (9209, 9228), True, 'import numpy as np\n'), ((14298, 14314), 'numpy.arctan', 'np.arctan', (['(U / Q)'], {}), '(U / Q)\n', (14307, 14314), True, 'import numpy as np\n'), ((15397, 15433), 'numpy.bitwise_and', 'np.bitwise_and', (['(mid_uint8 << 8)', '(3840)'], {}), '(mid_uint8 << 8, 3840)\n', (15411, 15433), True, 'import numpy as np\n'), ((15514, 15580), 'numpy.concatenate', 'np.concatenate', (['(fst_uint12[:, None], snd_uint12[:, None])'], {'axis': '(1)'}), '((fst_uint12[:, None], snd_uint12[:, None]), axis=1)\n', (15528, 15580), True, 'import numpy as np\n'), ((17642, 17683), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w_orig - w_crop - 1)'], {}), '(0, w_orig - w_crop - 1)\n', (17659, 17683), True, 'import numpy as np\n'), ((17700, 17741), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h_orig - h_crop - 1)'], {}), '(0, h_orig - h_crop - 1)\n', (17717, 17741), True, 'import numpy as np\n'), ((18303, 18344), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w_orig - w_crop - 1)'], {}), '(0, w_orig - w_crop - 1)\n', (18320, 18344), True, 'import numpy as np\n'), ((18361, 18402), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h_orig - h_crop - 1)'], {}), '(0, h_orig - h_crop - 1)\n', (18378, 18402), True, 'import numpy as np\n'), ((19127, 19168), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w_orig - w_crop - 1)'], {}), '(0, w_orig - w_crop - 1)\n', (19144, 19168), True, 'import numpy as np\n'), ((19185, 19226), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h_orig - h_crop - 1)'], {}), '(0, h_orig - h_crop - 1)\n', (19202, 19226), True, 'import numpy as np\n'), ((20000, 20037), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w_orig - w_crop)'], {}), '(0, w_orig - w_crop)\n', (20017, 20037), True, 'import numpy as np\n'), ((20056, 20093), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h_orig - h_crop)'], {}), '(0, h_orig - h_crop)\n', (20073, 20093), True, 'import numpy as np\n'), ((20610, 20647), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w_orig - w_crop)'], {}), '(0, w_orig - w_crop)\n', (20627, 20647), True, 'import numpy as np\n'), ((20666, 20703), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h_orig - h_crop)'], {}), '(0, h_orig - h_crop)\n', (20683, 20703), True, 'import numpy as np\n'), ((21200, 21212), 'tensorflow.shape', 'tf.shape', (['I1'], {}), '(I1)\n', (21208, 21212), True, 'import tensorflow as tf\n'), ((21301, 21322), 'tensorflow.equal', 'tf.equal', (['Q', 'zero_mat'], {}), '(Q, zero_mat)\n', (21309, 21322), True, 'import tensorflow as tf\n'), ((21414, 21428), 'tensorflow.atan', 'tf.atan', (['(U / Q)'], {}), '(U / Q)\n', (21421, 21428), True, 'import tensorflow as tf\n'), ((21642, 21663), 'tensorflow.abs', 'tf.abs', (['(AoLP1 - AoLP2)'], {}), '(AoLP1 - AoLP2)\n', (21648, 21663), True, 'import tensorflow as tf\n'), ((21697, 21718), 'tensorflow.abs', 'tf.abs', (['(DoLP1 - DoLP2)'], {}), '(DoLP1 - DoLP2)\n', (21703, 21718), True, 'import tensorflow as tf\n'), ((22091, 22132), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w_orig - w_crop - 1)'], {}), '(0, w_orig - w_crop - 1)\n', (22108, 
22132), True, 'import numpy as np\n'), ((22149, 22190), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h_orig - h_crop - 1)'], {}), '(0, h_orig - h_crop - 1)\n', (22166, 22190), True, 'import numpy as np\n'), ((705, 733), 'glob.glob', 'glob', (["(folder1 + '/Outdoor/*')"], {}), "(folder1 + '/Outdoor/*')\n", (709, 733), False, 'from glob import glob\n'), ((2789, 2807), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2805, 2807), True, 'import numpy as np\n'), ((2839, 2857), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2855, 2857), True, 'import numpy as np\n'), ((2886, 2920), 'numpy.linspace', 'np.linspace', (['Light_low', '(0.8)', 'mid_w'], {}), '(Light_low, 0.8, mid_w)\n', (2897, 2920), True, 'import numpy as np\n'), ((2919, 2957), 'numpy.linspace', 'np.linspace', (['(0.8)', 'Light_low', '(w - mid_w)'], {}), '(0.8, Light_low, w - mid_w)\n', (2930, 2957), True, 'import numpy as np\n'), ((3038, 3072), 'numpy.linspace', 'np.linspace', (['Light_low', '(0.8)', 'mid_h'], {}), '(Light_low, 0.8, mid_h)\n', (3049, 3072), True, 'import numpy as np\n'), ((3071, 3109), 'numpy.linspace', 'np.linspace', (['(0.8)', 'Light_low', '(h - mid_h)'], {}), '(0.8, Light_low, h - mid_h)\n', (3082, 3109), True, 'import numpy as np\n'), ((4071, 4087), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4085, 4087), True, 'import numpy as np\n'), ((4658, 4674), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4672, 4674), True, 'import numpy as np\n'), ((5247, 5263), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (5261, 5263), True, 'import numpy as np\n'), ((5531, 5553), 'numpy.float32', 'np.float32', (['syn_image1'], {}), '(syn_image1)\n', (5541, 5553), True, 'import numpy as np\n'), ((5719, 5737), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5735, 5737), True, 'import numpy as np\n'), ((5822, 5841), 'numpy.power', 'np.power', (['gt_t', '(2.2)'], {}), '(gt_t, 2.2)\n', (5830, 5841), True, 'import numpy as np\n'), ((5843, 5862), 'numpy.power', 'np.power', (['gt_r', '(2.2)'], {}), '(gt_r, 2.2)\n', (5851, 5862), True, 'import numpy as np\n'), ((8985, 9009), 'numpy.maximum', 'np.maximum', (['out_img', '(0.0)'], {}), '(out_img, 0.0)\n', (8995, 9009), True, 'import numpy as np\n'), ((12158, 12188), 'scipy.misc.imread', 'sic.imread', (['raw_name'], {'mode': '"""L"""'}), "(raw_name, mode='L')\n", (12168, 12188), True, 'import scipy.misc as sic\n'), ((14857, 14899), 'numpy.ones', 'np.ones', (['[AoLP.shape[0], AoLP.shape[1], 1]'], {}), '([AoLP.shape[0], AoLP.shape[1], 1])\n', (14864, 14899), True, 'import numpy as np\n'), ((15213, 15246), 'numpy.fromfile', 'np.fromfile', (['path'], {'dtype': 'np.uint8'}), '(path, dtype=np.uint8)\n', (15224, 15246), True, 'import numpy as np\n'), ((15673, 15707), 'numpy.fromfile', 'np.fromfile', (['path'], {'dtype': 'np.uint16'}), '(path, dtype=np.uint16)\n', (15684, 15707), True, 'import numpy as np\n'), ((15803, 15816), 'numpy.max', 'np.max', (['npimg'], {}), '(npimg)\n', (15809, 15816), True, 'import numpy as np\n'), ((21257, 21269), 'tensorflow.shape', 'tf.shape', (['I1'], {}), '(I1)\n', (21265, 21269), True, 'import tensorflow as tf\n'), ((677, 704), 'glob.glob', 'glob', (["(folder1 + '/Indoor/*')"], {}), "(folder1 + '/Indoor/*')\n", (681, 704), False, 'from glob import glob\n'), ((3246, 3267), 'numpy.random.randint', 'np.random.randint', (['(20)'], {}), '(20)\n', (3263, 3267), True, 'import numpy as np\n'), ((5612, 5634), 'cv2.imread', 'cv2.imread', (['R_name', '(-1)'], {}), '(R_name, -1)\n', (5622, 5634), 
False, 'import os, time, cv2, scipy.io\n'), ((5892, 5910), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5908, 5910), True, 'import numpy as np\n'), ((6098, 6115), 'numpy.power', 'np.power', (['pf', '(2.2)'], {}), '(pf, 2.2)\n', (6106, 6115), True, 'import numpy as np\n'), ((6184, 6201), 'numpy.power', 'np.power', (['pf', '(2.2)'], {}), '(pf, 2.2)\n', (6192, 6201), True, 'import numpy as np\n'), ((10303, 10348), 'numpy.minimum', 'np.minimum', (['(output[0, :, :, 4] * 255.0)', '(255.0)'], {}), '(output[0, :, :, 4] * 255.0, 255.0)\n', (10313, 10348), True, 'import numpy as np\n'), ((14253, 14265), 'numpy.square', 'np.square', (['Q'], {}), '(Q)\n', (14262, 14265), True, 'import numpy as np\n'), ((14266, 14278), 'numpy.square', 'np.square', (['U'], {}), '(U)\n', (14275, 14278), True, 'import numpy as np\n'), ((15304, 15345), 'numpy.reshape', 'np.reshape', (['data', '(data.shape[0] // 3, 3)'], {}), '(data, (data.shape[0] // 3, 3))\n', (15314, 15345), True, 'import numpy as np\n'), ((21367, 21379), 'tensorflow.square', 'tf.square', (['Q'], {}), '(Q)\n', (21376, 21379), True, 'import tensorflow as tf\n'), ((21380, 21392), 'tensorflow.square', 'tf.square', (['U'], {}), '(U)\n', (21389, 21392), True, 'import tensorflow as tf\n'), ((10128, 10139), 'time.time', 'time.time', ([], {}), '()\n', (10137, 10139), False, 'import os, time, cv2, scipy.io\n'), ((10596, 10643), 'numpy.minimum', 'np.minimum', (['(output[0, :, :, -3:] * 255.0)', '(255.0)'], {}), '(output[0, :, :, -3:] * 255.0, 255.0)\n', (10606, 10643), True, 'import numpy as np\n'), ((10725, 10770), 'numpy.minimum', 'np.minimum', (['(output[0, :, :, 6] * 255.0)', '(255.0)'], {}), '(output[0, :, :, 6] * 255.0, 255.0)\n', (10735, 10770), True, 'import numpy as np\n'), ((10852, 10897), 'numpy.minimum', 'np.minimum', (['(output[0, :, :, 5] * 255.0)', '(255.0)'], {}), '(output[0, :, :, 5] * 255.0, 255.0)\n', (10862, 10897), True, 'import numpy as np\n'), ((15935, 15968), 'numpy.fromfile', 'np.fromfile', (['path'], {'dtype': 'np.uint8'}), '(path, dtype=np.uint8)\n', (15946, 15968), True, 'import numpy as np\n'), ((9492, 9515), 'numpy.where', 'np.where', (['all_loss_test'], {}), '(all_loss_test)\n', (9500, 9515), True, 'import numpy as np\n')]
|
import pyclesperanto_prototype as cle
import numpy as np
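# The test pushes a 5x5x5 stack, projects it along y with cle.maximum_y_projection, and
# checks the pulled result against the expected per-position maxima.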
def test_maximum_y_projection():
test1 = cle.push(np.asarray([
[
[1, 0, 0, 0, 9],
[0, 2, 0, 8, 0],
[3, 0, 1, 0, 10],
[0, 4, 0, 7, 0],
[5, 0, 6, 0, 10]
], [
[0, 2, 0, 8, 0],
[1, 0, 0, 0, 9],
[3, 0, 1, 0, 10],
[0, 4, 0, 7, 0],
[5, 0, 6, 0, 10]
], [
[0, 2, 0, 8, 0],
[3, 0, 1, 0, 10],
[0, 4, 0, 7, 0],
[1, 0, 0, 0, 9],
[5, 0, 6, 0, 10]
], [
[0, 2, 0, 8, 0],
[1, 0, 0, 0, 9],
[0, 4, 0, 7, 0],
[3, 0, 1, 0, 10],
[5, 0, 6, 0, 10]
], [
[1, 0, 0, 0, 9],
[0, 4, 0, 7, 0],
[3, 0, 1, 0, 10],
[0, 2, 0, 8, 0],
[5, 0, 6, 0, 10]
]
]).T)
reference = cle.push(np.asarray([
[5, 4, 6, 8, 10],
[5, 4, 6, 8, 10],
[5, 4, 6, 8, 10],
[5, 4, 6, 8, 10],
[5, 4, 6, 8, 10]
]).T)
result = cle.create(reference)
cle.maximum_y_projection(test1, result)
a = cle.pull(result)
b = cle.pull(reference)
print(a)
assert (np.array_equal(a, b))
|
[
"numpy.array_equal",
"pyclesperanto_prototype.maximum_y_projection",
"numpy.asarray",
"pyclesperanto_prototype.pull",
"pyclesperanto_prototype.create"
] |
[((1129, 1150), 'pyclesperanto_prototype.create', 'cle.create', (['reference'], {}), '(reference)\n', (1139, 1150), True, 'import pyclesperanto_prototype as cle\n'), ((1155, 1194), 'pyclesperanto_prototype.maximum_y_projection', 'cle.maximum_y_projection', (['test1', 'result'], {}), '(test1, result)\n', (1179, 1194), True, 'import pyclesperanto_prototype as cle\n'), ((1204, 1220), 'pyclesperanto_prototype.pull', 'cle.pull', (['result'], {}), '(result)\n', (1212, 1220), True, 'import pyclesperanto_prototype as cle\n'), ((1229, 1248), 'pyclesperanto_prototype.pull', 'cle.pull', (['reference'], {}), '(reference)\n', (1237, 1248), True, 'import pyclesperanto_prototype as cle\n'), ((1276, 1296), 'numpy.array_equal', 'np.array_equal', (['a', 'b'], {}), '(a, b)\n', (1290, 1296), True, 'import numpy as np\n'), ((112, 595), 'numpy.asarray', 'np.asarray', (['[[[1, 0, 0, 0, 9], [0, 2, 0, 8, 0], [3, 0, 1, 0, 10], [0, 4, 0, 7, 0], [5, \n 0, 6, 0, 10]], [[0, 2, 0, 8, 0], [1, 0, 0, 0, 9], [3, 0, 1, 0, 10], [0,\n 4, 0, 7, 0], [5, 0, 6, 0, 10]], [[0, 2, 0, 8, 0], [3, 0, 1, 0, 10], [0,\n 4, 0, 7, 0], [1, 0, 0, 0, 9], [5, 0, 6, 0, 10]], [[0, 2, 0, 8, 0], [1, \n 0, 0, 0, 9], [0, 4, 0, 7, 0], [3, 0, 1, 0, 10], [5, 0, 6, 0, 10]], [[1,\n 0, 0, 0, 9], [0, 4, 0, 7, 0], [3, 0, 1, 0, 10], [0, 2, 0, 8, 0], [5, 0,\n 6, 0, 10]]]'], {}), '([[[1, 0, 0, 0, 9], [0, 2, 0, 8, 0], [3, 0, 1, 0, 10], [0, 4, 0, \n 7, 0], [5, 0, 6, 0, 10]], [[0, 2, 0, 8, 0], [1, 0, 0, 0, 9], [3, 0, 1, \n 0, 10], [0, 4, 0, 7, 0], [5, 0, 6, 0, 10]], [[0, 2, 0, 8, 0], [3, 0, 1,\n 0, 10], [0, 4, 0, 7, 0], [1, 0, 0, 0, 9], [5, 0, 6, 0, 10]], [[0, 2, 0,\n 8, 0], [1, 0, 0, 0, 9], [0, 4, 0, 7, 0], [3, 0, 1, 0, 10], [5, 0, 6, 0,\n 10]], [[1, 0, 0, 0, 9], [0, 4, 0, 7, 0], [3, 0, 1, 0, 10], [0, 2, 0, 8,\n 0], [5, 0, 6, 0, 10]]])\n', (122, 595), True, 'import numpy as np\n'), ((963, 1069), 'numpy.asarray', 'np.asarray', (['[[5, 4, 6, 8, 10], [5, 4, 6, 8, 10], [5, 4, 6, 8, 10], [5, 4, 6, 8, 10], [5,\n 4, 6, 8, 10]]'], {}), '([[5, 4, 6, 8, 10], [5, 4, 6, 8, 10], [5, 4, 6, 8, 10], [5, 4, 6,\n 8, 10], [5, 4, 6, 8, 10]])\n', (973, 1069), True, 'import numpy as np\n')]
|
import sys
sys.path.insert(0, "../../../")
import argparse
import torch
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as data_utils
import numpy as np
from paper_experiments.rotated_MNIST.mnist_loader_shifted_label_distribution_rotate import MnistRotatedDist
from paper_experiments.rotated_MNIST.mnist_loader_shifted_label_distribution_flip import MnistRotatedDistFlip
from paper_experiments.rotated_MNIST.mnist_loader import MnistRotated
from paper_experiments.rotated_MNIST.augmentations.model_baseline import Net
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target, _) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
_, target = target.max(dim=1)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
def test(args, model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target, _ in test_loader:
data, target = data.to(device), target.to(device)
_, target = target.max(dim=1)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
return test_loss, 100. * correct / len(test_loader.dataset)
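# main() trains the baseline classifier on three source domains (MNIST rotated by 30, 60 and
# 90 degrees with shifted label distributions, using either random-rotation or flip
# augmentation), keeps the checkpoint with the best validation accuracy, and finally reports
# test accuracy on the held-out unrotated (0 degree) domain.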
def main():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=0,
help='random seed (default: 0)')
parser.add_argument('--batch-size', type=int, default=128,
help='input batch size for training (default: 128)')
parser.add_argument('--epochs', type=int, default=200,
help='number of epochs to train (default: 200)')
parser.add_argument('--lr', type=float, default=0.001,
help='learning rate (default: 0.001)')
parser.add_argument('--da', type=str, default='rotate', choices=['rotate', 'flip'],
help='type of data augmentation')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
# Set seed
torch.manual_seed(args.seed)
torch.backends.cudnn.benchmark = False
np.random.seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 8, 'pin_memory': True} if use_cuda else {}
# Load supervised training
if args.da == 'rotate':
mnist_30 = MnistRotatedDist('../dataset/', train=True, thetas=[30.0], d_label=0, transform=True)
mnist_60 = MnistRotatedDist('../dataset/', train=True, thetas=[60.0], d_label=1, transform=True)
mnist_90 = MnistRotatedDist('../dataset/', train=True, thetas=[90.0], d_label=2, transform=True)
model_name = 'baseline_test_0_random_rotate_seed_' + str(args.seed)
elif args.da == 'flip':
mnist_30 = MnistRotatedDistFlip('../dataset/', train=True, thetas=[30.0], d_label=0)
mnist_60 = MnistRotatedDistFlip('../dataset/', train=True, thetas=[60.0], d_label=1)
mnist_90 = MnistRotatedDistFlip('../dataset/', train=True, thetas=[90.0], d_label=2)
model_name = 'baseline_test_0_random_flips_seed_' + str(args.seed)
mnist = data_utils.ConcatDataset([mnist_30, mnist_60, mnist_90])
train_size = int(0.9 * len(mnist))
val_size = len(mnist) - train_size
train_dataset, val_dataset = torch.utils.data.random_split(mnist, [train_size, val_size])
train_loader = data_utils.DataLoader(train_dataset,
batch_size=args.batch_size,
shuffle=True, **kwargs)
val_loader = data_utils.DataLoader(val_dataset,
batch_size=args.batch_size,
shuffle=False, **kwargs)
model = Net().to(device)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
best_val_acc = 0
for epoch in range(1, args.epochs + 1):
print('\n Epoch: ' + str(epoch))
train(args, model, device, train_loader, optimizer, epoch)
val_loss, val_acc = test(args, model, device, val_loader)
print(epoch, val_loss, val_acc)
# Save best
if val_acc >= best_val_acc:
best_val_acc = val_acc
torch.save(model, model_name + '.model')
torch.save(args, model_name + '.config')
# Test loader
mnist_0 = MnistRotated('../dataset/', train=False, thetas=[0.0], d_label=0)
test_loader = data_utils.DataLoader(mnist_0,
batch_size=args.batch_size,
shuffle=False, **kwargs)
model = torch.load(model_name + '.model').to(device)
_, test_acc = test(args, model, device, test_loader)
with open(model_name + '.txt', "w") as text_file:
text_file.write("Test Acc: " + str(test_acc))
if __name__ == '__main__':
main()
|
[
"paper_experiments.rotated_MNIST.mnist_loader_shifted_label_distribution_rotate.MnistRotatedDist",
"torch.utils.data.ConcatDataset",
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"paper_experiments.rotated_MNIST.mnist_loader.MnistRotated",
"paper_experiments.rotated_MNIST.mnist_loader_shifted_label_distribution_flip.MnistRotatedDistFlip",
"torch.manual_seed",
"torch.load",
"sys.path.insert",
"torch.utils.data.random_split",
"torch.save",
"paper_experiments.rotated_MNIST.augmentations.model_baseline.Net",
"torch.nn.functional.nll_loss",
"torch.cuda.is_available",
"torch.device",
"torch.no_grad"
] |
[((11, 42), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../../../"""'], {}), "(0, '../../../')\n", (26, 42), False, 'import sys\n'), ((1679, 1739), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch MNIST Example"""'}), "(description='PyTorch MNIST Example')\n", (1702, 1739), False, 'import argparse\n'), ((2631, 2659), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2648, 2659), False, 'import torch\n'), ((2707, 2732), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (2721, 2732), True, 'import numpy as np\n'), ((2747, 2767), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (2759, 2767), False, 'import torch\n'), ((3687, 3743), 'torch.utils.data.ConcatDataset', 'data_utils.ConcatDataset', (['[mnist_30, mnist_60, mnist_90]'], {}), '([mnist_30, mnist_60, mnist_90])\n', (3711, 3743), True, 'import torch.utils.data as data_utils\n'), ((3856, 3916), 'torch.utils.data.random_split', 'torch.utils.data.random_split', (['mnist', '[train_size, val_size]'], {}), '(mnist, [train_size, val_size])\n', (3885, 3916), False, 'import torch\n'), ((3937, 4030), 'torch.utils.data.DataLoader', 'data_utils.DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(train_dataset, batch_size=args.batch_size, shuffle=\n True, **kwargs)\n', (3958, 4030), True, 'import torch.utils.data as data_utils\n'), ((4126, 4218), 'torch.utils.data.DataLoader', 'data_utils.DataLoader', (['val_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)'}), '(val_dataset, batch_size=args.batch_size, shuffle=\n False, **kwargs)\n', (4147, 4218), True, 'import torch.utils.data as data_utils\n'), ((4899, 4964), 'paper_experiments.rotated_MNIST.mnist_loader.MnistRotated', 'MnistRotated', (['"""../dataset/"""'], {'train': '(False)', 'thetas': '[0.0]', 'd_label': '(0)'}), "('../dataset/', train=False, thetas=[0.0], d_label=0)\n", (4911, 4964), False, 'from paper_experiments.rotated_MNIST.mnist_loader import MnistRotated\n'), ((4983, 5070), 'torch.utils.data.DataLoader', 'data_utils.DataLoader', (['mnist_0'], {'batch_size': 'args.batch_size', 'shuffle': '(False)'}), '(mnist_0, batch_size=args.batch_size, shuffle=False,\n **kwargs)\n', (5004, 5070), True, 'import torch.utils.data as data_utils\n'), ((878, 904), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output', 'target'], {}), '(output, target)\n', (888, 904), True, 'import torch.nn.functional as F\n'), ((1060, 1075), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1073, 1075), False, 'import torch\n'), ((2585, 2610), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2608, 2610), False, 'import torch\n'), ((2919, 3008), 'paper_experiments.rotated_MNIST.mnist_loader_shifted_label_distribution_rotate.MnistRotatedDist', 'MnistRotatedDist', (['"""../dataset/"""'], {'train': '(True)', 'thetas': '[30.0]', 'd_label': '(0)', 'transform': '(True)'}), "('../dataset/', train=True, thetas=[30.0], d_label=0,\n transform=True)\n", (2935, 3008), False, 'from paper_experiments.rotated_MNIST.mnist_loader_shifted_label_distribution_rotate import MnistRotatedDist\n'), ((3024, 3113), 'paper_experiments.rotated_MNIST.mnist_loader_shifted_label_distribution_rotate.MnistRotatedDist', 'MnistRotatedDist', (['"""../dataset/"""'], {'train': '(True)', 'thetas': '[60.0]', 'd_label': '(1)', 'transform': '(True)'}), "('../dataset/', train=True, thetas=[60.0], d_label=1,\n transform=True)\n", (3040, 3113), False, 'from 
paper_experiments.rotated_MNIST.mnist_loader_shifted_label_distribution_rotate import MnistRotatedDist\n'), ((3129, 3218), 'paper_experiments.rotated_MNIST.mnist_loader_shifted_label_distribution_rotate.MnistRotatedDist', 'MnistRotatedDist', (['"""../dataset/"""'], {'train': '(True)', 'thetas': '[90.0]', 'd_label': '(2)', 'transform': '(True)'}), "('../dataset/', train=True, thetas=[90.0], d_label=2,\n transform=True)\n", (3145, 3218), False, 'from paper_experiments.rotated_MNIST.mnist_loader_shifted_label_distribution_rotate import MnistRotatedDist\n'), ((3339, 3412), 'paper_experiments.rotated_MNIST.mnist_loader_shifted_label_distribution_flip.MnistRotatedDistFlip', 'MnistRotatedDistFlip', (['"""../dataset/"""'], {'train': '(True)', 'thetas': '[30.0]', 'd_label': '(0)'}), "('../dataset/', train=True, thetas=[30.0], d_label=0)\n", (3359, 3412), False, 'from paper_experiments.rotated_MNIST.mnist_loader_shifted_label_distribution_flip import MnistRotatedDistFlip\n'), ((3432, 3505), 'paper_experiments.rotated_MNIST.mnist_loader_shifted_label_distribution_flip.MnistRotatedDistFlip', 'MnistRotatedDistFlip', (['"""../dataset/"""'], {'train': '(True)', 'thetas': '[60.0]', 'd_label': '(1)'}), "('../dataset/', train=True, thetas=[60.0], d_label=1)\n", (3452, 3505), False, 'from paper_experiments.rotated_MNIST.mnist_loader_shifted_label_distribution_flip import MnistRotatedDistFlip\n'), ((3525, 3598), 'paper_experiments.rotated_MNIST.mnist_loader_shifted_label_distribution_flip.MnistRotatedDistFlip', 'MnistRotatedDistFlip', (['"""../dataset/"""'], {'train': '(True)', 'thetas': '[90.0]', 'd_label': '(2)'}), "('../dataset/', train=True, thetas=[90.0], d_label=2)\n", (3545, 3598), False, 'from paper_experiments.rotated_MNIST.mnist_loader_shifted_label_distribution_flip import MnistRotatedDistFlip\n'), ((4309, 4314), 'paper_experiments.rotated_MNIST.augmentations.model_baseline.Net', 'Net', ([], {}), '()\n', (4312, 4314), False, 'from paper_experiments.rotated_MNIST.augmentations.model_baseline import Net\n'), ((4772, 4812), 'torch.save', 'torch.save', (['model', "(model_name + '.model')"], {}), "(model, model_name + '.model')\n", (4782, 4812), False, 'import torch\n'), ((4825, 4865), 'torch.save', 'torch.save', (['args', "(model_name + '.config')"], {}), "(args, model_name + '.config')\n", (4835, 4865), False, 'import torch\n'), ((5160, 5193), 'torch.load', 'torch.load', (["(model_name + '.model')"], {}), "(model_name + '.model')\n", (5170, 5193), False, 'import torch\n'), ((1284, 1327), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output', 'target'], {'reduction': '"""sum"""'}), "(output, target, reduction='sum')\n", (1294, 1327), True, 'import torch.nn.functional as F\n')]
|
__author__ = 'sibirrer'
import numpy as np
from astropy.cosmology import default_cosmology
from lenstronomy.Util import class_creator
from lenstronomy.Util import constants as const
from lenstronomy.Cosmo.lens_cosmo import LensCosmo
from lenstronomy.Analysis.kinematics_api import KinematicsAPI
class TDCosmography(KinematicsAPI):
"""
class equipped to perform a cosmographic analysis from a lens model with added measurements of time delays and
kinematics.
This class does not require any cosmological knowledge and can return angular diameter distance estimates
self-consistently integrating the kinematics routines and time delay estimates in the lens modeling.
This description follows Birrer et al. 2016, 2019.
"""
def __init__(self, z_lens, z_source, kwargs_model, cosmo_fiducial=None, lens_model_kinematics_bool=None,
light_model_kinematics_bool=None, kwargs_seeing={}, kwargs_aperture={}, anisotropy_model=None,
multi_observations=False, kwargs_lens_eqn_solver={}):
"""
:param z_lens: redshift of deflector
:param z_source: redshift of source
:param kwargs_model: model configurations (according to FittingSequence)
:param cosmo_fiducial: fiducial cosmology used to compute angular diameter distances where required
:param lens_model_kinematics_bool: (optional) bool list, corresponding to lens models being included into the
kinematics modeling
:param light_model_kinematics_bool: (optional) bool list, corresponding to lens light models being included
into the kinematics modeling
:param kwargs_seeing: seeing conditions (see observation class in Galkin)
:param kwargs_aperture: aperture keyword arguments (see aperture class in Galkin)
:param anisotropy_model: string, anisotropy model type
:param multi_observations: bool, if True, interprets kwargs_aperture and kwargs_seeing as lists of multiple
observations
"""
if cosmo_fiducial is None:
cosmo_fiducial = default_cosmology.get()
self._z_lens = z_lens
self._z_source = z_source
self._cosmo_fiducial = cosmo_fiducial
self._lens_cosmo = LensCosmo(z_lens=z_lens, z_source=z_source, cosmo=self._cosmo_fiducial)
self.LensModel, self.SourceModel, self.LensLightModel, self.PointSource, extinction_class = class_creator.create_class_instances(all_models=True, **kwargs_model,kwargs_lens_eqn_solver=kwargs_lens_eqn_solver)
super(TDCosmography, self).__init__(z_lens=z_lens, z_source=z_source, kwargs_model=kwargs_model,
cosmo=cosmo_fiducial, lens_model_kinematics_bool=lens_model_kinematics_bool,
light_model_kinematics_bool=light_model_kinematics_bool,
kwargs_seeing=kwargs_seeing, kwargs_aperture=kwargs_aperture,
anisotropy_model=anisotropy_model, multi_observations=multi_observations, kwargs_lens_eqn_solver=kwargs_lens_eqn_solver)
def time_delays(self, kwargs_lens, kwargs_ps, kappa_ext=0, original_ps_position=False):
"""
predicts the time delays of the image positions given the fiducial cosmology
:param kwargs_lens: lens model parameters
:param kwargs_ps: point source parameters
:param kappa_ext: external convergence (optional)
:param original_ps_position: boolean (only applies when first point source model is of type 'LENSED_POSITION'),
uses the image positions in the model parameters and does not re-compute images (which might be differently ordered)
in case of the lens equation solver
:return: time delays at image positions for the fixed cosmology
"""
fermat_pot, ra_pos, dec_pos = self.fermat_potential(kwargs_lens, kwargs_ps, original_ps_position=original_ps_position)
time_delay = self._lens_cosmo.time_delay_units(fermat_pot, kappa_ext)
return time_delay, ra_pos, dec_pos
def fermat_potential(self, kwargs_lens, kwargs_ps, original_ps_position=False):
"""
:param kwargs_lens: lens model keyword argument list
:param kwargs_ps: point source keyword argument list
:param original_ps_position: boolean (only applies when the first point source model is of type 'LENSED_POSITION'); uses the image positions given in the model parameters instead of the re-computed ones from the lens equation solver
:return: tuple of Fermat potential of all the image positions in the first point source list entry and ra/dec of the image positions used (Ji Won's modification)
"""
ra_pos, dec_pos = self.PointSource.image_position(kwargs_ps, kwargs_lens, original_position=original_ps_position)
ra_pos = ra_pos[0]
dec_pos = dec_pos[0]
ra_source, dec_source = self.LensModel.ray_shooting(ra_pos, dec_pos, kwargs_lens)
sigma_source = np.sqrt(np.var(ra_source) + np.var(dec_source))
if sigma_source > 0.001:
Warning('Source position computed from the different image positions do not trace back to the same position! '
'The error is %s mas and may be larger than what is required for an accurate relative time delay estimate!'
'See e.g. Birrer & Treu 2019.' % (sigma_source * 1000))
ra_source = np.mean(ra_source)
dec_source = np.mean(dec_source)
fermat_pot = self.LensModel.fermat_potential(ra_pos, dec_pos, kwargs_lens, ra_source, dec_source)
return fermat_pot, ra_pos, dec_pos
def velocity_dispersion_dimension_less(self, kwargs_lens, kwargs_lens_light, kwargs_anisotropy, r_eff=None,
theta_E=None, gamma=None):
"""
sigma**2 = Dd/Dds * c**2 * J(kwargs_lens, kwargs_light, anisotropy)
(Equation 4.11 in Birrer et al. 2016 or Equation 6 in Birrer et al. 2019) J() is a dimensionless and
cosmological independent quantity only depending on angular units. This function returns J given the lens
and light parameters and the anisotropy choice without an external mass sheet correction.
:param kwargs_lens: lens model keyword arguments
:param kwargs_lens_light: lens light model keyword arguments
:param kwargs_anisotropy: stellar anisotropy keyword arguments
:param r_eff: projected half-light radius of the stellar light associated with the deflector galaxy, optional,
if set to None will be computed in this function with default settings that may not be accurate.
:param theta_E: Einstein radius (optional)
:param gamma: power-law slope of the mass profile (optional)
:return: dimensionless velocity dispersion (see e.g. Birrer et al. 2016, 2019)
"""
sigma_v = self.velocity_dispersion(kwargs_lens=kwargs_lens, kwargs_lens_light=kwargs_lens_light,
kwargs_anisotropy=kwargs_anisotropy, r_eff=r_eff, theta_E=theta_E,
gamma=gamma)
sigma_v *= 1000 # convert from [km/s] to [m/s]
J = sigma_v ** 2 * self._lens_cosmo.dds / self._lens_cosmo.ds / const.c ** 2
return J
def velocity_dispersion_map_dimension_less(self, kwargs_lens, kwargs_lens_light, kwargs_anisotropy, r_eff=None,
theta_E=None, gamma=None):
"""
sigma**2 = Dd/Dds * c**2 * J(kwargs_lens, kwargs_light, anisotropy)
(Equation 4.11 in Birrer et al. 2016 or Equation 6 in Birrer et al. 2019) J() is a dimensionless and
cosmological independent quantity only depending on angular units. This function returns J given the lens
and light parameters and the anisotropy choice without an external mass sheet correction.
This routine computes the IFU map of the kinematic quantities.
:param kwargs_lens: lens model keyword arguments
:param kwargs_lens_light: lens light model keyword arguments
:param kwargs_anisotropy: stellar anisotropy keyword arguments
:param r_eff: projected half-light radius of the stellar light associated with the deflector galaxy, optional,
if set to None will be computed in this function with default settings that may not be accurate.
:return: dimensionless velocity dispersion (see e.g. Birrer et al. 2016, 2019)
"""
sigma_v_map = self.velocity_dispersion_map(kwargs_lens=kwargs_lens, kwargs_lens_light=kwargs_lens_light,
kwargs_anisotropy=kwargs_anisotropy, r_eff=r_eff, theta_E=theta_E,
gamma=gamma)
sigma_v_map *= 1000 # convert from [km/s] to [m/s]
J_map = sigma_v_map ** 2 * self._lens_cosmo.dds / self._lens_cosmo.ds / const.c ** 2
return J_map
@staticmethod
def ddt_from_time_delay(d_fermat_model, dt_measured, kappa_s=0, kappa_ds=0, kappa_d=0):
"""
Time-delay distance in units of Mpc from the modeled Fermat potential and measured time delay from an image pair.
:param d_fermat_model: relative Fermat potential between two images from the same source in units arcsec^2
:param dt_measured: measured time delay between the same image pair in units of days
:return: D_dt, time-delay distance
"""
D_dt_model = dt_measured * const.day_s * const.c / const.Mpc / d_fermat_model / const.arcsec ** 2
D_dt = D_dt_model * (1-kappa_ds) / (1 - kappa_s) / (1 - kappa_d)
return D_dt
@staticmethod
def ds_dds_from_kinematics(sigma_v, J, kappa_s=0, kappa_ds=0):
"""
computes the estimate of the ratio of angular diameter distances Ds/Dds from the kinematic estimate of the lens
and the measured dispersion.
:param sigma_v: velocity dispersion [km/s]
:param J: dimensionless kinematic constraint (see Birrer et al. 2016, 2019)
:return: Ds/Dds
"""
ds_dds_model = (sigma_v * 1000) ** 2 / const.c ** 2 / J
ds_dds = ds_dds_model * (1 - kappa_ds) / (1 - kappa_s)
return ds_dds
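# Combining the two constraints: the time delay yields D_dt = (1 + z_d) * D_d * D_s / D_ds,
# while the kinematics yield D_s / D_ds, so dividing D_dt by that ratio and by (1 + z_d)
# isolates the angular diameter distance to the deflector D_d (which the method below
# returns alongside D_dt).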
def ddt_dd_from_time_delay_and_kinematics(self, d_fermat_model, dt_measured, sigma_v_measured, J, kappa_s=0,
kappa_ds=0, kappa_d=0):
"""
:param d_fermat_model: relative Fermat potential in units arcsec^2
:param dt_measured: measured relative time delay [days]
:param sigma_v_measured: 1-sigma Gaussian uncertainty in the measured velocity dispersion
:param J: modeled dimensionless kinematic estimate
:param kappa_s: LOS convergence from observer to source
:param kappa_ds: LOS convergence from deflector to source
:param kappa_d: LOS convergence from observer to deflector
:return: D_dt, D_d
"""
ddt = self.ddt_from_time_delay(d_fermat_model, dt_measured, kappa_s=kappa_s, kappa_ds=kappa_ds, kappa_d=kappa_d)
ds_dds = self.ds_dds_from_kinematics(sigma_v_measured, J, kappa_s=kappa_s, kappa_ds=kappa_ds)
dd = ddt / ds_dds / (1 + self._z_lens)
return ddt, dd
|
[
"lenstronomy.Cosmo.lens_cosmo.LensCosmo",
"numpy.var",
"astropy.cosmology.default_cosmology.get",
"numpy.mean",
"lenstronomy.Util.class_creator.create_class_instances"
] |
[((2250, 2321), 'lenstronomy.Cosmo.lens_cosmo.LensCosmo', 'LensCosmo', ([], {'z_lens': 'z_lens', 'z_source': 'z_source', 'cosmo': 'self._cosmo_fiducial'}), '(z_lens=z_lens, z_source=z_source, cosmo=self._cosmo_fiducial)\n', (2259, 2321), False, 'from lenstronomy.Cosmo.lens_cosmo import LensCosmo\n'), ((2422, 2542), 'lenstronomy.Util.class_creator.create_class_instances', 'class_creator.create_class_instances', ([], {'all_models': '(True)', 'kwargs_lens_eqn_solver': 'kwargs_lens_eqn_solver'}), '(all_models=True, **kwargs_model,\n kwargs_lens_eqn_solver=kwargs_lens_eqn_solver)\n', (2458, 2542), False, 'from lenstronomy.Util import class_creator\n'), ((5185, 5203), 'numpy.mean', 'np.mean', (['ra_source'], {}), '(ra_source)\n', (5192, 5203), True, 'import numpy as np\n'), ((5225, 5244), 'numpy.mean', 'np.mean', (['dec_source'], {}), '(dec_source)\n', (5232, 5244), True, 'import numpy as np\n'), ((2089, 2112), 'astropy.cosmology.default_cosmology.get', 'default_cosmology.get', ([], {}), '()\n', (2110, 2112), False, 'from astropy.cosmology import default_cosmology\n'), ((4767, 4784), 'numpy.var', 'np.var', (['ra_source'], {}), '(ra_source)\n', (4773, 4784), True, 'import numpy as np\n'), ((4787, 4805), 'numpy.var', 'np.var', (['dec_source'], {}), '(dec_source)\n', (4793, 4805), True, 'import numpy as np\n')]
|
import os
import random
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
def save_parameters(options, filename):
with open(filename, "w+") as f:
for key in options.keys():
f.write("{}: {}\n".format(key, options[key]))
# https://gist.github.com/KirillVladimirov/005ec7f762293d2321385580d3dbe335
def seed_everything(seed=1234):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# torch.cuda.manual_seed(seed)
# torch.backends.cudnn.deterministic = True
# https://blog.csdn.net/folk_/article/details/80208557
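# train_val_split randomly moves a val_ratio fraction of the sessions (and their labels)
# into a validation dictionary; the remaining indices form the training split. The same
# random index is used for every key in logs_meta so the fields stay aligned.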
def train_val_split(logs_meta, labels, val_ratio=0.1):
total_num = len(labels)
train_index = list(range(total_num))
train_logs = {}
val_logs = {}
for key in logs_meta.keys():
train_logs[key] = []
val_logs[key] = []
train_labels = []
val_labels = []
val_num = int(total_num * val_ratio)
for i in range(val_num):
random_index = int(np.random.uniform(0, len(train_index)))
for key in logs_meta.keys():
val_logs[key].append(logs_meta[key][random_index])
val_labels.append(labels[random_index])
del train_index[random_index]
for i in range(total_num - val_num):
for key in logs_meta.keys():
train_logs[key].append(logs_meta[key][train_index[i]])
train_labels.append(labels[train_index[i]])
return train_logs, train_labels, val_logs, val_labels
def plot_train_valid_loss(save_dir):
train_loss = pd.read_csv(save_dir + "train_log.csv")
valid_loss = pd.read_csv(save_dir + "valid_log.csv")
sns.lineplot(x="epoch",y="loss" , data = train_loss, label="train loss")
sns.lineplot(x="epoch",y="loss" , data = valid_loss, label="valid loss")
plt.title("epoch vs train loss vs valid loss")
plt.legend()
plt.savefig(save_dir+"train_valid_loss.png")
plt.show()
print("plot done")
def plot_sequence_len(save_dir):
normal_seq_len = []
with open(save_dir+"train", "r") as f:
for line in f.readlines():
line = line.split()
normal_seq_len.append(len(line))
with open(save_dir+"test_normal", 'r') as f:
for line in f.readlines():
normal_seq_len.append(len(line.split()))
abnormal_seq_line = []
with open(save_dir+"test_abnormal", "r") as f:
for line in f.readlines():
abnormal_seq_line.append(len(line.split()))
sns.distplot(normal_seq_len, label="normal")
    sns.distplot(abnormal_seq_line, label="abnormal")
plt.title("session length distribution")
plt.xlabel("num of log keys in a session")
plt.legend()
plt.show()
plt.close()
|
[
"matplotlib.pyplot.title",
"seaborn.lineplot",
"numpy.random.seed",
"matplotlib.pyplot.show",
"pandas.read_csv",
"torch.manual_seed",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.close",
"random.seed",
"seaborn.distplot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((469, 486), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (480, 486), False, 'import random\n'), ((536, 556), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (550, 556), True, 'import numpy as np\n'), ((561, 584), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (578, 584), False, 'import torch\n'), ((1655, 1694), 'pandas.read_csv', 'pd.read_csv', (["(save_dir + 'train_log.csv')"], {}), "(save_dir + 'train_log.csv')\n", (1666, 1694), True, 'import pandas as pd\n'), ((1712, 1751), 'pandas.read_csv', 'pd.read_csv', (["(save_dir + 'valid_log.csv')"], {}), "(save_dir + 'valid_log.csv')\n", (1723, 1751), True, 'import pandas as pd\n'), ((1756, 1826), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""epoch"""', 'y': '"""loss"""', 'data': 'train_loss', 'label': '"""train loss"""'}), "(x='epoch', y='loss', data=train_loss, label='train loss')\n", (1768, 1826), True, 'import seaborn as sns\n'), ((1833, 1903), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""epoch"""', 'y': '"""loss"""', 'data': 'valid_loss', 'label': '"""valid loss"""'}), "(x='epoch', y='loss', data=valid_loss, label='valid loss')\n", (1845, 1903), True, 'import seaborn as sns\n'), ((1910, 1956), 'matplotlib.pyplot.title', 'plt.title', (['"""epoch vs train loss vs valid loss"""'], {}), "('epoch vs train loss vs valid loss')\n", (1919, 1956), True, 'import matplotlib.pyplot as plt\n'), ((1976, 2022), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_dir + 'train_valid_loss.png')"], {}), "(save_dir + 'train_valid_loss.png')\n", (1987, 2022), True, 'import matplotlib.pyplot as plt\n'), ((2025, 2035), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2033, 2035), True, 'import matplotlib.pyplot as plt\n'), ((2583, 2627), 'seaborn.distplot', 'sns.distplot', (['normal_seq_len'], {'label': '"""normal"""'}), "(normal_seq_len, label='normal')\n", (2595, 2627), True, 'import seaborn as sns\n'), ((2632, 2681), 'seaborn.distplot', 'sns.distplot', (['abnormal_seq_line'], {'label': '"""abnormal"""'}), "(abnormal_seq_line, label='abnormal')\n", (2644, 2681), True, 'import seaborn as sns\n'), ((2688, 2728), 'matplotlib.pyplot.title', 'plt.title', (['"""session length distribution"""'], {}), "('session length distribution')\n", (2697, 2728), True, 'import matplotlib.pyplot as plt\n'), ((2733, 2775), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""num of log keys in a session"""'], {}), "('num of log keys in a session')\n", (2743, 2775), True, 'import matplotlib.pyplot as plt\n'), ((2780, 2792), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2790, 2792), True, 'import matplotlib.pyplot as plt\n'), ((2797, 2807), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2805, 2807), True, 'import matplotlib.pyplot as plt\n'), ((2812, 2823), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2821, 2823), True, 'import matplotlib.pyplot as plt\n')]
|
import numpy
from numpy import dot, sqrt
def binarize_vector(u):
return u > 0
def cosine_distance(u, v, binary=False):
"""Return the cosine distance between two vectors."""
if binary:
return cosine_distance_binary(u, v)
return 1.0 - dot(u, v) / (sqrt(dot(u, u)) * sqrt(dot(v, v)))
def cosine_distance_binary(u, v):
u = binarize_vector(u)
v = binarize_vector(v)
return (1.0 * (u * v).sum()) / numpy.sqrt((u.sum() * v.sum()))
def euclidean_distance(u, v):
"""Return the euclidean distance between two vectors."""
diff = u - v
return sqrt(dot(diff, diff))
def cityblock_distance(u, v):
"""Return the Manhattan/City Block distance between two vectors."""
return abs(u - v).sum()
def canberra_distance(u, v):
"""Return the canberra distance between two vectors."""
    return numpy.sum(abs(u - v) / (abs(u) + abs(v)))
def correlation(u, v):
"""Return the correlation distance between two vectors."""
u_var = u - u.mean()
v_var = v - v.mean()
return 1.0 - dot(
u_var, v_var) / (sqrt(dot(u_var, u_var)) * sqrt(dot(v_var, v_var)))
def dice(u, v):
"""Return the dice coefficient between two vectors."""
u = u > 0
v = v > 0
return (2.0 * (u * v).sum()) / (u.sum() + v.sum())
def jaccard_distance(u, v):
"""return jaccard distance"""
u = numpy.asarray(u)
v = numpy.asarray(v)
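    # Fraction of positions where u and v disagree, taken over the positions
    # where at least one of the two vectors is nonzero.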
return (numpy.double(
numpy.bitwise_and((u != v), numpy.bitwise_or(u != 0, v != 0)).sum()) /
numpy.double(numpy.bitwise_or(u != 0, v != 0).sum()))
def jaccard(u, v):
"""Return the Jaccard coefficient between two vectors."""
u = u > 0
v = v > 0
return (1.0 * (u * v).sum()) / (u + v).sum()
|
[
"numpy.dot",
"numpy.asarray",
"numpy.bitwise_or"
] |
[((1346, 1362), 'numpy.asarray', 'numpy.asarray', (['u'], {}), '(u)\n', (1359, 1362), False, 'import numpy\n'), ((1371, 1387), 'numpy.asarray', 'numpy.asarray', (['v'], {}), '(v)\n', (1384, 1387), False, 'import numpy\n'), ((592, 607), 'numpy.dot', 'dot', (['diff', 'diff'], {}), '(diff, diff)\n', (595, 607), False, 'from numpy import dot, sqrt\n'), ((261, 270), 'numpy.dot', 'dot', (['u', 'v'], {}), '(u, v)\n', (264, 270), False, 'from numpy import dot, sqrt\n'), ((1033, 1050), 'numpy.dot', 'dot', (['u_var', 'v_var'], {}), '(u_var, v_var)\n', (1036, 1050), False, 'from numpy import dot, sqrt\n'), ((279, 288), 'numpy.dot', 'dot', (['u', 'u'], {}), '(u, u)\n', (282, 288), False, 'from numpy import dot, sqrt\n'), ((297, 306), 'numpy.dot', 'dot', (['v', 'v'], {}), '(v, v)\n', (300, 306), False, 'from numpy import dot, sqrt\n'), ((1068, 1085), 'numpy.dot', 'dot', (['u_var', 'u_var'], {}), '(u_var, u_var)\n', (1071, 1085), False, 'from numpy import dot, sqrt\n'), ((1094, 1111), 'numpy.dot', 'dot', (['v_var', 'v_var'], {}), '(v_var, v_var)\n', (1097, 1111), False, 'from numpy import dot, sqrt\n'), ((1518, 1550), 'numpy.bitwise_or', 'numpy.bitwise_or', (['(u != 0)', '(v != 0)'], {}), '(u != 0, v != 0)\n', (1534, 1550), False, 'import numpy\n'), ((1450, 1482), 'numpy.bitwise_or', 'numpy.bitwise_or', (['(u != 0)', '(v != 0)'], {}), '(u != 0, v != 0)\n', (1466, 1482), False, 'import numpy\n')]
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from functools import partial
from typing import Callable, Sequence
import numpy as np
import pytest
import torch
from tests.text.helpers import TextTester
from tests.text.inputs import _inputs_multiple_references, _inputs_single_sentence_single_reference
from torchmetrics.functional.text.rouge import rouge_score
from torchmetrics.text.rouge import ROUGEScore
from torchmetrics.utilities.imports import _NLTK_AVAILABLE, _ROUGE_SCORE_AVAILABLE
if _ROUGE_SCORE_AVAILABLE:
from rouge_score.rouge_scorer import RougeScorer
from rouge_score.scoring import BootstrapAggregator
else:
RougeScorer, BootstrapAggregator = object, object
ROUGE_KEYS = ("rouge1", "rouge2", "rougeL", "rougeLsum")
def _compute_rouge_score(
preds: Sequence[str],
target: Sequence[Sequence[str]],
use_stemmer: bool,
rouge_level: str,
metric: str,
accumulate: str,
):
"""Evaluates rouge scores from rouge-score package for baseline evaluation."""
if isinstance(target, list) and all(isinstance(tgt, str) for tgt in target):
target = [target] if isinstance(preds, str) else [[tgt] for tgt in target]
if isinstance(preds, str):
preds = [preds]
if isinstance(target, str):
target = [[target]]
scorer = RougeScorer(ROUGE_KEYS, use_stemmer=use_stemmer)
aggregator = BootstrapAggregator()
for target_raw, pred_raw in zip(target, preds):
list_results = [scorer.score(tgt, pred_raw) for tgt in target_raw]
aggregator_avg = BootstrapAggregator()
if accumulate == "best":
key_curr = list(list_results[0].keys())[0]
all_fmeasure = torch.tensor([v[key_curr].fmeasure for v in list_results])
highest_idx = torch.argmax(all_fmeasure).item()
aggregator.add_scores(list_results[highest_idx])
elif accumulate == "avg":
for _score in list_results:
aggregator_avg.add_scores(_score)
_score = {rouge_key: scores.mid for rouge_key, scores in aggregator_avg.aggregate().items()}
aggregator.add_scores(_score)
else:
raise ValueError(f"Got unknown accumulate value {accumulate}. Expected to be one of ['best', 'avg']")
rs_scores = aggregator.aggregate()
rs_result = getattr(rs_scores[rouge_level].mid, metric)
return rs_result
@pytest.mark.skipif(not _NLTK_AVAILABLE, reason="test requires nltk")
@pytest.mark.parametrize(
["pl_rouge_metric_key", "use_stemmer"],
[
("rouge1_precision", True),
("rouge1_recall", True),
("rouge1_fmeasure", False),
("rouge2_precision", False),
("rouge2_recall", True),
("rouge2_fmeasure", True),
("rougeL_precision", False),
("rougeL_recall", False),
("rougeL_fmeasure", True),
("rougeLsum_precision", True),
("rougeLsum_recall", False),
("rougeLsum_fmeasure", False),
],
)
@pytest.mark.parametrize(
["preds", "targets"],
[
(_inputs_multiple_references.preds, _inputs_multiple_references.targets),
],
)
@pytest.mark.parametrize("accumulate", ["avg", "best"])
class TestROUGEScore(TextTester):
@pytest.mark.parametrize("ddp", [False, True])
@pytest.mark.parametrize("dist_sync_on_step", [False, True])
def test_rouge_score_class(
self, ddp, dist_sync_on_step, preds, targets, pl_rouge_metric_key, use_stemmer, accumulate
):
metric_args = {"use_stemmer": use_stemmer, "accumulate": accumulate}
rouge_level, metric = pl_rouge_metric_key.split("_")
rouge_metric = partial(
_compute_rouge_score, use_stemmer=use_stemmer, rouge_level=rouge_level, metric=metric, accumulate=accumulate
)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
targets=targets,
metric_class=ROUGEScore,
sk_metric=rouge_metric,
dist_sync_on_step=dist_sync_on_step,
metric_args=metric_args,
key=pl_rouge_metric_key,
)
def test_rouge_score_functional(self, preds, targets, pl_rouge_metric_key, use_stemmer, accumulate):
metric_args = {"use_stemmer": use_stemmer, "accumulate": accumulate}
rouge_level, metric = pl_rouge_metric_key.split("_")
rouge_metric = partial(
_compute_rouge_score, use_stemmer=use_stemmer, rouge_level=rouge_level, metric=metric, accumulate=accumulate
)
self.run_functional_metric_test(
preds,
targets,
metric_functional=rouge_score,
sk_metric=rouge_metric,
metric_args=metric_args,
key=pl_rouge_metric_key,
)
def test_rouge_metric_raises_errors_and_warnings():
"""Test that expected warnings and errors are raised."""
if not _NLTK_AVAILABLE:
with pytest.raises(
ModuleNotFoundError,
match="ROUGE metric requires that `nltk` is installed."
" Either as `pip install torchmetrics[text]` or `pip install nltk`.",
):
ROUGEScore()
def test_rouge_metric_wrong_key_value_error():
key = ("rouge1", "rouge")
with pytest.raises(ValueError):
ROUGEScore(rouge_keys=key)
with pytest.raises(ValueError):
rouge_score(
_inputs_single_sentence_single_reference.preds,
_inputs_single_sentence_single_reference.targets,
rouge_keys=key,
accumulate="best",
)
@pytest.mark.parametrize(
"pl_rouge_metric_key",
[
"rouge1_precision",
"rouge1_recall",
"rouge1_fmeasure",
"rouge2_precision",
"rouge2_recall",
"rouge2_fmeasure",
"rougeL_precision",
"rougeL_recall",
"rougeL_fmeasure",
"rougeLsum_precision",
"rougeLsum_recall",
"rougeLsum_fmeasure",
],
)
def test_rouge_metric_normalizer_tokenizer(pl_rouge_metric_key):
normalizer: Callable[[str], str] = lambda text: re.sub(r"[^a-z0-9]+", " ", text.lower())
tokenizer: Callable[[str], Sequence[str]] = lambda text: re.split(r"\s+", text)
rouge_level, metric = pl_rouge_metric_key.split("_")
original_score = _compute_rouge_score(
preds=_inputs_single_sentence_single_reference.preds,
target=_inputs_single_sentence_single_reference.targets,
rouge_level=rouge_level,
metric=metric,
accumulate="best",
use_stemmer=False,
)
scorer = ROUGEScore(
normalizer=normalizer, tokenizer=tokenizer, rouge_keys=rouge_level, accumulate="best", use_stemmer=False
)
scorer.update(
_inputs_single_sentence_single_reference.preds,
_inputs_single_sentence_single_reference.targets,
)
metrics_score = scorer.compute()
    assert np.isclose(metrics_score[rouge_level + "_" + metric], original_score, atol=1e-8, equal_nan=True)
|
[
"functools.partial",
"re.split",
"rouge_score.rouge_scorer.RougeScorer",
"torch.argmax",
"rouge_score.scoring.BootstrapAggregator",
"numpy.isclose",
"pytest.mark.skipif",
"torchmetrics.text.rouge.ROUGEScore",
"pytest.raises",
"torchmetrics.functional.text.rouge.rouge_score",
"pytest.mark.parametrize",
"torch.tensor"
] |
[((2936, 3004), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not _NLTK_AVAILABLE)'], {'reason': '"""test requires nltk"""'}), "(not _NLTK_AVAILABLE, reason='test requires nltk')\n", (2954, 3004), False, 'import pytest\n'), ((3006, 3429), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["['pl_rouge_metric_key', 'use_stemmer']", "[('rouge1_precision', True), ('rouge1_recall', True), ('rouge1_fmeasure', \n False), ('rouge2_precision', False), ('rouge2_recall', True), (\n 'rouge2_fmeasure', True), ('rougeL_precision', False), ('rougeL_recall',\n False), ('rougeL_fmeasure', True), ('rougeLsum_precision', True), (\n 'rougeLsum_recall', False), ('rougeLsum_fmeasure', False)]"], {}), "(['pl_rouge_metric_key', 'use_stemmer'], [(\n 'rouge1_precision', True), ('rouge1_recall', True), ('rouge1_fmeasure',\n False), ('rouge2_precision', False), ('rouge2_recall', True), (\n 'rouge2_fmeasure', True), ('rougeL_precision', False), ('rougeL_recall',\n False), ('rougeL_fmeasure', True), ('rougeLsum_precision', True), (\n 'rougeLsum_recall', False), ('rougeLsum_fmeasure', False)])\n", (3029, 3429), False, 'import pytest\n'), ((3522, 3648), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["['preds', 'targets']", '[(_inputs_multiple_references.preds, _inputs_multiple_references.targets)]'], {}), "(['preds', 'targets'], [(_inputs_multiple_references\n .preds, _inputs_multiple_references.targets)])\n", (3545, 3648), False, 'import pytest\n'), ((3671, 3725), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""accumulate"""', "['avg', 'best']"], {}), "('accumulate', ['avg', 'best'])\n", (3694, 3725), False, 'import pytest\n'), ((6077, 6374), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pl_rouge_metric_key"""', "['rouge1_precision', 'rouge1_recall', 'rouge1_fmeasure', 'rouge2_precision',\n 'rouge2_recall', 'rouge2_fmeasure', 'rougeL_precision', 'rougeL_recall',\n 'rougeL_fmeasure', 'rougeLsum_precision', 'rougeLsum_recall',\n 'rougeLsum_fmeasure']"], {}), "('pl_rouge_metric_key', ['rouge1_precision',\n 'rouge1_recall', 'rouge1_fmeasure', 'rouge2_precision', 'rouge2_recall',\n 'rouge2_fmeasure', 'rougeL_precision', 'rougeL_recall',\n 'rougeL_fmeasure', 'rougeLsum_precision', 'rougeLsum_recall',\n 'rougeLsum_fmeasure'])\n", (6100, 6374), False, 'import pytest\n'), ((1854, 1902), 'rouge_score.rouge_scorer.RougeScorer', 'RougeScorer', (['ROUGE_KEYS'], {'use_stemmer': 'use_stemmer'}), '(ROUGE_KEYS, use_stemmer=use_stemmer)\n', (1865, 1902), False, 'from rouge_score.rouge_scorer import RougeScorer\n'), ((1920, 1941), 'rouge_score.scoring.BootstrapAggregator', 'BootstrapAggregator', ([], {}), '()\n', (1939, 1941), False, 'from rouge_score.scoring import BootstrapAggregator\n'), ((3765, 3810), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ddp"""', '[False, True]'], {}), "('ddp', [False, True])\n", (3788, 3810), False, 'import pytest\n'), ((3816, 3875), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dist_sync_on_step"""', '[False, True]'], {}), "('dist_sync_on_step', [False, True])\n", (3839, 3875), False, 'import pytest\n'), ((7073, 7194), 'torchmetrics.text.rouge.ROUGEScore', 'ROUGEScore', ([], {'normalizer': 'normalizer', 'tokenizer': 'tokenizer', 'rouge_keys': 'rouge_level', 'accumulate': '"""best"""', 'use_stemmer': '(False)'}), "(normalizer=normalizer, tokenizer=tokenizer, rouge_keys=\n rouge_level, accumulate='best', use_stemmer=False)\n", (7083, 7194), False, 'from torchmetrics.text.rouge import ROUGEScore\n'), ((7385, 7487), 'numpy.isclose', 
'np.isclose', (["metrics_score[rouge_level + '_' + metric]", 'original_score'], {'atol': '(1e-08)', 'equal_nan': '(True)'}), "(metrics_score[rouge_level + '_' + metric], original_score, atol=\n 1e-08, equal_nan=True)\n", (7395, 7487), True, 'import numpy as np\n'), ((2095, 2116), 'rouge_score.scoring.BootstrapAggregator', 'BootstrapAggregator', ([], {}), '()\n', (2114, 2116), False, 'from rouge_score.scoring import BootstrapAggregator\n'), ((4175, 4297), 'functools.partial', 'partial', (['_compute_rouge_score'], {'use_stemmer': 'use_stemmer', 'rouge_level': 'rouge_level', 'metric': 'metric', 'accumulate': 'accumulate'}), '(_compute_rouge_score, use_stemmer=use_stemmer, rouge_level=\n rouge_level, metric=metric, accumulate=accumulate)\n', (4182, 4297), False, 'from functools import partial\n'), ((4900, 5022), 'functools.partial', 'partial', (['_compute_rouge_score'], {'use_stemmer': 'use_stemmer', 'rouge_level': 'rouge_level', 'metric': 'metric', 'accumulate': 'accumulate'}), '(_compute_rouge_score, use_stemmer=use_stemmer, rouge_level=\n rouge_level, metric=metric, accumulate=accumulate)\n', (4907, 5022), False, 'from functools import partial\n'), ((5763, 5788), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5776, 5788), False, 'import pytest\n'), ((5798, 5824), 'torchmetrics.text.rouge.ROUGEScore', 'ROUGEScore', ([], {'rouge_keys': 'key'}), '(rouge_keys=key)\n', (5808, 5824), False, 'from torchmetrics.text.rouge import ROUGEScore\n'), ((5835, 5860), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5848, 5860), False, 'import pytest\n'), ((5870, 6022), 'torchmetrics.functional.text.rouge.rouge_score', 'rouge_score', (['_inputs_single_sentence_single_reference.preds', '_inputs_single_sentence_single_reference.targets'], {'rouge_keys': 'key', 'accumulate': '"""best"""'}), "(_inputs_single_sentence_single_reference.preds,\n _inputs_single_sentence_single_reference.targets, rouge_keys=key,\n accumulate='best')\n", (5881, 6022), False, 'from torchmetrics.functional.text.rouge import rouge_score\n'), ((6692, 6714), 're.split', 're.split', (['"""\\\\s+"""', 'text'], {}), "('\\\\s+', text)\n", (6700, 6714), False, 'import re\n'), ((2233, 2291), 'torch.tensor', 'torch.tensor', (['[v[key_curr].fmeasure for v in list_results]'], {}), '([v[key_curr].fmeasure for v in list_results])\n', (2245, 2291), False, 'import torch\n'), ((5440, 5607), 'pytest.raises', 'pytest.raises', (['ModuleNotFoundError'], {'match': '"""ROUGE metric requires that `nltk` is installed. Either as `pip install torchmetrics[text]` or `pip install nltk`."""'}), "(ModuleNotFoundError, match=\n 'ROUGE metric requires that `nltk` is installed. Either as `pip install torchmetrics[text]` or `pip install nltk`.'\n )\n", (5453, 5607), False, 'import pytest\n'), ((5661, 5673), 'torchmetrics.text.rouge.ROUGEScore', 'ROUGEScore', ([], {}), '()\n', (5671, 5673), False, 'from torchmetrics.text.rouge import ROUGEScore\n'), ((2318, 2344), 'torch.argmax', 'torch.argmax', (['all_fmeasure'], {}), '(all_fmeasure)\n', (2330, 2344), False, 'import torch\n')]
|
#!/usr/bin/python
# Note : This is designed for Python 3
import numpy as np
class orthogonal_optimization:
def __init__(self, db):
self.cost_function = db['compute_cost']
self.gradient_function = db['compute_gradient']
self.x_opt = None
self.cost_opt = None
self.db = {}
#self.db['run_debug_2'] = True
#self.db['run_debug_1'] = True
def calc_A(self, x):
G = self.gradient_function(x)
A = G.dot(x.T) - x.dot(G.T)
return [A,G]
# Applying Sherman-Morrison-Woodbury Theorem ( A faster way to update instead of recalculating inverse )
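    # M = I + alpha*A with A = G x^T - x G^T, so changing alpha by alpha_D adds the
    # two rank-k terms (alpha_D*G) x^T and (-alpha_D*x) G^T. Each block below folds
    # one of them into M_inv via the Woodbury identity
    #   (M + U V^T)^-1 = M^-1 - M^-1 U (I + V^T M^-1 U)^-1 V^T M^-1
    # instead of recomputing the full d x d inverse from scratch.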
def constant_update_inv(self, x, G, M_inv, alpha_D):
if alpha_D == 0: return M_inv
d = x.shape[1]
I = np.eye(d)
# 1st update
U = alpha_D*G
V = x
E = np.linalg.inv(I + V.T.dot(M_inv).dot(U))
M_inv = M_inv - M_inv.dot(U).dot(E).dot(V.T).dot(M_inv)
# 2nd update
U = -alpha_D*x
V = G
E = np.linalg.inv(I + V.T.dot(M_inv).dot(U))
M_inv = M_inv - M_inv.dot(U).dot(E).dot(V.T).dot(M_inv)
return M_inv
def compute_gradient(self, x):
[A,G] = self.calc_A(x)
return A.dot(x)
def run(self, x_init, max_rep=400, α=2):
d = x_init.shape[0]
self.x_opt = x_init
I = np.eye(d)
converged = False
x_change = np.linalg.norm(x_init)
m = 0
in_cost = self.cost_function(self.x_opt)
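        # Each iteration applies the Cayley-style update x <- (I + a*A)^-1 (I - a*A) x.
        # A = G x^T - x G^T is skew-symmetric, so this transform is orthogonal and keeps
        # the columns of x orthonormal (the later QR factorization re-enforces this
        # numerically); the step size a is backtracked until the cost decreases.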
while( (converged == False) and (m < max_rep)):
old_alpha = α
new_alpha = α
alpha_D = 0
cost_1 = self.cost_function(self.x_opt)
[A,g] = self.calc_A(self.x_opt)
M_inv = np.linalg.inv(I + new_alpha*A)
while(new_alpha > 0.000000001):
if True: M_inv = self.constant_update_inv(self.x_opt, g, M_inv, alpha_D) # using woodbury inverse update
else: M_inv = np.linalg.inv(I + new_alpha*A) # using slow inverse
#next_x_o = M_inv.dot(I - new_alpha*A).dot(self.x_opt)
#M_inv = np.linalg.inv(I + new_alpha*A) # using slow inverse
#next_x = M_inv.dot(I - new_alpha*A).dot(self.x_opt)
#print '\n'
#print '------------------------------------', np.linalg.norm(next_x - next_x_old)
next_x = M_inv.dot(I - new_alpha*A).dot(self.x_opt)
cost_2 = self.cost_function(next_x)
if 'run_debug_1' in self.db: print(new_alpha, cost_1, cost_2)
#if((cost_2 < cost_1) or (abs(cost_1 - cost_2)/abs(cost_1) < 0.0000001)):
if(cost_2 < cost_1):
x_change = np.linalg.norm(next_x - self.x_opt)
[self.x_opt,R] = np.linalg.qr(next_x) # QR ensures orthogonality
self.cost_opt = cost_2
break
else:
old_alpha = new_alpha
new_alpha = new_alpha*0.2
alpha_D = new_alpha - old_alpha
m += 1
if 'run_debug_2' in self.db: print('Cost Norm : %.3f'%cost_2)
if 'run_debug_3' in self.db: print('Gradient Norm : %.3f'%np.linalg.norm(self.compute_gradient(self.x_opt)))
#print(x_change)
if(x_change < 0.001*np.linalg.norm(self.x_opt)): converged = True
#out_cost = self.cost_function(self.x_opt)
#print('\t\tin cost %.3f , out cost %.3f'%(in_cost,out_cost))
#if out_cost > in_cost:
# import pdb; pdb.set_trace()
return self.x_opt
|
[
"numpy.eye",
"numpy.linalg.qr",
"numpy.linalg.inv",
"numpy.linalg.norm"
] |
[((667, 676), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (673, 676), True, 'import numpy as np\n'), ((1154, 1163), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (1160, 1163), True, 'import numpy as np\n'), ((1197, 1219), 'numpy.linalg.norm', 'np.linalg.norm', (['x_init'], {}), '(x_init)\n', (1211, 1219), True, 'import numpy as np\n'), ((1461, 1493), 'numpy.linalg.inv', 'np.linalg.inv', (['(I + new_alpha * A)'], {}), '(I + new_alpha * A)\n', (1474, 1493), True, 'import numpy as np\n'), ((1656, 1688), 'numpy.linalg.inv', 'np.linalg.inv', (['(I + new_alpha * A)'], {}), '(I + new_alpha * A)\n', (1669, 1688), True, 'import numpy as np\n'), ((2295, 2330), 'numpy.linalg.norm', 'np.linalg.norm', (['(next_x - self.x_opt)'], {}), '(next_x - self.x_opt)\n', (2309, 2330), True, 'import numpy as np\n'), ((2353, 2373), 'numpy.linalg.qr', 'np.linalg.qr', (['next_x'], {}), '(next_x)\n', (2365, 2373), True, 'import numpy as np\n'), ((2779, 2805), 'numpy.linalg.norm', 'np.linalg.norm', (['self.x_opt'], {}), '(self.x_opt)\n', (2793, 2805), True, 'import numpy as np\n')]
|
"""
Tests for contingency table analyses.
"""
import os
import warnings
import numpy as np
import statsmodels.stats.contingency_tables as ctab
import pandas as pd
from numpy.testing import assert_allclose, assert_equal
import statsmodels.api as sm
cur_dir = os.path.dirname(os.path.abspath(__file__))
fname = "contingency_table_r_results.csv"
fpath = os.path.join(cur_dir, 'results', fname)
r_results = pd.read_csv(fpath)
tables = [None, None, None]
tables[0] = np.asarray([[23, 15], [19, 31]])
tables[1] = np.asarray([[144, 33, 84, 126],
[2, 4, 14, 29],
[0, 2, 6, 25],
[0, 0, 1, 5]])
tables[2] = np.asarray([[20, 10, 5],
[3, 30, 15],
[0, 5, 40]])
def test_homogeneity():
for k,table in enumerate(tables):
st = sm.stats.SquareTable(table, shift_zeros=False)
hm = st.homogeneity()
assert_allclose(hm.statistic, r_results.loc[k, "homog_stat"])
assert_allclose(hm.df, r_results.loc[k, "homog_df"])
# Test Bhapkar via its relationship to Stuart_Maxwell.
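        # Bhapkar's statistic equals the Stuart-Maxwell statistic divided by
        # (1 - SM / N), where N is the total table count; the assertion below
        # checks exactly that relationship.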
hmb = st.homogeneity(method="bhapkar")
assert_allclose(hmb.statistic, hm.statistic / (1 - hm.statistic / table.sum()))
def test_SquareTable_from_data():
np.random.seed(434)
df = pd.DataFrame(index=range(100), columns=["v1", "v2"])
df["v1"] = np.random.randint(0, 5, 100)
df["v2"] = np.random.randint(0, 5, 100)
table = pd.crosstab(df["v1"], df["v2"])
rslt1 = ctab.SquareTable(table)
rslt2 = ctab.SquareTable.from_data(df)
rslt3 = ctab.SquareTable(np.asarray(table))
assert_equal(rslt1.summary().as_text(),
rslt2.summary().as_text())
assert_equal(rslt2.summary().as_text(),
rslt3.summary().as_text())
s = str(rslt1)
assert_equal(s.startswith('A 5x5 contingency table with counts:'), True)
assert_equal(rslt1.table[0, 0], 8.)
def test_SquareTable_nonsquare():
tab = [[1, 0, 3], [2, 1, 4], [3, 0, 5]]
df = pd.DataFrame(tab, index=[0, 1, 3], columns=[0, 2, 3])
df2 = ctab.SquareTable(df, shift_zeros=False)
e = np.asarray([[1, 0, 0, 3], [2, 0, 1, 4], [0, 0, 0, 0], [3, 0, 0, 5]],
dtype=np.float64)
assert_equal(e, df2.table)
def test_cumulative_odds():
table = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
table = np.asarray(table)
tbl_obj = ctab.Table(table)
cum_odds = tbl_obj.cumulative_oddsratios
assert_allclose(cum_odds[0, 0], 28 / float(5 * 11))
assert_allclose(cum_odds[0, 1], (3 * 15) / float(3 * 24), atol=1e-5,
rtol=1e-5)
assert_allclose(np.log(cum_odds), tbl_obj.cumulative_log_oddsratios,
atol=1e-5, rtol=1e-5)
def test_local_odds():
table = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
table = np.asarray(table)
tbl_obj = ctab.Table(table)
loc_odds = tbl_obj.local_oddsratios
assert_allclose(loc_odds[0, 0], 5 / 8.)
assert_allclose(loc_odds[0, 1], 12 / float(15), atol=1e-5,
rtol=1e-5)
assert_allclose(np.log(loc_odds), tbl_obj.local_log_oddsratios,
atol=1e-5, rtol=1e-5)
def test_shifting():
t = np.zeros((3, 4), dtype=np.float64)
result = np.full((3, 4), 0.5)
assert_equal(ctab.Table(t, shift_zeros=False).table, t)
assert_equal(ctab.Table(t, shift_zeros=True).table, result)
t = np.asarray([[0, 1, 2],
[3, 0, 4],
[5, 6, 0]], dtype=np.float64)
r = np.asarray([[0.5, 1, 2],
[3, 0.5, 4],
[5, 6, 0.5]], dtype=np.float64)
assert_equal(ctab.Table(t).table, r)
assert_equal(ctab.Table(t, shift_zeros=True).table, r)
def test_stratified_table_cube():
# Test that we can pass a rank 3 ndarray or a list of rank 2
# ndarrays to StratifiedTable and get the same results.
tab1 = [[[8, 9], [6, 7]], [[4, 9], [5, 5]], [[8, 8], [9, 11]]]
tab2 = np.asarray(tab1).T
ct1 = ctab.StratifiedTable(tab1)
ct2 = ctab.StratifiedTable(tab2)
assert_allclose(ct1.oddsratio_pooled, ct2.oddsratio_pooled)
assert_allclose(ct1.logodds_pooled, ct2.logodds_pooled)
def test_resids():
# CHD x serum data
table = [[12, 8, 31, 41], [307, 246, 439, 245]]
# These results come from SAS
fit = [[22.083, 17.583, 32.536, 19.798],
[296.92, 236.42, 437.46, 266.2]]
c2 = [[4.6037, 5.223, 0.0725, 22.704],
[0.3424, 0.3885, 0.0054, 1.6886]]
# These are regression tests
pr = np.array([[-2.14562121, -2.28538719, -0.26923882, 4.7649169 ],
[ 0.58514314, 0.62325942, 0.07342547, -1.29946443]])
sr = np.array([[-2.55112945, -2.6338782 , -0.34712127, 5.5751083 ],
[ 2.55112945, 2.6338782 , 0.34712127, -5.5751083 ]])
tab = ctab.Table(table)
assert_allclose(tab.fittedvalues, fit, atol=1e-4, rtol=1e-4)
assert_allclose(tab.chi2_contribs, c2, atol=1e-4, rtol=1e-4)
assert_allclose(tab.resid_pearson, pr, atol=1e-4, rtol=1e-4)
assert_allclose(tab.standardized_resids, sr, atol=1e-4, rtol=1e-4)
def test_ordinal_association():
for k,table in enumerate(tables):
row_scores = 1 + np.arange(table.shape[0])
col_scores = 1 + np.arange(table.shape[1])
# First set of scores
rslt = ctab.Table(table, shift_zeros=False).test_ordinal_association(row_scores, col_scores)
assert_allclose(rslt.statistic, r_results.loc[k, "lbl_stat"])
assert_allclose(rslt.null_mean, r_results.loc[k, "lbl_expval"])
assert_allclose(rslt.null_sd**2, r_results.loc[k, "lbl_var"])
assert_allclose(rslt.zscore**2, r_results.loc[k, "lbl_chi2"], rtol=1e-5, atol=1e-5)
assert_allclose(rslt.pvalue, r_results.loc[k, "lbl_pvalue"], rtol=1e-5, atol=1e-5)
# Second set of scores
rslt = ctab.Table(table, shift_zeros=False).test_ordinal_association(row_scores, col_scores**2)
assert_allclose(rslt.statistic, r_results.loc[k, "lbl2_stat"])
assert_allclose(rslt.null_mean, r_results.loc[k, "lbl2_expval"])
assert_allclose(rslt.null_sd**2, r_results.loc[k, "lbl2_var"])
assert_allclose(rslt.zscore**2, r_results.loc[k, "lbl2_chi2"])
assert_allclose(rslt.pvalue, r_results.loc[k, "lbl2_pvalue"], rtol=1e-5, atol=1e-5)
def test_chi2_association():
np.random.seed(8743)
table = np.random.randint(10, 30, size=(4, 4))
from scipy.stats import chi2_contingency
rslt_scipy = chi2_contingency(table)
b = ctab.Table(table).test_nominal_association()
assert_allclose(b.statistic, rslt_scipy[0])
assert_allclose(b.pvalue, rslt_scipy[1])
def test_symmetry():
for k,table in enumerate(tables):
st = sm.stats.SquareTable(table, shift_zeros=False)
b = st.symmetry()
assert_allclose(b.statistic, r_results.loc[k, "bowker_stat"])
assert_equal(b.df, r_results.loc[k, "bowker_df"])
assert_allclose(b.pvalue, r_results.loc[k, "bowker_pvalue"])
def test_mcnemar():
# Use chi^2 without continuity correction
b1 = ctab.mcnemar(tables[0], exact=False, correction=False)
st = sm.stats.SquareTable(tables[0])
b2 = st.homogeneity()
assert_allclose(b1.statistic, b2.statistic)
assert_equal(b2.df, 1)
# Use chi^2 with continuity correction
b3 = ctab.mcnemar(tables[0], exact=False, correction=True)
assert_allclose(b3.pvalue, r_results.loc[0, "homog_cont_p"])
# Use binomial reference distribution
b4 = ctab.mcnemar(tables[0], exact=True)
assert_allclose(b4.pvalue, r_results.loc[0, "homog_binom_p"])
def test_from_data_stratified():
df = pd.DataFrame([[1, 1, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1]]).T
e = np.asarray([[[0, 1], [1, 1]], [[2, 2], [1, 0]]])
# Test pandas
tab1 = ctab.StratifiedTable.from_data(0, 1, 2, df)
assert_equal(tab1.table, e)
# Test ndarray
tab1 = ctab.StratifiedTable.from_data(0, 1, 2, np.asarray(df))
assert_equal(tab1.table, e)
def test_from_data_2x2():
df = pd.DataFrame([[1, 1, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 0]]).T
e = np.asarray([[1, 2], [4, 1]])
# Test pandas
tab1 = ctab.Table2x2.from_data(df, shift_zeros=False)
assert_equal(tab1.table, e)
# Test ndarray
tab1 = ctab.Table2x2.from_data(np.asarray(df), shift_zeros=False)
assert_equal(tab1.table, e)
def test_cochranq():
# library(CVST)
# table1 = matrix(c(1, 0, 1, 1,
# 0, 1, 1, 1,
# 1, 1, 1, 0,
# 0, 1, 0, 0,
# 0, 1, 0, 0,
# 1, 0, 1, 0,
# 0, 1, 0, 0,
# 1, 1, 1, 1,
# 0, 1, 0, 0), ncol=4, byrow=TRUE)
# rslt1 = cochranq.test(table1)
# table2 = matrix(c(0, 0, 1, 1, 0,
# 0, 1, 0, 1, 0,
# 0, 1, 1, 0, 1,
# 1, 0, 0, 0, 1,
# 1, 1, 0, 0, 0,
# 1, 0, 1, 0, 0,
# 0, 1, 0, 0, 0,
# 0, 0, 1, 1, 0,
# 0, 0, 0, 0, 0), ncol=5, byrow=TRUE)
# rslt2 = cochranq.test(table2)
table = [[1, 0, 1, 1],
[0, 1, 1, 1],
[1, 1, 1, 0],
[0, 1, 0, 0],
[0, 1, 0, 0],
[1, 0, 1, 0],
[0, 1, 0, 0],
[1, 1, 1, 1],
[0, 1, 0, 0]]
table = np.asarray(table)
stat, pvalue, df = ctab.cochrans_q(table, return_object=False)
assert_allclose(stat, 4.2)
assert_allclose(df, 3)
table = [[0, 0, 1, 1, 0],
[0, 1, 0, 1, 0],
[0, 1, 1, 0, 1],
[1, 0, 0, 0, 1],
[1, 1, 0, 0, 0],
[1, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 1, 0],
[0, 0, 0, 0, 0]]
table = np.asarray(table)
stat, pvalue, df = ctab.cochrans_q(table, return_object=False)
assert_allclose(stat, 1.2174, rtol=1e-4)
assert_allclose(df, 4)
# Cochran's q and Mcnemar are equivalent for 2x2 tables
data = table[:, 0:2]
xtab = np.asarray(pd.crosstab(data[:, 0], data[:, 1]))
b1 = ctab.cochrans_q(data, return_object=True)
b2 = ctab.mcnemar(xtab, exact=False, correction=False)
assert_allclose(b1.statistic, b2.statistic)
assert_allclose(b1.pvalue, b2.pvalue)
# Test for printing bunch
assert_equal(str(b1).startswith("df 1\npvalue 0.65"), True)
class CheckStratifiedMixin:
@classmethod
def initialize(cls, tables, use_arr=False):
tables1 = tables if not use_arr else np.dstack(tables)
cls.rslt = ctab.StratifiedTable(tables1)
cls.rslt_0 = ctab.StratifiedTable(tables, shift_zeros=True)
tables_pandas = [pd.DataFrame(x) for x in tables]
cls.rslt_pandas = ctab.StratifiedTable(tables_pandas)
def test_oddsratio_pooled(self):
assert_allclose(self.rslt.oddsratio_pooled, self.oddsratio_pooled,
rtol=1e-4, atol=1e-4)
def test_logodds_pooled(self):
assert_allclose(self.rslt.logodds_pooled, self.logodds_pooled,
rtol=1e-4, atol=1e-4)
def test_null_odds(self):
rslt = self.rslt.test_null_odds(correction=True)
assert_allclose(rslt.statistic, self.mh_stat, rtol=1e-4, atol=1e-5)
assert_allclose(rslt.pvalue, self.mh_pvalue, rtol=1e-4, atol=1e-4)
def test_oddsratio_pooled_confint(self):
lcb, ucb = self.rslt.oddsratio_pooled_confint()
assert_allclose(lcb, self.or_lcb, rtol=1e-4, atol=1e-4)
assert_allclose(ucb, self.or_ucb, rtol=1e-4, atol=1e-4)
def test_logodds_pooled_confint(self):
lcb, ucb = self.rslt.logodds_pooled_confint()
assert_allclose(lcb, np.log(self.or_lcb), rtol=1e-4,
atol=1e-4)
assert_allclose(ucb, np.log(self.or_ucb), rtol=1e-4,
atol=1e-4)
def test_equal_odds(self):
if not hasattr(self, "or_homog"):
return
rslt = self.rslt.test_equal_odds(adjust=False)
assert_allclose(rslt.statistic, self.or_homog, rtol=1e-4, atol=1e-4)
assert_allclose(rslt.pvalue, self.or_homog_p, rtol=1e-4, atol=1e-4)
rslt = self.rslt.test_equal_odds(adjust=True)
assert_allclose(rslt.statistic, self.or_homog_adj, rtol=1e-4, atol=1e-4)
assert_allclose(rslt.pvalue, self.or_homog_adj_p, rtol=1e-4, atol=1e-4)
def test_pandas(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
assert_equal(self.rslt.summary().as_text(),
self.rslt_pandas.summary().as_text())
def test_from_data(self):
np.random.seed(241)
df = pd.DataFrame(index=range(100), columns=("v1", "v2", "strat"))
df["v1"] = np.random.randint(0, 2, 100)
df["v2"] = np.random.randint(0, 2, 100)
df["strat"] = np.kron(np.arange(10), np.ones(10))
tables = []
for k in range(10):
ii = np.arange(10*k, 10*(k+1))
tables.append(pd.crosstab(df.loc[ii, "v1"], df.loc[ii, "v2"]))
rslt1 = ctab.StratifiedTable(tables)
rslt2 = ctab.StratifiedTable.from_data("v1", "v2", "strat", df)
assert_equal(rslt1.summary().as_text(), rslt2.summary().as_text())
class TestStratified1(CheckStratifiedMixin):
"""
data = array(c(0, 0, 6, 5,
3, 0, 3, 6,
6, 2, 0, 4,
5, 6, 1, 0,
2, 5, 0, 0),
dim=c(2, 2, 5))
rslt = mantelhaen.test(data)
"""
@classmethod
def setup_class(cls):
tables = [None] * 5
tables[0] = np.array([[0, 0], [6, 5]])
tables[1] = np.array([[3, 0], [3, 6]])
tables[2] = np.array([[6, 2], [0, 4]])
tables[3] = np.array([[5, 6], [1, 0]])
tables[4] = np.array([[2, 5], [0, 0]])
cls.initialize(tables)
cls.oddsratio_pooled = 7
cls.logodds_pooled = np.log(7)
cls.mh_stat = 3.9286
cls.mh_pvalue = 0.04747
cls.or_lcb = 1.026713
cls.or_ucb = 47.725133
class TestStratified2(CheckStratifiedMixin):
"""
library(DescTools)
data = array(c(20, 14, 10, 24,
15, 12, 3, 15,
3, 2, 3, 2,
12, 3, 7, 5,
1, 0, 3, 2),
dim=c(2, 2, 5))
rslt = mantelhaen.test(data)
bd1 = BreslowDayTest(data, correct=FALSE)
bd2 = BreslowDayTest(data, correct=TRUE)
"""
@classmethod
def setup_class(cls):
tables = [None] * 5
tables[0] = np.array([[20, 14], [10, 24]])
tables[1] = np.array([[15, 12], [3, 15]])
tables[2] = np.array([[3, 2], [3, 2]])
tables[3] = np.array([[12, 3], [7, 5]])
tables[4] = np.array([[1, 0], [3, 2]])
# check array of int
cls.initialize(tables, use_arr=True)
cls.oddsratio_pooled = 3.5912
cls.logodds_pooled = np.log(3.5912)
cls.mh_stat = 11.8852
cls.mh_pvalue = 0.0005658
cls.or_lcb = 1.781135
cls.or_ucb = 7.240633
# Breslow Day test without Tarone adjustment
cls.or_homog = 1.8438
cls.or_homog_p = 0.7645
# Breslow Day test with Tarone adjustment
cls.or_homog_adj = 1.8436
cls.or_homog_adj_p = 0.7645
class TestStratified3(CheckStratifiedMixin):
"""
library(DescTools)
data = array(c(313, 512, 19, 89,
207, 353, 8, 17,
205, 120, 391, 202,
278, 139, 244, 131,
138, 53, 299, 94,
351, 22, 317, 24),
dim=c(2, 2, 6))
rslt = mantelhaen.test(data)
bd1 = BreslowDayTest(data, correct=FALSE)
bd2 = BreslowDayTest(data, correct=TRUE)
"""
@classmethod
def setup_class(cls):
tables = [None] * 6
tables[0] = np.array([[313, 512], [19, 89]])
tables[1] = np.array([[207, 353], [8, 17]])
tables[2] = np.array([[205, 120], [391, 202]])
tables[3] = np.array([[278, 139], [244, 131]])
tables[4] = np.array([[138, 53], [299, 94]])
tables[5] = np.array([[351, 22], [317, 24]])
cls.initialize(tables)
cls.oddsratio_pooled = 1.101879
cls.logodds_pooled = np.log(1.101879)
cls.mh_stat = 1.3368
cls.mh_pvalue = 0.2476
cls.or_lcb = 0.9402012
cls.or_ucb = 1.2913602
# Breslow Day test without Tarone adjustment
cls.or_homog = 18.83297
cls.or_homog_p = 0.002064786
# Breslow Day test with Tarone adjustment
cls.or_homog_adj = 18.83297
cls.or_homog_adj_p = 0.002064786
class Check2x2Mixin:
@classmethod
def initialize(cls):
cls.tbl_obj = ctab.Table2x2(cls.table)
cls.tbl_data_obj = ctab.Table2x2.from_data(cls.data)
def test_oddsratio(self):
assert_allclose(self.tbl_obj.oddsratio, self.oddsratio)
def test_log_oddsratio(self):
assert_allclose(self.tbl_obj.log_oddsratio, self.log_oddsratio)
def test_log_oddsratio_se(self):
assert_allclose(self.tbl_obj.log_oddsratio_se, self.log_oddsratio_se)
def test_oddsratio_pvalue(self):
assert_allclose(self.tbl_obj.oddsratio_pvalue(), self.oddsratio_pvalue)
def test_oddsratio_confint(self):
lcb1, ucb1 = self.tbl_obj.oddsratio_confint(0.05)
lcb2, ucb2 = self.oddsratio_confint
assert_allclose(lcb1, lcb2)
assert_allclose(ucb1, ucb2)
def test_riskratio(self):
assert_allclose(self.tbl_obj.riskratio, self.riskratio)
def test_log_riskratio(self):
assert_allclose(self.tbl_obj.log_riskratio, self.log_riskratio)
def test_log_riskratio_se(self):
assert_allclose(self.tbl_obj.log_riskratio_se, self.log_riskratio_se)
def test_riskratio_pvalue(self):
assert_allclose(self.tbl_obj.riskratio_pvalue(), self.riskratio_pvalue)
def test_riskratio_confint(self):
lcb1, ucb1 = self.tbl_obj.riskratio_confint(0.05)
lcb2, ucb2 = self.riskratio_confint
assert_allclose(lcb1, lcb2)
assert_allclose(ucb1, ucb2)
def test_log_riskratio_confint(self):
lcb1, ucb1 = self.tbl_obj.log_riskratio_confint(0.05)
lcb2, ucb2 = self.log_riskratio_confint
assert_allclose(lcb1, lcb2)
assert_allclose(ucb1, ucb2)
def test_from_data(self):
assert_equal(self.tbl_obj.summary().as_text(),
self.tbl_data_obj.summary().as_text())
def test_summary(self):
assert_equal(self.tbl_obj.summary().as_text(),
self.summary_string)
class Test2x2_1(Check2x2Mixin):
@classmethod
def setup_class(cls):
data = np.zeros((8, 2))
data[:, 0] = [0, 0, 1, 1, 0, 0, 1, 1]
data[:, 1] = [0, 1, 0, 1, 0, 1, 0, 1]
cls.data = np.asarray(data)
cls.table = np.asarray([[2, 2], [2, 2]])
cls.oddsratio = 1.
cls.log_oddsratio = 0.
cls.log_oddsratio_se = np.sqrt(2)
cls.oddsratio_confint = [0.062548836166112329, 15.987507702689751]
cls.oddsratio_pvalue = 1.
cls.riskratio = 1.
cls.log_riskratio = 0.
cls.log_riskratio_se = 1 / np.sqrt(2)
cls.riskratio_pvalue = 1.
cls.riskratio_confint = [0.25009765325990629,
3.9984381579173824]
cls.log_riskratio_confint = [-1.3859038243496782,
1.3859038243496782]
ss = [ ' Estimate SE LCB UCB p-value',
'---------------------------------------------------',
'Odds ratio 1.000 0.063 15.988 1.000',
'Log odds ratio 0.000 1.414 -2.772 2.772 1.000',
'Risk ratio 1.000 0.250 3.998 1.000',
'Log risk ratio 0.000 0.707 -1.386 1.386 1.000',
'---------------------------------------------------']
cls.summary_string = '\n'.join(ss)
cls.initialize()
|
[
"numpy.random.seed",
"pandas.read_csv",
"numpy.ones",
"numpy.random.randint",
"numpy.arange",
"os.path.join",
"pandas.DataFrame",
"numpy.full",
"os.path.abspath",
"warnings.simplefilter",
"statsmodels.stats.contingency_tables.mcnemar",
"warnings.catch_warnings",
"numpy.testing.assert_equal",
"numpy.testing.assert_allclose",
"statsmodels.stats.contingency_tables.Table2x2",
"numpy.dstack",
"statsmodels.stats.contingency_tables.SquareTable.from_data",
"numpy.asarray",
"statsmodels.stats.contingency_tables.StratifiedTable",
"statsmodels.api.stats.SquareTable",
"statsmodels.stats.contingency_tables.cochrans_q",
"pandas.crosstab",
"numpy.log",
"statsmodels.stats.contingency_tables.SquareTable",
"statsmodels.stats.contingency_tables.Table",
"numpy.zeros",
"statsmodels.stats.contingency_tables.StratifiedTable.from_data",
"statsmodels.stats.contingency_tables.Table2x2.from_data",
"numpy.array",
"scipy.stats.chi2_contingency",
"numpy.sqrt"
] |
[((354, 393), 'os.path.join', 'os.path.join', (['cur_dir', '"""results"""', 'fname'], {}), "(cur_dir, 'results', fname)\n", (366, 393), False, 'import os\n'), ((406, 424), 'pandas.read_csv', 'pd.read_csv', (['fpath'], {}), '(fpath)\n', (417, 424), True, 'import pandas as pd\n'), ((468, 500), 'numpy.asarray', 'np.asarray', (['[[23, 15], [19, 31]]'], {}), '([[23, 15], [19, 31]])\n', (478, 500), True, 'import numpy as np\n'), ((514, 591), 'numpy.asarray', 'np.asarray', (['[[144, 33, 84, 126], [2, 4, 14, 29], [0, 2, 6, 25], [0, 0, 1, 5]]'], {}), '([[144, 33, 84, 126], [2, 4, 14, 29], [0, 2, 6, 25], [0, 0, 1, 5]])\n', (524, 591), True, 'import numpy as np\n'), ((677, 727), 'numpy.asarray', 'np.asarray', (['[[20, 10, 5], [3, 30, 15], [0, 5, 40]]'], {}), '([[20, 10, 5], [3, 30, 15], [0, 5, 40]])\n', (687, 727), True, 'import numpy as np\n'), ((277, 302), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (292, 302), False, 'import os\n'), ((1302, 1321), 'numpy.random.seed', 'np.random.seed', (['(434)'], {}), '(434)\n', (1316, 1321), True, 'import numpy as np\n'), ((1399, 1427), 'numpy.random.randint', 'np.random.randint', (['(0)', '(5)', '(100)'], {}), '(0, 5, 100)\n', (1416, 1427), True, 'import numpy as np\n'), ((1443, 1471), 'numpy.random.randint', 'np.random.randint', (['(0)', '(5)', '(100)'], {}), '(0, 5, 100)\n', (1460, 1471), True, 'import numpy as np\n'), ((1484, 1515), 'pandas.crosstab', 'pd.crosstab', (["df['v1']", "df['v2']"], {}), "(df['v1'], df['v2'])\n", (1495, 1515), True, 'import pandas as pd\n'), ((1529, 1552), 'statsmodels.stats.contingency_tables.SquareTable', 'ctab.SquareTable', (['table'], {}), '(table)\n', (1545, 1552), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((1565, 1595), 'statsmodels.stats.contingency_tables.SquareTable.from_data', 'ctab.SquareTable.from_data', (['df'], {}), '(df)\n', (1591, 1595), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((1923, 1959), 'numpy.testing.assert_equal', 'assert_equal', (['rslt1.table[0, 0]', '(8.0)'], {}), '(rslt1.table[0, 0], 8.0)\n', (1935, 1959), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((2049, 2102), 'pandas.DataFrame', 'pd.DataFrame', (['tab'], {'index': '[0, 1, 3]', 'columns': '[0, 2, 3]'}), '(tab, index=[0, 1, 3], columns=[0, 2, 3])\n', (2061, 2102), True, 'import pandas as pd\n'), ((2114, 2153), 'statsmodels.stats.contingency_tables.SquareTable', 'ctab.SquareTable', (['df'], {'shift_zeros': '(False)'}), '(df, shift_zeros=False)\n', (2130, 2153), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((2163, 2254), 'numpy.asarray', 'np.asarray', (['[[1, 0, 0, 3], [2, 0, 1, 4], [0, 0, 0, 0], [3, 0, 0, 5]]'], {'dtype': 'np.float64'}), '([[1, 0, 0, 3], [2, 0, 1, 4], [0, 0, 0, 0], [3, 0, 0, 5]], dtype=\n np.float64)\n', (2173, 2254), True, 'import numpy as np\n'), ((2274, 2300), 'numpy.testing.assert_equal', 'assert_equal', (['e', 'df2.table'], {}), '(e, df2.table)\n', (2286, 2300), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((2390, 2407), 'numpy.asarray', 'np.asarray', (['table'], {}), '(table)\n', (2400, 2407), True, 'import numpy as np\n'), ((2422, 2439), 'statsmodels.stats.contingency_tables.Table', 'ctab.Table', (['table'], {}), '(table)\n', (2432, 2439), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((2845, 2862), 'numpy.asarray', 'np.asarray', (['table'], {}), '(table)\n', (2855, 2862), True, 'import numpy as np\n'), ((2877, 2894), 'statsmodels.stats.contingency_tables.Table', 
'ctab.Table', (['table'], {}), '(table)\n', (2887, 2894), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((2940, 2980), 'numpy.testing.assert_allclose', 'assert_allclose', (['loc_odds[0, 0]', '(5 / 8.0)'], {}), '(loc_odds[0, 0], 5 / 8.0)\n', (2955, 2980), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((3216, 3250), 'numpy.zeros', 'np.zeros', (['(3, 4)'], {'dtype': 'np.float64'}), '((3, 4), dtype=np.float64)\n', (3224, 3250), True, 'import numpy as np\n'), ((3264, 3284), 'numpy.full', 'np.full', (['(3, 4)', '(0.5)'], {}), '((3, 4), 0.5)\n', (3271, 3284), True, 'import numpy as np\n'), ((3418, 3481), 'numpy.asarray', 'np.asarray', (['[[0, 1, 2], [3, 0, 4], [5, 6, 0]]'], {'dtype': 'np.float64'}), '([[0, 1, 2], [3, 0, 4], [5, 6, 0]], dtype=np.float64)\n', (3428, 3481), True, 'import numpy as np\n'), ((3530, 3599), 'numpy.asarray', 'np.asarray', (['[[0.5, 1, 2], [3, 0.5, 4], [5, 6, 0.5]]'], {'dtype': 'np.float64'}), '([[0.5, 1, 2], [3, 0.5, 4], [5, 6, 0.5]], dtype=np.float64)\n', (3540, 3599), True, 'import numpy as np\n'), ((4010, 4036), 'statsmodels.stats.contingency_tables.StratifiedTable', 'ctab.StratifiedTable', (['tab1'], {}), '(tab1)\n', (4030, 4036), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((4047, 4073), 'statsmodels.stats.contingency_tables.StratifiedTable', 'ctab.StratifiedTable', (['tab2'], {}), '(tab2)\n', (4067, 4073), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((4079, 4138), 'numpy.testing.assert_allclose', 'assert_allclose', (['ct1.oddsratio_pooled', 'ct2.oddsratio_pooled'], {}), '(ct1.oddsratio_pooled, ct2.oddsratio_pooled)\n', (4094, 4138), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4143, 4198), 'numpy.testing.assert_allclose', 'assert_allclose', (['ct1.logodds_pooled', 'ct2.logodds_pooled'], {}), '(ct1.logodds_pooled, ct2.logodds_pooled)\n', (4158, 4198), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4550, 4668), 'numpy.array', 'np.array', (['[[-2.14562121, -2.28538719, -0.26923882, 4.7649169], [0.58514314, \n 0.62325942, 0.07342547, -1.29946443]]'], {}), '([[-2.14562121, -2.28538719, -0.26923882, 4.7649169], [0.58514314, \n 0.62325942, 0.07342547, -1.29946443]])\n', (4558, 4668), True, 'import numpy as np\n'), ((4697, 4812), 'numpy.array', 'np.array', (['[[-2.55112945, -2.6338782, -0.34712127, 5.5751083], [2.55112945, 2.6338782,\n 0.34712127, -5.5751083]]'], {}), '([[-2.55112945, -2.6338782, -0.34712127, 5.5751083], [2.55112945, \n 2.6338782, 0.34712127, -5.5751083]])\n', (4705, 4812), True, 'import numpy as np\n'), ((4846, 4863), 'statsmodels.stats.contingency_tables.Table', 'ctab.Table', (['table'], {}), '(table)\n', (4856, 4863), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((4868, 4932), 'numpy.testing.assert_allclose', 'assert_allclose', (['tab.fittedvalues', 'fit'], {'atol': '(0.0001)', 'rtol': '(0.0001)'}), '(tab.fittedvalues, fit, atol=0.0001, rtol=0.0001)\n', (4883, 4932), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4933, 4997), 'numpy.testing.assert_allclose', 'assert_allclose', (['tab.chi2_contribs', 'c2'], {'atol': '(0.0001)', 'rtol': '(0.0001)'}), '(tab.chi2_contribs, c2, atol=0.0001, rtol=0.0001)\n', (4948, 4997), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4998, 5062), 'numpy.testing.assert_allclose', 'assert_allclose', (['tab.resid_pearson', 'pr'], {'atol': '(0.0001)', 'rtol': '(0.0001)'}), '(tab.resid_pearson, pr, atol=0.0001, rtol=0.0001)\n', (5013, 
5062), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((5063, 5133), 'numpy.testing.assert_allclose', 'assert_allclose', (['tab.standardized_resids', 'sr'], {'atol': '(0.0001)', 'rtol': '(0.0001)'}), '(tab.standardized_resids, sr, atol=0.0001, rtol=0.0001)\n', (5078, 5133), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6383, 6403), 'numpy.random.seed', 'np.random.seed', (['(8743)'], {}), '(8743)\n', (6397, 6403), True, 'import numpy as np\n'), ((6417, 6455), 'numpy.random.randint', 'np.random.randint', (['(10)', '(30)'], {'size': '(4, 4)'}), '(10, 30, size=(4, 4))\n', (6434, 6455), True, 'import numpy as np\n'), ((6519, 6542), 'scipy.stats.chi2_contingency', 'chi2_contingency', (['table'], {}), '(table)\n', (6535, 6542), False, 'from scipy.stats import chi2_contingency\n'), ((6602, 6645), 'numpy.testing.assert_allclose', 'assert_allclose', (['b.statistic', 'rslt_scipy[0]'], {}), '(b.statistic, rslt_scipy[0])\n', (6617, 6645), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6650, 6690), 'numpy.testing.assert_allclose', 'assert_allclose', (['b.pvalue', 'rslt_scipy[1]'], {}), '(b.pvalue, rslt_scipy[1])\n', (6665, 6690), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((7114, 7168), 'statsmodels.stats.contingency_tables.mcnemar', 'ctab.mcnemar', (['tables[0]'], {'exact': '(False)', 'correction': '(False)'}), '(tables[0], exact=False, correction=False)\n', (7126, 7168), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((7179, 7210), 'statsmodels.api.stats.SquareTable', 'sm.stats.SquareTable', (['tables[0]'], {}), '(tables[0])\n', (7199, 7210), True, 'import statsmodels.api as sm\n'), ((7241, 7284), 'numpy.testing.assert_allclose', 'assert_allclose', (['b1.statistic', 'b2.statistic'], {}), '(b1.statistic, b2.statistic)\n', (7256, 7284), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((7289, 7311), 'numpy.testing.assert_equal', 'assert_equal', (['b2.df', '(1)'], {}), '(b2.df, 1)\n', (7301, 7311), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((7365, 7418), 'statsmodels.stats.contingency_tables.mcnemar', 'ctab.mcnemar', (['tables[0]'], {'exact': '(False)', 'correction': '(True)'}), '(tables[0], exact=False, correction=True)\n', (7377, 7418), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((7423, 7483), 'numpy.testing.assert_allclose', 'assert_allclose', (['b3.pvalue', "r_results.loc[0, 'homog_cont_p']"], {}), "(b3.pvalue, r_results.loc[0, 'homog_cont_p'])\n", (7438, 7483), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((7536, 7571), 'statsmodels.stats.contingency_tables.mcnemar', 'ctab.mcnemar', (['tables[0]'], {'exact': '(True)'}), '(tables[0], exact=True)\n', (7548, 7571), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((7576, 7637), 'numpy.testing.assert_allclose', 'assert_allclose', (['b4.pvalue', "r_results.loc[0, 'homog_binom_p']"], {}), "(b4.pvalue, r_results.loc[0, 'homog_binom_p'])\n", (7591, 7637), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((7808, 7856), 'numpy.asarray', 'np.asarray', (['[[[0, 1], [1, 1]], [[2, 2], [1, 0]]]'], {}), '([[[0, 1], [1, 1]], [[2, 2], [1, 0]]])\n', (7818, 7856), True, 'import numpy as np\n'), ((7887, 7930), 'statsmodels.stats.contingency_tables.StratifiedTable.from_data', 'ctab.StratifiedTable.from_data', (['(0)', '(1)', '(2)', 'df'], {}), '(0, 1, 2, df)\n', (7917, 7930), True, 'import statsmodels.stats.contingency_tables as 
ctab\n'), ((7935, 7962), 'numpy.testing.assert_equal', 'assert_equal', (['tab1.table', 'e'], {}), '(tab1.table, e)\n', (7947, 7962), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((8054, 8081), 'numpy.testing.assert_equal', 'assert_equal', (['tab1.table', 'e'], {}), '(tab1.table, e)\n', (8066, 8081), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((8196, 8224), 'numpy.asarray', 'np.asarray', (['[[1, 2], [4, 1]]'], {}), '([[1, 2], [4, 1]])\n', (8206, 8224), True, 'import numpy as np\n'), ((8255, 8301), 'statsmodels.stats.contingency_tables.Table2x2.from_data', 'ctab.Table2x2.from_data', (['df'], {'shift_zeros': '(False)'}), '(df, shift_zeros=False)\n', (8278, 8301), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((8306, 8333), 'numpy.testing.assert_equal', 'assert_equal', (['tab1.table', 'e'], {}), '(tab1.table, e)\n', (8318, 8333), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((8428, 8455), 'numpy.testing.assert_equal', 'assert_equal', (['tab1.table', 'e'], {}), '(tab1.table, e)\n', (8440, 8455), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((9544, 9561), 'numpy.asarray', 'np.asarray', (['table'], {}), '(table)\n', (9554, 9561), True, 'import numpy as np\n'), ((9586, 9629), 'statsmodels.stats.contingency_tables.cochrans_q', 'ctab.cochrans_q', (['table'], {'return_object': '(False)'}), '(table, return_object=False)\n', (9601, 9629), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((9634, 9660), 'numpy.testing.assert_allclose', 'assert_allclose', (['stat', '(4.2)'], {}), '(stat, 4.2)\n', (9649, 9660), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((9665, 9687), 'numpy.testing.assert_allclose', 'assert_allclose', (['df', '(3)'], {}), '(df, 3)\n', (9680, 9687), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((9971, 9988), 'numpy.asarray', 'np.asarray', (['table'], {}), '(table)\n', (9981, 9988), True, 'import numpy as np\n'), ((10013, 10056), 'statsmodels.stats.contingency_tables.cochrans_q', 'ctab.cochrans_q', (['table'], {'return_object': '(False)'}), '(table, return_object=False)\n', (10028, 10056), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((10061, 10103), 'numpy.testing.assert_allclose', 'assert_allclose', (['stat', '(1.2174)'], {'rtol': '(0.0001)'}), '(stat, 1.2174, rtol=0.0001)\n', (10076, 10103), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((10106, 10128), 'numpy.testing.assert_allclose', 'assert_allclose', (['df', '(4)'], {}), '(df, 4)\n', (10121, 10128), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((10283, 10324), 'statsmodels.stats.contingency_tables.cochrans_q', 'ctab.cochrans_q', (['data'], {'return_object': '(True)'}), '(data, return_object=True)\n', (10298, 10324), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((10334, 10383), 'statsmodels.stats.contingency_tables.mcnemar', 'ctab.mcnemar', (['xtab'], {'exact': '(False)', 'correction': '(False)'}), '(xtab, exact=False, correction=False)\n', (10346, 10383), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((10388, 10431), 'numpy.testing.assert_allclose', 'assert_allclose', (['b1.statistic', 'b2.statistic'], {}), '(b1.statistic, b2.statistic)\n', (10403, 10431), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((10436, 10473), 'numpy.testing.assert_allclose', 'assert_allclose', (['b1.pvalue', 'b2.pvalue'], {}), '(b1.pvalue, b2.pvalue)\n', 
(10451, 10473), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((854, 900), 'statsmodels.api.stats.SquareTable', 'sm.stats.SquareTable', (['table'], {'shift_zeros': '(False)'}), '(table, shift_zeros=False)\n', (874, 900), True, 'import statsmodels.api as sm\n'), ((939, 1000), 'numpy.testing.assert_allclose', 'assert_allclose', (['hm.statistic', "r_results.loc[k, 'homog_stat']"], {}), "(hm.statistic, r_results.loc[k, 'homog_stat'])\n", (954, 1000), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((1009, 1061), 'numpy.testing.assert_allclose', 'assert_allclose', (['hm.df', "r_results.loc[k, 'homog_df']"], {}), "(hm.df, r_results.loc[k, 'homog_df'])\n", (1024, 1061), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((1625, 1642), 'numpy.asarray', 'np.asarray', (['table'], {}), '(table)\n', (1635, 1642), True, 'import numpy as np\n'), ((2666, 2682), 'numpy.log', 'np.log', (['cum_odds'], {}), '(cum_odds)\n', (2672, 2682), True, 'import numpy as np\n'), ((3094, 3110), 'numpy.log', 'np.log', (['loc_odds'], {}), '(loc_odds)\n', (3100, 3110), True, 'import numpy as np\n'), ((3980, 3996), 'numpy.asarray', 'np.asarray', (['tab1'], {}), '(tab1)\n', (3990, 3996), True, 'import numpy as np\n'), ((5446, 5507), 'numpy.testing.assert_allclose', 'assert_allclose', (['rslt.statistic', "r_results.loc[k, 'lbl_stat']"], {}), "(rslt.statistic, r_results.loc[k, 'lbl_stat'])\n", (5461, 5507), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((5516, 5579), 'numpy.testing.assert_allclose', 'assert_allclose', (['rslt.null_mean', "r_results.loc[k, 'lbl_expval']"], {}), "(rslt.null_mean, r_results.loc[k, 'lbl_expval'])\n", (5531, 5579), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((5588, 5651), 'numpy.testing.assert_allclose', 'assert_allclose', (['(rslt.null_sd ** 2)', "r_results.loc[k, 'lbl_var']"], {}), "(rslt.null_sd ** 2, r_results.loc[k, 'lbl_var'])\n", (5603, 5651), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((5658, 5749), 'numpy.testing.assert_allclose', 'assert_allclose', (['(rslt.zscore ** 2)', "r_results.loc[k, 'lbl_chi2']"], {'rtol': '(1e-05)', 'atol': '(1e-05)'}), "(rslt.zscore ** 2, r_results.loc[k, 'lbl_chi2'], rtol=1e-05,\n atol=1e-05)\n", (5673, 5749), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((5750, 5838), 'numpy.testing.assert_allclose', 'assert_allclose', (['rslt.pvalue', "r_results.loc[k, 'lbl_pvalue']"], {'rtol': '(1e-05)', 'atol': '(1e-05)'}), "(rslt.pvalue, r_results.loc[k, 'lbl_pvalue'], rtol=1e-05,\n atol=1e-05)\n", (5765, 5838), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((5977, 6039), 'numpy.testing.assert_allclose', 'assert_allclose', (['rslt.statistic', "r_results.loc[k, 'lbl2_stat']"], {}), "(rslt.statistic, r_results.loc[k, 'lbl2_stat'])\n", (5992, 6039), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6048, 6112), 'numpy.testing.assert_allclose', 'assert_allclose', (['rslt.null_mean', "r_results.loc[k, 'lbl2_expval']"], {}), "(rslt.null_mean, r_results.loc[k, 'lbl2_expval'])\n", (6063, 6112), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6121, 6185), 'numpy.testing.assert_allclose', 'assert_allclose', (['(rslt.null_sd ** 2)', "r_results.loc[k, 'lbl2_var']"], {}), "(rslt.null_sd ** 2, r_results.loc[k, 'lbl2_var'])\n", (6136, 6185), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6192, 6256), 'numpy.testing.assert_allclose', 
'assert_allclose', (['(rslt.zscore ** 2)', "r_results.loc[k, 'lbl2_chi2']"], {}), "(rslt.zscore ** 2, r_results.loc[k, 'lbl2_chi2'])\n", (6207, 6256), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6263, 6352), 'numpy.testing.assert_allclose', 'assert_allclose', (['rslt.pvalue', "r_results.loc[k, 'lbl2_pvalue']"], {'rtol': '(1e-05)', 'atol': '(1e-05)'}), "(rslt.pvalue, r_results.loc[k, 'lbl2_pvalue'], rtol=1e-05,\n atol=1e-05)\n", (6278, 6352), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6766, 6812), 'statsmodels.api.stats.SquareTable', 'sm.stats.SquareTable', (['table'], {'shift_zeros': '(False)'}), '(table, shift_zeros=False)\n', (6786, 6812), True, 'import statsmodels.api as sm\n'), ((6847, 6908), 'numpy.testing.assert_allclose', 'assert_allclose', (['b.statistic', "r_results.loc[k, 'bowker_stat']"], {}), "(b.statistic, r_results.loc[k, 'bowker_stat'])\n", (6862, 6908), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6917, 6966), 'numpy.testing.assert_equal', 'assert_equal', (['b.df', "r_results.loc[k, 'bowker_df']"], {}), "(b.df, r_results.loc[k, 'bowker_df'])\n", (6929, 6966), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6975, 7035), 'numpy.testing.assert_allclose', 'assert_allclose', (['b.pvalue', "r_results.loc[k, 'bowker_pvalue']"], {}), "(b.pvalue, r_results.loc[k, 'bowker_pvalue'])\n", (6990, 7035), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((7682, 7778), 'pandas.DataFrame', 'pd.DataFrame', (['[[1, 1, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 0], [0, 0, 0, 0, 1, 1, 1, 1]]'], {}), '([[1, 1, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 0], [0, 0, 0,\n 0, 1, 1, 1, 1]])\n', (7694, 7778), True, 'import pandas as pd\n'), ((8034, 8048), 'numpy.asarray', 'np.asarray', (['df'], {}), '(df)\n', (8044, 8048), True, 'import numpy as np\n'), ((8119, 8185), 'pandas.DataFrame', 'pd.DataFrame', (['[[1, 1, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 0]]'], {}), '([[1, 1, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 0]])\n', (8131, 8185), True, 'import pandas as pd\n'), ((8389, 8403), 'numpy.asarray', 'np.asarray', (['df'], {}), '(df)\n', (8399, 8403), True, 'import numpy as np\n'), ((10237, 10272), 'pandas.crosstab', 'pd.crosstab', (['data[:, 0]', 'data[:, 1]'], {}), '(data[:, 0], data[:, 1])\n', (10248, 10272), True, 'import pandas as pd\n'), ((10761, 10790), 'statsmodels.stats.contingency_tables.StratifiedTable', 'ctab.StratifiedTable', (['tables1'], {}), '(tables1)\n', (10781, 10790), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((10812, 10858), 'statsmodels.stats.contingency_tables.StratifiedTable', 'ctab.StratifiedTable', (['tables'], {'shift_zeros': '(True)'}), '(tables, shift_zeros=True)\n', (10832, 10858), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((10943, 10978), 'statsmodels.stats.contingency_tables.StratifiedTable', 'ctab.StratifiedTable', (['tables_pandas'], {}), '(tables_pandas)\n', (10963, 10978), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((11026, 11123), 'numpy.testing.assert_allclose', 'assert_allclose', (['self.rslt.oddsratio_pooled', 'self.oddsratio_pooled'], {'rtol': '(0.0001)', 'atol': '(0.0001)'}), '(self.rslt.oddsratio_pooled, self.oddsratio_pooled, rtol=\n 0.0001, atol=0.0001)\n', (11041, 11123), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((11184, 11276), 'numpy.testing.assert_allclose', 'assert_allclose', (['self.rslt.logodds_pooled', 'self.logodds_pooled'], {'rtol': 
'(0.0001)', 'atol': '(0.0001)'}), '(self.rslt.logodds_pooled, self.logodds_pooled, rtol=0.0001,\n atol=0.0001)\n', (11199, 11276), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((11390, 11460), 'numpy.testing.assert_allclose', 'assert_allclose', (['rslt.statistic', 'self.mh_stat'], {'rtol': '(0.0001)', 'atol': '(1e-05)'}), '(rslt.statistic, self.mh_stat, rtol=0.0001, atol=1e-05)\n', (11405, 11460), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((11466, 11536), 'numpy.testing.assert_allclose', 'assert_allclose', (['rslt.pvalue', 'self.mh_pvalue'], {'rtol': '(0.0001)', 'atol': '(0.0001)'}), '(rslt.pvalue, self.mh_pvalue, rtol=0.0001, atol=0.0001)\n', (11481, 11536), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((11644, 11703), 'numpy.testing.assert_allclose', 'assert_allclose', (['lcb', 'self.or_lcb'], {'rtol': '(0.0001)', 'atol': '(0.0001)'}), '(lcb, self.or_lcb, rtol=0.0001, atol=0.0001)\n', (11659, 11703), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((11708, 11767), 'numpy.testing.assert_allclose', 'assert_allclose', (['ucb', 'self.or_ucb'], {'rtol': '(0.0001)', 'atol': '(0.0001)'}), '(ucb, self.or_ucb, rtol=0.0001, atol=0.0001)\n', (11723, 11767), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((12214, 12286), 'numpy.testing.assert_allclose', 'assert_allclose', (['rslt.statistic', 'self.or_homog'], {'rtol': '(0.0001)', 'atol': '(0.0001)'}), '(rslt.statistic, self.or_homog, rtol=0.0001, atol=0.0001)\n', (12229, 12286), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((12291, 12362), 'numpy.testing.assert_allclose', 'assert_allclose', (['rslt.pvalue', 'self.or_homog_p'], {'rtol': '(0.0001)', 'atol': '(0.0001)'}), '(rslt.pvalue, self.or_homog_p, rtol=0.0001, atol=0.0001)\n', (12306, 12362), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((12422, 12498), 'numpy.testing.assert_allclose', 'assert_allclose', (['rslt.statistic', 'self.or_homog_adj'], {'rtol': '(0.0001)', 'atol': '(0.0001)'}), '(rslt.statistic, self.or_homog_adj, rtol=0.0001, atol=0.0001)\n', (12437, 12498), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((12503, 12578), 'numpy.testing.assert_allclose', 'assert_allclose', (['rslt.pvalue', 'self.or_homog_adj_p'], {'rtol': '(0.0001)', 'atol': '(0.0001)'}), '(rslt.pvalue, self.or_homog_adj_p, rtol=0.0001, atol=0.0001)\n', (12518, 12578), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((12865, 12884), 'numpy.random.seed', 'np.random.seed', (['(241)'], {}), '(241)\n', (12879, 12884), True, 'import numpy as np\n'), ((12979, 13007), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', '(100)'], {}), '(0, 2, 100)\n', (12996, 13007), True, 'import numpy as np\n'), ((13027, 13055), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', '(100)'], {}), '(0, 2, 100)\n', (13044, 13055), True, 'import numpy as np\n'), ((13298, 13326), 'statsmodels.stats.contingency_tables.StratifiedTable', 'ctab.StratifiedTable', (['tables'], {}), '(tables)\n', (13318, 13326), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((13343, 13398), 'statsmodels.stats.contingency_tables.StratifiedTable.from_data', 'ctab.StratifiedTable.from_data', (['"""v1"""', '"""v2"""', '"""strat"""', 'df'], {}), "('v1', 'v2', 'strat', df)\n", (13373, 13398), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((13854, 13880), 'numpy.array', 'np.array', (['[[0, 0], [6, 5]]'], {}), '([[0, 0], 
[6, 5]])\n', (13862, 13880), True, 'import numpy as np\n'), ((13901, 13927), 'numpy.array', 'np.array', (['[[3, 0], [3, 6]]'], {}), '([[3, 0], [3, 6]])\n', (13909, 13927), True, 'import numpy as np\n'), ((13948, 13974), 'numpy.array', 'np.array', (['[[6, 2], [0, 4]]'], {}), '([[6, 2], [0, 4]])\n', (13956, 13974), True, 'import numpy as np\n'), ((13995, 14021), 'numpy.array', 'np.array', (['[[5, 6], [1, 0]]'], {}), '([[5, 6], [1, 0]])\n', (14003, 14021), True, 'import numpy as np\n'), ((14042, 14068), 'numpy.array', 'np.array', (['[[2, 5], [0, 0]]'], {}), '([[2, 5], [0, 0]])\n', (14050, 14068), True, 'import numpy as np\n'), ((14164, 14173), 'numpy.log', 'np.log', (['(7)'], {}), '(7)\n', (14170, 14173), True, 'import numpy as np\n'), ((14797, 14827), 'numpy.array', 'np.array', (['[[20, 14], [10, 24]]'], {}), '([[20, 14], [10, 24]])\n', (14805, 14827), True, 'import numpy as np\n'), ((14848, 14877), 'numpy.array', 'np.array', (['[[15, 12], [3, 15]]'], {}), '([[15, 12], [3, 15]])\n', (14856, 14877), True, 'import numpy as np\n'), ((14898, 14924), 'numpy.array', 'np.array', (['[[3, 2], [3, 2]]'], {}), '([[3, 2], [3, 2]])\n', (14906, 14924), True, 'import numpy as np\n'), ((14945, 14972), 'numpy.array', 'np.array', (['[[12, 3], [7, 5]]'], {}), '([[12, 3], [7, 5]])\n', (14953, 14972), True, 'import numpy as np\n'), ((14993, 15019), 'numpy.array', 'np.array', (['[[1, 0], [3, 2]]'], {}), '([[1, 0], [3, 2]])\n', (15001, 15019), True, 'import numpy as np\n'), ((15163, 15177), 'numpy.log', 'np.log', (['(3.5912)'], {}), '(3.5912)\n', (15169, 15177), True, 'import numpy as np\n'), ((16104, 16136), 'numpy.array', 'np.array', (['[[313, 512], [19, 89]]'], {}), '([[313, 512], [19, 89]])\n', (16112, 16136), True, 'import numpy as np\n'), ((16157, 16188), 'numpy.array', 'np.array', (['[[207, 353], [8, 17]]'], {}), '([[207, 353], [8, 17]])\n', (16165, 16188), True, 'import numpy as np\n'), ((16209, 16243), 'numpy.array', 'np.array', (['[[205, 120], [391, 202]]'], {}), '([[205, 120], [391, 202]])\n', (16217, 16243), True, 'import numpy as np\n'), ((16264, 16298), 'numpy.array', 'np.array', (['[[278, 139], [244, 131]]'], {}), '([[278, 139], [244, 131]])\n', (16272, 16298), True, 'import numpy as np\n'), ((16319, 16351), 'numpy.array', 'np.array', (['[[138, 53], [299, 94]]'], {}), '([[138, 53], [299, 94]])\n', (16327, 16351), True, 'import numpy as np\n'), ((16372, 16404), 'numpy.array', 'np.array', (['[[351, 22], [317, 24]]'], {}), '([[351, 22], [317, 24]])\n', (16380, 16404), True, 'import numpy as np\n'), ((16507, 16523), 'numpy.log', 'np.log', (['(1.101879)'], {}), '(1.101879)\n', (16513, 16523), True, 'import numpy as np\n'), ((16985, 17009), 'statsmodels.stats.contingency_tables.Table2x2', 'ctab.Table2x2', (['cls.table'], {}), '(cls.table)\n', (16998, 17009), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((17037, 17070), 'statsmodels.stats.contingency_tables.Table2x2.from_data', 'ctab.Table2x2.from_data', (['cls.data'], {}), '(cls.data)\n', (17060, 17070), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((17110, 17165), 'numpy.testing.assert_allclose', 'assert_allclose', (['self.tbl_obj.oddsratio', 'self.oddsratio'], {}), '(self.tbl_obj.oddsratio, self.oddsratio)\n', (17125, 17165), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((17210, 17273), 'numpy.testing.assert_allclose', 'assert_allclose', (['self.tbl_obj.log_oddsratio', 'self.log_oddsratio'], {}), '(self.tbl_obj.log_oddsratio, self.log_oddsratio)\n', (17225, 17273), False, 'from numpy.testing 
import assert_allclose, assert_equal\n'), ((17321, 17390), 'numpy.testing.assert_allclose', 'assert_allclose', (['self.tbl_obj.log_oddsratio_se', 'self.log_oddsratio_se'], {}), '(self.tbl_obj.log_oddsratio_se, self.log_oddsratio_se)\n', (17336, 17390), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((17660, 17687), 'numpy.testing.assert_allclose', 'assert_allclose', (['lcb1', 'lcb2'], {}), '(lcb1, lcb2)\n', (17675, 17687), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((17696, 17723), 'numpy.testing.assert_allclose', 'assert_allclose', (['ucb1', 'ucb2'], {}), '(ucb1, ucb2)\n', (17711, 17723), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((17764, 17819), 'numpy.testing.assert_allclose', 'assert_allclose', (['self.tbl_obj.riskratio', 'self.riskratio'], {}), '(self.tbl_obj.riskratio, self.riskratio)\n', (17779, 17819), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((17864, 17927), 'numpy.testing.assert_allclose', 'assert_allclose', (['self.tbl_obj.log_riskratio', 'self.log_riskratio'], {}), '(self.tbl_obj.log_riskratio, self.log_riskratio)\n', (17879, 17927), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((17975, 18044), 'numpy.testing.assert_allclose', 'assert_allclose', (['self.tbl_obj.log_riskratio_se', 'self.log_riskratio_se'], {}), '(self.tbl_obj.log_riskratio_se, self.log_riskratio_se)\n', (17990, 18044), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((18314, 18341), 'numpy.testing.assert_allclose', 'assert_allclose', (['lcb1', 'lcb2'], {}), '(lcb1, lcb2)\n', (18329, 18341), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((18350, 18377), 'numpy.testing.assert_allclose', 'assert_allclose', (['ucb1', 'ucb2'], {}), '(ucb1, ucb2)\n', (18365, 18377), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((18540, 18567), 'numpy.testing.assert_allclose', 'assert_allclose', (['lcb1', 'lcb2'], {}), '(lcb1, lcb2)\n', (18555, 18567), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((18576, 18603), 'numpy.testing.assert_allclose', 'assert_allclose', (['ucb1', 'ucb2'], {}), '(ucb1, ucb2)\n', (18591, 18603), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((18971, 18987), 'numpy.zeros', 'np.zeros', (['(8, 2)'], {}), '((8, 2))\n', (18979, 18987), True, 'import numpy as np\n'), ((19099, 19115), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (19109, 19115), True, 'import numpy as np\n'), ((19136, 19164), 'numpy.asarray', 'np.asarray', (['[[2, 2], [2, 2]]'], {}), '([[2, 2], [2, 2]])\n', (19146, 19164), True, 'import numpy as np\n'), ((19255, 19265), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (19262, 19265), True, 'import numpy as np\n'), ((3302, 3334), 'statsmodels.stats.contingency_tables.Table', 'ctab.Table', (['t'], {'shift_zeros': '(False)'}), '(t, shift_zeros=False)\n', (3312, 3334), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((3362, 3393), 'statsmodels.stats.contingency_tables.Table', 'ctab.Table', (['t'], {'shift_zeros': '(True)'}), '(t, shift_zeros=True)\n', (3372, 3393), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((3657, 3670), 'statsmodels.stats.contingency_tables.Table', 'ctab.Table', (['t'], {}), '(t)\n', (3667, 3670), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((3698, 3729), 'statsmodels.stats.contingency_tables.Table', 'ctab.Table', (['t'], {'shift_zeros': '(True)'}), '(t, shift_zeros=True)\n', 
(3708, 3729), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((5229, 5254), 'numpy.arange', 'np.arange', (['table.shape[0]'], {}), '(table.shape[0])\n', (5238, 5254), True, 'import numpy as np\n'), ((5280, 5305), 'numpy.arange', 'np.arange', (['table.shape[1]'], {}), '(table.shape[1])\n', (5289, 5305), True, 'import numpy as np\n'), ((6552, 6569), 'statsmodels.stats.contingency_tables.Table', 'ctab.Table', (['table'], {}), '(table)\n', (6562, 6569), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((10724, 10741), 'numpy.dstack', 'np.dstack', (['tables'], {}), '(tables)\n', (10733, 10741), True, 'import numpy as np\n'), ((10884, 10899), 'pandas.DataFrame', 'pd.DataFrame', (['x'], {}), '(x)\n', (10896, 10899), True, 'import pandas as pd\n'), ((11892, 11911), 'numpy.log', 'np.log', (['self.or_lcb'], {}), '(self.or_lcb)\n', (11898, 11911), True, 'import numpy as np\n'), ((11988, 12007), 'numpy.log', 'np.log', (['self.or_ucb'], {}), '(self.or_ucb)\n', (11994, 12007), True, 'import numpy as np\n'), ((12618, 12643), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (12641, 12643), False, 'import warnings\n'), ((12657, 12704), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'RuntimeWarning'], {}), "('ignore', RuntimeWarning)\n", (12678, 12704), False, 'import warnings\n'), ((13086, 13099), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (13095, 13099), True, 'import numpy as np\n'), ((13101, 13112), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (13108, 13112), True, 'import numpy as np\n'), ((13180, 13211), 'numpy.arange', 'np.arange', (['(10 * k)', '(10 * (k + 1))'], {}), '(10 * k, 10 * (k + 1))\n', (13189, 13211), True, 'import numpy as np\n'), ((19468, 19478), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (19475, 19478), True, 'import numpy as np\n'), ((5352, 5388), 'statsmodels.stats.contingency_tables.Table', 'ctab.Table', (['table'], {'shift_zeros': '(False)'}), '(table, shift_zeros=False)\n', (5362, 5388), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((5880, 5916), 'statsmodels.stats.contingency_tables.Table', 'ctab.Table', (['table'], {'shift_zeros': '(False)'}), '(table, shift_zeros=False)\n', (5890, 5916), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((13232, 13279), 'pandas.crosstab', 'pd.crosstab', (["df.loc[ii, 'v1']", "df.loc[ii, 'v2']"], {}), "(df.loc[ii, 'v1'], df.loc[ii, 'v2'])\n", (13243, 13279), True, 'import pandas as pd\n')]
|
import numpy as np
#add 1 to each array element
np.add(arr, 1)
#subtract 2 from each array element
np.subtract(arr, 2)
#multiply each array element by 3
np.multiply(arr, 3)
#divide each array element by 4 (np.divide performs true division and returns a float array)
np.divide(arr, 4)
#raise each array element to 5th power
np.power(arr, 5)
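# Editor-added sketch (hedged, not part of the original snippet): `arr` is never
# defined above, so the ufunc calls assume an existing NumPy array. A minimal,
# hypothetical setup and the element-wise results would look like:
#   arr = np.array([4, 8, 12])
#   np.add(arr, 1)       # -> array([ 5,  9, 13])
#   np.subtract(arr, 2)  # -> array([ 2,  6, 10])
#   np.multiply(arr, 3)  # -> array([12, 24, 36])
#   np.divide(arr, 4)    # -> array([1., 2., 3.])   (true division, float output)
#   np.power(arr, 5)     # -> array([  1024,  32768, 248832])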
|
[
"numpy.divide",
"numpy.multiply",
"numpy.subtract",
"numpy.power",
"numpy.add"
] |
[((49, 63), 'numpy.add', 'np.add', (['arr', '(1)'], {}), '(arr, 1)\n', (55, 63), True, 'import numpy as np\n'), ((100, 119), 'numpy.subtract', 'np.subtract', (['arr', '(2)'], {}), '(arr, 2)\n', (111, 119), True, 'import numpy as np\n'), ((154, 173), 'numpy.multiply', 'np.multiply', (['arr', '(3)'], {}), '(arr, 3)\n', (165, 173), True, 'import numpy as np\n'), ((244, 261), 'numpy.divide', 'np.divide', (['arr', '(4)'], {}), '(arr, 4)\n', (253, 261), True, 'import numpy as np\n'), ((301, 317), 'numpy.power', 'np.power', (['arr', '(5)'], {}), '(arr, 5)\n', (309, 317), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
import itertools
from sklearn.metrics import roc_auc_score, roc_curve, auc
def augment_agg(X):
mean = np.array(X.mean(axis = 1)).reshape(X.shape[0],1)
std = np.array(X.std(axis = 1)).reshape(X.shape[0],1)
rang = np.array((X.max(axis = 1) - X.min(axis = 1))).reshape(X.shape[0],1)
X = np.append(X,mean,1)
X = np.append(X,std,1)
X = np.append(X,rang,1)
return X
def longest_one_streak(lst):
return max(sum(1 for x in l if x == 1) for n, l in itertools.groupby(lst))
def optimal_cutoff(labels, preds):
####################################
# The optimal cut off would be where tpr is high and fpr is low
# tpr - (1-fpr) is zero or near to zero is the optimal cut off point
####################################
fpr, tpr, cutoff = roc_curve(labels, preds)
roc_auc = auc(fpr, tpr)
#print("Area under the ROC curve : %f" % roc_auc)
i = np.arange(len(tpr)) # index for df
roc = pd.DataFrame({'fpr' : pd.Series(fpr, index=i),'tpr' : pd.Series(tpr, index = i), '1-fpr' : pd.Series(1-fpr, index = i), 'tf' : pd.Series(tpr - (1-fpr), index = i), 'thresholds' : pd.Series(cutoff, index = i)})
    return float(roc.iloc[(roc.tf-0).abs().argsort()[:1]]["thresholds"])  # .iloc replaces the removed DataFrame.ix indexer
def get_dynamics(VALUES, TEST_SEQ_LENGTH):
first_half = pd.Series([np.mean(i[:int(TEST_SEQ_LENGTH/2)]) for i in VALUES])
second_half = pd.Series([np.mean(i[int(TEST_SEQ_LENGTH/2):TEST_SEQ_LENGTH]) for i in VALUES])
dynamics = (second_half/first_half).fillna(0)
dynamics[np.isinf(dynamics)] = 0
return dynamics
def get_max_streak_length(arr, x):
    # initialize count
count = 0
# initialize max
result = 0
    for i in arr:
        # extend the current streak when the element equals x
        if (i == x):
            count += 1
        # any other value breaks the streak, so reset the counter
        else:
            count = 0
        # keep the longest streak seen so far
        result = max(result, count)
return result
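# Editor-added usage sketch (hedged): the toy inputs below are made up purely to
# exercise the helpers above; they are not from the original author.
if __name__ == "__main__":
    X = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    print(augment_agg(X).shape)                    # (2, 6): 3 raw columns + mean, std, range
    print(get_max_streak_length([1, 1, 0, 1], 1))  # 2
    print(longest_one_streak([1, 1, 0, 1]))        # 2
    # optimal_cutoff expects binary labels and predicted scores, e.g.
    # optimal_cutoff([0, 0, 1, 1], [0.1, 0.4, 0.35, 0.8])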
|
[
"sklearn.metrics.roc_curve",
"numpy.isinf",
"numpy.append",
"sklearn.metrics.auc",
"pandas.Series",
"itertools.groupby"
] |
[((341, 362), 'numpy.append', 'np.append', (['X', 'mean', '(1)'], {}), '(X, mean, 1)\n', (350, 362), True, 'import numpy as np\n'), ((369, 389), 'numpy.append', 'np.append', (['X', 'std', '(1)'], {}), '(X, std, 1)\n', (378, 389), True, 'import numpy as np\n'), ((396, 417), 'numpy.append', 'np.append', (['X', 'rang', '(1)'], {}), '(X, rang, 1)\n', (405, 417), True, 'import numpy as np\n'), ((825, 849), 'sklearn.metrics.roc_curve', 'roc_curve', (['labels', 'preds'], {}), '(labels, preds)\n', (834, 849), False, 'from sklearn.metrics import roc_auc_score, roc_curve, auc\n'), ((864, 877), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (867, 877), False, 'from sklearn.metrics import roc_auc_score, roc_curve, auc\n'), ((1633, 1651), 'numpy.isinf', 'np.isinf', (['dynamics'], {}), '(dynamics)\n', (1641, 1651), True, 'import numpy as np\n'), ((1007, 1030), 'pandas.Series', 'pd.Series', (['fpr'], {'index': 'i'}), '(fpr, index=i)\n', (1016, 1030), True, 'import pandas as pd\n'), ((1039, 1062), 'pandas.Series', 'pd.Series', (['tpr'], {'index': 'i'}), '(tpr, index=i)\n', (1048, 1062), True, 'import pandas as pd\n'), ((1076, 1103), 'pandas.Series', 'pd.Series', (['(1 - fpr)'], {'index': 'i'}), '(1 - fpr, index=i)\n', (1085, 1103), True, 'import pandas as pd\n'), ((1112, 1147), 'pandas.Series', 'pd.Series', (['(tpr - (1 - fpr))'], {'index': 'i'}), '(tpr - (1 - fpr), index=i)\n', (1121, 1147), True, 'import pandas as pd\n'), ((1164, 1190), 'pandas.Series', 'pd.Series', (['cutoff'], {'index': 'i'}), '(cutoff, index=i)\n', (1173, 1190), True, 'import pandas as pd\n'), ((519, 541), 'itertools.groupby', 'itertools.groupby', (['lst'], {}), '(lst)\n', (536, 541), False, 'import itertools\n')]
|
### Language models and functions
## <NAME>
## Created: March 2020
import pandas as pd
import numpy as np
import math
from scipy.special import comb
## Zipf pmf and cdf
def zipf(k,a):
power_law = np.divide(1,np.power(k,a))
H = np.sum(np.divide(1,np.power(k,a)))
pmf = np.divide(power_law,H)
cdf = [np.sum(pmf[0:i]) for i in range(1,pmf.shape[0]+1)]
return pmf, cdf
## Zipf expected rank/value - theory
def zipf_E(k,a):
return np.sum(np.divide(1,np.power(k,a-1)))/np.sum(np.divide(1,np.power(k,a)))
## Zipf log-log transform
def zipf_loglog(x,a,b):
return (-1*a*x)+b
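# Editor-added sketch (hedged, illustrative values only): for ranks k = 1..5 and
# exponent a = 1.0, zipf() returns a normalised pmf and its running cdf, e.g.
#   k = np.arange(1, 6)
#   pmf, cdf = zipf(k, a=1.0)   # pmf sums to 1 and cdf[-1] == 1
#   zipf_E(k, a=1.0)            # expected rank under the truncated Zipf law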
## Vocabulary construction where c is the number of unigrams
def construct_vocabulary(c):
return [int(i) for i in range(1,c+1,1)]
## Wright-Fisher model - Corpus size function
def corpus_size(t,alpha,beta):
return int(np.ceil(beta*np.exp(alpha*t)))
## Wright-Fisher model - Selection value function
def selection_function(t,A,B):
return A*(np.sin(B*t) + np.cos(B*t))
# Wright-Fisher model (with selection option)
def wright_fisher(c,a,alpha,beta,A,B,T,PV,NV,tau,set_seed=None,steady=False):
# parameters - descriptions
# 1. c - is the number of vocabulary words
# 2. a - is the Zipf parameter
# 3. alpha - is the rate of change for the corpus size
# 4. beta - is the initial corpus size
# 5. A - is the selection value
# 6. B - is the variable selection value
# 7. T - is the total time elapsed (number of years)
# 8. PV - word indices under positive selection
# 9. NV - word indices under negative selection
# 10. tau - the time where selection is induced
# seeding
np.random.seed(set_seed)
    # vocabulary and ranks
V = construct_vocabulary(c=c) # ngrams labels
ranks = V # assign ranks to the words
# initial conditions
t = 0
    pmf, cdf = zipf(k=ranks,a=a) # initial probability distribution of words
initial_cs = corpus_size(t,alpha,beta) # initial corpus size (exponential function with parameters beta and alpha)
# initial fitness values
initial_fv = np.zeros(len(V)) + 1
if tau == 0:
initial_fv[[i-1 for i in PV]] = 1 + selection_function(tau,A,B)
initial_fv[[i-1 for i in NV]] = 1 - selection_function(tau,A,B)
fv_probs = np.multiply(pmf,initial_fv)
fv_probs_normal = np.divide(fv_probs,np.sum(fv_probs)) # update probabilities
if steady == True:
fv_probs_normal = pmf
# initial word counts
word_samples = V # sample words at least once
word_samples = np.append(word_samples,np.random.choice(V,initial_cs-c,replace=True,p=fv_probs_normal))
wp_u, wp_c = np.unique(word_samples,return_counts=True)
del word_samples
initial_count = np.zeros(len(V))
initial_count[[i-1 for i in wp_u]] = wp_c
initial_probs = np.zeros(len(V))
initial_probs[[i-1 for i in wp_u]] = np.divide(wp_c,np.sum(wp_c))
# time loop
fv_track = [initial_fv]
cs_track = [initial_cs]
count_track = [initial_count]
probs_track = [initial_probs]
for i in range(1,T):
# selection at t >= tau
fv = np.zeros(len(V)) + 1
if i >= tau:
fv[[i-1 for i in PV]] = 1 + selection_function(i,A,B)
fv[[i-1 for i in NV]] = 1 - selection_function(i,A,B)
fv_track.append(fv)
else:
fv_track.append(fv)
fv_probs = np.multiply(probs_track[i-1],fv_track[i-1])
fv_probs_normal = np.divide(fv_probs,np.sum(fv_probs)) # update probabilities
if steady == True:
fv_probs_normal = pmf
# Wright-Fisher
cs_track.append(corpus_size(i,alpha,beta)) # update corpus size
word_samples = V # sample words at least once
word_samples = np.append(word_samples,np.random.choice(V,cs_track[i]-c,replace=True,p=fv_probs_normal))
wp_u, wp_c = np.unique(word_samples,return_counts=True)
del word_samples
next_count = np.zeros(len(V))
next_count[[i-1 for i in wp_u]] = wp_c # update counts
count_track.append(next_count)
next_probs = np.zeros(len(V))
next_probs[[i-1 for i in wp_u]] = wp_c/np.sum(wp_c) # update probabilities
probs_track.append(next_probs)
# compute pscores, zscores, and convert outputs to dataframes
R = pd.DataFrame(np.matrix(count_track).T,index=V,columns=range(0,T))
P = pd.DataFrame(np.divide(R,np.sum(R,axis=0)),index=V,columns=range(0,T))
a = P.T - np.mean(P.T,axis=0)
b = np.std(P.T,axis=0)
Z = np.divide(a,b).T
S = pd.DataFrame(np.matrix(fv_track).T,index=V,columns=range(0,T))
N = pd.DataFrame({'N(t)':cs_track},index=R.columns)
del fv_track, cs_track, count_track, probs_track
return R, P, Z, S, N
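# Editor-added usage sketch (hedged): the parameter values below are arbitrary,
# chosen only to show the call signature and the returned frames.
#   R, P, Z, S, N = wright_fisher(c=100, a=1.1, alpha=0.01, beta=1000,
#                                 A=0.1, B=0.05, T=20, PV=[1, 2], NV=[3],
#                                 tau=5, set_seed=42)
#   # R: counts, P: proportions, Z: z-scores, S: fitness values, N: corpus size,
#   # each indexed by word rank (rows) and year (columns).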
# Wright-Fisher model (with selection option and zero words)
def wright_fisher_0(c,a,alpha,beta,A,B,T,PV,NV,tau,set_seed=None,steady=False):
# parameters - descriptions
# 1. c - is the number of vocabulary words
# 2. a - is the Zipf parameter
# 3. alpha - is the rate of change for the corpus size
# 4. beta - is the initial corpus size
# 5. A - is the selection value
# 6. B - is the variable selection value
# 7. T - is the total time elapsed (number of years)
# 8. PV - word indices under positive selection
# 9. NV - word indices under negative selection
# 10. tau - the time where selection is induced
# seeding
np.random.seed(set_seed)
    # vocabulary and ranks
V = construct_vocabulary(c=c) # ngrams labels
ranks = V # assign ranks to the words
# initial conditions
t = 0
    pmf, cdf = zipf(k=ranks,a=a) # initial probability distribution of words
initial_cs = corpus_size(t,alpha,beta) # initial corpus size (exponential function with parameters beta and alpha)
# initial fitness values
initial_fv = np.zeros(len(V)) + 1
if tau == 0:
initial_fv[[i-1 for i in PV]] = 1 + selection_function(tau,A,B)
initial_fv[[i-1 for i in NV]] = 1 - selection_function(tau,A,B)
fv_probs = np.multiply(pmf,initial_fv)
fv_probs_normal = np.divide(fv_probs,np.sum(fv_probs)) # update probabilities
if steady == True:
fv_probs_normal = pmf
# initial word counts
word_samples = np.random.choice(V,initial_cs,replace=True,p=fv_probs_normal)
wp_u, wp_c = np.unique(word_samples,return_counts=True)
del word_samples
initial_count = np.zeros(len(V))
initial_count[[i-1 for i in wp_u]] = wp_c
initial_probs = np.zeros(len(V))
initial_probs[[i-1 for i in wp_u]] = np.divide(wp_c,np.sum(wp_c))
# time loop
fv_track = [initial_fv]
cs_track = [initial_cs]
count_track = [initial_count]
probs_track = [initial_probs]
for i in range(1,T):
# selection at t >= tau
fv = np.zeros(len(V)) + 1
if i >= tau:
fv[[i-1 for i in PV]] = 1 + selection_function(i,A,B)
fv[[i-1 for i in NV]] = 1 - selection_function(i,A,B)
fv_track.append(fv)
else:
fv_track.append(fv)
fv_probs = np.multiply(probs_track[i-1],fv_track[i-1])
fv_probs_normal = np.divide(fv_probs,np.sum(fv_probs)) # update probabilities
if steady == True:
fv_probs_normal = pmf
# Wright-Fisher
cs_track.append(corpus_size(i,alpha,beta)) # update corpus size
word_samples = np.random.choice(V,cs_track[i],replace=True,p=fv_probs_normal)
wp_u, wp_c = np.unique(word_samples,return_counts=True)
del word_samples
next_count = np.zeros(len(V))
next_count[[i-1 for i in wp_u]] = wp_c # update counts
count_track.append(next_count)
next_probs = np.zeros(len(V))
next_probs[[i-1 for i in wp_u]] = wp_c/np.sum(wp_c) # update probabilities
probs_track.append(next_probs)
# compute pscores, zscores, and convert outputs to dataframes
R = pd.DataFrame(np.matrix(count_track).T,index=V,columns=range(0,T))
P = pd.DataFrame(np.divide(R,np.sum(R,axis=0)),index=V,columns=range(0,T))
a = P.T - np.mean(P.T,axis=0)
b = np.std(P.T,axis=0)
Z = np.divide(a,b).T
S = pd.DataFrame(np.matrix(fv_track).T,index=V,columns=range(0,T))
N = pd.DataFrame({'N(t)':cs_track},index=R.columns)
del fv_track, cs_track, count_track, probs_track
return R, P, Z, S, N
# binomial (pmf)
def binomial(n,x,p,type='pmf'):
if x >= 0 and x <= n:
if type == 'pmf':
f = comb(n,x)*np.power(p,x)*np.power(1-p,n-x)
elif type == 'cdf':
f = 0
for i in range(0,x+1):
f += comb(n,i)*np.power(p,i)*np.power(1-p,n-i)
else:
f = 0
return f
# binomial (pmf), Wright-Fisher variant
def binomial_wf(n,x,p,c):
if x <= n-c:
p = binomial(n-c,x,p)
elif x > n-c:
p = 0
return p
# binomial expected value
def E(n,p):
return n*p
# binomial variance
def Var(n,p):
return n*p*(1-p)
# binomial skewness
def Skew(n,p):
return np.divide((1-p)-p,np.sqrt(n*p*(1-p)))
# binomial covariance
def Cov(n,p_i,p_j):
return -1*n*p_i*p_j
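# Editor-added sanity check (hedged): quick numerical checks of the binomial
# helpers with arbitrary n and p; not part of the original module.
if __name__ == "__main__":
    total = sum(binomial(10, x, 0.3) for x in range(11))
    print(round(total, 6))           # ~1.0, the pmf sums to one
    print(E(10, 0.3), Var(10, 0.3))  # approximately 3.0 and 2.1
    print(Cov(10, 0.2, 0.3))         # approximately -0.6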
|
[
"pandas.DataFrame",
"numpy.divide",
"numpy.matrix",
"numpy.multiply",
"numpy.random.seed",
"numpy.sum",
"numpy.std",
"numpy.power",
"scipy.special.comb",
"numpy.mean",
"numpy.sin",
"numpy.exp",
"numpy.cos",
"numpy.random.choice",
"numpy.unique",
"numpy.sqrt"
] |
[((273, 296), 'numpy.divide', 'np.divide', (['power_law', 'H'], {}), '(power_law, H)\n', (282, 296), True, 'import numpy as np\n'), ((1608, 1632), 'numpy.random.seed', 'np.random.seed', (['set_seed'], {}), '(set_seed)\n', (1622, 1632), True, 'import numpy as np\n'), ((2192, 2220), 'numpy.multiply', 'np.multiply', (['pmf', 'initial_fv'], {}), '(pmf, initial_fv)\n', (2203, 2220), True, 'import numpy as np\n'), ((2536, 2579), 'numpy.unique', 'np.unique', (['word_samples'], {'return_counts': '(True)'}), '(word_samples, return_counts=True)\n', (2545, 2579), True, 'import numpy as np\n'), ((4182, 4201), 'numpy.std', 'np.std', (['P.T'], {'axis': '(0)'}), '(P.T, axis=0)\n', (4188, 4201), True, 'import numpy as np\n'), ((4296, 4345), 'pandas.DataFrame', 'pd.DataFrame', (["{'N(t)': cs_track}"], {'index': 'R.columns'}), "({'N(t)': cs_track}, index=R.columns)\n", (4308, 4345), True, 'import pandas as pd\n'), ((5090, 5114), 'numpy.random.seed', 'np.random.seed', (['set_seed'], {}), '(set_seed)\n', (5104, 5114), True, 'import numpy as np\n'), ((5674, 5702), 'numpy.multiply', 'np.multiply', (['pmf', 'initial_fv'], {}), '(pmf, initial_fv)\n', (5685, 5702), True, 'import numpy as np\n'), ((5869, 5933), 'numpy.random.choice', 'np.random.choice', (['V', 'initial_cs'], {'replace': '(True)', 'p': 'fv_probs_normal'}), '(V, initial_cs, replace=True, p=fv_probs_normal)\n', (5885, 5933), True, 'import numpy as np\n'), ((5945, 5988), 'numpy.unique', 'np.unique', (['word_samples'], {'return_counts': '(True)'}), '(word_samples, return_counts=True)\n', (5954, 5988), True, 'import numpy as np\n'), ((7517, 7536), 'numpy.std', 'np.std', (['P.T'], {'axis': '(0)'}), '(P.T, axis=0)\n', (7523, 7536), True, 'import numpy as np\n'), ((7631, 7680), 'pandas.DataFrame', 'pd.DataFrame', (["{'N(t)': cs_track}"], {'index': 'R.columns'}), "({'N(t)': cs_track}, index=R.columns)\n", (7643, 7680), True, 'import pandas as pd\n'), ((211, 225), 'numpy.power', 'np.power', (['k', 'a'], {}), '(k, a)\n', (219, 225), True, 'import numpy as np\n'), ((304, 320), 'numpy.sum', 'np.sum', (['pmf[0:i]'], {}), '(pmf[0:i])\n', (310, 320), True, 'import numpy as np\n'), ((2258, 2274), 'numpy.sum', 'np.sum', (['fv_probs'], {}), '(fv_probs)\n', (2264, 2274), True, 'import numpy as np\n'), ((2457, 2525), 'numpy.random.choice', 'np.random.choice', (['V', '(initial_cs - c)'], {'replace': '(True)', 'p': 'fv_probs_normal'}), '(V, initial_cs - c, replace=True, p=fv_probs_normal)\n', (2473, 2525), True, 'import numpy as np\n'), ((2761, 2773), 'numpy.sum', 'np.sum', (['wp_c'], {}), '(wp_c)\n', (2767, 2773), True, 'import numpy as np\n'), ((3177, 3225), 'numpy.multiply', 'np.multiply', (['probs_track[i - 1]', 'fv_track[i - 1]'], {}), '(probs_track[i - 1], fv_track[i - 1])\n', (3188, 3225), True, 'import numpy as np\n'), ((3607, 3650), 'numpy.unique', 'np.unique', (['word_samples'], {'return_counts': '(True)'}), '(word_samples, return_counts=True)\n', (3616, 3650), True, 'import numpy as np\n'), ((4157, 4177), 'numpy.mean', 'np.mean', (['P.T'], {'axis': '(0)'}), '(P.T, axis=0)\n', (4164, 4177), True, 'import numpy as np\n'), ((4206, 4221), 'numpy.divide', 'np.divide', (['a', 'b'], {}), '(a, b)\n', (4215, 4221), True, 'import numpy as np\n'), ((5740, 5756), 'numpy.sum', 'np.sum', (['fv_probs'], {}), '(fv_probs)\n', (5746, 5756), True, 'import numpy as np\n'), ((6170, 6182), 'numpy.sum', 'np.sum', (['wp_c'], {}), '(wp_c)\n', (6176, 6182), True, 'import numpy as np\n'), ((6586, 6634), 'numpy.multiply', 'np.multiply', (['probs_track[i - 1]', 'fv_track[i - 1]'], {}), 
'(probs_track[i - 1], fv_track[i - 1])\n', (6597, 6634), True, 'import numpy as np\n'), ((6864, 6929), 'numpy.random.choice', 'np.random.choice', (['V', 'cs_track[i]'], {'replace': '(True)', 'p': 'fv_probs_normal'}), '(V, cs_track[i], replace=True, p=fv_probs_normal)\n', (6880, 6929), True, 'import numpy as np\n'), ((6942, 6985), 'numpy.unique', 'np.unique', (['word_samples'], {'return_counts': '(True)'}), '(word_samples, return_counts=True)\n', (6951, 6985), True, 'import numpy as np\n'), ((7492, 7512), 'numpy.mean', 'np.mean', (['P.T'], {'axis': '(0)'}), '(P.T, axis=0)\n', (7499, 7512), True, 'import numpy as np\n'), ((7541, 7556), 'numpy.divide', 'np.divide', (['a', 'b'], {}), '(a, b)\n', (7550, 7556), True, 'import numpy as np\n'), ((8313, 8337), 'numpy.sqrt', 'np.sqrt', (['(n * p * (1 - p))'], {}), '(n * p * (1 - p))\n', (8320, 8337), True, 'import numpy as np\n'), ((250, 264), 'numpy.power', 'np.power', (['k', 'a'], {}), '(k, a)\n', (258, 264), True, 'import numpy as np\n'), ((926, 939), 'numpy.sin', 'np.sin', (['(B * t)'], {}), '(B * t)\n', (932, 939), True, 'import numpy as np\n'), ((940, 953), 'numpy.cos', 'np.cos', (['(B * t)'], {}), '(B * t)\n', (946, 953), True, 'import numpy as np\n'), ((3260, 3276), 'numpy.sum', 'np.sum', (['fv_probs'], {}), '(fv_probs)\n', (3266, 3276), True, 'import numpy as np\n'), ((3526, 3595), 'numpy.random.choice', 'np.random.choice', (['V', '(cs_track[i] - c)'], {'replace': '(True)', 'p': 'fv_probs_normal'}), '(V, cs_track[i] - c, replace=True, p=fv_probs_normal)\n', (3542, 3595), True, 'import numpy as np\n'), ((3864, 3876), 'numpy.sum', 'np.sum', (['wp_c'], {}), '(wp_c)\n', (3870, 3876), True, 'import numpy as np\n'), ((4017, 4039), 'numpy.matrix', 'np.matrix', (['count_track'], {}), '(count_track)\n', (4026, 4039), True, 'import numpy as np\n'), ((4100, 4117), 'numpy.sum', 'np.sum', (['R'], {'axis': '(0)'}), '(R, axis=0)\n', (4106, 4117), True, 'import numpy as np\n'), ((4241, 4260), 'numpy.matrix', 'np.matrix', (['fv_track'], {}), '(fv_track)\n', (4250, 4260), True, 'import numpy as np\n'), ((6669, 6685), 'numpy.sum', 'np.sum', (['fv_probs'], {}), '(fv_probs)\n', (6675, 6685), True, 'import numpy as np\n'), ((7199, 7211), 'numpy.sum', 'np.sum', (['wp_c'], {}), '(wp_c)\n', (7205, 7211), True, 'import numpy as np\n'), ((7352, 7374), 'numpy.matrix', 'np.matrix', (['count_track'], {}), '(count_track)\n', (7361, 7374), True, 'import numpy as np\n'), ((7435, 7452), 'numpy.sum', 'np.sum', (['R'], {'axis': '(0)'}), '(R, axis=0)\n', (7441, 7452), True, 'import numpy as np\n'), ((7576, 7595), 'numpy.matrix', 'np.matrix', (['fv_track'], {}), '(fv_track)\n', (7585, 7595), True, 'import numpy as np\n'), ((454, 472), 'numpy.power', 'np.power', (['k', '(a - 1)'], {}), '(k, a - 1)\n', (462, 472), True, 'import numpy as np\n'), ((491, 505), 'numpy.power', 'np.power', (['k', 'a'], {}), '(k, a)\n', (499, 505), True, 'import numpy as np\n'), ((815, 832), 'numpy.exp', 'np.exp', (['(alpha * t)'], {}), '(alpha * t)\n', (821, 832), True, 'import numpy as np\n'), ((7877, 7899), 'numpy.power', 'np.power', (['(1 - p)', '(n - x)'], {}), '(1 - p, n - x)\n', (7885, 7899), True, 'import numpy as np\n'), ((7853, 7863), 'scipy.special.comb', 'comb', (['n', 'x'], {}), '(n, x)\n', (7857, 7863), False, 'from scipy.special import comb\n'), ((7863, 7877), 'numpy.power', 'np.power', (['p', 'x'], {}), '(p, x)\n', (7871, 7877), True, 'import numpy as np\n'), ((7985, 8007), 'numpy.power', 'np.power', (['(1 - p)', '(n - i)'], {}), '(1 - p, n - i)\n', (7993, 8007), True, 'import numpy as 
np\n'), ((7961, 7971), 'scipy.special.comb', 'comb', (['n', 'i'], {}), '(n, i)\n', (7965, 7971), False, 'from scipy.special import comb\n'), ((7971, 7985), 'numpy.power', 'np.power', (['p', 'i'], {}), '(p, i)\n', (7979, 7985), True, 'import numpy as np\n')]
|
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch import optim
import matplotlib.pyplot as plt
import random
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
# Build dataset
class DiagnosisDataset(Dataset):
def __init__(self,data_list, seq_length, pad_value=0 ,drop_len=1):
self.seq_length = seq_length
self.pad_value = pad_value
self.drop_len = drop_len
self.data_list = [x for x in data_list if len(x) > self.drop_len]
self.input, self.target = self.input_target(self.data_list)
def __getitem__(self,idx):
inputs = np.array(self.input[idx]).astype(np.int64)
inputs = padding(inputs,self.seq_length, pad_value=self.pad_value)
targets = np.array(self.target[idx]).astype(np.int64)
return inputs, targets
def __len__(self):
return len(self.target)
def input_target(self,x):
inputs = []
targets = []
for data_ in x:
len_ = len(data_)
inputs.append(data_[:(len_-1)])
targets.append(data_[-1])
return inputs, targets
class PatientLSTM(nn.Module):
def __init__(self,n_feature,n_hidden,n_layer,drop_prob,vocab_size = 2626,padding_val = 2625):
super().__init__()
self.n_feature = n_feature
self.n_hidden = n_hidden
self.n_layer = n_layer
self.drop_prob = drop_prob
self.embedding = nn.Embedding(vocab_size,self.n_feature,padding_idx=padding_val)
self.lstm = nn.LSTM(self.n_feature,self.n_hidden,self.n_layer,batch_first=True)
self.dropout = nn.Dropout(self.drop_prob)
self.fc = nn.Linear(11*self.n_hidden,vocab_size)
def forward(self,x):
embedded = self.embedding(x)
        lstm_output, hidden = self.lstm(embedded)
        out = self.dropout(lstm_output)
out = out.reshape(-1,11*self.n_hidden)
out = self.fc(out)
return out, hidden
def padding(x_array, length, pad_value=0):
"""
x_array: to be padded
length: max length
"""
len_ = len(x_array)
len2pad = length-len_
    assert len2pad >= 0, "target length should be >= the array length"
padded_x = np.pad(x_array,(0,len2pad),mode="constant",constant_values=pad_value)
return padded_x
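# Editor-added example (hedged, illustrative values): padding a 3-element array
# to length 6 with the default pad value gives
#   padding(np.array([5, 3, 9]), 6)   # -> array([5, 3, 9, 0, 0, 0])
# train_model() below pads every input sequence to `input_size` in this way.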
def train_model(n_feature,n_hidden,n_layer,drop_prob,batch_size,input_size,train,val,num_epoch,pad_value=2625,save_path="save_model/latest_model.pth"):
trainset = DiagnosisDataset(train,input_size,pad_value=pad_value)
trainloader = DataLoader(trainset,batch_size=batch_size,shuffle=True)
valset = DiagnosisDataset(val,input_size,pad_value=pad_value)
valloader = DataLoader(valset,batch_size=500,shuffle=True)
#model define
model = PatientLSTM(n_feature,n_hidden,n_layer,drop_prob)
model.to(device)
#define loss
creteria = nn.CrossEntropyLoss()
# define optimizer
optimizer = optim.SGD(model.parameters(),lr=0.02, momentum=0.9)
print(f"{'epoch':15s}{'train_loss':20s}")
print("-"*60)
for epoch in range(num_epoch):
# set model into train mode
# h = model.init_hidden(batch_size)
model.train()
train_loss = []
for bt_idx, (inputs,targets) in enumerate(trainloader):
# set data to device
inputs, targets = inputs.to(device), targets.to(device)
# h = tuple([each.data for each in h])
# make predictions
output, (_) = model(inputs)
# compute loss
tr_loss = creteria(output, targets)
# set gradients to zero
model.zero_grad()
# backpropagate the gradients
tr_loss.backward()
train_loss.append(tr_loss.item())
            # update the weights
optimizer.step()
# scheduler.step(tr_loss.item())
# set model eval mode
model.eval()
test_loss = []
total = 0
correct = 0
with torch.no_grad():
for inputs,targets in valloader:
inputs, targets = inputs.to(device), targets.to(device)
y_pred,_ = model(inputs)
loss = creteria(y_pred, targets)
test_loss.append(loss.item())
# apply softmax to final layer
y_pred = F.softmax(y_pred, 1).cpu()
# get max score and index
score, indx = torch.max(y_pred, 1)
correct += torch.eq(targets.cpu(),indx).sum()
total += targets.size()[0]
print(f"{epoch+1:4d}{np.mean(train_loss):18.4f}")
if epoch%50==0:
torch.save(model.state_dict(), f"save_model/model-{epoch}-{np.mean(train_loss):18.4f}.pth")
f = open("demofile1l.txt", "w")
f.write(f"{epoch+1:4d}{np.mean(train_loss):18.4f}\n")
f.close()
print("Save model..")
torch.save(model.state_dict(), save_path)
print("Training finished...")
def load_model(n_feature,n_hidden,n_layer,drop_prob,save_path):
# device = torch.device('cpu')
model = PatientLSTM(n_feature,n_hidden,n_layer,drop_prob)
model.load_state_dict(torch.load(save_path, map_location=device))
print("Model Loaded")
return model
def infer(x_test,model):
_x=torch.from_numpy(np.array(x_test)).view(1,-1)
with torch.no_grad():
y_hat, _ = model(_x.to(device))
y_hat = F.softmax(y_hat,1).cpu()
_, indx = torch.max(y_hat,1)
return indx.item()
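# Editor-added smoke test (hedged): the toy diagnosis-code sequences and
# hyperparameters below are invented just to exercise the Dataset/model wiring.
if __name__ == "__main__":
    toy = [[3, 17, 42, 8], [5, 9, 11], [7, 21, 21, 13, 2]]
    ds = DiagnosisDataset(toy, seq_length=11, pad_value=2625)
    loader = DataLoader(ds, batch_size=2, shuffle=False)
    net = PatientLSTM(n_feature=32, n_hidden=64, n_layer=1, drop_prob=0.2).to(device)
    xb, yb = next(iter(loader))
    logits, _ = net(xb.to(device))
    print(logits.shape)  # torch.Size([2, 2626]) -- one score per vocabulary entry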
|
[
"numpy.pad",
"torch.nn.Dropout",
"torch.utils.data.DataLoader",
"torch.nn.Embedding",
"torch.load",
"torch.nn.CrossEntropyLoss",
"torch.nn.functional.softmax",
"numpy.mean",
"torch.cuda.is_available",
"torch.max",
"numpy.array",
"torch.nn.Linear",
"torch.nn.LSTM",
"torch.no_grad"
] |
[((2338, 2411), 'numpy.pad', 'np.pad', (['x_array', '(0, len2pad)'], {'mode': '"""constant"""', 'constant_values': 'pad_value'}), "(x_array, (0, len2pad), mode='constant', constant_values=pad_value)\n", (2344, 2411), True, 'import numpy as np\n'), ((2680, 2737), 'torch.utils.data.DataLoader', 'DataLoader', (['trainset'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(trainset, batch_size=batch_size, shuffle=True)\n', (2690, 2737), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((2818, 2866), 'torch.utils.data.DataLoader', 'DataLoader', (['valset'], {'batch_size': '(500)', 'shuffle': '(True)'}), '(valset, batch_size=500, shuffle=True)\n', (2828, 2866), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((2998, 3019), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3017, 3019), False, 'from torch import nn\n'), ((239, 264), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (262, 264), False, 'import torch\n'), ((1572, 1637), 'torch.nn.Embedding', 'nn.Embedding', (['vocab_size', 'self.n_feature'], {'padding_idx': 'padding_val'}), '(vocab_size, self.n_feature, padding_idx=padding_val)\n', (1584, 1637), False, 'from torch import nn\n'), ((1656, 1726), 'torch.nn.LSTM', 'nn.LSTM', (['self.n_feature', 'self.n_hidden', 'self.n_layer'], {'batch_first': '(True)'}), '(self.n_feature, self.n_hidden, self.n_layer, batch_first=True)\n', (1663, 1726), False, 'from torch import nn\n'), ((1747, 1773), 'torch.nn.Dropout', 'nn.Dropout', (['self.drop_prob'], {}), '(self.drop_prob)\n', (1757, 1773), False, 'from torch import nn\n'), ((1792, 1833), 'torch.nn.Linear', 'nn.Linear', (['(11 * self.n_hidden)', 'vocab_size'], {}), '(11 * self.n_hidden, vocab_size)\n', (1801, 1833), False, 'from torch import nn\n'), ((5286, 5328), 'torch.load', 'torch.load', (['save_path'], {'map_location': 'device'}), '(save_path, map_location=device)\n', (5296, 5328), False, 'import torch\n'), ((5471, 5486), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5484, 5486), False, 'import torch\n'), ((5587, 5606), 'torch.max', 'torch.max', (['y_hat', '(1)'], {}), '(y_hat, 1)\n', (5596, 5606), False, 'import torch\n'), ((4114, 4129), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4127, 4129), False, 'import torch\n'), ((720, 745), 'numpy.array', 'np.array', (['self.input[idx]'], {}), '(self.input[idx])\n', (728, 745), True, 'import numpy as np\n'), ((865, 891), 'numpy.array', 'np.array', (['self.target[idx]'], {}), '(self.target[idx])\n', (873, 891), True, 'import numpy as np\n'), ((4555, 4575), 'torch.max', 'torch.max', (['y_pred', '(1)'], {}), '(y_pred, 1)\n', (4564, 4575), False, 'import torch\n'), ((5433, 5449), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (5441, 5449), True, 'import numpy as np\n'), ((5544, 5563), 'torch.nn.functional.softmax', 'F.softmax', (['y_hat', '(1)'], {}), '(y_hat, 1)\n', (5553, 5563), True, 'import torch.nn.functional as F\n'), ((4710, 4729), 'numpy.mean', 'np.mean', (['train_loss'], {}), '(train_loss)\n', (4717, 4729), True, 'import numpy as np\n'), ((4938, 4957), 'numpy.mean', 'np.mean', (['train_loss'], {}), '(train_loss)\n', (4945, 4957), True, 'import numpy as np\n'), ((4456, 4476), 'torch.nn.functional.softmax', 'F.softmax', (['y_pred', '(1)'], {}), '(y_pred, 1)\n', (4465, 4476), True, 'import torch.nn.functional as F\n'), ((4834, 4853), 'numpy.mean', 'np.mean', (['train_loss'], {}), '(train_loss)\n', (4841, 4853), True, 'import numpy as np\n')]
|
import datetime
import numbers
import numpy as np
from past.builtins import basestring
import netCDF4
def validate_calendar(calendar):
"""Validate calendar string for CF Conventions.
Parameters
----------
calendar : str
Returns
-------
out : str
same as input if the calendar is valid
Notes
-----
1. The 'none' value for the calendar attribute is not supported anywhere
in this code presently, so NotImplementedError is raised.
    2. NotImplementedError is currently raised for invalid calendars.
"""
if calendar in ['gregorian', 'standard', 'proleptic_gregorian', 'noleap',
'365_day', 'all_leap', '366_day', '360_day', 'julian']:
return calendar
elif calendar == 'none':
raise NotImplementedError("calendar is set to 'none'")
else:
# should be a better error...
raise NotImplementedError("Unknown calendar: {0}".format(calendar))
def _calendar_from_ncdataset(ncdataset):
"""Get calendar from a netCDF4._netCDF4.Dataset object.
Parameters
----------
ncdataset : netCDF4._netCDF4.Dataset
Returns
-------
out : str
calendar attribute of the time variable
Notes
-----
1. The 'none' value for the calendar attribute is not supported anywhere
in this code presently, so NotImplementedError is raised.
    2. NotImplementedError is currently raised for invalid calendars or if
       there is no time variable in the dataset.
"""
if 'time' in ncdataset.variables:
if hasattr(ncdataset.variables['time'], 'calendar'):
return validate_calendar(ncdataset.variables['time'].calendar)
else:
return 'gregorian'
else:
# should be a better error...
raise NotImplementedError("NetCDF file has no time variable")
def get_calendar(nc_resource):
"""Get calendar from a NetCDF resource.
Parameters
----------
nc_resource : str or netCDF4._netCDF4.Dataset or netCDF4._netCDF4.Variable
Returns
-------
out : str
calendar attribute of the time variable
Notes
-----
1. The 'none' value for the calendar attribute is not supported anywhere
in this code presently, so NotImplementedError is raised.
    2. NotImplementedError is currently raised for invalid calendars or if
       there is no time variable in the dataset.
"""
if hasattr(nc_resource, 'calendar'):
return validate_calendar(nc_resource.calendar)
elif isinstance(nc_resource, netCDF4._netCDF4.Dataset):
return _calendar_from_ncdataset(nc_resource)
elif isinstance(nc_resource, basestring):
nc = netCDF4.Dataset(nc_resource, 'r')
return _calendar_from_ncdataset(nc)
else:
msg = "Unknown NetCDF resource: {0}"
raise NotImplementedError(msg.format(str(nc_resource)))
def multiple_files_time_indice(nc_files, t):
if t < 0:
raise NotImplementedError("Starting from the end.")
for (i, nc_file) in enumerate(nc_files):
ncdataset = netCDF4.Dataset(nc_file, 'r')
if 'time' not in ncdataset.dimensions:
raise NotImplementedError() # should be a better error...
nt = ncdataset.dimensions['time'].size
if t < nt:
return (i, t)
t -= nt
raise NotImplementedError("overflow.") # should be a better error...
def _nearest_time_from_netcdf_time_units(nc_files, t, threshold=None):
if isinstance(nc_files, basestring):
nc_files = [nc_files]
previous_end_time = None
previous_nt = None
initial_calendar = None
initial_time_units = None
for (i, nc_file) in enumerate(nc_files):
ncdataset = netCDF4.Dataset(nc_file, 'r')
if 'time' not in ncdataset.variables:
raise NotImplementedError() # should be a better error...
nctime = ncdataset.variables['time']
if initial_calendar is None:
initial_calendar = _calendar_from_ncdataset(ncdataset)
initial_time_units = nctime.units
else:
current_calendar = _calendar_from_ncdataset(ncdataset)
# Here we should use a calendar compare that takes into account
# aliases.
c1 = (current_calendar != initial_calendar)
c2 = (nctime.units != initial_time_units)
if c1 or c2:
datetimes = netCDF4.num2date(nctime[:], nctime.units,
current_calendar)
nctime = netCDF4.date2num(datetimes, initial_time_units,
initial_calendar)
start_time = nctime[0]
end_time = nctime[-1]
if (t >= start_time) and (t <= end_time):
tn = int((np.abs(nctime[:]-t)).argmin())
if threshold and (abs(nctime[tn]-t) > threshold):
# should be a better error...
raise NotImplementedError("No value below threshold.")
return (i, tn)
elif t < start_time:
if previous_end_time is not None:
pdiff = np.abs(previous_end_time-t)
ndiff = np.abs(start_time-t)
if pdiff <= ndiff:
if threshold and (pdiff > threshold):
# should be a better error...
raise NotImplementedError("No value below threshold.")
return (i-1, previous_nt-1)
else:
if threshold and (ndiff > threshold):
# should be a better error...
raise NotImplementedError("No value below threshold.")
return(i, 0)
elif threshold and ((start_time-t) > threshold):
# should be a better error...
raise NotImplementedError("No value below threshold.")
return (0, 0)
previous_end_time = end_time
previous_nt = ncdataset.dimensions['time'].size
if threshold and (t-end_time > threshold):
# should be a better error...
raise NotImplementedError("No value below threshold.")
return (i, previous_nt-1)
def nearest_time(nc_files, t, threshold=None):
if isinstance(nc_files, basestring):
nc_files = [nc_files]
if isinstance(t, (list, set, tuple)):
if len(t) > 3:
t = netCDF4.netcdftime.netcdftime(t[0], t[1], t[2],
t[3], t[4], t[5])
else:
t = netCDF4.netcdftime.netcdftime(t[0], t[1], t[2], 12, 0, 0)
elif isinstance(t, basestring):
# Can't use time.strptime because of alternate NetCDF calendars
decode_t = t.split('T')
decode_date = decode_t[0].split('-')
yyyy = int(decode_date[0])
mm = int(decode_date[1])
dd = int(decode_date[2])
if len(decode_t) > 1:
decode_time = decode_t[1].split(':')
hh = int(decode_time[0])
mi = int(decode_time[1])
ss = int(decode_time[2])
else:
hh = 12
mi = 0
ss = 0
try:
t = datetime.datetime(yyyy, mm, dd, hh, mi, ss)
except ValueError:
t = netCDF4.netcdftime.datetime(yyyy, mm, dd, hh, mi, ss)
if isinstance(t, numbers.Number):
return _nearest_time_from_netcdf_time_units(nc_files, t, threshold)
elif isinstance(t, (datetime.datetime,
netCDF4.netcdftime._datetime.datetime)):
nc = netCDF4.Dataset(nc_files[0], 'r')
nctime = nc.variables['time']
t = netCDF4.date2num(t, nctime.units, _calendar_from_ncdataset(nc))
return _nearest_time_from_netcdf_time_units(nc_files, t, threshold)
else:
raise NotImplementedError()
def time_start_end(nc_resource):
"""Retrieve start and end date in a NetCDF file.
Parameters
----------
nc_resource : netCDF4._netCDF4.Dataset
Returns
-------
out : (netcdftime._datetime.datetime, netcdftime._datetime.datetime)
Tuple with start date and end date.
"""
if 'time' in nc_resource.variables:
nctime = nc_resource.variables['time']
nccalendar = getattr(nctime, 'calendar', 'gregorian')
datetime_min = netCDF4.num2date(
nctime[0], nctime.units, nccalendar)
datetime_max = netCDF4.num2date(
nctime[-1], nctime.units, nccalendar)
return (datetime_min, datetime_max)
else:
return (None, None)
def nc_datetime_to_iso(nc_datetime, force_gregorian_date=False,
raise_non_gregorian_dates=False):
"""Convert a NetCDF datetime to ISO format.
Parameters
----------
nc_datetime : netcdftime._datetime.datetime
force_gregorian_date : bool
Force output to be a valid gregorian calendar date. Only use this
if you know what you are doing, information will be lost about dates
in other valid CF-Convention calendars. In those cases, a nearest
gregorian date is forged.
raise_non_gregorian_dates : bool
In combination with force_gregorian_date, will raise an error if the
date is not a valid gregorian date, instead of returning the forged
nearest date.
Returns
-------
out : str
ISO formatted datetime.
Notes
-----
Does not support time zones.
"""
if force_gregorian_date:
try:
real_datetime = datetime.datetime(*nc_datetime.timetuple()[0:6])
except ValueError:
if raise_non_gregorian_dates:
raise
# Forging a nearest gregorian date. Try day-1 and if this works
# and hour < 12, set to (year,month,day-1,23,59,59), else
# set to (year,month+1,1,0,0,0).
year = nc_datetime.year
next_month = nc_datetime.month + 1
if next_month == 13:
next_month = 1
year += 1
real_datetime = datetime.datetime(year, next_month, 1, 0, 0, 0)
if nc_datetime.hour < 12:
try:
real_datetime = datetime.datetime(
nc_datetime.year, nc_datetime.month,
nc_datetime.day - 1)
real_datetime = datetime.datetime(
nc_datetime.year, nc_datetime.month,
nc_datetime.day - 1, 23, 59, 59)
except ValueError:
pass
return real_datetime.isoformat()
else:
return nc_datetime.strftime('%Y-%m-%dT%H:%M:%S')
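# Editor-added usage sketch (hedged): '/path/to/tasmax.nc' is a placeholder for a
# real NetCDF file with a CF 'time' variable; none of these calls come from the
# original module's tests.
#   cal = get_calendar('/path/to/tasmax.nc')                        # e.g. 'noleap'
#   file_i, t_i = nearest_time(['/path/to/tasmax.nc'], '1990-07-01T12:00:00')
#   start, end = time_start_end(netCDF4.Dataset('/path/to/tasmax.nc', 'r'))
#   nc_datetime_to_iso(start)                                       # ISO 8601 string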
|
[
"netCDF4.Dataset",
"numpy.abs",
"netCDF4.date2num",
"datetime.datetime",
"netCDF4.num2date",
"netCDF4.netcdftime.datetime",
"netCDF4.netcdftime.netcdftime"
] |
[((3029, 3058), 'netCDF4.Dataset', 'netCDF4.Dataset', (['nc_file', '"""r"""'], {}), "(nc_file, 'r')\n", (3044, 3058), False, 'import netCDF4\n'), ((3678, 3707), 'netCDF4.Dataset', 'netCDF4.Dataset', (['nc_file', '"""r"""'], {}), "(nc_file, 'r')\n", (3693, 3707), False, 'import netCDF4\n'), ((8254, 8307), 'netCDF4.num2date', 'netCDF4.num2date', (['nctime[0]', 'nctime.units', 'nccalendar'], {}), '(nctime[0], nctime.units, nccalendar)\n', (8270, 8307), False, 'import netCDF4\n'), ((8344, 8398), 'netCDF4.num2date', 'netCDF4.num2date', (['nctime[-1]', 'nctime.units', 'nccalendar'], {}), '(nctime[-1], nctime.units, nccalendar)\n', (8360, 8398), False, 'import netCDF4\n'), ((6344, 6409), 'netCDF4.netcdftime.netcdftime', 'netCDF4.netcdftime.netcdftime', (['t[0]', 't[1]', 't[2]', 't[3]', 't[4]', 't[5]'], {}), '(t[0], t[1], t[2], t[3], t[4], t[5])\n', (6373, 6409), False, 'import netCDF4\n'), ((6486, 6543), 'netCDF4.netcdftime.netcdftime', 'netCDF4.netcdftime.netcdftime', (['t[0]', 't[1]', 't[2]', '(12)', '(0)', '(0)'], {}), '(t[0], t[1], t[2], 12, 0, 0)\n', (6515, 6543), False, 'import netCDF4\n'), ((7498, 7531), 'netCDF4.Dataset', 'netCDF4.Dataset', (['nc_files[0]', '"""r"""'], {}), "(nc_files[0], 'r')\n", (7513, 7531), False, 'import netCDF4\n'), ((2646, 2679), 'netCDF4.Dataset', 'netCDF4.Dataset', (['nc_resource', '"""r"""'], {}), "(nc_resource, 'r')\n", (2661, 2679), False, 'import netCDF4\n'), ((4363, 4422), 'netCDF4.num2date', 'netCDF4.num2date', (['nctime[:]', 'nctime.units', 'current_calendar'], {}), '(nctime[:], nctime.units, current_calendar)\n', (4379, 4422), False, 'import netCDF4\n'), ((4493, 4558), 'netCDF4.date2num', 'netCDF4.date2num', (['datetimes', 'initial_time_units', 'initial_calendar'], {}), '(datetimes, initial_time_units, initial_calendar)\n', (4509, 4558), False, 'import netCDF4\n'), ((7121, 7164), 'datetime.datetime', 'datetime.datetime', (['yyyy', 'mm', 'dd', 'hh', 'mi', 'ss'], {}), '(yyyy, mm, dd, hh, mi, ss)\n', (7138, 7164), False, 'import datetime\n'), ((9984, 10031), 'datetime.datetime', 'datetime.datetime', (['year', 'next_month', '(1)', '(0)', '(0)', '(0)'], {}), '(year, next_month, 1, 0, 0, 0)\n', (10001, 10031), False, 'import datetime\n'), ((5070, 5099), 'numpy.abs', 'np.abs', (['(previous_end_time - t)'], {}), '(previous_end_time - t)\n', (5076, 5099), True, 'import numpy as np\n'), ((5122, 5144), 'numpy.abs', 'np.abs', (['(start_time - t)'], {}), '(start_time - t)\n', (5128, 5144), True, 'import numpy as np\n'), ((7208, 7261), 'netCDF4.netcdftime.datetime', 'netCDF4.netcdftime.datetime', (['yyyy', 'mm', 'dd', 'hh', 'mi', 'ss'], {}), '(yyyy, mm, dd, hh, mi, ss)\n', (7235, 7261), False, 'import netCDF4\n'), ((4734, 4755), 'numpy.abs', 'np.abs', (['(nctime[:] - t)'], {}), '(nctime[:] - t)\n', (4740, 4755), True, 'import numpy as np\n'), ((10127, 10202), 'datetime.datetime', 'datetime.datetime', (['nc_datetime.year', 'nc_datetime.month', '(nc_datetime.day - 1)'], {}), '(nc_datetime.year, nc_datetime.month, nc_datetime.day - 1)\n', (10144, 10202), False, 'import datetime\n'), ((10288, 10379), 'datetime.datetime', 'datetime.datetime', (['nc_datetime.year', 'nc_datetime.month', '(nc_datetime.day - 1)', '(23)', '(59)', '(59)'], {}), '(nc_datetime.year, nc_datetime.month, nc_datetime.day - 1,\n 23, 59, 59)\n', (10305, 10379), False, 'import datetime\n')]
|
import numpy as np
import torch
from mmdet.core.bbox.builder import BBOX_CODERS
from mmdet.core.bbox.coder.base_bbox_coder import BaseBBoxCoder
@BBOX_CODERS.register_module()
class DeltaOBBCoder(BaseBBoxCoder):
def __init__(self,
target_means=(0., 0., 0., 0., 0.),
target_stds=(0.1, 0.1, 0.2, 0.2, 0.1),
obb_encode='thetaobb'):
        super(DeltaOBBCoder, self).__init__()
self.means = target_means
self.stds = target_stds
self.obb_encode = obb_encode
def encode(self, obbs, gt_obbs):
assert obbs.size(0) == gt_obbs.size(0)
if self.obb_encode == 'thetaobb':
assert obbs.size(0) == gt_obbs.size(0)
encoded_obbs = thetaobb2delta(obbs, gt_obbs, self.means, self.stds)
else:
            raise(RuntimeError('does not support the encode method: {}'.format(self.obb_encode)))
return encoded_obbs
def decode(self,
obbs,
pred_obbs,
max_shape=None,
wh_ratio_clip=16 / 1000):
assert pred_obbs.size(0) == obbs.size(0)
if self.obb_encode == 'thetaobb':
decoded_obbs = delta2thetaobb(obbs, pred_obbs, self.means, self.stds,
max_shape, wh_ratio_clip)
else:
            raise(RuntimeError('does not support the decode method: {}'.format(self.obb_encode)))
return decoded_obbs
def thetaobb2delta(proposals, gt, means=(0., 0., 0., 0., 0.), stds=(0.1, 0.1, 0.2, 0.2, 0.1)):
# proposals: (x1, y1, x2, y2)
# gt: (cx, cy, w, h, theta)
assert proposals.size(0) == gt.size(0)
proposals = proposals.float()
gt = gt.float()
px = (proposals[..., 0] + proposals[..., 2]) * 0.5
py = (proposals[..., 1] + proposals[..., 3]) * 0.5
pw = proposals[..., 2] - proposals[..., 0]
ph = proposals[..., 3] - proposals[..., 1]
pa = np.ones(proposals.shape[0], dtype=np.int32) * (-np.pi / 2.0)
pa = torch.from_numpy(np.stack(pa)).float().to(proposals.device)
gx = gt[..., 0]
gy = gt[..., 1]
gw = gt[..., 2]
gh = gt[..., 3]
ga = gt[..., 4]
dx = (gx - px) / pw
dy = (gy - py) / ph
dw = torch.log(gw / pw)
dh = torch.log(gh / ph)
da = ga - pa
deltas = torch.stack([dx, dy, dw, dh, da], dim=-1)
means = deltas.new_tensor(means).unsqueeze(0)
stds = deltas.new_tensor(stds).unsqueeze(0)
deltas = deltas.sub_(means).div_(stds)
return deltas
def delta2thetaobb(rois,
deltas,
means=[0., 0., 0., 0., 0.],
stds=[0.1, 0.1, 0.2, 0.2, 0.1],
max_shape=None,
wh_ratio_clip=16 / 1000):
means = deltas.new_tensor(means).repeat(1, deltas.size(1) // 5)
stds = deltas.new_tensor(stds).repeat(1, deltas.size(1) // 5)
denorm_deltas = deltas * stds + means
dx = denorm_deltas[:, 0::5]
dy = denorm_deltas[:, 1::5]
dw = denorm_deltas[:, 2::5]
dh = denorm_deltas[:, 3::5]
da = denorm_deltas[:, 4::5]
max_ratio = np.abs(np.log(wh_ratio_clip))
dw = dw.clamp(min=-max_ratio, max=max_ratio)
dh = dh.clamp(min=-max_ratio, max=max_ratio)
px = ((rois[:, 0] + rois[:, 2]) * 0.5).unsqueeze(1).expand_as(dx)
py = ((rois[:, 1] + rois[:, 3]) * 0.5).unsqueeze(1).expand_as(dy)
pw = (rois[:, 2] - rois[:, 0]).unsqueeze(1).expand_as(dw)
ph = (rois[:, 3] - rois[:, 1]).unsqueeze(1).expand_as(dh)
pa = np.ones(rois.shape[0], dtype=np.int32) * (-np.pi / 2.0)
pa = torch.from_numpy(np.stack(pa)).float().to(rois.device).unsqueeze(1).expand_as(da)
gw = pw * dw.exp()
gh = ph * dh.exp()
gx = torch.addcmul(px, 1, pw, dx) # gx = px + pw * dx
gy = torch.addcmul(py, 1, ph, dy) # gy = py + ph * dy
ga = da + pa
if max_shape is not None:
gx = gx.clamp(min=0, max=max_shape[1])
gy = gy.clamp(min=0, max=max_shape[0])
gw = gw.clamp(min=0, max=max_shape[1])
gh = gh.clamp(min=0, max=max_shape[0])
thetaobbs = torch.stack([gx, gy, gw, gh, ga], dim=-1).view_as(deltas)
return thetaobbs
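# Editor's illustrative sketch (not part of the original module): a minimal encode/decode
# round trip for the helper functions above, assuming a torch version that still accepts
# the torch.addcmul call signature used in delta2thetaobb.
def _example_round_trip():
    proposals = torch.tensor([[0.0, 0.0, 100.0, 50.0]])                  # (x1, y1, x2, y2)
    gt = torch.tensor([[50.0, 25.0, 100.0, 50.0, -np.pi / 2.0]])  # (cx, cy, w, h, theta)
    deltas = thetaobb2delta(proposals, gt)
    decoded = delta2thetaobb(proposals, deltas)
    return deltas, decoded  # decoded should be close to gt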
|
[
"numpy.stack",
"torch.stack",
"numpy.log",
"torch.addcmul",
"numpy.ones",
"torch.log",
"mmdet.core.bbox.builder.BBOX_CODERS.register_module"
] |
[((148, 177), 'mmdet.core.bbox.builder.BBOX_CODERS.register_module', 'BBOX_CODERS.register_module', ([], {}), '()\n', (175, 177), False, 'from mmdet.core.bbox.builder import BBOX_CODERS\n'), ((2220, 2238), 'torch.log', 'torch.log', (['(gw / pw)'], {}), '(gw / pw)\n', (2229, 2238), False, 'import torch\n'), ((2248, 2266), 'torch.log', 'torch.log', (['(gh / ph)'], {}), '(gh / ph)\n', (2257, 2266), False, 'import torch\n'), ((2298, 2339), 'torch.stack', 'torch.stack', (['[dx, dy, dw, dh, da]'], {'dim': '(-1)'}), '([dx, dy, dw, dh, da], dim=-1)\n', (2309, 2339), False, 'import torch\n'), ((3696, 3724), 'torch.addcmul', 'torch.addcmul', (['px', '(1)', 'pw', 'dx'], {}), '(px, 1, pw, dx)\n', (3709, 3724), False, 'import torch\n'), ((3755, 3783), 'torch.addcmul', 'torch.addcmul', (['py', '(1)', 'ph', 'dy'], {}), '(py, 1, ph, dy)\n', (3768, 3783), False, 'import torch\n'), ((1931, 1974), 'numpy.ones', 'np.ones', (['proposals.shape[0]'], {'dtype': 'np.int32'}), '(proposals.shape[0], dtype=np.int32)\n', (1938, 1974), True, 'import numpy as np\n'), ((3093, 3114), 'numpy.log', 'np.log', (['wh_ratio_clip'], {}), '(wh_ratio_clip)\n', (3099, 3114), True, 'import numpy as np\n'), ((3489, 3527), 'numpy.ones', 'np.ones', (['rois.shape[0]'], {'dtype': 'np.int32'}), '(rois.shape[0], dtype=np.int32)\n', (3496, 3527), True, 'import numpy as np\n'), ((4057, 4098), 'torch.stack', 'torch.stack', (['[gx, gy, gw, gh, ga]'], {'dim': '(-1)'}), '([gx, gy, gw, gh, ga], dim=-1)\n', (4068, 4098), False, 'import torch\n'), ((2018, 2030), 'numpy.stack', 'np.stack', (['pa'], {}), '(pa)\n', (2026, 2030), True, 'import numpy as np\n'), ((3571, 3583), 'numpy.stack', 'np.stack', (['pa'], {}), '(pa)\n', (3579, 3583), True, 'import numpy as np\n')]
|
from .Player import Player
from .TileCollection import TileCollection
from .Center import Center
from .TileColor import TileColor
from .AzulAction import AzulAction
import random
import numpy as np
import math
class AzulBoard():
def __init__(self):
self.player1 = Player(1)
self.player2 = Player(-1)
self.bag = TileCollection(20, 20, 20, 20, 20, 0)
self.lid = TileCollection()
self.center = Center(self.bag, self.lid)
self.roundFinished = False
self.playerIDWhoHadWhiteLastRound = 0
def display(self):
print("---------------------------------------------------------")
print("Bag:", self.bag.toString())
self.lid.display()
self.center.display()
print()
self.player1.display()
print()
self.player2.display()
print("---------------------------------------------------------")
def toString(self):
return self.bag.toString() + self.lid.toString() + self.center.toString() + self.player1.toString() + self.player2.toString()
def fillWallsRandomly(self, prob: float):
self.player1.wall.cells = self.getValidRandomWall(prob)
self.player2.wall.cells = self.getValidRandomWall(prob)
def getValidRandomWall(self, prob: float):
valid = False
while not valid:
numpyWall = np.random.choice(a=[True, False], size=(5, 5), p = [prob, 1-prob])
valid = True
for line in numpyWall:
if line.all():
valid = False
return numpyWall.tolist()
def fillPlayerLinesRandomly(self, bag, lineHasTilesProb: float):
self.player1.playerLines.lines = self.getValidRandomPlayerLines(bag, self.player1, lineHasTilesProb)
self.player2.playerLines.lines = self.getValidRandomPlayerLines(bag, self.player2, lineHasTilesProb)
def getValidRandomPlayerLines(self, bag, player, lineHasTilesProb: float):
lines = []
for i in range(5):
addSomeTiles = np.random.uniform(0, 1) < lineHasTilesProb
if addSomeTiles:
validColors = player.wall.getValidColorsForRow(i)
color = np.random.choice(validColors)
number = min(np.random.randint(0, i + 1), bag.getCountOfColor(color)) # Don't remove tiles we don't have in the bag.
if number == 0:
color = None
else:
bag.removeTiles(color, number)
else:
color = None
number = 0
lines.append([i + 1, color, number])
return lines
def getNextState(self, player, actionInt):
action = AzulAction.getActionFromInt(actionInt, player)
return self.executeAction(action)
def getPlayerFromAction(self, action: AzulAction) -> Player:
if action.playerID == 1:
return self.player1
else:
return self.player2
def executeAction(self, action: AzulAction):
actionPlayer = self.getPlayerFromAction(action)
# Manipulate tiles from center
tilesInHand = self.center.takeTiles(action)
# Place tiles on player board
overflow = actionPlayer.placeTilesFromAction(action, tilesInHand)
# Potentially put overflow in lid
self.lid.addTiles(action.color, overflow.getCountOfColor(action.color))
if self.shouldFinishRound():
self.finishRound()
return self
def shouldFinishRound(self) -> bool:
for factory in self.center.factories:
if factory.tiles.getCount() > 0:
return False
if self.center.center.getCount() > 0:
return False
return True
def finishRound(self):
self.roundFinished = True
self.playerIDWhoHadWhiteLastRound = 0 # Reset
# Track if player1 had white tile
if (self.player1.hasWhiteTile):
self.playerIDWhoHadWhiteLastRound = self.player1.id
# move tiles to bag and lid
tilesToLid = self.player1.finishRound()
tilesToLid.moveAllTiles(self.lid)
if (self.player2.hasWhiteTile):
self.playerIDWhoHadWhiteLastRound = self.player2.id
tilesToLid = self.player2.finishRound()
tilesToLid.moveAllTiles(self.lid)
def setupNextRound(self):
self.roundFinished = False
self.center = Center(self.bag, self.lid)
self.player1.hasWhiteTile = False
self.player2.hasWhiteTile = False
def isGameFinished(self):
return self.player1.wall.hasFinishedRow() or self.player2.wall.hasFinishedRow()
def getAllTiles(self):
# Created as a sanity check. Make sure there are 20/20/20/20/20/1 tiles in the game at all times.
sumTiles = TileCollection()
sumTiles.addTilesFromCollection(self.bag)
sumTiles.addTilesFromCollection(self.lid)
sumTiles.addTilesFromCollection(self.player1.getAllTiles())
sumTiles.addTilesFromCollection(self.player2.getAllTiles())
sumTiles.addTilesFromCollection(self.center.getAllTiles())
return sumTiles
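# Editor's sanity-check sketch (not part of the original module): the expected total of
# 101 tiles (20 per colour plus the single white tile) is taken from the comment in
# getAllTiles and is an assumption about how TileCollection counts the white tile.
def _assert_tile_conservation(board, expected_total=101):
    total = board.getAllTiles().getCount()
    assert total == expected_total, "tile count drifted: {} != {}".format(total, expected_total)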
|
[
"numpy.random.uniform",
"numpy.random.randint",
"numpy.random.choice"
] |
[((1423, 1489), 'numpy.random.choice', 'np.random.choice', ([], {'a': '[True, False]', 'size': '(5, 5)', 'p': '[prob, 1 - prob]'}), '(a=[True, False], size=(5, 5), p=[prob, 1 - prob])\n', (1439, 1489), True, 'import numpy as np\n'), ((2122, 2145), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2139, 2145), True, 'import numpy as np\n'), ((2287, 2316), 'numpy.random.choice', 'np.random.choice', (['validColors'], {}), '(validColors)\n', (2303, 2316), True, 'import numpy as np\n'), ((2347, 2374), 'numpy.random.randint', 'np.random.randint', (['(0)', '(i + 1)'], {}), '(0, i + 1)\n', (2364, 2374), True, 'import numpy as np\n')]
|
# Uses a learnable embedding for the countries which is fed as input to both encoders (at the respective dense layers).
# Copyright 2020 (c) Cognizant Digital Business, Evolutionary AI. All rights reserved. Issued under the Apache 2.0 License.
import os
import urllib.request
# Suppress noisy Tensorflow debug logging
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# noinspection PyPep8Naming
import keras.backend as K
import numpy as np
import pandas as pd
from keras.callbacks import EarlyStopping
from keras.constraints import Constraint
from keras.layers import Concatenate
from keras.layers import Dense
from keras.layers import Embedding
from keras.layers import Input
from keras.layers import LSTM
from keras.layers import Reshape
from keras.layers import Lambda
from keras.models import Model
import ongoing.predictors.base as base
from ongoing.predictors.base import BasePredictor
NB_LOOKBACK_DAYS = 21
NB_TEST_DAYS = 14
WINDOW_SIZE = 7
NUM_EPOCHS = 1000
LSTM_SIZE = 32
EMBED_SIZE = 4
NPI_DELAY = 0
class Positive(Constraint):
def __call__(self, w):
return K.abs(w)
# Functions to be used for lambda layers in model
def _combine_r_and_d(x):
r, d = x
return r * (1. - d)
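# Editor's worked example (added note): with a context ratio r = 1.2 and an action
# effect d = 0.25, the lambda above blends them as 1.2 * (1 - 0.25) = 0.9.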
class geoLSTMPredictor(BasePredictor):
"""
    A geo-aware LSTM predictor that feeds a learnable country embedding into both
    the context and action encoders (at their respective dense layers).
"""
def __init__(self, path_to_model_weights=None, path_to_geos=None,
embed_size=EMBED_SIZE, lstm_size=LSTM_SIZE, nb_lookback_days=NB_LOOKBACK_DAYS, nb_test_days=NB_TEST_DAYS,
window_size=WINDOW_SIZE, npi_delay=NPI_DELAY, num_epochs=NUM_EPOCHS, seed=base.SEED):
super().__init__(seed=seed)
self.embed_size = embed_size
self.lstm_size = lstm_size
self.nb_lookback_days = nb_lookback_days
self.nb_test_days = nb_test_days
self.window_size = window_size
self.npi_delay = npi_delay
self.num_epochs = num_epochs
if path_to_model_weights and path_to_geos:
# Load model weights
nb_context = 2 # New cases rate and new deaths rate are used as context
nb_action = len(base.NPI_COLUMNS)
with open(path_to_geos, 'r') as f:
self.geos = [g.rstrip() for g in f.readlines()]
self.predictor, _ = self._construct_model(nb_context=nb_context,
nb_action=nb_action,
embed_size=self.embed_size,
lstm_size=self.lstm_size,
nb_lookback_days=self.nb_lookback_days)
self.predictor.load_weights(path_to_model_weights)
self.country_samples = None # will be set when fit() or predict() are called
def predict(self, data=None):
if self.train_df is None:
raise Exception("train_df must be defined before calling predict()")
if data is None:
data = self.test_df
if self.country_samples is None:
self.country_samples = self._create_country_samples(self.train_df,
list(self.train_df.GeoID.unique()),
self.nb_lookback_days,
self.npi_delay,
self.nb_test_days)
self.geos = list(self.country_samples.keys())
start_date = pd.to_datetime(data.Date.min(), format='%Y-%m-%d')
end_date = pd.to_datetime(data.Date.max(), format='%Y-%m-%d')
nb_days = (end_date - start_date).days + 1
# Prepare the output
forecast = {"CountryName": [],
"RegionName": [],
"Date": [],
"PredictedDailyTotalCases": [],
"PredictedDailyNewCases": [],
"PredictedDailyTotalDeaths": [],
"PredictedDailyNewDeaths": []}
# For each requested geo
geos = data.GeoID.unique()
for g in geos:
if g not in self.geos:
# the model was not trained for this geo: return zeroes
print("WARNING: The model was not trained for {}".format(g))
pred_total_cases = [0] * nb_days
pred_new_cases = [0] * nb_days
pred_total_deaths = [0] * nb_days
pred_new_deaths = [0] * nb_days
geo_start_date = start_date
else:
cdf = self.train_df[self.train_df.GeoID == g]
if len(cdf) == 0:
# we don't have historical data for this geo: return zeroes
pred_total_cases = [0] * nb_days
pred_new_cases = [0] * nb_days
pred_total_deaths = [0] * nb_days
pred_new_deaths = [0] * nb_days
geo_start_date = start_date
else:
last_known_date = cdf.Date.max()
# Start predicting from start_date, unless there's a gap since last known date
geo_start_date = min(last_known_date + np.timedelta64(1, 'D'), start_date)
npis_gdf = data[(data.Date >= geo_start_date - pd.Timedelta(days=self.npi_delay)) & (data.Date <= end_date - pd.Timedelta(days=self.npi_delay))]
pred_total_cases, pred_new_cases, pred_total_deaths, pred_new_deaths = self._get_new_cases_preds(cdf, g, npis_gdf)
# Append forecast data to results to return
country = data[data.GeoID == g].iloc[0].CountryName
region = data[data.GeoID == g].iloc[0].RegionName
for i, (ptot_cases, pnew_cases, ptot_deaths, pnew_deaths) in enumerate(zip(pred_total_cases, pred_new_cases, pred_total_deaths, pred_new_deaths)):
forecast["GeoID"].append(g)
forecast["CountryName"].append(country)
forecast["RegionName"].append(region)
current_date = geo_start_date + pd.offsets.Day(i)
forecast["Date"].append(current_date)
forecast["PredictedDailyTotalCases"].append(ptot_cases)
forecast["PredictedDailyNewCases"].append(pnew_cases)
forecast["PredictedDailyTotalDeaths"].append(ptot_deaths)
forecast["PredictedDailyNewDeaths"].append(pnew_deaths)
forecast_df = pd.DataFrame.from_dict(forecast)
# Return only the requested predictions (PredictedDailyTotalCases)
return forecast_df[(forecast_df.Date >= start_date) & (forecast_df.Date <= end_date)]
def _get_new_cases_preds(self, c_df, g, npis_df):
cdf = c_df[c_df.ConfirmedCases.notnull()]
initial_context_input = self.country_samples[g]['X_test_context'][-1]
initial_action_input = self.country_samples[g]['X_test_action'][-1]
country_id = np.array([self.geos.index(g)])
# Predictions with passed npis
cnpis_df = npis_df[npis_df.GeoID == g]
npis_sequence = np.array(cnpis_df[base.NPI_COLUMNS])
# Get the predictions with the passed NPIs
preds = self._roll_out_predictions(self.predictor,
initial_context_input,
initial_action_input,
country_id,
npis_sequence)
preds_cases = preds[:,0]
preds_deaths = preds[:,1]
# Gather info to convert to total cases
prev_confirmed_cases = np.array(cdf.ConfirmedCases)
prev_new_cases = np.array(cdf.NewCases)
initial_total_cases = prev_confirmed_cases[-1]
pop_size = np.array(cdf.Population)[-1] # Population size doesn't change over time
prev_confirmed_deaths = np.array(cdf.ConfirmedDeaths)
prev_new_deaths = np.array(cdf.NewDeaths)
initial_total_deaths = prev_confirmed_deaths[-1]
# Compute predictor's cases forecast
pred_total_cases, pred_new_cases = base.convert_ratios_to_total_cases(
preds_cases,
self.window_size,
prev_new_cases,
initial_total_cases,
pop_size)
# Compute predictor's deaths forecast
pred_total_deaths, pred_new_deaths = base.convert_ratios_to_total_deaths(
preds_deaths,
self.window_size,
prev_new_deaths,
initial_total_deaths)
return pred_total_cases, pred_new_cases, pred_total_deaths, pred_new_deaths
@staticmethod
def _create_country_samples(df: pd.DataFrame, geos: list, nb_lookback_days: int, npi_delay: int, nb_test_days: int) -> dict:
"""
For each country, creates numpy arrays for Keras
:param df: a Pandas DataFrame with historical data for countries (the "Oxford" dataset)
:param geos: a list of geo names
:return: a dictionary of train and test sets, for each specified country
"""
context_columns = ['PredictionRatio', 'DeathRatio']
action_columns = base.NPI_COLUMNS
outcome_columns = ['PredictionRatio', 'DeathRatio']
country_samples = {}
for i, g in enumerate(geos):
cdf = df[df.GeoID == g]
cdf = cdf[cdf.ConfirmedCases.notnull()]
context_data = np.array(cdf[context_columns])
action_data = np.array(cdf[action_columns])
outcome_data = np.array(cdf[outcome_columns])
context_samples = []
action_samples = []
outcome_samples = []
nb_total_days = outcome_data.shape[0]
for d in range(nb_lookback_days+npi_delay, nb_total_days):
context_samples.append(context_data[d - nb_lookback_days: d])
action_samples.append(action_data[d-npi_delay - nb_lookback_days: d-npi_delay])
outcome_samples.append(outcome_data[d])
if len(outcome_samples) > 0:
X_context = np.stack(context_samples, axis=0)
X_action = np.stack(action_samples, axis=0)
X_country = i*np.ones(X_context.shape[0])
y = np.stack(outcome_samples, axis=0)
country_samples[g] = {
'X_context': X_context,
'X_action': X_action,
'X_country': X_country,
'y': y,
'X_test_context': X_context[-nb_test_days:],
'X_test_action': X_action[-nb_test_days:],
'X_test_country': X_country[-nb_test_days:],
'y_test': y[-nb_test_days:],
}
return country_samples
# Function for performing roll outs into the future
@staticmethod
def _roll_out_predictions(predictor, initial_context_input, initial_action_input, country_id, future_action_sequence):
nb_roll_out_days = future_action_sequence.shape[0]
pred_output = np.zeros((nb_roll_out_days, 2))
context_input = np.expand_dims(np.copy(initial_context_input), axis=0)
action_input = np.expand_dims(np.copy(initial_action_input), axis=0)
country_input = np.expand_dims(np.copy(country_id), axis=0)
for d in range(nb_roll_out_days):
action_input[:, :-1] = action_input[:, 1:]
# Use the passed actions
action_sequence = future_action_sequence[d]
action_input[:, -1] = action_sequence
pred = predictor.predict([context_input, action_input, country_input])
pred_output[d] = pred[-1]
context_input[:, :-1] = context_input[:, 1:]
context_input[:, -1] = pred[-1]
return pred_output
def fit(self):
if self.train_df is None:
raise Exception("train_df must be defined bfr calling predict()")
self.country_samples = self._create_country_samples(self.train_df,
list(self.train_df.GeoID.unique()),
self.nb_lookback_days,
self.npi_delay,
self.nb_test_days)
self.geos = list(self.country_samples.keys())
# Aggregate data for training
all_X_context_list = [self.country_samples[c]['X_context']
for c in self.country_samples]
all_X_action_list = [self.country_samples[c]['X_action']
for c in self.country_samples]
all_X_country_list = [self.country_samples[c]['X_country']
for c in self.country_samples]
all_y_list = [self.country_samples[c]['y']
for c in self.country_samples]
X_context = np.concatenate(all_X_context_list)
X_action = np.concatenate(all_X_action_list)
X_country = np.concatenate(all_X_country_list)
y = np.concatenate(all_y_list)
# Clip outliers
MIN_VALUE = 0.
MAX_VALUE = 2.
X_context = np.clip(X_context, MIN_VALUE, MAX_VALUE)
y = np.clip(y, MIN_VALUE, MAX_VALUE)
X_context, X_action, X_country, y = self._permute_data(X_context, X_action, X_country, y)
self.predictor, training_model = self._construct_model(nb_context=X_context.shape[-1],
nb_action=X_action.shape[-1],
embed_size=self.embed_size,
lstm_size=self.lstm_size,
nb_lookback_days=self.nb_lookback_days)
history = self._train_model(training_model, X_context, X_action, X_country, y, epochs=self.num_epochs, verbose=0)
top_epoch = np.argmin(history.history['val_loss'])
train_loss = history.history['loss'][top_epoch]
val_loss = history.history['val_loss'][top_epoch]
print('Train Loss:', train_loss)
print('Val Loss:', val_loss)
# Shuffling data prior to train/val split
def _permute_data(self, X_context, X_action, X_country, y):
p = np.random.permutation(y.shape[0])
X_context = X_context[p]
X_action = X_action[p]
X_country = X_country[p]
y = y[p]
return X_context, X_action, X_country, y
# Construct model
def _construct_model(self, nb_context, nb_action, embed_size=10, lstm_size=32, nb_lookback_days=21):
# Create country embedding
country_id = Input(shape=(1,),
name='country_id')
emb = Embedding(len(self.geos), embed_size)(country_id)
emb = Reshape((embed_size,))(emb)
# Create context encoder
context_input = Input(shape=(nb_lookback_days, nb_context),
name='context_input')
x = LSTM(lstm_size,
return_sequences=False,
name='context_lstm')(context_input)
x = Concatenate(axis=1)([x, emb]) # concatenate the output of the LSTM with the country embedding prior to the dense layer
context_output = Dense(units=2,
activation='softplus',
name='context_dense')(x)
# Create action encoder
# Every aspect is monotonic and nonnegative except final bias
action_input = Input(shape=(nb_lookback_days, nb_action),
name='action_input')
x = LSTM(units=lstm_size,
kernel_constraint=Positive(),
recurrent_constraint=Positive(),
bias_constraint=Positive(),
return_sequences=False,
name='action_lstm')(action_input)
x = Concatenate(axis=1)([x, emb]) # concatenate the output of the LSTM with the country embedding prior to the dense layer
action_output = Dense(units=2,
activation='sigmoid',
name='action_dense')(x)
# Create prediction model
model_output = Lambda(_combine_r_and_d, name='prediction')(
[context_output, action_output])
model = Model(inputs=[context_input, action_input, country_id],
outputs=[model_output])
model.compile(loss='mae', optimizer='adam')
# Create training model, which includes loss to measure
# variance of action_output predictions
training_model = Model(inputs=[context_input, action_input, country_id],
outputs=[model_output])
training_model.compile(loss='mae',
optimizer='adam')
return model, training_model
# Train model
def _train_model(self, training_model, X_context, X_action, X_country, y, epochs=1, verbose=0):
early_stopping = EarlyStopping(patience=20,
restore_best_weights=True)
history = training_model.fit([X_context, X_action, X_country], [y],
epochs=epochs,
batch_size=32,
validation_split=0.1,
callbacks=[early_stopping],
verbose=verbose)
return history
def save_model(self, path_to_weights, path_to_country_list):
self.predictor.save_weights(path_to_weights)
with open(path_to_country_list, 'w') as f:
f.writelines("{}\n".format(g) for g in self.geos)
if __name__ == '__main__':
# Run all test cases
model = geoLSTMPredictor()
model.evaluate()
model.save_model('./ongoing/predictors/geolstm/models/model.h5', './ongoing/predictors/geolstm/models/countries.txt')
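    # Editor's reload sketch (added, hedged): the constructor accepts saved weights plus
    # the country list written by save_model(); the paths simply mirror the call above.
    reloaded = geoLSTMPredictor(
        path_to_model_weights='./ongoing/predictors/geolstm/models/model.h5',
        path_to_geos='./ongoing/predictors/geolstm/models/countries.txt')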
|
[
"ongoing.predictors.base.convert_ratios_to_total_deaths",
"numpy.ones",
"numpy.clip",
"keras.backend.abs",
"numpy.argmin",
"keras.models.Model",
"pandas.offsets.Day",
"keras.layers.Input",
"keras.layers.Reshape",
"numpy.copy",
"pandas.Timedelta",
"numpy.stack",
"pandas.DataFrame.from_dict",
"ongoing.predictors.base.convert_ratios_to_total_cases",
"keras.layers.Concatenate",
"numpy.random.permutation",
"numpy.concatenate",
"keras.layers.LSTM",
"numpy.zeros",
"keras.callbacks.EarlyStopping",
"numpy.array",
"keras.layers.Dense",
"keras.layers.Lambda",
"numpy.timedelta64"
] |
[((1081, 1089), 'keras.backend.abs', 'K.abs', (['w'], {}), '(w)\n', (1086, 1089), True, 'import keras.backend as K\n'), ((6557, 6589), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['forecast'], {}), '(forecast)\n', (6579, 6589), True, 'import pandas as pd\n'), ((7180, 7216), 'numpy.array', 'np.array', (['cnpis_df[base.NPI_COLUMNS]'], {}), '(cnpis_df[base.NPI_COLUMNS])\n', (7188, 7216), True, 'import numpy as np\n'), ((7717, 7745), 'numpy.array', 'np.array', (['cdf.ConfirmedCases'], {}), '(cdf.ConfirmedCases)\n', (7725, 7745), True, 'import numpy as np\n'), ((7771, 7793), 'numpy.array', 'np.array', (['cdf.NewCases'], {}), '(cdf.NewCases)\n', (7779, 7793), True, 'import numpy as np\n'), ((7973, 8002), 'numpy.array', 'np.array', (['cdf.ConfirmedDeaths'], {}), '(cdf.ConfirmedDeaths)\n', (7981, 8002), True, 'import numpy as np\n'), ((8029, 8052), 'numpy.array', 'np.array', (['cdf.NewDeaths'], {}), '(cdf.NewDeaths)\n', (8037, 8052), True, 'import numpy as np\n'), ((8199, 8315), 'ongoing.predictors.base.convert_ratios_to_total_cases', 'base.convert_ratios_to_total_cases', (['preds_cases', 'self.window_size', 'prev_new_cases', 'initial_total_cases', 'pop_size'], {}), '(preds_cases, self.window_size,\n prev_new_cases, initial_total_cases, pop_size)\n', (8233, 8315), True, 'import ongoing.predictors.base as base\n'), ((8465, 8575), 'ongoing.predictors.base.convert_ratios_to_total_deaths', 'base.convert_ratios_to_total_deaths', (['preds_deaths', 'self.window_size', 'prev_new_deaths', 'initial_total_deaths'], {}), '(preds_deaths, self.window_size,\n prev_new_deaths, initial_total_deaths)\n', (8500, 8575), True, 'import ongoing.predictors.base as base\n'), ((11132, 11163), 'numpy.zeros', 'np.zeros', (['(nb_roll_out_days, 2)'], {}), '((nb_roll_out_days, 2))\n', (11140, 11163), True, 'import numpy as np\n'), ((13017, 13051), 'numpy.concatenate', 'np.concatenate', (['all_X_context_list'], {}), '(all_X_context_list)\n', (13031, 13051), True, 'import numpy as np\n'), ((13071, 13104), 'numpy.concatenate', 'np.concatenate', (['all_X_action_list'], {}), '(all_X_action_list)\n', (13085, 13104), True, 'import numpy as np\n'), ((13125, 13159), 'numpy.concatenate', 'np.concatenate', (['all_X_country_list'], {}), '(all_X_country_list)\n', (13139, 13159), True, 'import numpy as np\n'), ((13172, 13198), 'numpy.concatenate', 'np.concatenate', (['all_y_list'], {}), '(all_y_list)\n', (13186, 13198), True, 'import numpy as np\n'), ((13290, 13330), 'numpy.clip', 'np.clip', (['X_context', 'MIN_VALUE', 'MAX_VALUE'], {}), '(X_context, MIN_VALUE, MAX_VALUE)\n', (13297, 13330), True, 'import numpy as np\n'), ((13343, 13375), 'numpy.clip', 'np.clip', (['y', 'MIN_VALUE', 'MAX_VALUE'], {}), '(y, MIN_VALUE, MAX_VALUE)\n', (13350, 13375), True, 'import numpy as np\n'), ((14088, 14126), 'numpy.argmin', 'np.argmin', (["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", (14097, 14126), True, 'import numpy as np\n'), ((14442, 14475), 'numpy.random.permutation', 'np.random.permutation', (['y.shape[0]'], {}), '(y.shape[0])\n', (14463, 14475), True, 'import numpy as np\n'), ((14823, 14859), 'keras.layers.Input', 'Input', ([], {'shape': '(1,)', 'name': '"""country_id"""'}), "(shape=(1,), name='country_id')\n", (14828, 14859), False, 'from keras.layers import Input\n'), ((15051, 15116), 'keras.layers.Input', 'Input', ([], {'shape': '(nb_lookback_days, nb_context)', 'name': '"""context_input"""'}), "(shape=(nb_lookback_days, nb_context), name='context_input')\n", (15056, 15116), False, 'from keras.layers 
import Input\n'), ((15677, 15740), 'keras.layers.Input', 'Input', ([], {'shape': '(nb_lookback_days, nb_action)', 'name': '"""action_input"""'}), "(shape=(nb_lookback_days, nb_action), name='action_input')\n", (15682, 15740), False, 'from keras.layers import Input\n'), ((16479, 16558), 'keras.models.Model', 'Model', ([], {'inputs': '[context_input, action_input, country_id]', 'outputs': '[model_output]'}), '(inputs=[context_input, action_input, country_id], outputs=[model_output])\n', (16484, 16558), False, 'from keras.models import Model\n'), ((16771, 16850), 'keras.models.Model', 'Model', ([], {'inputs': '[context_input, action_input, country_id]', 'outputs': '[model_output]'}), '(inputs=[context_input, action_input, country_id], outputs=[model_output])\n', (16776, 16850), False, 'from keras.models import Model\n'), ((17156, 17209), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'patience': '(20)', 'restore_best_weights': '(True)'}), '(patience=20, restore_best_weights=True)\n', (17169, 17209), False, 'from keras.callbacks import EarlyStopping\n'), ((7868, 7892), 'numpy.array', 'np.array', (['cdf.Population'], {}), '(cdf.Population)\n', (7876, 7892), True, 'import numpy as np\n'), ((9496, 9526), 'numpy.array', 'np.array', (['cdf[context_columns]'], {}), '(cdf[context_columns])\n', (9504, 9526), True, 'import numpy as np\n'), ((9553, 9582), 'numpy.array', 'np.array', (['cdf[action_columns]'], {}), '(cdf[action_columns])\n', (9561, 9582), True, 'import numpy as np\n'), ((9610, 9640), 'numpy.array', 'np.array', (['cdf[outcome_columns]'], {}), '(cdf[outcome_columns])\n', (9618, 9640), True, 'import numpy as np\n'), ((11203, 11233), 'numpy.copy', 'np.copy', (['initial_context_input'], {}), '(initial_context_input)\n', (11210, 11233), True, 'import numpy as np\n'), ((11281, 11310), 'numpy.copy', 'np.copy', (['initial_action_input'], {}), '(initial_action_input)\n', (11288, 11310), True, 'import numpy as np\n'), ((11359, 11378), 'numpy.copy', 'np.copy', (['country_id'], {}), '(country_id)\n', (11366, 11378), True, 'import numpy as np\n'), ((14965, 14987), 'keras.layers.Reshape', 'Reshape', (['(embed_size,)'], {}), '((embed_size,))\n', (14972, 14987), False, 'from keras.layers import Reshape\n'), ((15159, 15219), 'keras.layers.LSTM', 'LSTM', (['lstm_size'], {'return_sequences': '(False)', 'name': '"""context_lstm"""'}), "(lstm_size, return_sequences=False, name='context_lstm')\n", (15163, 15219), False, 'from keras.layers import LSTM\n'), ((15281, 15300), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(1)'}), '(axis=1)\n', (15292, 15300), False, 'from keras.layers import Concatenate\n'), ((15426, 15485), 'keras.layers.Dense', 'Dense', ([], {'units': '(2)', 'activation': '"""softplus"""', 'name': '"""context_dense"""'}), "(units=2, activation='softplus', name='context_dense')\n", (15431, 15485), False, 'from keras.layers import Dense\n'), ((16050, 16069), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(1)'}), '(axis=1)\n', (16061, 16069), False, 'from keras.layers import Concatenate\n'), ((16194, 16251), 'keras.layers.Dense', 'Dense', ([], {'units': '(2)', 'activation': '"""sigmoid"""', 'name': '"""action_dense"""'}), "(units=2, activation='sigmoid', name='action_dense')\n", (16199, 16251), False, 'from keras.layers import Dense\n'), ((16373, 16416), 'keras.layers.Lambda', 'Lambda', (['_combine_r_and_d'], {'name': '"""prediction"""'}), "(_combine_r_and_d, name='prediction')\n", (16379, 16416), False, 'from keras.layers import Lambda\n'), ((10159, 10192), 
'numpy.stack', 'np.stack', (['context_samples'], {'axis': '(0)'}), '(context_samples, axis=0)\n', (10167, 10192), True, 'import numpy as np\n'), ((10220, 10252), 'numpy.stack', 'np.stack', (['action_samples'], {'axis': '(0)'}), '(action_samples, axis=0)\n', (10228, 10252), True, 'import numpy as np\n'), ((10331, 10364), 'numpy.stack', 'np.stack', (['outcome_samples'], {'axis': '(0)'}), '(outcome_samples, axis=0)\n', (10339, 10364), True, 'import numpy as np\n'), ((6174, 6191), 'pandas.offsets.Day', 'pd.offsets.Day', (['i'], {}), '(i)\n', (6188, 6191), True, 'import pandas as pd\n'), ((10283, 10310), 'numpy.ones', 'np.ones', (['X_context.shape[0]'], {}), '(X_context.shape[0])\n', (10290, 10310), True, 'import numpy as np\n'), ((5293, 5315), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""D"""'], {}), "(1, 'D')\n", (5307, 5315), True, 'import numpy as np\n'), ((5396, 5429), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': 'self.npi_delay'}), '(days=self.npi_delay)\n', (5408, 5429), True, 'import pandas as pd\n'), ((5458, 5491), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': 'self.npi_delay'}), '(days=self.npi_delay)\n', (5470, 5491), True, 'import pandas as pd\n')]
|
"""
.. todo::
WRITEME
"""
import numpy as np
def is_iterable(obj):
"""
Robustly test whether an object is iterable.
Parameters
----------
obj : object
The object to be checked.
Returns
-------
is_iterable : bool
`True` if the object is iterable, `False` otherwise.
Notes
-----
This tests iterability by calling `iter()` and catching a `TypeError`.
Various other ways might occur to you, but they all have flaws:
* `hasattr(obj, '__len__')` will fail for objects that can be iterated
on despite not knowing their length a priori.
* `hasattr(obj, '__iter__')` will fail on objects like Theano tensors
that implement it solely to raise a `TypeError` (because Theano
tensors implement `__getitem__` semantics, Python 2.x will try
to iterate on them via this legacy method if `__iter__` is not
defined).
* `hasattr` has a tendency to swallow other exception-like objects
(`KeyboardInterrupt`, etc.) anyway, and should be avoided for this
reason in Python 2.x, but `getattr()` with a sentinel value suffers
from the exact same pitfalls above.
"""
try:
iter(obj)
except TypeError:
return False
return True
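# Editor's illustrative sketch (not part of the original module): expected results of
# is_iterable for a few common Python values.
def _example_is_iterable():
    assert is_iterable([1, 2, 3])  # lists are iterable
    assert is_iterable("abc")      # strings are iterable
    assert not is_iterable(42)     # a bare integer is not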
def contains_nan(arr):
"""
Test whether a numpy.ndarray contains any `np.nan` values.
    Parameters
    ----------
arr : np.ndarray
Returns
-------
contains_nan : bool
`True` if the array contains any `np.nan` values, `False` otherwise.
Notes
-----
Tests for the presence of `np.nan`'s using `np.isnan(np.min(ndarray))`.
This approach is faster and more memory efficient than the obvious
alternative, calling `np.any(np.isnan(ndarray))`, which requires the
construction of a boolean array with the same shape as the input array.
"""
return np.isnan(np.min(arr))
def contains_inf(arr):
"""
Test whether a numpy.ndarray contains any `np.inf` values.
    Parameters
    ----------
arr : np.ndarray
Returns
-------
contains_inf : bool
`True` if the array contains any `np.inf` values, `False` otherwise.
Notes
-----
Tests for the presence of `np.inf`'s by determining whether the
values returned by `np.nanmin(arr)` and `np.nanmax(arr)` are finite.
This approach is more memory efficient than the obvious alternative,
calling `np.any(np.isinf(ndarray))`, which requires the construction of a
boolean array with the same shape as the input array.
"""
return np.isinf(np.nanmax(arr)) or np.isinf(np.nanmin(arr))
def isfinite(arr):
"""
Test whether a numpy.ndarray contains any `np.inf` or `np.nan` values.
    Parameters
    ----------
arr : np.ndarray
Returns
-------
isfinite : bool
`True` if the array contains no np.inf or np.nan values, `False`
otherwise.
Notes
-----
Tests for the presence of `np.inf` or `np.nan` values by determining
whether the values returned by `np.min(arr)` and `np.max(arr)` are finite.
This approach is more memory efficient than the obvious alternative,
calling `np.any(np.isfinite(ndarray))`, which requires the construction of
a boolean array with the same shape as the input array.
"""
return np.isfinite(np.max(arr)) and np.isfinite(np.min(arr))
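# Editor's illustrative sketch (not part of the original module): the three checks above
# applied to small arrays; the results follow directly from the numpy reductions they wrap.
def _example_array_checks():
    assert contains_nan(np.array([1.0, np.nan]))
    assert contains_inf(np.array([1.0, np.inf]))
    assert isfinite(np.array([1.0, 2.0]))
    assert not isfinite(np.array([1.0, np.nan]))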
|
[
"numpy.nanmin",
"numpy.min",
"numpy.max",
"numpy.nanmax"
] |
[((1887, 1898), 'numpy.min', 'np.min', (['arr'], {}), '(arr)\n', (1893, 1898), True, 'import numpy as np\n'), ((2575, 2589), 'numpy.nanmax', 'np.nanmax', (['arr'], {}), '(arr)\n', (2584, 2589), True, 'import numpy as np\n'), ((2603, 2617), 'numpy.nanmin', 'np.nanmin', (['arr'], {}), '(arr)\n', (2612, 2617), True, 'import numpy as np\n'), ((3330, 3341), 'numpy.max', 'np.max', (['arr'], {}), '(arr)\n', (3336, 3341), True, 'import numpy as np\n'), ((3359, 3370), 'numpy.min', 'np.min', (['arr'], {}), '(arr)\n', (3365, 3370), True, 'import numpy as np\n')]
|
import bpy
import sys
from mathutils import Vector
import numpy as np
from abc import ABC, ABCMeta, abstractmethod
# Scene parameters
SCALE_FACTOR = 0.05 # factor used to scale an object when close enough to target
ROTATION_VEC = (3.0, 0., 0.) # rotation vector applied to an object when close enough to target
ORIGINAL_ROTATION_VEC = (0., 0., 0.) # original rotation vector for an object
DIST_LIM = 10.5 # distance threshold for which an object is influenced by a target
TARGET_SPEED = 1.5 # speed at which the target is moving in the scene
# TODO
# Still some confusing naming around different "object" types.
# One cleanup option would be to embed everything in this class.
# The major thing to take into consideration is the difference between an actual 3D blender object
# and a basic data structure with object info. For example with the current duplication method a Blender
# object shares the same mesh data with all its duplicates.
class Object(ABC):
sierpinski_scale = None # scale to use during a recursive "sierpinski" step
def __init__(self, radius: float, location: tuple, name: str):
"""
Base utility class for a 3D object
:param radius:
:param location:
:param name:
"""
self.loc = location
self.radius = radius
self.name = name
self.verts = []
self.edges = []
self.faces = []
self.mesh_data = None
self.obj = None
def _init_mesh(self):
mesh_data = bpy.data.meshes.new("{}_mesh_data".format(self.name))
mesh_data.from_pydata(self.verts, self.edges, self.faces)
mesh_data.update()
self.mesh_data = mesh_data
def _init_obj(self, link=True):
self.obj = bpy.data.objects.new(self.name, self.mesh_data)
if link:
scene = bpy.context.scene
scene.collection.objects.link(self.obj)
@staticmethod
def scale_objects(object: dict, grid_val, scale_factor=SCALE_FACTOR):
obj = object['object']
origin_scale = object['orgn_scale']
# grid value 1, object should end up with original size
if grid_val:
if obj.scale != origin_scale:
obj.scale = origin_scale
# grid value 0, object should end up scaled
else:
scaled_val = origin_scale * scale_factor
if obj.scale != scaled_val:
obj.scale = scaled_val
# keyframe change
obj.keyframe_insert("scale")
@staticmethod
def rotate_objects(object: dict, grid_val, rotation_vec=ROTATION_VEC, original_rot_vec=ORIGINAL_ROTATION_VEC):
obj = object['object']
# grid value 1, object should end up with original size
if grid_val:
if obj.rotation_euler != original_rot_vec:
obj.rotation_euler = original_rot_vec
# grid value 0, object should end up scaled
else:
if obj.rotation_euler != rotation_vec:
obj.rotation_euler = rotation_vec
# keyframe change
obj.keyframe_insert("rotation_euler")
@classmethod
def obj_replication(cls, obj: dict, max_depth: int):
"""Entry point to manage Replication-Shrink for a target object"""
object = cls(radius=obj['radius'], location=obj['location'])
obj['object'] = object.obj
sub_objs = [obj]
for i in range(max_depth):
new_sub_objs = []
for sub_obj in sub_objs:
new_sub_objs.extend(cls.replicate_shrink_step(sub_obj, i + 1))
# delete original
objs = bpy.data.objects
objs.remove(sub_obj['object'], True)
sub_objs = new_sub_objs
# Scale mesh data (all copies should follow)
for v in sub_objs[0]['object'].data.vertices:
v.co *= cls.sierpinski_scale
# Just at this point link object to scene
for sub_obj in sub_objs:
scene = bpy.context.scene
scene.collection.objects.link(sub_obj['object'])
return sub_objs
@classmethod
@abstractmethod
def replicate_shrink_step(cls, obj: dict, max_depth: int):
"""Replicates (mesh copy) the given object using "sierpinski" logic
all the resulting sub-objects are then returned"""
pass
class Cube(Object):
sierpinski_scale = 1/3
def __init__(self, radius: float, location: tuple):
super().__init__(radius=radius, location=location, name='cube')
loc = location
self.verts = [
(loc[0]+radius, loc[1]+radius, loc[2]-radius),
(loc[0]+radius, loc[1]-radius, loc[2]-radius),
(loc[0]-radius, loc[1]-radius, loc[2]-radius),
(loc[0]-radius, loc[1]+radius, loc[2]-radius),
(loc[0]+radius, loc[1]+radius, loc[2]+radius),
(loc[0]+radius, loc[1]-radius, loc[2]+radius),
(loc[0]-radius, loc[1]-radius, loc[2]+radius),
(loc[0]-radius, loc[1]+radius, loc[2]+radius),
]
self.faces = [
(0, 1, 2, 3),
(4, 7, 6, 5),
(0, 4, 5, 1),
(1, 5, 6, 2),
(2, 6, 7, 3),
(4, 0, 3, 7)
]
self._init_mesh()
self._init_obj()
@classmethod
def replicate_shrink_step(cls, cube: dict, max_depth: int):
radius = cube['radius']
loc = cube['location']
cube_obj = cube['object']
sub_cubes = []
# amount of shifting for the center of new object
center_shift = radius * (2 / 3)
for i, x in enumerate(np.linspace(loc[0] - center_shift, loc[0] + center_shift, 3)):
for j, y in enumerate(np.linspace(loc[1] - center_shift, loc[1] + center_shift, 3)):
for k, z in enumerate(np.linspace(loc[2] - center_shift, loc[2] + center_shift, 3)):
if i == j == 1 or j == k == 1 or k == i == 1:
continue
else:
cube_copy = cube_obj.copy()
# obj scaling (different from mesh one)
# keeps original dimensions, so need to keep track of depth
# cube_copy.scale = Vector((1 / 3**depth, 1 / 3**depth, 1 / 3**depth))
cube_copy.location = (x, y, z)
new_cube = {
'radius': radius * cls.sierpinski_scale,
'location': (x, y, z),
'object': cube_copy,
'orgn_scale': cube_copy.scale.copy()
}
sub_cubes.append(new_cube)
return sub_cubes
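    # Editor's worked example: the nested loops above keep 20 of the 27 candidate
    # sub-cubes per step (the 6 face centres and the body centre are skipped), so
    # Cube.obj_replication(obj, max_depth=3) ends up with 20**3 = 8000 cubes.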
class Pyramid(Object):
sierpinski_scale = 1 / 2
def __init__(self, radius: float, location: tuple):
super().__init__(radius=radius, location=location, name='pyramid')
loc = location
self.verts = [
(loc[0]+radius, loc[1]+radius, loc[2]-radius),
(loc[0]+radius, loc[1]-radius, loc[2]-radius),
(loc[0]-radius, loc[1]-radius, loc[2]-radius),
(loc[0]-radius, loc[1]+radius, loc[2]-radius),
(loc[0], loc[1], loc[2]+radius),
]
self.faces = [
(0, 1, 2, 3),
(0, 1, 4),
(1, 2, 4),
(2, 3, 4),
(3, 0, 4),
]
self.sierpinski_scale = 1 / 2
self._init_mesh()
self._init_obj()
@classmethod
def replicate_shrink_step(cls, pyramid: dict, depth: int):
radius = pyramid['radius']
loc = pyramid['location']
pyramid_object = pyramid['object']
sub_pyramids = []
# amount of shifting for the center of new object
center_shift = radius / 2
# define the five locations for the five new sub-pyramids
new_loc_top = (loc[0], loc[1], loc[2] + radius)
new_loc_1 = (loc[0] + center_shift, loc[1] + center_shift, loc[2])
new_loc_2 = (loc[0] - center_shift, loc[1] + center_shift, loc[2])
new_loc_3 = (loc[0] + center_shift, loc[1] - center_shift, loc[2])
new_loc_4 = (loc[0] - center_shift, loc[1] - center_shift, loc[2])
new_locs = [new_loc_top, new_loc_1, new_loc_2, new_loc_3, new_loc_4]
for new_loc in new_locs:
pyramid_copy = pyramid_object.copy()
# obj scaling (different from mesh one)
# keeps original dimensions, so need to keep track of depth
# pyramid_copy.scale = Vector((1 / 2**depth, 1 / 2**depth, 1 / 2**depth))
pyramid_copy.location = new_loc
new_pyramid = {
'radius': radius * cls.sierpinski_scale,
'location': new_loc,
'object': pyramid_copy,
'orgn_scale': pyramid_copy.scale.copy()
}
sub_pyramids.append(new_pyramid)
return sub_pyramids
def update_grid(objs, target):
target_loc = target.location
for obj in objs:
dist = np.linalg.norm(np.array(target_loc) - np.array(obj['location']))
if dist < DIST_LIM:
Object.scale_objects(obj, 0)
else:
Object.scale_objects(obj, 1)
# test method to move a target object along an axis
# to trigger updates to the sierpinski sub-objects in the scene
def move_target(target):
(x, y, z) = target.location
target.location = (x + np.random.rand() - 0.5,
y + np.random.rand() - 0.5,
z - TARGET_SPEED + np.random.rand() - 0.5)
target.keyframe_insert("location")
# handler called at every frame change
def frame_handler(scene, objs, target, num_frames_change):
frame = scene.frame_current
# When reaching final frame, clear handlers
if frame >= bpy.context.scene.frame_end:
bpy.app.handlers.frame_change_pre.clear()
elif (frame % num_frames_change) == 0:
move_target(target)
# update grid
update_grid(objs, target)
def main(_):
NUM_FRAMES_CHANGE = 2 # higher values enable a more fluid transformation of objects, as frames between
# keyframings interpolate the object modification taking place.
bpy.ops.mesh.primitive_ico_sphere_add(
subdivisions=4,
radius=0.3,
location=(0, 0, 30))
#target.keyframe_insert("location")
target = bpy.context.scene.objects['Empty']
obj = {
'location': (0, 0, 0),
'radius': 15,
}
objs = Cube.obj_replication(obj, max_depth=3)
#objs = Pyramid.obj_replication(obj, max_depth=3)
bpy.app.handlers.frame_change_pre.clear()
bpy.app.handlers.frame_change_pre.append(lambda x: frame_handler(x, objs, target, NUM_FRAMES_CHANGE))
if __name__ == "__main__":
main(sys.argv[1:])
|
[
"numpy.random.rand",
"bpy.ops.mesh.primitive_ico_sphere_add",
"numpy.array",
"bpy.data.objects.new",
"numpy.linspace",
"bpy.app.handlers.frame_change_pre.clear"
] |
[((10220, 10311), 'bpy.ops.mesh.primitive_ico_sphere_add', 'bpy.ops.mesh.primitive_ico_sphere_add', ([], {'subdivisions': '(4)', 'radius': '(0.3)', 'location': '(0, 0, 30)'}), '(subdivisions=4, radius=0.3, location=\n (0, 0, 30))\n', (10257, 10311), False, 'import bpy\n'), ((10648, 10689), 'bpy.app.handlers.frame_change_pre.clear', 'bpy.app.handlers.frame_change_pre.clear', ([], {}), '()\n', (10687, 10689), False, 'import bpy\n'), ((1746, 1793), 'bpy.data.objects.new', 'bpy.data.objects.new', (['self.name', 'self.mesh_data'], {}), '(self.name, self.mesh_data)\n', (1766, 1793), False, 'import bpy\n'), ((9855, 9896), 'bpy.app.handlers.frame_change_pre.clear', 'bpy.app.handlers.frame_change_pre.clear', ([], {}), '()\n', (9894, 9896), False, 'import bpy\n'), ((5621, 5681), 'numpy.linspace', 'np.linspace', (['(loc[0] - center_shift)', '(loc[0] + center_shift)', '(3)'], {}), '(loc[0] - center_shift, loc[0] + center_shift, 3)\n', (5632, 5681), True, 'import numpy as np\n'), ((5718, 5778), 'numpy.linspace', 'np.linspace', (['(loc[1] - center_shift)', '(loc[1] + center_shift)', '(3)'], {}), '(loc[1] - center_shift, loc[1] + center_shift, 3)\n', (5729, 5778), True, 'import numpy as np\n'), ((9066, 9086), 'numpy.array', 'np.array', (['target_loc'], {}), '(target_loc)\n', (9074, 9086), True, 'import numpy as np\n'), ((9089, 9114), 'numpy.array', 'np.array', (["obj['location']"], {}), "(obj['location'])\n", (9097, 9114), True, 'import numpy as np\n'), ((9442, 9458), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (9456, 9458), True, 'import numpy as np\n'), ((9493, 9509), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (9507, 9509), True, 'import numpy as np\n'), ((9559, 9575), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (9573, 9575), True, 'import numpy as np\n'), ((5819, 5879), 'numpy.linspace', 'np.linspace', (['(loc[2] - center_shift)', '(loc[2] + center_shift)', '(3)'], {}), '(loc[2] - center_shift, loc[2] + center_shift, 3)\n', (5830, 5879), True, 'import numpy as np\n')]
|
# stdlib imports
import os
from collections import OrderedDict
from datetime import datetime, timedelta
import logging
# third party imports
import numpy as np
from obspy.core.trace import Stats
# local imports
from gmprocess.io.seedname import get_channel_name, get_units_type
from gmprocess.core.stationtrace import StationTrace, PROCESS_LEVELS
from gmprocess.core.stationstream import StationStream
from gmprocess.io.utils import is_binary
DATE_FMT = "%Y/%m/%d-%H:%M:%S.%f"
GMT_OFFSET = 8 * 3600 # CWB data is in local time, GMT +8
HDR_ROWS = 22
COLWIDTH = 10
NCOLS = 4
def is_cwb(filename, config=None):
"""Check to see if file is a Taiwan Central Weather Bureau strong motion
file.
Args:
filename (str):
Path to possible CWB data file.
config (dict):
Dictionary containing configuration.
Returns:
bool: True if CWB, False otherwise.
"""
logging.debug("Checking if format is cwb.")
if is_binary(filename):
return False
try:
f = open(filename, "rt", encoding="utf-8")
line = f.readline()
f.close()
if line.startswith("#Earthquake Information"):
return True
except UnicodeDecodeError:
return False
return False
def read_cwb(filename, config=None, **kwargs):
"""Read Taiwan Central Weather Bureau strong motion file.
Args:
filename (str):
Path to possible CWB data file.
config (dict):
Dictionary containing configuration.
kwargs (ref):
Other arguments will be ignored.
Returns:
Stream: Obspy Stream containing three channels of acceleration
data (cm/s**2).
"""
logging.debug("Starting read_cwb.")
if not is_cwb(filename, config):
raise Exception(f"{filename} is not a valid CWB strong motion data file.")
f = open(filename, "rt", encoding="utf-8")
# according to the powers that defined the Network.Station.Channel.Location
# "standard", Location is a two character field. Most data providers,
# including CWB here, don't provide this. We'll flag it as "--".
data = np.genfromtxt(
filename, skip_header=HDR_ROWS, delimiter=[COLWIDTH] * NCOLS
) # time, Z, NS, EW
hdr = _get_header_info(f, data)
f.close()
head, tail = os.path.split(filename)
hdr["standard"]["source_file"] = tail or os.path.basename(head)
hdr_z = hdr.copy()
hdr_z["channel"] = get_channel_name(
hdr["sampling_rate"], is_acceleration=True, is_vertical=True, is_north=False
)
hdr_z["standard"]["horizontal_orientation"] = np.nan
hdr_z["standard"]["vertical_orientation"] = np.nan
hdr_z["standard"]["units_type"] = get_units_type(hdr_z["channel"])
hdr_h1 = hdr.copy()
hdr_h1["channel"] = get_channel_name(
hdr["sampling_rate"], is_acceleration=True, is_vertical=False, is_north=True
)
hdr_h1["standard"]["horizontal_orientation"] = np.nan
hdr_h1["standard"]["vertical_orientation"] = np.nan
hdr_h1["standard"]["units_type"] = get_units_type(hdr_h1["channel"])
hdr_h2 = hdr.copy()
hdr_h2["channel"] = get_channel_name(
hdr["sampling_rate"], is_acceleration=True, is_vertical=False, is_north=False
)
hdr_h2["standard"]["horizontal_orientation"] = np.nan
hdr_h2["standard"]["vertical_orientation"] = np.nan
hdr_h2["standard"]["units_type"] = get_units_type(hdr_h2["channel"])
stats_z = Stats(hdr_z)
stats_h1 = Stats(hdr_h1)
stats_h2 = Stats(hdr_h2)
response = {"input_units": "counts", "output_units": "cm/s^2"}
trace_z = StationTrace(data=data[:, 1], header=stats_z)
trace_z.setProvenance("remove_response", response)
trace_h1 = StationTrace(data=data[:, 2], header=stats_h1)
trace_h1.setProvenance("remove_response", response)
trace_h2 = StationTrace(data=data[:, 3], header=stats_h2)
trace_h2.setProvenance("remove_response", response)
stream = StationStream([trace_z, trace_h1, trace_h2])
return [stream]
def _get_header_info(file, data):
"""Return stats structure from various headers.
Output is a dictionary like this:
- network (str): Always TW
- station (str)
- channel (str)
- location (str): Default is '--'
- starttime (datetime)
- duration (float)
- sampling_rate (float)
- delta (float)
- npts (int)
- coordinates:
- latitude (float)
- longitude (float)
- elevation (float): Default is np.nan
- standard (Defaults are either np.nan or '')
- horizontal_orientation (float): Rotation from north (degrees)
- instrument_period (float): Period of sensor (Hz)
- instrument_damping (float): Fraction of critical
- process_time (datetime): Reported date of processing
- process_level: Either 'V0', 'V1', 'V2', or 'V3'
- station_name (str): Long form station description
- sensor_serial_number (str): Reported sensor serial
- instrument (str)
- comments (str): Processing comments
- structure_type (str)
- corner_frequency (float): Sensor corner frequency (Hz)
- units (str)
- source (str): Network source description
- source_format (str): Always cwb
- format_specific
- dc_offset_z (float)
- dc_offset_h1 (float)
- dc_offset_h2 (float)
Args:
file (TextIOWrapper): File object containing data
data (ndarray): Array of strong motion data
Returns:
dictionary: Dictionary of header/metadata information
"""
hdr = OrderedDict()
coordinates = {}
standard = {}
format_specific = {}
hdr["location"] = "--"
while True:
line = file.readline()
if line.startswith("#StationCode"):
hdr["station"] = line.split(":")[1].strip()
logging.debug(f"station: {hdr['station']}")
if line.startswith("#StationName"):
standard["station_name"] = line.split(":")[1].strip()
logging.debug(f"station_name: {standard['station_name']}")
if line.startswith("#StationLongitude"):
coordinates["longitude"] = float(line.split(":")[1].strip())
if line.startswith("#StationLatitude"):
coordinates["latitude"] = float(line.split(":")[1].strip())
if line.startswith("#StartTime"):
timestr = ":".join(line.split(":")[1:]).strip()
hdr["starttime"] = datetime.strptime(timestr, DATE_FMT)
if line.startswith("#RecordLength"):
hdr["duration"] = float(line.split(":")[1].strip())
if line.startswith("#SampleRate"):
hdr["sampling_rate"] = int(line.split(":")[1].strip())
if line.startswith("#InstrumentKind"):
standard["instrument"] = line.split(":")[1].strip()
if line.startswith("#AmplitudeMAX. U:"):
format_specific["dc_offset_z"] = float(line.split("~")[1])
if line.startswith("#AmplitudeMAX. N:"):
format_specific["dc_offset_h1"] = float(line.split("~")[1])
if line.startswith("#AmplitudeMAX. E:"):
format_specific["dc_offset_h2"] = float(line.split("~")[1])
if line.startswith("#Data"):
break
# correct start time to GMT
hdr["starttime"] = hdr["starttime"] - timedelta(seconds=GMT_OFFSET)
nrows, _ = data.shape
# Add some optional information to the header
hdr["network"] = "TW"
hdr["delta"] = 1 / hdr["sampling_rate"]
hdr["calib"] = 1.0
standard["units_type"] = "acc"
standard["units"] = "cm/s^2"
hdr["source"] = "Taiwan Central Weather Bureau"
hdr["npts"] = nrows
secs = int(data[-1, 0])
microsecs = int((data[-1, 0] - secs) * 1e6)
hdr["endtime"] = hdr["starttime"] + timedelta(seconds=secs, microseconds=microsecs)
# Set defaults
logging.warning("Setting elevation to 0.0")
coordinates["elevation"] = 0.0
if "longitude" not in coordinates:
coordinates["longitude"] = np.nan
if "latitude" not in coordinates:
coordinates["latitude"] = np.nan
standard["instrument_period"] = np.nan
standard["instrument_damping"] = np.nan
standard["process_time"] = ""
standard["process_level"] = PROCESS_LEVELS["V1"]
standard["sensor_serial_number"] = ""
standard["comments"] = ""
standard["structure_type"] = ""
standard["corner_frequency"] = np.nan
standard["source"] = (
"Taiwan Strong Motion Instrumentation Program " + "via Central Weather Bureau"
)
standard["source_format"] = "cwb"
# these fields can be used for instrument correction
# when data is in counts
standard["instrument_sensitivity"] = np.nan
standard["volts_to_counts"] = np.nan
if "station_name" not in standard:
standard["station_name"] = ""
if "instrument" not in standard:
standard["instrument"] = ""
if "dc_offset_z" not in format_specific:
format_specific["dc_offset_z"] = np.nan
if "dc_offset_h2" not in format_specific:
format_specific["dc_offset_h2"] = np.nan
if "dc_offset_h1" not in format_specific:
format_specific["dc_offset_h1"] = np.nan
# Set dictionary
hdr["standard"] = standard
hdr["coordinates"] = coordinates
hdr["format_specific"] = format_specific
return hdr
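# Editor's usage sketch (not part of the original module): 'example.dat' is a
# hypothetical path to a CWB strong-motion file; only functions defined above are used.
def _example_read(path="example.dat"):
    if not is_cwb(path):
        raise ValueError("{} is not a CWB strong motion file".format(path))
    stream = read_cwb(path)[0]
    return stream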
|
[
"logging.debug",
"gmprocess.core.stationtrace.StationTrace",
"obspy.core.trace.Stats",
"os.path.basename",
"logging.warning",
"gmprocess.core.stationstream.StationStream",
"gmprocess.io.seedname.get_channel_name",
"numpy.genfromtxt",
"datetime.datetime.strptime",
"gmprocess.io.utils.is_binary",
"datetime.timedelta",
"collections.OrderedDict",
"gmprocess.io.seedname.get_units_type",
"os.path.split"
] |
[((924, 967), 'logging.debug', 'logging.debug', (['"""Checking if format is cwb."""'], {}), "('Checking if format is cwb.')\n", (937, 967), False, 'import logging\n'), ((975, 994), 'gmprocess.io.utils.is_binary', 'is_binary', (['filename'], {}), '(filename)\n', (984, 994), False, 'from gmprocess.io.utils import is_binary\n'), ((1721, 1756), 'logging.debug', 'logging.debug', (['"""Starting read_cwb."""'], {}), "('Starting read_cwb.')\n", (1734, 1756), False, 'import logging\n'), ((2160, 2235), 'numpy.genfromtxt', 'np.genfromtxt', (['filename'], {'skip_header': 'HDR_ROWS', 'delimiter': '([COLWIDTH] * NCOLS)'}), '(filename, skip_header=HDR_ROWS, delimiter=[COLWIDTH] * NCOLS)\n', (2173, 2235), True, 'import numpy as np\n'), ((2338, 2361), 'os.path.split', 'os.path.split', (['filename'], {}), '(filename)\n', (2351, 2361), False, 'import os\n'), ((2477, 2576), 'gmprocess.io.seedname.get_channel_name', 'get_channel_name', (["hdr['sampling_rate']"], {'is_acceleration': '(True)', 'is_vertical': '(True)', 'is_north': '(False)'}), "(hdr['sampling_rate'], is_acceleration=True, is_vertical=\n True, is_north=False)\n", (2493, 2576), False, 'from gmprocess.io.seedname import get_channel_name, get_units_type\n'), ((2736, 2768), 'gmprocess.io.seedname.get_units_type', 'get_units_type', (["hdr_z['channel']"], {}), "(hdr_z['channel'])\n", (2750, 2768), False, 'from gmprocess.io.seedname import get_channel_name, get_units_type\n'), ((2818, 2917), 'gmprocess.io.seedname.get_channel_name', 'get_channel_name', (["hdr['sampling_rate']"], {'is_acceleration': '(True)', 'is_vertical': '(False)', 'is_north': '(True)'}), "(hdr['sampling_rate'], is_acceleration=True, is_vertical=\n False, is_north=True)\n", (2834, 2917), False, 'from gmprocess.io.seedname import get_channel_name, get_units_type\n'), ((3080, 3113), 'gmprocess.io.seedname.get_units_type', 'get_units_type', (["hdr_h1['channel']"], {}), "(hdr_h1['channel'])\n", (3094, 3113), False, 'from gmprocess.io.seedname import get_channel_name, get_units_type\n'), ((3163, 3263), 'gmprocess.io.seedname.get_channel_name', 'get_channel_name', (["hdr['sampling_rate']"], {'is_acceleration': '(True)', 'is_vertical': '(False)', 'is_north': '(False)'}), "(hdr['sampling_rate'], is_acceleration=True, is_vertical=\n False, is_north=False)\n", (3179, 3263), False, 'from gmprocess.io.seedname import get_channel_name, get_units_type\n'), ((3426, 3459), 'gmprocess.io.seedname.get_units_type', 'get_units_type', (["hdr_h2['channel']"], {}), "(hdr_h2['channel'])\n", (3440, 3459), False, 'from gmprocess.io.seedname import get_channel_name, get_units_type\n'), ((3475, 3487), 'obspy.core.trace.Stats', 'Stats', (['hdr_z'], {}), '(hdr_z)\n', (3480, 3487), False, 'from obspy.core.trace import Stats\n'), ((3503, 3516), 'obspy.core.trace.Stats', 'Stats', (['hdr_h1'], {}), '(hdr_h1)\n', (3508, 3516), False, 'from obspy.core.trace import Stats\n'), ((3532, 3545), 'obspy.core.trace.Stats', 'Stats', (['hdr_h2'], {}), '(hdr_h2)\n', (3537, 3545), False, 'from obspy.core.trace import Stats\n'), ((3628, 3673), 'gmprocess.core.stationtrace.StationTrace', 'StationTrace', ([], {'data': 'data[:, 1]', 'header': 'stats_z'}), '(data=data[:, 1], header=stats_z)\n', (3640, 3673), False, 'from gmprocess.core.stationtrace import StationTrace, PROCESS_LEVELS\n'), ((3745, 3791), 'gmprocess.core.stationtrace.StationTrace', 'StationTrace', ([], {'data': 'data[:, 2]', 'header': 'stats_h1'}), '(data=data[:, 2], header=stats_h1)\n', (3757, 3791), False, 'from gmprocess.core.stationtrace import StationTrace, 
PROCESS_LEVELS\n'), ((3864, 3910), 'gmprocess.core.stationtrace.StationTrace', 'StationTrace', ([], {'data': 'data[:, 3]', 'header': 'stats_h2'}), '(data=data[:, 3], header=stats_h2)\n', (3876, 3910), False, 'from gmprocess.core.stationtrace import StationTrace, PROCESS_LEVELS\n'), ((3981, 4025), 'gmprocess.core.stationstream.StationStream', 'StationStream', (['[trace_z, trace_h1, trace_h2]'], {}), '([trace_z, trace_h1, trace_h2])\n', (3994, 4025), False, 'from gmprocess.core.stationstream import StationStream\n'), ((5592, 5605), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5603, 5605), False, 'from collections import OrderedDict\n'), ((7846, 7889), 'logging.warning', 'logging.warning', (['"""Setting elevation to 0.0"""'], {}), "('Setting elevation to 0.0')\n", (7861, 7889), False, 'import logging\n'), ((2407, 2429), 'os.path.basename', 'os.path.basename', (['head'], {}), '(head)\n', (2423, 2429), False, 'import os\n'), ((7315, 7344), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'GMT_OFFSET'}), '(seconds=GMT_OFFSET)\n', (7324, 7344), False, 'from datetime import datetime, timedelta\n'), ((7774, 7821), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'secs', 'microseconds': 'microsecs'}), '(seconds=secs, microseconds=microsecs)\n', (7783, 7821), False, 'from datetime import datetime, timedelta\n'), ((5856, 5899), 'logging.debug', 'logging.debug', (['f"""station: {hdr[\'station\']}"""'], {}), '(f"station: {hdr[\'station\']}")\n', (5869, 5899), False, 'import logging\n'), ((6022, 6080), 'logging.debug', 'logging.debug', (['f"""station_name: {standard[\'station_name\']}"""'], {}), '(f"station_name: {standard[\'station_name\']}")\n', (6035, 6080), False, 'import logging\n'), ((6456, 6492), 'datetime.datetime.strptime', 'datetime.strptime', (['timestr', 'DATE_FMT'], {}), '(timestr, DATE_FMT)\n', (6473, 6492), False, 'from datetime import datetime, timedelta\n')]
|
import numpy as np
import gym
from gym import spaces
import math
MAX_MARCH = 20
EPSILON = 0.1
DEG_TO_RAD = 0.0174533
WINDOW_SIZE = [300, 300]
#
# Objects
#
def generate_box(pos=None, size=[10, 25], inside_window=True, color=(255, 255, 255), is_goal=False,
is_visible=True, is_obstacle=True):
'''
Generate a box with width and height drawn randomly uniformly from size[0] to size[1]
if inside_window is True, we force the box to stay inside the window
'''
box_size = np.random.uniform([size[0], size[0]], [size[1], size[1]])
if pos is None:
if inside_window:
pos = np.random.uniform([box_size[0], box_size[1]],
[WINDOW_SIZE[0] - box_size[0], WINDOW_SIZE[1] - box_size[1]])
else:
pos = np.random.uniform(WINDOW_SIZE)
    return Box(pos, box_size, color=color, is_goal=is_goal, is_visible=is_visible, is_obstacle=is_obstacle)
def generate_circle(pos=None, radius=[10, 25], inside_window=True, color=(255, 255, 255), is_goal=False,
is_visible=True, is_obstacle=True):
circ_rad = np.random.uniform(radius[0], radius[1])
if pos is None:
if inside_window:
pos = np.random.uniform([circ_rad, circ_rad], [WINDOW_SIZE[0]-circ_rad, WINDOW_SIZE[1]-circ_rad])
else:
pos = np.random.uniform(WINDOW_SIZE)
    return Circle(pos, circ_rad, color=color, is_goal=is_goal, is_visible=is_visible, is_obstacle=is_obstacle)
def dist(v):
'''calculate length of vector'''
return np.linalg.norm(v)
class Circle():
def __init__(self, center, radius, color=(255, 255, 255), is_goal=False, is_visible=True,
is_obstacle=True):
self.center = center
self.radius = radius
self.color = color
self.is_goal = is_goal
self.is_visible = is_visible
self.is_obstacle = is_obstacle
self.objects_type = 'circle'
def sdf(self, p):
return dist(self.center - p) - self.radius
def draw(self):
pygame.draw.circle(display, self.color, self.center, self.radius)
class Box():
def __init__(self, center, size, color=(255, 255, 255), is_goal=False, is_visible=True,
is_obstacle=True):
self.center = center
self.size = size #this is a size 2 array for length and height
self.color = color
self.rect = pygame.Rect(center-size, size*2)
self.is_goal = is_goal
self.is_visible = is_visible
self.is_obstacle = is_obstacle
self.objects_type = 'box'
def sdf(self, p):
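        # Signed distance to an axis-aligned box: `offset` is the point's per-axis distance
        # from the centre minus the half-extents; the clipped-positive part measures the
        # distance when outside the box, and the largest (negative) component gives the
        # signed distance when inside.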
offset = np.abs(p-self.center) - self.size
unsigned_dist = dist(np.clip(offset, 0, np.inf))
dist_inside_box = np.max(np.clip(offset, -np.inf, 0))
return unsigned_dist + dist_inside_box
def draw(self):
pygame.draw.rect(display, self.color, self.rect)
#
# Character Class
#
class Ray():
def __init__(self, start, angle, color='white', render_march=False):
'''
Ray for ray marching
if render_march is True, then we render the sdf circles used to calculate march
'''
self.start = start
self.angle = angle
self.color = color
self.render_march = render_march
self.touched_obj = None
self.obj_dist = np.inf
self.sdf = None
def update(self, start=None, angle=None):
'''
update position and angle, perform march, determine object and distance
'''
if start is not None:
self.start = start
if angle is not None:
self.angle = angle
self.march()
def march(self):
'''
perform ray march, find collision with object
'''
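        # Standard sphere-tracing loop: at each step the scene SDF gives the radius of a
        # collision-free sphere around p, so the ray can safely advance by that distance
        # until it is within EPSILON of a surface or MAX_MARCH iterations are used up.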
depth = 0
p = self.start
for i in range(MAX_MARCH):
dist, obj = self.sdf(p)
depth += dist
if self.render_march:
pygame.draw.circle(display, (255, 255, 255, 0.3), p, dist, width=1)
if dist < EPSILON:
self.touched_obj = obj
self.obj_dist = depth
return depth, obj
else:
p = p + np.array([np.cos(self.angle), np.sin(self.angle)]) * dist
self.touched_obj = obj
self.obj_dist = depth
return depth, obj
def draw(self):
end = self.start + np.array([np.cos(self.angle), np.sin(self.angle)]) * self.obj_dist
pygame.draw.line(display, self.color, self.start, end)
class Character:
    def __init__(self, pos=None, angle=0, color='yellow', size=5,
fov=120*DEG_TO_RAD, num_rays=30, render_rays=True, max_depth=424):
'''
Generate a character that can move through the window
pos: starting position
        angle: starting angle in radians (always wrapped to the range -pi to pi)
color: color
size: size
fov: range of angles character can see using rays
num_rays: fidelity of depth perception
        render_rays: whether or not to draw the character's rays
'''
        # copy so each Character gets its own position list (move() mutates it in place)
        self.pos = list(pos) if pos is not None else [WINDOW_SIZE[0] / 2, WINDOW_SIZE[1] / 2]
self.angle = (angle + np.pi) % (2*np.pi) - np.pi
self.color = color
self.size = size
self.fov = fov
self.ray_splits = fov / num_rays
self.render_rays = render_rays
self.num_rays = num_rays
self.max_depth = max_depth
self.obstacle_sdf = None
self.visible_sdf = None
self.rays = []
fov_start = self.angle - self.fov/2
for i in range(num_rays):
self.rays.append(Ray(self.pos, fov_start + i*self.ray_splits))
# print(len(self.rays))
# print(self.num_rays)
def update_sdf_funcs(self, obstacle_sdf, visible_sdf):
'''
Update the current held sdf functions which allow the character
to calculate distance to objects and for rays
'''
self.obstacle_sdf = obstacle_sdf
self.visible_sdf = visible_sdf
fov_start = self.angle - self.fov/2
for i in range(self.num_rays):
self.rays[i].sdf = visible_sdf
self.rays[i].update(start=self.pos, angle=fov_start + i*self.ray_splits)
def update_rays(self):
'''
update the angle of the rays using own position and angle
'''
fov_start = self.angle - self.fov/2
for i in range(self.num_rays):
self.rays[i].update(start=self.pos, angle=fov_start + i*self.ray_splits)
def draw_rays(self):
'''
draw the rays coming from character
'''
for ray in self.rays:
ray.draw()
def draw(self):
'''
draw the character
'''
point1 = [self.pos[0] - (math.cos(self.angle+0.3))*self.size,
self.pos[1] - (math.sin(self.angle+0.3))*self.size]
point2 = [self.pos[0] - math.cos(self.angle)*self.size*.8, self.pos[1] - math.sin(self.angle)*self.size*.8]
point3 = [self.pos[0] - (math.cos(self.angle-0.3))*self.size,
self.pos[1] - (math.sin(self.angle-0.3))*self.size]
pygame.draw.polygon(
display,
self.color,
[self.pos, point1, point2, point3, self.pos]
)
if self.render_rays:
self.draw_rays()
def move(self, speed=0.5):
'''
move in the faced direction with number of pixels of speed
collision detection uses the same ray marching algorithm
after moving, update the rays
'''
collide_with_object = self.march_collision_detection(speed)
if collide_with_object is False:
self.pos[0] += math.cos(self.angle) * speed
self.pos[1] += math.sin(self.angle) * speed
else:
#collided with object, move with the given depth
dist_to_obj = collide_with_object[0]
self.pos[0] += math.cos(self.angle) * dist_to_obj
self.pos[1] += math.sin(self.angle) * dist_to_obj
self.update_rays()
return collide_with_object
def march_collision_detection(self, max_dist):
'''
perform ray march, used for collision detection. The max_dist is the speed we are
moving at. If the max_dist exceeds the sdf (i.e., we are colliding with an object),
then return the distance to the collided object
If sdf exceeds max_dist, then we have not collided on our path, so return False
(i.e., no object hit)
returns:
False - if no object collided with
dist, obj - if colliding with an object, return the distance that we are allowed to
travel and the object
'''
depth = 0
p = self.pos
for i in range(MAX_MARCH):
dist, obj = self.obstacle_sdf(p)
if dist < EPSILON:
#we have collided before passing the requisite distance
return depth-2*EPSILON, obj
if depth + dist > max_dist:
#we have enough room to move on the desired path
return False
else:
#we continue the march
depth += dist
p = p + np.array([np.cos(self.angle), np.sin(self.angle)]) * dist
return depth, obj
def rotate(self, angle=0.05):
self.angle += angle
self.angle = (self.angle + np.pi) % (2*np.pi) - np.pi
self.update_rays()
def ray_obs(self):
'''
Get all rays and their distances to objects
normalize_depth: divide depth readings by value
'''
ray_colors = []
ray_depths = []
for ray in self.rays:
# ray_colors.append(colors_dict[ray.touched_obj.color])
ray_colors.append(ray.touched_obj.color)
ray_depths.append(ray.obj_dist)
# if normalize_depth:
# ray_depths = np.array(ray_depths) / normalize_depth
# else:
# ray_depths = np.array(ray_depths)
ray_colors = np.array(ray_colors)
# background_colors = np.full(ray_colors.shape, 0)
ray_depths = np.clip(ray_depths, 0, self.max_depth) / self.max_depth
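        # shade each ray's RGB reading by normalized depth: closer surfaces appear brighter,
        # surfaces at or beyond max_depth fade to black; colours are scaled to [0, 1]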
visual = (1 - ray_depths.reshape(-1, 1)) * ray_colors / 255
# return ray_depths, ray_colors
return visual
def randomize_location_and_angle(character, goal=None, world_size=[300, 300], sdf_func=None, sep=True):
'''
create a random location and start direction for the character
noting that we do not allow spawning into objects
sep: if set to True, we will make sure character has a minimum distance away
from the goal that is at least half the max distance possible from goal
to end of window
'''
#max distance from goal to end of window
    max_goal_sep = dist(np.max([np.array(world_size) - goal.center, goal.center], axis=0))
    searching = True
    while searching:
        pos = np.random.uniform(world_size)
goal_sep = dist(goal.center - pos)
if sdf_func(pos)[0] > 0 and (not sep or goal_sep > max_goal_sep / 2):
#position is okay
searching = False
character.pos = pos
character.angle = np.random.uniform(6.28)
# character.pos = np.array([100, 100])
# character.angle = 0
character.update_rays()
#
# Nav Environments
#
class GeneralNav(gym.Env):
metadata = {"render.modes": ['rgb_array', 'human'], 'video.frames_per_second': 24}
def __init__(self, num_rays=30, max_steps=200, num_objects=5,
rew_structure='dist', give_heading=0, verbose=0, flat=True,
world_gen_func=None, world_gen_params={}, world_size=[300, 300], skeleton=True):
'''
General Nav environment which can be used to test some general pygame things and see
that all of the object and distance detection things are working
When inheriting, should make sure to change the functions
step(), reset(), get_observation(), generate_world()
rew_structure: 'dist' - reward given based on distance to goal
'goal' - reward only given when goal reached
give_heading: whether to additionally give a distance and direction to goal
flat: whether to give observations in a flattened state
world_gen_func: a function can be passed to manually create a world
using some other rules. Note that it needs to generate objects, a goal, and
set the agent position and heading
The character will be passed as the argument
'''
super(GeneralNav, self).__init__()
if 'pygame' not in globals():
global pygame
import pygame
if not skeleton:
print('generating general')
self.total_rewards = 0
self.give_heading = give_heading
self.flat = flat
if give_heading:
self.observation_space = spaces.Box(low=0, high=1, shape=((num_rays + 1)*3,))
else:
# self.observation_space = spaces.Box(low=0, high=1, shape=(num_rays*2,), dtype=np.float)
self.observation_space = spaces.Box(low=0, high=1, shape=(num_rays*3,))
self.action_space = spaces.Discrete(4) #turn left, forward, right as actions
self.max_steps = max_steps
self.current_steps = 0
self.character = Character(max_depth=dist(world_size))
self.num_objects = num_objects
self.num_rays = num_rays
self.rew_structure = rew_structure
self.verbose = verbose
self.objects = []
self.world_gen_func = world_gen_func
self.world_gen_params = world_gen_params
self.world_size = world_size
if self.world_gen_func is None:
self.generate_world()
# randomize_location_and_angle(self.character)
else:
self.world_gen_func(self.character, **self.world_gen_params)
def step(self, action):
reward = -1
collide_with_object = False
done = False
info = {}
if action == 0:
self.character.rotate(-0.1)
if action == 1:
collide_with_object = self.character.move(10)
if action == 2:
self.character.rotate(0.1)
if action == 3:
pass
if self.rew_structure == 'dist':
            goal = self.objects[-1]
dist_to_goal = np.clip(dist(goal.center - self.character.pos), 0, 1000) / 1000
reward = float(-dist_to_goal)
if collide_with_object is not False:
obj = collide_with_object[1]
if obj.is_goal:
if self.verbose:
print('goal reached!')
reward = float(100)
done = True
else:
# reward = -10
reward = float(-1)
observation = self.get_observation()
if self.current_steps > self.max_steps:
done = True
self.current_steps += 1
self.total_rewards += reward
if done and self.verbose:
print('done, total_reward:{}'.format(self.total_rewards))
return observation, reward, done, info
def get_observation(self):
# ray_depths, ray_colors = self.character.ray_obs()
# return np.append(ray_depths, ray_colors)
if self.give_heading > 0:
#tell where the goal is distance and heading
ray_obs = self.character.ray_obs()
            goal = self.objects[-1]
dist_to_goal = np.clip(dist(goal.center - self.character.pos), 0, 1000) / 1000
heading = goal.center - self.character.pos
heading = np.arctan2(heading[1], heading[0])
if self.give_heading == 1:
#only give distance to goal
obs = np.vstack([ray_obs, [dist_to_goal, 0, 0]])
elif self.give_heading == 2:
#give distance and angle to goal
obs = np.vstack([ray_obs, [dist_to_goal, heading/3.14, 0]])
elif self.give_heading == 3:
#give distance and angle to goal and current agent angle
obs = np.vstack([ray_obs, [dist_to_goal, heading/3.14, self.character.angle]])
if self.flat:
return np.array(obs.reshape(-1), dtype='float')
else:
return np.array(obs, dtype='float')
else:
if self.flat:
return np.array(self.character.ray_obs().reshape(-1), dtype='float')
else:
return np.array(self.character.ray_obs(), dtype='float')
    def reset(self):
        self.generate_world()
        self.current_steps = 0
        return self.get_observation()
def generate_walls(self):
self.objects.append(Box(np.array([0, 0]), np.array([1, self.world_size[1]]), color=(0, 255, 0)))
self.objects.append(Box(np.array([0, 0]), np.array([self.world_size[0], 1]), color=(0, 255, 0)))
self.objects.append(Box(np.array([0, self.world_size[1]]), np.array([self.world_size[0], 1]), color=(0, 255, 0)))
self.objects.append(Box(np.array([self.world_size[0], 0]), np.array([1, self.world_size[1]]), color=(0, 255, 0)))
def generate_box(self, pos=None, size=[10, 25], inside_window=True, color=(255, 255, 255), is_goal=False,
is_visible=True, is_obstacle=True):
'''
Generate a box with width and height drawn randomly uniformly from size[0] to size[1]
if inside_window is True, we force the box to stay inside the window
'''
box_size = np.random.uniform([size[0], size[0]], [size[1], size[1]])
if pos is None:
if inside_window:
pos = np.random.uniform([box_size[0], box_size[1]],
[self.world_size[0] - box_size[0], self.world_size[1] - box_size[1]])
else:
pos = np.random.uniform(self.world_size)
if inside_window:
return Box(pos, box_size, color=color, is_goal=is_goal, is_visible=is_visible, is_obstacle=is_obstacle)
else:
return Box(pos, box_size, color=color, is_goal=is_goal, is_visible=is_visible, is_obstacle=is_obstacle)
def generate_circle(self, pos=None, radius=[10, 25], inside_window=True, color=(255, 255, 255), is_goal=False,
is_visible=True, is_obstacle=True):
circ_rad = np.random.uniform(radius[0], radius[1])
if pos is None:
if inside_window:
pos = np.random.uniform([circ_rad, circ_rad], [self.world_size[0]-circ_rad, self.world_size[1]-circ_rad])
else:
pos = np.random.uniform(self.world_size)
if inside_window:
return Circle(pos, circ_rad, color=color, is_goal=is_goal, is_visible=is_visible, is_obstacle=is_obstacle)
else:
return Circle(pos, circ_rad, color=color, is_goal=is_goal, is_visible=is_visible, is_obstacle=is_obstacle)
def generate_world(self):
'''
World generation should end up with a list of objects as self.objects
Should end by calling
self.generate_walls (optional to include walls)
self.visible_objects, self.obstacles = self.decompose_objects(self.objects)
obstacle_sdf = self.get_sdf_func('obstacle')
visible_sdf = self.get_sdf_func('visible')
self.character.update_sdf_funcs(obstacle_sdf, visible_sdf)
'''
boxes = [self.generate_box() for i in range(5)]
circles = [self.generate_circle() for i in range(5)]
self.objects = boxes + circles
self.generate_walls()
self.visible_objects, self.obstacles, self.all_objects = self.decompose_objects(self.objects)
obstacle_sdf = self.get_sdf_func('obstacle')
visible_sdf = self.get_sdf_func('visible')
self.character.update_sdf_funcs(obstacle_sdf, visible_sdf)
def decompose_objects(self, objects):
'''
Take a list of objects and turn them into a dictionary
of usable pieces
        We need two lists, one for visible objects (which vision rays
will use for collision detection), and obstacle objects
(which the player uses for collision detection).
Goals are not inherently obstacles, so when making a goal, make sure
        to decide whether it should have vision/collision detection included
'''
type_box = type(generate_box())
type_circle = type(generate_circle())
visible_objects = {'box_centers': [], 'box_sizes': [], 'boxes': [],
'circle_centers': [], 'circle_radii': [], 'circles': []}
obstacles = {'box_centers': [], 'box_sizes': [], 'boxes': [],
'circle_centers': [], 'circle_radii': [], 'circles': []}
all_objects = {'box_centers': [], 'box_sizes': [], 'boxes': [],
'circle_centers': [], 'circle_radii': [], 'circles': []}
for obj in objects:
if type(obj) == type_box:
all_objects['box_centers'].append(obj.center)
all_objects['box_sizes'].append(obj.size)
all_objects['boxes'].append(obj)
if obj.is_visible:
visible_objects['box_centers'].append(obj.center)
visible_objects['box_sizes'].append(obj.size)
visible_objects['boxes'].append(obj)
if obj.is_obstacle:
obstacles['box_centers'].append(obj.center)
obstacles['box_sizes'].append(obj.size)
obstacles['boxes'].append(obj)
elif type(obj) == type_circle:
all_objects['circle_centers'].append(obj.center)
all_objects['circle_radii'].append(obj.radius)
all_objects['circles'].append(obj)
if obj.is_visible:
visible_objects['circle_centers'].append(obj.center)
visible_objects['circle_radii'].append(obj.radius)
visible_objects['circles'].append(obj)
if obj.is_obstacle:
obstacles['circle_centers'].append(obj.center)
obstacles['circle_radii'].append(obj.radius)
obstacles['circles'].append(obj)
else:
raise Exception('Invalid object not of type box or circle in objects')
for key in visible_objects:
if key not in ['boxes', 'circles']:
visible_objects[key] = np.array(visible_objects[key])
for key in obstacles:
if key not in ['boxes', 'circles']:
obstacles[key] = np.array(obstacles[key])
return visible_objects, obstacles, all_objects
def box_sdfs(self, p, objects):
'''
compute all the sdf functions for boxes using global variables
box_centers
box_sizes
both are m x 2 arrays with each row representing a box
'''
box_centers = objects['box_centers']
box_sizes = objects['box_sizes']
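        # vectorised counterpart of Box.sdf, evaluated against every box at once (one row per box)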
if len(box_centers) > 0:
offset = np.abs(p - box_centers) - box_sizes
unsigned_dist = np.linalg.norm(np.clip(offset, 0, np.inf), axis=1)
dist_inside_box = np.max(np.clip(offset, -np.inf, 0), axis=1)
dists = unsigned_dist + dist_inside_box
return dists
else:
return np.array([])
def circle_sdfs(self, p, objects):
'''
compute all the sdf functions for circles using global variables
circle_centers (m x 2 array)
circle_radii (m x 1 array)
both arrays are 2 dimensional
'''
circle_centers = objects['circle_centers']
circle_radii = objects['circle_radii']
if len(circle_centers) > 0:
return np.linalg.norm((circle_centers - p), axis=1) - circle_radii
else:
return np.array([])
def scene_sdf(self, p, objects):
'''
Perform an sdf on the objects passed
The objects passed should be those generated by the decompose_objects
function
'''
box_dists = self.box_sdfs(p, objects)
circle_dists = self.circle_sdfs(p, objects)
dists = np.append(box_dists, circle_dists)
min_dist = np.min(dists)
obj_index = np.argmin(dists)
obj_select_list = objects['boxes'] + objects['circles']
return np.min(dists), obj_select_list[obj_index]
def get_sdf_func(self, typ='visible'):
'''
Get an sdf function to be passed down to the character and rays
'''
if typ == 'visible':
def sdf(p):
return self.scene_sdf(p, self.visible_objects)
return sdf
elif typ == 'obstacle':
def sdf(p):
return self.scene_sdf(p, self.obstacles)
return sdf
elif typ == 'all':
def sdf(p):
return self.scene_sdf(p, self.all_objects)
return sdf
else:
raise Exception('Invalid object type for sdf generator')
def render(self, mode='rgb_array'):
'''
Render out the scene using pygame. If mode=='human', render it to the screen
Otherwise only return an rgb_array of pixel colors using pygame
'''
if 'screen' not in globals():
pygame.init()
if mode == 'human':
globals()['screen'] = pygame.display.set_mode([self.world_size[0], self.world_size[1] + 10])
globals()['display'] = pygame.Surface([self.world_size[0], self.world_size[1] + 10])
display.fill((0, 0, 0))
self.character.draw()
self.draw_character_view()
for obj in self.objects:
obj.draw()
if mode == 'human':
screen.blit(display, (0, 0))
pygame.display.update()
if mode == 'rgb_array':
return pygame.surfarray.pixels3d(display)
def draw_character_view(self):
length = self.world_size[0] / self.num_rays
colors = self.character.ray_obs() * 255
for i in range(self.num_rays):
            rect = pygame.Rect([i * length, self.world_size[1], length, 10])
pygame.draw.rect(display, colors[i], rect)
class MorrisNav(GeneralNav):
metadata = {"render.modes": ['rgb_array', 'human'], 'video.frames_per_second': 24}
def __init__(self, num_rays=30, max_steps=None, give_heading=0, verbose=0,
platform_visible=False, ep_struct=1, platform_size=10, world_size=[300, 300],
platform_randomization=1, platform_randomization_spread=20,
global_cues=1, platform_fixed_duration=10, character_sep=False,
reward_shift=0, platform_reward=100):
'''
rew_structure: 'dist' - reward given based on distance to goal
'goal' - reward only given when goal reached
give_heading: whether to additionally give a distance and direction to goal
platform_visible: whether platform should be made visible
max_steps: how many steps an episode should last - default depends on episode structure
!!
ep_struct: important variable about what kind of test we will perform
1: the platform position does not reset between episodes, episodes are 200 steps max
2: the platform position resets each episode, and if the agent stays on a platform
for a while, rewards will be given and position reset
(implement later)
            3: agent must stay on the platform for platform_fixed_duration timesteps before
                reward is given and the episode resets
4: agent must explicitly perform an action to say when it is on the platform (not implemented)
!!
        platform_randomization: how the platform position will be randomized
1: fixed positions in one of four quadrants
2: some spot randomized close to the quadrant spots (given by platform_randomization_spread)
3: uniform random
global_cues: what global cues will be provided to the agent (not implemented)
1: all walls colored differently
2: all walls white with a "poster" hanging up
observation space: each ray gives an rgb value depending on distance from object, so this
gives num_rays*3 observations. Additionally a flag will be on/off depending on whether
the agent is currently on a platform
        platform_fixed_duration: once the agent reaches the platform, it will no longer be allowed to
        move forward, only rotate (mimicking the "stay on platform and look around" phase). This controls
how many timesteps this happens for
character_sep: whether character should be forced to a randomized position far from platform
reward_shift: value the reward should be centered on (e.g., -1 will make every time step give
-1 reward, vs. 0 where the goal gives 1 reward)
'''
super(MorrisNav, self).__init__()
if 'pygame' not in globals():
global pygame
import pygame
self.total_rewards = 0
self.give_heading = give_heading
self.ep_struct = ep_struct
self.platform_visible = platform_visible
self.platform_size = platform_size
self.platform_randomization = platform_randomization
self.platform_randomization_spread = platform_randomization_spread
self.world_size = world_size
self.global_cues = global_cues
self.platform_fixed_duration = platform_fixed_duration
self.character_sep = character_sep
self.reward_shift = reward_shift
self.platform_reward = platform_reward
self.num_rays = num_rays
if give_heading:
self.observation_space = spaces.Box(low=0, high=1, shape=((num_rays + 1)*3 + 1,))
else:
# self.observation_space = spaces.Box(low=0, high=1, shape=(num_rays*2,), dtype=np.float)
self.observation_space = spaces.Box(low=0, high=1, shape=(num_rays*3 + 1,))
self.action_space = spaces.Discrete(4) #turn left, forward, right as actions
        if max_steps is None:
            if ep_struct == 1 or ep_struct == 3:
                self.max_steps = 200
            if ep_struct == 2:
                self.max_steps = 1000
        else:
            self.max_steps = max_steps
self.current_steps = 0
self.duration_on_platform = 0
self.on_platform = False
self.character = Character(max_depth=dist(world_size))
self.verbose = verbose
self.objects = []
self.goal = None
self.generate_world()
def generate_world(self):
self.objects = []
if self.platform_randomization < 3:
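            # modes 1 and 2: place the platform at the centre of one of the four quadrants,
            # optionally jittered by platform_randomization_spread (mode 2)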
quadrant_locations = np.array([self.world_size[0] / 4, self.world_size[1] / 4])
multipliers = np.array([1, 3])
randoms = np.random.choice(2, size=(2))
multipliers = multipliers[randoms] #get how much the x/y values should be multiplied by
pos = quadrant_locations * multipliers
if self.platform_randomization == 2:
#add a spread to the platform location from quadrant position
pos += np.random.uniform(-self.platform_randomization_spread, self.platform_randomization_spread,
size=(2))
elif self.platform_randomization == 3:
pos = None
platform = self.generate_box(pos=pos, size=[self.platform_size, self.platform_size], is_goal=True,
is_visible=self.platform_visible, is_obstacle=False)
self.objects.append(platform)
self.goal = platform
self.generate_walls()
self.visible_objects, self.obstacles, self.all_objects = self.decompose_objects(self.objects)
obstacle_sdf = self.get_sdf_func('obstacle')
visible_sdf = self.get_sdf_func('visible')
self.character.update_sdf_funcs(obstacle_sdf, visible_sdf)
def generate_walls(self):
if self.global_cues == 1:
self.objects.append(Box(np.array([0, 0]), np.array([1, self.world_size[1]]), color=(255, 0, 0)))
self.objects.append(Box(np.array([0, 0]), np.array([self.world_size[0], 1]), color=(0, 255, 0)))
self.objects.append(Box(np.array([0, self.world_size[1]]), np.array([self.world_size[0], 1]), color=(0, 0, 255)))
self.objects.append(Box(np.array([self.world_size[0], 0]), np.array([1, self.world_size[1]]), color=(255, 255, 255)))
elif self.global_cues == 2:
pass
def step(self, action):
reward = 0
collide_with_object = False
done = False
info = {}
if action == 0:
self.character.rotate(-0.1)
if action == 1:
if self.ep_struct >= 3 or not self.on_platform:
#if on the platform, must now be fixed onto it
collide_with_object = self.character.move(3)
if action == 2:
self.character.rotate(0.1)
if action == 3:
pass
# if collide_with_object is not False:
# obj = collide_with_object[1]
if self.on_platform:
self.duration_on_platform += 1
if self.ep_struct <= 2:
reward = self.platform_reward
if self.duration_on_platform >= self.platform_fixed_duration:
if self.ep_struct == 1:
#resetting episode in ep_struct 1
done = True
elif self.ep_struct == 2:
#only reset position in ep_struct 2, episode concludes at end of time
self.reset_character()
elif self.ep_struct == 3:
reward = self.platform_reward
done = True
observation = self.get_observation()
if self.current_steps > self.max_steps:
done = True
reward += self.reward_shift
self.current_steps += 1
self.total_rewards += reward
if done and self.verbose:
print('done, total_reward:{}'.format(self.total_rewards))
return observation, reward, done, info
def get_observation(self):
'''
Get observation reading the colors of the rays and also whether on platform or not
'''
# ray_depths, ray_colors = self.character.ray_obs()
# return np.append(ray_depths, ray_colors)
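        # on-platform test: the agent counts as on the goal if it lies within the platform's
        # axis-aligned half-extents along both axes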
self.on_platform = np.all(np.abs(self.goal.center - self.character.pos) < self.goal.size)
if self.give_heading > 0:
raise Exception('Not implemented a give_heading > 0 condition for observation')
#tell where the goal is distance and heading
ray_obs = self.character.ray_obs()
goal = self.goal
dist_to_goal = np.clip(dist(goal.center - self.character.pos), 0, 1000) / 1000
heading = goal.center - self.character.pos
heading = np.arctan2(heading[1], heading[0])
if self.give_heading == 1:
#only give distance to goal
obs = np.vstack([ray_obs, [dist_to_goal, 0, 0]])
elif self.give_heading == 2:
#give distance and angle to goal
obs = np.vstack([ray_obs, [dist_to_goal, heading/3.14, 0]])
elif self.give_heading == 3:
#give distance and angle to goal and current agent angle
obs = np.vstack([ray_obs, [dist_to_goal, heading/3.14, self.character.angle]])
#!! Add code to show when on top of platform
if self.flat:
return np.array(obs.reshape(-1), dtype='float')
else:
return np.array(obs, dtype='float')
else:
obs = np.array(self.character.ray_obs().reshape(-1), dtype='float')
obs = np.append(obs, np.array([self.on_platform * 1]))
return obs
def reset(self):
if self.ep_struct == 2:
self.generate_world()
observation = self.get_observation()
self.current_steps = 0
self.total_rewards = 0
self.on_platform = False
self.duration_on_platform = 0
randomize_location_and_angle(self.character, self.goal, self.world_size, self.get_sdf_func('all'), self.character_sep)
return observation
def reset_character(self):
'''
Reset position of the character, used for ep_struct 2
'''
self.on_platform = False
self.duration_on_platform = 0
randomize_location_and_angle(self.character, self.goal, self.world_size, self.get_sdf_func('all'), self.character_sep)
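# Example usage sketch (hypothetical, assuming a gym-style agent loop elsewhere):
#   env = MorrisNav(num_rays=30, ep_struct=1)
#   obs = env.reset()
#   for _ in range(env.max_steps):
#       obs, reward, done, info = env.step(env.action_space.sample())
#       if done:
#           break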
|
[
"pygame.draw.line",
"numpy.abs",
"numpy.arctan2",
"pygame.Rect",
"gym.spaces.Discrete",
"numpy.argmin",
"numpy.clip",
"pygame.display.update",
"numpy.sin",
"numpy.linalg.norm",
"pygame.display.set_mode",
"numpy.append",
"math.cos",
"numpy.random.choice",
"pygame.draw.polygon",
"pygame.Surface",
"pygame.draw.rect",
"pygame.init",
"math.sin",
"numpy.min",
"numpy.cos",
"numpy.vstack",
"numpy.random.uniform",
"pygame.draw.circle",
"numpy.array",
"gym.spaces.Box",
"pygame.surfarray.pixels3d"
] |
[((505, 562), 'numpy.random.uniform', 'np.random.uniform', (['[size[0], size[0]]', '[size[1], size[1]]'], {}), '([size[0], size[0]], [size[1], size[1]])\n', (522, 562), True, 'import numpy as np\n'), ((1185, 1224), 'numpy.random.uniform', 'np.random.uniform', (['radius[0]', 'radius[1]'], {}), '(radius[0], radius[1])\n', (1202, 1224), True, 'import numpy as np\n'), ((1677, 1694), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (1691, 1694), True, 'import numpy as np\n'), ((11818, 11841), 'numpy.random.uniform', 'np.random.uniform', (['(6.28)'], {}), '(6.28)\n', (11835, 11841), True, 'import numpy as np\n'), ((2183, 2248), 'pygame.draw.circle', 'pygame.draw.circle', (['display', 'self.color', 'self.center', 'self.radius'], {}), '(display, self.color, self.center, self.radius)\n', (2201, 2248), False, 'import pygame\n'), ((2546, 2582), 'pygame.Rect', 'pygame.Rect', (['(center - size)', '(size * 2)'], {}), '(center - size, size * 2)\n', (2557, 2582), False, 'import pygame\n'), ((3001, 3049), 'pygame.draw.rect', 'pygame.draw.rect', (['display', 'self.color', 'self.rect'], {}), '(display, self.color, self.rect)\n', (3017, 3049), False, 'import pygame\n'), ((4682, 4736), 'pygame.draw.line', 'pygame.draw.line', (['display', 'self.color', 'self.start', 'end'], {}), '(display, self.color, self.start, end)\n', (4698, 4736), False, 'import pygame\n'), ((7508, 7598), 'pygame.draw.polygon', 'pygame.draw.polygon', (['display', 'self.color', '[self.pos, point1, point2, point3, self.pos]'], {}), '(display, self.color, [self.pos, point1, point2, point3,\n self.pos])\n', (7527, 7598), False, 'import pygame\n'), ((10599, 10619), 'numpy.array', 'np.array', (['ray_colors'], {}), '(ray_colors)\n', (10607, 10619), True, 'import numpy as np\n'), ((11546, 11576), 'numpy.random.uniform', 'np.random.uniform', (['WINDOW_SIZE'], {}), '(WINDOW_SIZE)\n', (11563, 11576), True, 'import numpy as np\n'), ((18446, 18503), 'numpy.random.uniform', 'np.random.uniform', (['[size[0], size[0]]', '[size[1], size[1]]'], {}), '([size[0], size[0]], [size[1], size[1]])\n', (18463, 18503), True, 'import numpy as np\n'), ((19281, 19320), 'numpy.random.uniform', 'np.random.uniform', (['radius[0]', 'radius[1]'], {}), '(radius[0], radius[1])\n', (19298, 19320), True, 'import numpy as np\n'), ((25254, 25288), 'numpy.append', 'np.append', (['box_dists', 'circle_dists'], {}), '(box_dists, circle_dists)\n', (25263, 25288), True, 'import numpy as np\n'), ((25308, 25321), 'numpy.min', 'np.min', (['dists'], {}), '(dists)\n', (25314, 25321), True, 'import numpy as np\n'), ((25342, 25358), 'numpy.argmin', 'np.argmin', (['dists'], {}), '(dists)\n', (25351, 25358), True, 'import numpy as np\n'), ((31271, 31289), 'gym.spaces.Discrete', 'spaces.Discrete', (['(4)'], {}), '(4)\n', (31286, 31289), False, 'from gym import spaces\n'), ((627, 738), 'numpy.random.uniform', 'np.random.uniform', (['[box_size[0], box_size[1]]', '[WINDOW_SIZE[0] - box_size[0], WINDOW_SIZE[1] - box_size[1]]'], {}), '([box_size[0], box_size[1]], [WINDOW_SIZE[0] - box_size[0],\n WINDOW_SIZE[1] - box_size[1]])\n', (644, 738), True, 'import numpy as np\n'), ((805, 835), 'numpy.random.uniform', 'np.random.uniform', (['WINDOW_SIZE'], {}), '(WINDOW_SIZE)\n', (822, 835), True, 'import numpy as np\n'), ((1289, 1389), 'numpy.random.uniform', 'np.random.uniform', (['[circ_rad, circ_rad]', '[WINDOW_SIZE[0] - circ_rad, WINDOW_SIZE[1] - circ_rad]'], {}), '([circ_rad, circ_rad], [WINDOW_SIZE[0] - circ_rad, \n WINDOW_SIZE[1] - circ_rad])\n', (1306, 1389), True, 'import numpy as np\n'), 
((1413, 1443), 'numpy.random.uniform', 'np.random.uniform', (['WINDOW_SIZE'], {}), '(WINDOW_SIZE)\n', (1430, 1443), True, 'import numpy as np\n'), ((2768, 2791), 'numpy.abs', 'np.abs', (['(p - self.center)'], {}), '(p - self.center)\n', (2774, 2791), True, 'import numpy as np\n'), ((2831, 2857), 'numpy.clip', 'np.clip', (['offset', '(0)', 'np.inf'], {}), '(offset, 0, np.inf)\n', (2838, 2857), True, 'import numpy as np\n'), ((2892, 2919), 'numpy.clip', 'np.clip', (['offset', '(-np.inf)', '(0)'], {}), '(offset, -np.inf, 0)\n', (2899, 2919), True, 'import numpy as np\n'), ((10700, 10738), 'numpy.clip', 'np.clip', (['ray_depths', '(0)', 'self.max_depth'], {}), '(ray_depths, 0, self.max_depth)\n', (10707, 10738), True, 'import numpy as np\n'), ((13942, 13960), 'gym.spaces.Discrete', 'spaces.Discrete', (['(4)'], {}), '(4)\n', (13957, 13960), False, 'from gym import spaces\n'), ((16523, 16557), 'numpy.arctan2', 'np.arctan2', (['heading[1]', 'heading[0]'], {}), '(heading[1], heading[0])\n', (16533, 16557), True, 'import numpy as np\n'), ((24390, 24402), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (24398, 24402), True, 'import numpy as np\n'), ((24907, 24919), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (24915, 24919), True, 'import numpy as np\n'), ((25447, 25460), 'numpy.min', 'np.min', (['dists'], {}), '(dists)\n', (25453, 25460), True, 'import numpy as np\n'), ((26423, 26436), 'pygame.init', 'pygame.init', ([], {}), '()\n', (26434, 26436), False, 'import pygame\n'), ((26613, 26674), 'pygame.Surface', 'pygame.Surface', (['[self.world_size[0], self.world_size[1] + 10]'], {}), '([self.world_size[0], self.world_size[1] + 10])\n', (26627, 26674), False, 'import pygame\n'), ((26920, 26943), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (26941, 26943), False, 'import pygame\n'), ((27008, 27042), 'pygame.surfarray.pixels3d', 'pygame.surfarray.pixels3d', (['display'], {}), '(display)\n', (27033, 27042), False, 'import pygame\n'), ((27245, 27287), 'pygame.Rect', 'pygame.Rect', (['[i * length, 300, length, 10]'], {}), '([i * length, 300, length, 10])\n', (27256, 27287), False, 'import pygame\n'), ((27300, 27342), 'pygame.draw.rect', 'pygame.draw.rect', (['display', 'colors[i]', 'rect'], {}), '(display, colors[i], rect)\n', (27316, 27342), False, 'import pygame\n'), ((30973, 31031), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(1)', 'shape': '((num_rays + 1) * 3 + 1,)'}), '(low=0, high=1, shape=((num_rays + 1) * 3 + 1,))\n', (30983, 31031), False, 'from gym import spaces\n'), ((31183, 31235), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(1)', 'shape': '(num_rays * 3 + 1,)'}), '(low=0, high=1, shape=(num_rays * 3 + 1,))\n', (31193, 31235), False, 'from gym import spaces\n'), ((32105, 32163), 'numpy.array', 'np.array', (['[self.world_size[0] / 4, self.world_size[1] / 4]'], {}), '([self.world_size[0] / 4, self.world_size[1] / 4])\n', (32113, 32163), True, 'import numpy as np\n'), ((32190, 32206), 'numpy.array', 'np.array', (['[1, 3]'], {}), '([1, 3])\n', (32198, 32206), True, 'import numpy as np\n'), ((32229, 32256), 'numpy.random.choice', 'np.random.choice', (['(2)'], {'size': '(2)'}), '(2, size=2)\n', (32245, 32256), True, 'import numpy as np\n'), ((36497, 36531), 'numpy.arctan2', 'np.arctan2', (['heading[1]', 'heading[0]'], {}), '(heading[1], heading[0])\n', (36507, 36531), True, 'import numpy as np\n'), ((4140, 4207), 'pygame.draw.circle', 'pygame.draw.circle', (['display', '(255, 255, 255, 0.3)', 'p', 'dist'], {'width': '(1)'}), 
'(display, (255, 255, 255, 0.3), p, dist, width=1)\n', (4158, 4207), False, 'import pygame\n'), ((8078, 8098), 'math.cos', 'math.cos', (['self.angle'], {}), '(self.angle)\n', (8086, 8098), False, 'import math\n'), ((8134, 8154), 'math.sin', 'math.sin', (['self.angle'], {}), '(self.angle)\n', (8142, 8154), False, 'import math\n'), ((8327, 8347), 'math.cos', 'math.cos', (['self.angle'], {}), '(self.angle)\n', (8335, 8347), False, 'import math\n'), ((8389, 8409), 'math.sin', 'math.sin', (['self.angle'], {}), '(self.angle)\n', (8397, 8409), False, 'import math\n'), ((13645, 13699), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(1)', 'shape': '((num_rays + 1) * 3,)'}), '(low=0, high=1, shape=((num_rays + 1) * 3,))\n', (13655, 13699), False, 'from gym import spaces\n'), ((13863, 13911), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(1)', 'shape': '(num_rays * 3,)'}), '(low=0, high=1, shape=(num_rays * 3,))\n', (13873, 13911), False, 'from gym import spaces\n'), ((16664, 16706), 'numpy.vstack', 'np.vstack', (['[ray_obs, [dist_to_goal, 0, 0]]'], {}), '([ray_obs, [dist_to_goal, 0, 0]])\n', (16673, 16706), True, 'import numpy as np\n'), ((17251, 17279), 'numpy.array', 'np.array', (['obs'], {'dtype': '"""float"""'}), "(obs, dtype='float')\n", (17259, 17279), True, 'import numpy as np\n'), ((17637, 17653), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (17645, 17653), True, 'import numpy as np\n'), ((17655, 17688), 'numpy.array', 'np.array', (['[1, self.world_size[1]]'], {}), '([1, self.world_size[1]])\n', (17663, 17688), True, 'import numpy as np\n'), ((17742, 17758), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (17750, 17758), True, 'import numpy as np\n'), ((17760, 17793), 'numpy.array', 'np.array', (['[self.world_size[0], 1]'], {}), '([self.world_size[0], 1])\n', (17768, 17793), True, 'import numpy as np\n'), ((17847, 17880), 'numpy.array', 'np.array', (['[0, self.world_size[1]]'], {}), '([0, self.world_size[1]])\n', (17855, 17880), True, 'import numpy as np\n'), ((17882, 17915), 'numpy.array', 'np.array', (['[self.world_size[0], 1]'], {}), '([self.world_size[0], 1])\n', (17890, 17915), True, 'import numpy as np\n'), ((17969, 18002), 'numpy.array', 'np.array', (['[self.world_size[0], 0]'], {}), '([self.world_size[0], 0])\n', (17977, 18002), True, 'import numpy as np\n'), ((18004, 18037), 'numpy.array', 'np.array', (['[1, self.world_size[1]]'], {}), '([1, self.world_size[1]])\n', (18012, 18037), True, 'import numpy as np\n'), ((18580, 18699), 'numpy.random.uniform', 'np.random.uniform', (['[box_size[0], box_size[1]]', '[self.world_size[0] - box_size[0], self.world_size[1] - box_size[1]]'], {}), '([box_size[0], box_size[1]], [self.world_size[0] -\n box_size[0], self.world_size[1] - box_size[1]])\n', (18597, 18699), True, 'import numpy as np\n'), ((18778, 18812), 'numpy.random.uniform', 'np.random.uniform', (['self.world_size'], {}), '(self.world_size)\n', (18795, 18812), True, 'import numpy as np\n'), ((19397, 19505), 'numpy.random.uniform', 'np.random.uniform', (['[circ_rad, circ_rad]', '[self.world_size[0] - circ_rad, self.world_size[1] - circ_rad]'], {}), '([circ_rad, circ_rad], [self.world_size[0] - circ_rad, \n self.world_size[1] - circ_rad])\n', (19414, 19505), True, 'import numpy as np\n'), ((19537, 19571), 'numpy.random.uniform', 'np.random.uniform', (['self.world_size'], {}), '(self.world_size)\n', (19554, 19571), True, 'import numpy as np\n'), ((23474, 23504), 'numpy.array', 'np.array', (['visible_objects[key]'], {}), 
'(visible_objects[key])\n', (23482, 23504), True, 'import numpy as np\n'), ((23616, 23640), 'numpy.array', 'np.array', (['obstacles[key]'], {}), '(obstacles[key])\n', (23624, 23640), True, 'import numpy as np\n'), ((24091, 24114), 'numpy.abs', 'np.abs', (['(p - box_centers)'], {}), '(p - box_centers)\n', (24097, 24114), True, 'import numpy as np\n'), ((24170, 24196), 'numpy.clip', 'np.clip', (['offset', '(0)', 'np.inf'], {}), '(offset, 0, np.inf)\n', (24177, 24196), True, 'import numpy as np\n'), ((24243, 24270), 'numpy.clip', 'np.clip', (['offset', '(-np.inf)', '(0)'], {}), '(offset, -np.inf, 0)\n', (24250, 24270), True, 'import numpy as np\n'), ((24814, 24856), 'numpy.linalg.norm', 'np.linalg.norm', (['(circle_centers - p)'], {'axis': '(1)'}), '(circle_centers - p, axis=1)\n', (24828, 24856), True, 'import numpy as np\n'), ((26507, 26577), 'pygame.display.set_mode', 'pygame.display.set_mode', (['[self.world_size[0], self.world_size[1] + 10]'], {}), '([self.world_size[0], self.world_size[1] + 10])\n', (26530, 26577), False, 'import pygame\n'), ((32586, 32689), 'numpy.random.uniform', 'np.random.uniform', (['(-self.platform_randomization_spread)', 'self.platform_randomization_spread'], {'size': '(2)'}), '(-self.platform_randomization_spread, self.\n platform_randomization_spread, size=2)\n', (32603, 32689), True, 'import numpy as np\n'), ((35997, 36042), 'numpy.abs', 'np.abs', (['(self.goal.center - self.character.pos)'], {}), '(self.goal.center - self.character.pos)\n', (36003, 36042), True, 'import numpy as np\n'), ((36638, 36680), 'numpy.vstack', 'np.vstack', (['[ray_obs, [dist_to_goal, 0, 0]]'], {}), '([ray_obs, [dist_to_goal, 0, 0]])\n', (36647, 36680), True, 'import numpy as np\n'), ((37282, 37310), 'numpy.array', 'np.array', (['obs'], {'dtype': '"""float"""'}), "(obs, dtype='float')\n", (37290, 37310), True, 'import numpy as np\n'), ((37451, 37483), 'numpy.array', 'np.array', (['[self.on_platform * 1]'], {}), '([self.on_platform * 1])\n', (37459, 37483), True, 'import numpy as np\n'), ((7126, 7152), 'math.cos', 'math.cos', (['(self.angle + 0.3)'], {}), '(self.angle + 0.3)\n', (7134, 7152), False, 'import math\n'), ((7197, 7223), 'math.sin', 'math.sin', (['(self.angle + 0.3)'], {}), '(self.angle + 0.3)\n', (7205, 7223), False, 'import math\n'), ((7383, 7409), 'math.cos', 'math.cos', (['(self.angle - 0.3)'], {}), '(self.angle - 0.3)\n', (7391, 7409), False, 'import math\n'), ((7454, 7480), 'math.sin', 'math.sin', (['(self.angle - 0.3)'], {}), '(self.angle - 0.3)\n', (7462, 7480), False, 'import math\n'), ((11429, 11450), 'numpy.array', 'np.array', (['WINDOW_SIZE'], {}), '(WINDOW_SIZE)\n', (11437, 11450), True, 'import numpy as np\n'), ((16819, 16874), 'numpy.vstack', 'np.vstack', (['[ray_obs, [dist_to_goal, heading / 3.14, 0]]'], {}), '([ray_obs, [dist_to_goal, heading / 3.14, 0]])\n', (16828, 16874), True, 'import numpy as np\n'), ((33506, 33522), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (33514, 33522), True, 'import numpy as np\n'), ((33524, 33557), 'numpy.array', 'np.array', (['[1, self.world_size[1]]'], {}), '([1, self.world_size[1]])\n', (33532, 33557), True, 'import numpy as np\n'), ((33615, 33631), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (33623, 33631), True, 'import numpy as np\n'), ((33633, 33666), 'numpy.array', 'np.array', (['[self.world_size[0], 1]'], {}), '([self.world_size[0], 1])\n', (33641, 33666), True, 'import numpy as np\n'), ((33724, 33757), 'numpy.array', 'np.array', (['[0, self.world_size[1]]'], {}), '([0, 
self.world_size[1]])\n', (33732, 33757), True, 'import numpy as np\n'), ((33759, 33792), 'numpy.array', 'np.array', (['[self.world_size[0], 1]'], {}), '([self.world_size[0], 1])\n', (33767, 33792), True, 'import numpy as np\n'), ((33850, 33883), 'numpy.array', 'np.array', (['[self.world_size[0], 0]'], {}), '([self.world_size[0], 0])\n', (33858, 33883), True, 'import numpy as np\n'), ((33885, 33918), 'numpy.array', 'np.array', (['[1, self.world_size[1]]'], {}), '([1, self.world_size[1]])\n', (33893, 33918), True, 'import numpy as np\n'), ((36793, 36848), 'numpy.vstack', 'np.vstack', (['[ray_obs, [dist_to_goal, heading / 3.14, 0]]'], {}), '([ray_obs, [dist_to_goal, heading / 3.14, 0]])\n', (36802, 36848), True, 'import numpy as np\n'), ((4617, 4635), 'numpy.cos', 'np.cos', (['self.angle'], {}), '(self.angle)\n', (4623, 4635), True, 'import numpy as np\n'), ((4637, 4655), 'numpy.sin', 'np.sin', (['self.angle'], {}), '(self.angle)\n', (4643, 4655), True, 'import numpy as np\n'), ((7266, 7286), 'math.cos', 'math.cos', (['self.angle'], {}), '(self.angle)\n', (7274, 7286), False, 'import math\n'), ((7315, 7335), 'math.sin', 'math.sin', (['self.angle'], {}), '(self.angle)\n', (7323, 7335), False, 'import math\n'), ((17009, 17083), 'numpy.vstack', 'np.vstack', (['[ray_obs, [dist_to_goal, heading / 3.14, self.character.angle]]'], {}), '([ray_obs, [dist_to_goal, heading / 3.14, self.character.angle]])\n', (17018, 17083), True, 'import numpy as np\n'), ((36983, 37057), 'numpy.vstack', 'np.vstack', (['[ray_obs, [dist_to_goal, heading / 3.14, self.character.angle]]'], {}), '([ray_obs, [dist_to_goal, heading / 3.14, self.character.angle]])\n', (36992, 37057), True, 'import numpy as np\n'), ((4403, 4421), 'numpy.cos', 'np.cos', (['self.angle'], {}), '(self.angle)\n', (4409, 4421), True, 'import numpy as np\n'), ((4423, 4441), 'numpy.sin', 'np.sin', (['self.angle'], {}), '(self.angle)\n', (4429, 4441), True, 'import numpy as np\n'), ((9727, 9745), 'numpy.cos', 'np.cos', (['self.angle'], {}), '(self.angle)\n', (9733, 9745), True, 'import numpy as np\n'), ((9747, 9765), 'numpy.sin', 'np.sin', (['self.angle'], {}), '(self.angle)\n', (9753, 9765), True, 'import numpy as np\n')]
|
from shapely import geometry
from shapely.geometry import shape, Point
import geohash as gh
import numpy as np
import pandas as pd
import sys
import datetime
import random
from random import choices
# Split accident data into train and test sets
# Train acc: Jan 2017 to May 2019
# Test acc: June 2019 to Dec 2019
def traintestdataAcc(data, city):
train_data_2017_acc = data.loc[(data['UJAHR'] == 2017)]
train_data_2018_acc = data.loc[
(data['UJAHR'] == 2018)]
train_data_2019_acc = data.loc[
(data['UMONAT'] <= 5) & (data['UJAHR'] == 2019)]
frames = [train_data_2017_acc, train_data_2018_acc,train_data_2019_acc]
train_data_acc = pd.concat(frames)
train_data_acc.to_csv('../../../data_preprocessing/data/regions/'+city+'/train_acc.csv',index=False)
test_data_acc = data.loc[
(data['UMONAT'] > 5) & (data['UJAHR'] == 2019)]
test_data_acc.to_csv('../../../data_preprocessing/data/regions/'+city+'/test_acc.csv',index=False)
return train_data_acc,test_data_acc
def random_latlong(geohash):
dic = gh.bbox(geohash)
# getting min, max lat/lng
min_lng = dic.get('w')
min_lat = dic.get('s')
max_lng = dic.get('e')
max_lat = dic.get('n')
# generate random float between [min_lng, max_lng)
long = np.random.uniform(min_lng, max_lng)
# generate random float between [min_lat, max_lat)
lat = np.random.uniform(min_lat, max_lat)
return lat, long
def dow(date):
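    # datetime.weekday() gives Monday=0 .. Sunday=6; remap to the accident data's
    # UWOCHENTAG convention, which appears to use Sunday=1 .. Saturday=7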
dayNumber = date.weekday()
day = -1
if dayNumber == 6:
day = 1
else:
day = dayNumber + 2
return day
def find_t_nonACC(t):
tm = str(t)
dateTimesplit = tm.split(' ')
dateFind = dateTimesplit[0]
timeFind = dateTimesplit[1]
datesplit = dateFind.split('-')
timesplit = timeFind.split(':')
frmt = '%Y-%m-%d'
datsend = datetime.datetime.strptime(dateFind, frmt)
dayofweek = dow(datsend)
year, month, day = datesplit[0], datesplit[1], datesplit[2]
month = int(month)
hour = int(timesplit[0])
return year, month, dayofweek, hour
def randomtimes(geohash, stime, etime, n):
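    # Draw a uniformly random timestamp between stime and etime and a random lat/long
    # inside the given geohash cell; the `n` argument appears to be unused.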
frmt = '%d-%m-%Y %H:%M:%S'
stime = datetime.datetime.strptime(stime, frmt)
etime = datetime.datetime.strptime(etime, frmt)
td = etime - stime
k = []
t = random.random() * td + stime
year, month, dayofweek, hour = find_t_nonACC(t)
year = int(year)
lat, long = random_latlong(geohash)
return True, lat, long, year, month, dayofweek,hour
def trainNonacc(hann_grid_zeroacc,train,city):
t = []
a=[]
no_of_acc=len(train.index)
print('no of acc=',no_of_acc)
no_of_nonacc=no_of_acc*3
print('no of non acc in train=',no_of_nonacc)
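    # Negative sampling: for each accident, draw three non-accident samples by picking a
    # random geohash cell (with replacement) and a random timestamp in the training window;
    # draws repeating an already-seen (year, month, weekday, hour) combination are skipped.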
for i in range(0,no_of_nonacc):
geohashVal=hann_grid_zeroacc['geohash'].values # 153m x153 m all geohashes
geoSelect=choices(geohashVal) # select one geohash with replacement
bol, lat, long, year, month, dayofweek,hour = randomtimes(geoSelect[0], '01-01-2017 00:00:00',
'31-05-2019 23:00:00', i)
if bol and [year, month, dayofweek,hour] not in t:
p = (year, month, dayofweek,hour)
k = (geoSelect[0], lat, long, year, month, dayofweek,hour)
a.append(k)
t.append(p)
dt = pd.DataFrame(a)
dt.columns = ['geohash', 'non_acclat', 'non_acclong', 'UJAHR', 'UMONAT', 'UWOCHENTAG','hour']
dt['UMONAT'] = dt["UMONAT"].astype(str).astype(int)
dt['UJAHR'] = dt["UJAHR"].astype(str).astype(int)
train_non_acc_data=dt.loc[((dt['UJAHR']==2017) & (dt['UMONAT']<=12)|(dt['UJAHR']==2018) & (dt['UMONAT']<=12) | ((dt['UJAHR']==2019) & (dt['UMONAT']<=5)))]
train_non_acc_data.to_csv('../../../data_preprocessing/data/regions/'+city+'/train_nonaccdata.csv', index=False)
def testNonacc(hann_grid_zeroacc,test,city):
a=[]
t=[]
no_of_acc=len(test.index)
print('no of acc=',no_of_acc)
no_of_nonacc=no_of_acc*3
print('no of non acc in test=',no_of_nonacc)
for i in range(0,no_of_nonacc):
geohashVal=hann_grid_zeroacc['geohash'].values # 153m x153 m all geohashes
geoSelect=choices(geohashVal) # select one geohash with replacement
bol, lat, long, year, month, dayofweek,hour = randomtimes(geoSelect[0], '01-06-2019 00:00:00',
'31-12-2019 23:00:00', i)
if bol and [year, month, dayofweek,hour] not in t:
p = (year, month, dayofweek,hour)
k = (geoSelect[0], lat, long, year, month, dayofweek,hour)
a.append(k)
t.append(p)
dt = pd.DataFrame(a)
dt.columns = ['geohash', 'non_acclat', 'non_acclong', 'UJAHR', 'UMONAT', 'UWOCHENTAG','hour']
dt['UMONAT'] = dt["UMONAT"].astype(str).astype(int)
dt['UJAHR'] = dt["UJAHR"].astype(str).astype(int)
test_data = dt.loc[(dt['UMONAT'] > 5) & (dt['UJAHR'] == 2019)]
test_data.to_csv('../../../data_preprocessing/data/regions/'+city+'/test_nonaccdata.csv', index=False)
if __name__ == "__main__":
cities = ['LS/hannover']#,'Bayern/munich','Bayern/nurenberg']
for city in cities:
region_grid=pd.read_csv('../../../data_preprocessing/data/regions/'+city+'/numberofGridRegionGeo7.csv',header=0)
region_selectedWithacc=pd.read_csv('../../../data_preprocessing/data/regions/'+city+'/acc_threeYear_hannover.csv',header=0)
train,test=traintestdataAcc(region_selectedWithacc, city)
# non acc cases generation
trainNonacc(region_grid,train,city)
testNonacc(region_grid,test,city)
print('finished for city=',city)
|
[
"pandas.DataFrame",
"numpy.random.uniform",
"pandas.read_csv",
"random.choices",
"random.random",
"datetime.datetime.strptime",
"geohash.bbox",
"pandas.concat"
] |
[((692, 709), 'pandas.concat', 'pd.concat', (['frames'], {}), '(frames)\n', (701, 709), True, 'import pandas as pd\n'), ((1090, 1106), 'geohash.bbox', 'gh.bbox', (['geohash'], {}), '(geohash)\n', (1097, 1106), True, 'import geohash as gh\n'), ((1312, 1347), 'numpy.random.uniform', 'np.random.uniform', (['min_lng', 'max_lng'], {}), '(min_lng, max_lng)\n', (1329, 1347), True, 'import numpy as np\n'), ((1413, 1448), 'numpy.random.uniform', 'np.random.uniform', (['min_lat', 'max_lat'], {}), '(min_lat, max_lat)\n', (1430, 1448), True, 'import numpy as np\n'), ((1870, 1912), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['dateFind', 'frmt'], {}), '(dateFind, frmt)\n', (1896, 1912), False, 'import datetime\n'), ((2186, 2225), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['stime', 'frmt'], {}), '(stime, frmt)\n', (2212, 2225), False, 'import datetime\n'), ((2238, 2277), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['etime', 'frmt'], {}), '(etime, frmt)\n', (2264, 2277), False, 'import datetime\n'), ((3415, 3430), 'pandas.DataFrame', 'pd.DataFrame', (['a'], {}), '(a)\n', (3427, 3430), True, 'import pandas as pd\n'), ((4811, 4826), 'pandas.DataFrame', 'pd.DataFrame', (['a'], {}), '(a)\n', (4823, 4826), True, 'import pandas as pd\n'), ((2868, 2887), 'random.choices', 'choices', (['geohashVal'], {}), '(geohashVal)\n', (2875, 2887), False, 'from random import choices\n'), ((4264, 4283), 'random.choices', 'choices', (['geohashVal'], {}), '(geohashVal)\n', (4271, 4283), False, 'from random import choices\n'), ((5366, 5475), 'pandas.read_csv', 'pd.read_csv', (["('../../../data_preprocessing/data/regions/' + city +\n '/numberofGridRegionGeo7.csv')"], {'header': '(0)'}), "('../../../data_preprocessing/data/regions/' + city +\n '/numberofGridRegionGeo7.csv', header=0)\n", (5377, 5475), True, 'import pandas as pd\n'), ((5499, 5608), 'pandas.read_csv', 'pd.read_csv', (["('../../../data_preprocessing/data/regions/' + city +\n '/acc_threeYear_hannover.csv')"], {'header': '(0)'}), "('../../../data_preprocessing/data/regions/' + city +\n '/acc_threeYear_hannover.csv', header=0)\n", (5510, 5608), True, 'import pandas as pd\n'), ((2320, 2335), 'random.random', 'random.random', ([], {}), '()\n', (2333, 2335), False, 'import random\n')]
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import argparse
import logging
import os
import subprocess
import random
import cv2
import numpy as np
import sys
python_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(0, python_dir)
from cuberite_process import CuberiteProcess
from repo import repo_home
logging.basicConfig(format="%(asctime)s [%(levelname)s]: %(message)s")
logging.getLogger().setLevel(logging.DEBUG)
def to_unit_vec(yaw, pitch):
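    # convert yaw/pitch given in degrees into a unit look-direction vector; this appears to
    # follow Minecraft's convention (yaw about the vertical axis, pitch positive looking down)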
pitch *= 3.14159 / 180
yaw *= 3.14159 / 180
return np.array(
[-1 * np.cos(pitch) * np.sin(yaw), -1 * np.sin(pitch), np.cos(pitch) * np.cos(yaw)]
)
def ground_height(blocks):
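    # heuristic ground level: return the y-layer with the highest fraction of dirt blocks
    # (block id 2), provided some layer is more than 25% dirt; otherwise return None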
dirt_pct = np.mean(np.mean(blocks[:, :, :, 0] == 2, axis=1), axis=1)
if (dirt_pct > 0.25).any():
return np.argmax(dirt_pct)
return None
def change_block(schematic, b):
x, y, z = b
## change to red wool
schematic[y][z][x][0] = 35
schematic[y][z][x][1] = 14
def render(npy_p2b, out_dir, port, spp, img_size, mn=None):
npy_file = (
os.path.expanduser("~")
+ "/minecraft_houses/"
+ ".".join(npy_p2b.split(".")[1:-2])
+ "/schematic.npy"
)
schematic = np.load(npy_file)
print(schematic.shape)
house_name = os.path.basename(os.path.dirname(npy_file))
p2b = np.load(npy_p2b)
# remove blocks below ground-level
g = ground_height(schematic)
schematic = schematic[(g or 0) :, :, :, :]
ys, zs, xs = np.nonzero(schematic[:, :, :, 0] > 0)
xmid, ymid, zmid = np.mean(xs), np.mean(ys), np.mean(zs)
    focus = np.array([xmid, ymid + 63, zmid])  # TODO: +63 only works for flat_world seed=0
yaw, distance = list(map(int, npy_p2b.split(".")[-2].split("_")))
look = [yaw, 0]
look_xyz = to_unit_vec(*look)
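    # Pull the camera back from the focus point by `distance` along the look direction.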
camera = focus - (look_xyz * distance)
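    # Pick a pixel (random when mn == [0, 0], otherwise the one given), look up the block it
    # maps to in p2b, and recolor that block so the pixel-to-block mapping can be checked visually.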
if mn == [0, 0]:
M, N = p2b.shape[:2]
while True:
m = random.randint(0, M - 1)
n = random.randint(0, N - 1)
if p2b[m][n][0] != -1:
break
else:
m, n = mn
print("Select pixel at {}".format((m, n)))
print("Mapped block {}".format(p2b[m][n]))
change_block(schematic, p2b[m][n])
logging.info("Launching cuberite at port {}".format(port))
p = CuberiteProcess(
"flat_world", seed=0, game_mode="creative", place_blocks_yzx=schematic, port=port
)
logging.info("Destroying cuberite at port {}".format(port))
p.destroy()
world_dir = os.path.join(p.workdir, "world")
render_view_bin = os.path.join(repo_home, "bin/render_view")
assert os.path.isfile(
render_view_bin
), "{} not found.\n\nTry running: make render_view".format(render_view_bin)
procs = []
chunky_id = "{}_{}".format(yaw, distance)
out_file = "{}/chunky_verify.{}.{}.png".format(out_dir, house_name, chunky_id)
call = [
str(a)
for a in [
"python3",
"{}/python/minecraft_render/render.py".format(repo_home),
"--world",
world_dir,
"--out",
out_file,
"--camera",
*camera,
"--look",
yaw,
0,
"--size",
*img_size,
"--spp",
spp,
]
]
logging.info("CALL: " + " ".join(call))
procs.append(subprocess.Popen(call))
for p in procs:
p.wait()
## draw the sampled pixel for a better view
img = cv2.imread(out_file)
cv2.circle(img, (n, m), 2, (255, 0, 0))
cv2.imwrite(out_file, img)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("npy_p2b")
parser.add_argument(
"--out-dir", "-o", required=True, help="Directory in which to write vision files"
)
parser.add_argument("--spp", type=int, default=25, help="samples per pixel")
parser.add_argument("--port", type=int, default=25565)
parser.add_argument("--size", type=int, nargs=2, default=[300, 225])
parser.add_argument("--mn", type=int, nargs=2, default=[0, 0])
args = parser.parse_args()
render(args.npy_p2b, args.out_dir, args.port, args.spp, args.size, args.mn)
|
[
"numpy.load",
"argparse.ArgumentParser",
"numpy.argmax",
"os.path.isfile",
"numpy.mean",
"numpy.sin",
"os.path.join",
"random.randint",
"cuberite_process.CuberiteProcess",
"cv2.imwrite",
"os.path.dirname",
"subprocess.Popen",
"cv2.circle",
"os.path.realpath",
"numpy.cos",
"logging.basicConfig",
"sys.path.insert",
"numpy.nonzero",
"cv2.imread",
"numpy.array",
"os.path.expanduser",
"logging.getLogger"
] |
[((249, 279), 'sys.path.insert', 'sys.path.insert', (['(0)', 'python_dir'], {}), '(0, python_dir)\n', (264, 279), False, 'import sys\n'), ((354, 424), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s [%(levelname)s]: %(message)s"""'}), "(format='%(asctime)s [%(levelname)s]: %(message)s')\n", (373, 424), False, 'import logging\n'), ((1231, 1248), 'numpy.load', 'np.load', (['npy_file'], {}), '(npy_file)\n', (1238, 1248), True, 'import numpy as np\n'), ((1347, 1363), 'numpy.load', 'np.load', (['npy_p2b'], {}), '(npy_p2b)\n', (1354, 1363), True, 'import numpy as np\n'), ((1501, 1538), 'numpy.nonzero', 'np.nonzero', (['(schematic[:, :, :, 0] > 0)'], {}), '(schematic[:, :, :, 0] > 0)\n', (1511, 1538), True, 'import numpy as np\n'), ((1613, 1646), 'numpy.array', 'np.array', (['[xmid, ymid + 63, zmid]'], {}), '([xmid, ymid + 63, zmid])\n', (1621, 1646), True, 'import numpy as np\n'), ((2307, 2409), 'cuberite_process.CuberiteProcess', 'CuberiteProcess', (['"""flat_world"""'], {'seed': '(0)', 'game_mode': '"""creative"""', 'place_blocks_yzx': 'schematic', 'port': 'port'}), "('flat_world', seed=0, game_mode='creative',\n place_blocks_yzx=schematic, port=port)\n", (2322, 2409), False, 'from cuberite_process import CuberiteProcess\n'), ((2517, 2549), 'os.path.join', 'os.path.join', (['p.workdir', '"""world"""'], {}), "(p.workdir, 'world')\n", (2529, 2549), False, 'import os\n'), ((2573, 2615), 'os.path.join', 'os.path.join', (['repo_home', '"""bin/render_view"""'], {}), "(repo_home, 'bin/render_view')\n", (2585, 2615), False, 'import os\n'), ((2627, 2658), 'os.path.isfile', 'os.path.isfile', (['render_view_bin'], {}), '(render_view_bin)\n', (2641, 2658), False, 'import os\n'), ((3503, 3523), 'cv2.imread', 'cv2.imread', (['out_file'], {}), '(out_file)\n', (3513, 3523), False, 'import cv2\n'), ((3528, 3567), 'cv2.circle', 'cv2.circle', (['img', '(n, m)', '(2)', '(255, 0, 0)'], {}), '(img, (n, m), 2, (255, 0, 0))\n', (3538, 3567), False, 'import cv2\n'), ((3572, 3598), 'cv2.imwrite', 'cv2.imwrite', (['out_file', 'img'], {}), '(out_file, img)\n', (3583, 3598), False, 'import cv2\n'), ((3641, 3666), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3664, 3666), False, 'import argparse\n'), ((220, 246), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (236, 246), False, 'import os\n'), ((425, 444), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (442, 444), False, 'import logging\n'), ((723, 763), 'numpy.mean', 'np.mean', (['(blocks[:, :, :, 0] == 2)'], {'axis': '(1)'}), '(blocks[:, :, :, 0] == 2, axis=1)\n', (730, 763), True, 'import numpy as np\n'), ((820, 839), 'numpy.argmax', 'np.argmax', (['dirt_pct'], {}), '(dirt_pct)\n', (829, 839), True, 'import numpy as np\n'), ((1310, 1335), 'os.path.dirname', 'os.path.dirname', (['npy_file'], {}), '(npy_file)\n', (1325, 1335), False, 'import os\n'), ((1563, 1574), 'numpy.mean', 'np.mean', (['xs'], {}), '(xs)\n', (1570, 1574), True, 'import numpy as np\n'), ((1576, 1587), 'numpy.mean', 'np.mean', (['ys'], {}), '(ys)\n', (1583, 1587), True, 'import numpy as np\n'), ((1589, 1600), 'numpy.mean', 'np.mean', (['zs'], {}), '(zs)\n', (1596, 1600), True, 'import numpy as np\n'), ((3382, 3404), 'subprocess.Popen', 'subprocess.Popen', (['call'], {}), '(call)\n', (3398, 3404), False, 'import subprocess\n'), ((1950, 1974), 'random.randint', 'random.randint', (['(0)', '(M - 1)'], {}), '(0, M - 1)\n', (1964, 1974), False, 'import random\n'), ((1991, 2015), 'random.randint', 
'random.randint', (['(0)', '(N - 1)'], {}), '(0, N - 1)\n', (2005, 2015), False, 'import random\n'), ((603, 614), 'numpy.sin', 'np.sin', (['yaw'], {}), '(yaw)\n', (609, 614), True, 'import numpy as np\n'), ((621, 634), 'numpy.sin', 'np.sin', (['pitch'], {}), '(pitch)\n', (627, 634), True, 'import numpy as np\n'), ((636, 649), 'numpy.cos', 'np.cos', (['pitch'], {}), '(pitch)\n', (642, 649), True, 'import numpy as np\n'), ((652, 663), 'numpy.cos', 'np.cos', (['yaw'], {}), '(yaw)\n', (658, 663), True, 'import numpy as np\n'), ((1081, 1104), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (1099, 1104), False, 'import os\n'), ((587, 600), 'numpy.cos', 'np.cos', (['pitch'], {}), '(pitch)\n', (593, 600), True, 'import numpy as np\n')]
|
from typing import Optional, Union
import itertools as it
from collections import OrderedDict
import numpy as np
import pandas as pd
from ConfigSpace import ConfigurationSpace
class fANOVA:
def __init__(
self,
X: Union[pd.DataFrame, np.ndarray],
Y,
configspace: ConfigurationSpace,
seed=0,
num_trees=16,
bootstrapping=True,
points_per_tree=-1,
ratio_features: float = 7 / 10,
min_samples_split=0,
min_samples_leaf=0,
max_depth=64,
cutoffs=(-np.inf, np.inf),
instance_features: Optional[np.ndarray] = None,
pca_components: Optional[int] = None,
):
"""
Calculate and provide midpoints and sizes from the forest's
split values in order to get the marginals
Parameters
------------
X: matrix with the features, either a np.array or a pd.DataFrame (numerically encoded)
Y: vector with the response values (numerically encoded)
configspace : ConfigSpace instantiation
num_trees: number of trees in the forest to be fit
seed: seed for the forests randomness
bootstrapping: whether to bootstrap the data for each tree or not
points_per_tree: number of points used for each tree
(only subsampling if bootstrapping is false)
ratio_features: number of features to be used at each split, default is 70%
min_samples_split: minimum number of samples required to attempt to split
min_samples_leaf: minimum number of samples required in a leaf
max_depth: maximal depth of each tree in the forest
cutoffs: tuple of (lower, upper), all values outside this range will be
mapped to either the lower or the upper bound. (See:
"Generalized Functional ANOVA Diagnostics for High Dimensional
Functions of Dependent Variables" by Hooker.)
"""
self.cs = configspace
self.cs_params = self.cs.get_hyperparameters()
self.num_dims = len(self.cs_params)
self.num_trees = num_trees
from deepcave.evaluators.epm.fanova_forest import fANOVAForest
self.forest = fANOVAForest(
configspace=configspace,
seed=seed,
num_trees=num_trees,
bootstrapping=bootstrapping,
points_per_tree=points_per_tree,
ratio_features=ratio_features,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
max_depth=max_depth,
cutoffs=cutoffs,
instance_features=instance_features,
pca_components=pca_components,
)
self.forest.train(X, Y)
def quantify_importance(
self, dims, depth=1, sort=True
) -> dict[tuple, tuple[float, float, float, float]]:
"""
Inputs:
`depth`: How often dims should be combined.
Returns:
ordered dict on total importance
Dict[Tuple[dim_names] -> (
mean_fractions_individual,
mean_fractions_total,
std_fractions_individual,
std_fractions_total
)]
"""
if type(dims[0]) == str:
idx = []
for i, param in enumerate(dims):
idx.append(self.cs.get_idx_by_hyperparameter_name(param))
dimensions = idx
# make sure that all the V_U values are computed for each tree
else:
dimensions = dims
vu_individual, vu_total = self.forest.compute_marginals(dimensions, depth)
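        # vu_individual / vu_total hold, per tree, the variance attributable to each dimension
        # subset (the pure effect vs. the effect including lower-order subsets); dividing by each
        # tree's total variance below turns them into importance fractions.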
importance_dict = {}
for k in range(1, len(dimensions) + 1):
if k > depth:
break
for sub_dims in it.combinations(dimensions, k):
if type(dims[0]) == str:
dim_names = []
for j, dim in enumerate(sub_dims):
dim_names.append(self.cs.get_hyperparameter_by_idx(dim))
dim_names = tuple(dim_names)
importance_dict[dim_names] = {}
else:
importance_dict[sub_dims] = {}
# clean here to catch zero variance in a trees
non_zero_idx = np.nonzero(
[self.forest.trees_total_variance[t] for t in range(self.num_trees)]
)
if len(non_zero_idx[0]) == 0:
raise RuntimeError("Encountered zero total variance in all trees.")
fractions_total = np.array(
[
vu_total[sub_dims][t] / self.forest.trees_total_variance[t]
for t in non_zero_idx[0]
]
)
fractions_individual = np.array(
[
vu_individual[sub_dims][t] / self.forest.trees_total_variance[t]
for t in non_zero_idx[0]
]
)
if type(dims[0]) == str:
sub_dims = dim_names
importance_dict[sub_dims] = (
np.mean(fractions_individual),
np.mean(fractions_total),
np.std(fractions_individual),
np.std(fractions_total),
)
if sort:
sorted_importance_dict = {
k: v for k, v in sorted(importance_dict.items(), key=lambda item: item[1][1])
}
return sorted_importance_dict
return importance_dict
def marginal_mean_variance_for_values(self, dimlist, values_to_predict):
"""
Returns the marginal of selected parameters for specific values
Parameters
----------
dimlist: list
Contains the indices of ConfigSpace for the selected parameters
(starts with 0)
values_to_predict: list
Contains the values to be predicted
Returns
-------
tuple
marginal mean prediction and corresponding variance estimate
"""
        sample = np.full(self.num_dims, np.nan, dtype=float)
for i in range(len(dimlist)):
sample[dimlist[i]] = values_to_predict[i]
return self.forest.forest.marginal_mean_variance_prediction(sample)
def get_most_important_pairwise_marginals(self, params=None, n=10):
"""
Returns the n most important pairwise marginals from the whole ConfigSpace
Parameters
----------
params: list of strings or ints
If specified, limit analysis to those parameters. If ints, interpreting as indices from ConfigurationSpace
n: int
The number of most relevant pairwise marginals that will be returned
Returns
-------
list:
Contains the n most important pairwise marginals
"""
self.tot_imp_dict = OrderedDict()
pairwise_marginals = []
if params is None:
            dimensions = range(self.num_dims)
else:
if type(params[0]) == str:
idx = []
for i, param in enumerate(params):
idx.append(self.cs.get_idx_by_hyperparameter_name(param))
dimensions = idx
else:
dimensions = params
# pairs = it.combinations(dimensions,2)
pairs = [x for x in it.combinations(dimensions, 2)]
if params:
n = len(list(pairs))
for combi in pairs:
            pairwise_marginal_performance = self.quantify_importance(combi, depth=2)
            tot_imp = pairwise_marginal_performance[combi][0]  # mean individual importance fraction
combi_names = [self.cs_params[combi[0]].name, self.cs_params[combi[1]].name]
pairwise_marginals.append((tot_imp, combi_names[0], combi_names[1]))
pairwise_marginal_performance = sorted(pairwise_marginals, reverse=True)
for marginal, p1, p2 in pairwise_marginal_performance[:n]:
self.tot_imp_dict[(p1, p2)] = marginal
return self.tot_imp_dict
def get_triple_marginals(self, params=None):
"""
        Returns the most important triple marginals for the given parameters
Parameters
----------
params: list
The parameters
Returns
-------
list:
Contains most important triple marginals
"""
self.tot_imp_dict = OrderedDict()
triple_marginals = []
if len(params) < 3:
raise RuntimeError(
"Number of parameters have to be greater than %i. At least 3 parameters needed"
% len(params)
)
if type(params[0]) == str:
idx = []
for i, param in enumerate(params):
idx.append(self.cs.get_idx_by_hyperparameter_name(param))
dimensions = idx
else:
dimensions = params
triplets = [x for x in it.combinations(dimensions, 3)]
for combi in triplets:
            triple_marginal_performance = self.quantify_importance(combi, depth=3)
            tot_imp = triple_marginal_performance[combi][0]  # mean individual importance fraction
combi_names = [
self.cs_params[combi[0]].name,
self.cs_params[combi[1]].name,
self.cs_params[combi[2]].name,
]
triple_marginals.append((tot_imp, combi_names[0], combi_names[1], combi_names[2]))
triple_marginal_performance = sorted(triple_marginals, reverse=True)
if params:
triple_marginal_performance = triple_marginal_performance[: len(list(triplets))]
for marginal, p1, p2, p3 in triple_marginal_performance:
self.tot_imp_dict[(p1, p2, p3)] = marginal
return self.tot_imp_dict
if __name__ == "__main__":
import sys
sys.path.insert(0, "../../")
import ConfigSpace
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
import numpy as np
from ConfigSpace.hyperparameters import (
CategoricalHyperparameter,
Constant,
UniformFloatHyperparameter,
UniformIntegerHyperparameter,
)
cs = CS.ConfigurationSpace(seed=1234)
alpha = CSH.UniformFloatHyperparameter(name="alpha", lower=0, upper=1)
beta = CSH.UniformFloatHyperparameter(name="beta", lower=0, upper=1)
gamma = CSH.UniformFloatHyperparameter(name="gamma", lower=0, upper=1)
gamma1 = CSH.UniformFloatHyperparameter(name="gamma1", lower=0, upper=1)
gamma2 = CSH.UniformFloatHyperparameter(name="gamma2", lower=0, upper=1)
gamma3 = CSH.UniformFloatHyperparameter(name="gamma3", lower=0, upper=1)
# Constants do not work
# gamma = CSH.Constant(name='gamma', value=1)
cs.add_hyperparameters([alpha, beta, gamma])
X = []
Y = []
for config in cs.sample_configuration(100):
cost = np.random.randn()
encoded = config.get_array()
X.append(encoded)
Y.append(cost)
X = np.array(X)
Y = np.array(Y)
conditional = {}
impute_values = {}
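    # Flag conditional hyperparameters and pick a type-dependent imputation value, so inactive
    # (non-finite) entries in the encoded configurations can be replaced below.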
for idx, hp in enumerate(cs.get_hyperparameters()):
if idx not in conditional:
parents = cs.get_parents_of(hp.name)
if len(parents) == 0:
conditional[idx] = False
else:
conditional[idx] = True
if isinstance(hp, CategoricalHyperparameter):
impute_values[idx] = len(hp.choices)
elif isinstance(hp, (UniformFloatHyperparameter, UniformIntegerHyperparameter)):
impute_values[idx] = -1
elif isinstance(hp, Constant):
impute_values[idx] = 1
else:
raise ValueError
if conditional[idx] is True:
nonfinite_mask = ~np.isfinite(X[:, idx])
X[nonfinite_mask, idx] = impute_values[idx]
# f = fANOVA(X, Y, cs)
# imp = f.quantify_importance(cs.get_hyperparameter_names()[:3], depth=1)
# print(imp)
f = fANOVA(X, Y, cs)
    imp = f.quantify_importance(cs.get_hyperparameter_names(), depth=1, sort=False)
print(imp)
|
[
"numpy.full",
"ConfigSpace.ConfigurationSpace",
"numpy.random.randn",
"numpy.std",
"sys.path.insert",
"numpy.isfinite",
"deepcave.evaluators.epm.fanova_forest.fANOVAForest",
"itertools.combinations",
"numpy.mean",
"numpy.array",
"ConfigSpace.hyperparameters.UniformFloatHyperparameter",
"collections.OrderedDict"
] |
[((10094, 10122), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../../"""'], {}), "(0, '../../')\n", (10109, 10122), False, 'import sys\n'), ((10434, 10466), 'ConfigSpace.ConfigurationSpace', 'CS.ConfigurationSpace', ([], {'seed': '(1234)'}), '(seed=1234)\n', (10455, 10466), True, 'import ConfigSpace as CS\n'), ((10480, 10542), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', ([], {'name': '"""alpha"""', 'lower': '(0)', 'upper': '(1)'}), "(name='alpha', lower=0, upper=1)\n", (10510, 10542), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((10554, 10615), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', ([], {'name': '"""beta"""', 'lower': '(0)', 'upper': '(1)'}), "(name='beta', lower=0, upper=1)\n", (10584, 10615), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((10628, 10690), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', ([], {'name': '"""gamma"""', 'lower': '(0)', 'upper': '(1)'}), "(name='gamma', lower=0, upper=1)\n", (10658, 10690), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((10704, 10767), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', ([], {'name': '"""gamma1"""', 'lower': '(0)', 'upper': '(1)'}), "(name='gamma1', lower=0, upper=1)\n", (10734, 10767), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((10781, 10844), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', ([], {'name': '"""gamma2"""', 'lower': '(0)', 'upper': '(1)'}), "(name='gamma2', lower=0, upper=1)\n", (10811, 10844), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((10858, 10921), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', ([], {'name': '"""gamma3"""', 'lower': '(0)', 'upper': '(1)'}), "(name='gamma3', lower=0, upper=1)\n", (10888, 10921), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((11252, 11263), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (11260, 11263), True, 'import numpy as np\n'), ((11272, 11283), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (11280, 11283), True, 'import numpy as np\n'), ((2241, 2597), 'deepcave.evaluators.epm.fanova_forest.fANOVAForest', 'fANOVAForest', ([], {'configspace': 'configspace', 'seed': 'seed', 'num_trees': 'num_trees', 'bootstrapping': 'bootstrapping', 'points_per_tree': 'points_per_tree', 'ratio_features': 'ratio_features', 'min_samples_split': 'min_samples_split', 'min_samples_leaf': 'min_samples_leaf', 'max_depth': 'max_depth', 'cutoffs': 'cutoffs', 'instance_features': 'instance_features', 'pca_components': 'pca_components'}), '(configspace=configspace, seed=seed, num_trees=num_trees,\n bootstrapping=bootstrapping, points_per_tree=points_per_tree,\n ratio_features=ratio_features, min_samples_split=min_samples_split,\n min_samples_leaf=min_samples_leaf, max_depth=max_depth, cutoffs=cutoffs,\n instance_features=instance_features, pca_components=pca_components)\n', (2253, 2597), False, 'from deepcave.evaluators.epm.fanova_forest import fANOVAForest\n'), ((6309, 6353), 'numpy.full', 'np.full', (['self.n_dims', 'np.nan'], {'dtype': 'np.float'}), '(self.n_dims, np.nan, dtype=np.float)\n', (6316, 6353), True, 'import numpy as np\n'), ((7135, 7148), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7146, 7148), False, 'from collections import OrderedDict\n'), ((8674, 8687), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8685, 8687), False, 
'from collections import OrderedDict\n'), ((11138, 11155), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (11153, 11155), True, 'import numpy as np\n'), ((3906, 3936), 'itertools.combinations', 'it.combinations', (['dimensions', 'k'], {}), '(dimensions, k)\n', (3921, 3936), True, 'import itertools as it\n'), ((4707, 4809), 'numpy.array', 'np.array', (['[(vu_total[sub_dims][t] / self.forest.trees_total_variance[t]) for t in\n non_zero_idx[0]]'], {}), '([(vu_total[sub_dims][t] / self.forest.trees_total_variance[t]) for\n t in non_zero_idx[0]])\n', (4715, 4809), True, 'import numpy as np\n'), ((4951, 5059), 'numpy.array', 'np.array', (['[(vu_individual[sub_dims][t] / self.forest.trees_total_variance[t]) for t in\n non_zero_idx[0]]'], {}), '([(vu_individual[sub_dims][t] / self.forest.trees_total_variance[t]\n ) for t in non_zero_idx[0]])\n', (4959, 5059), True, 'import numpy as np\n'), ((7623, 7653), 'itertools.combinations', 'it.combinations', (['dimensions', '(2)'], {}), '(dimensions, 2)\n', (7638, 7653), True, 'import itertools as it\n'), ((9203, 9233), 'itertools.combinations', 'it.combinations', (['dimensions', '(3)'], {}), '(dimensions, 3)\n', (9218, 9233), True, 'import itertools as it\n'), ((12080, 12102), 'numpy.isfinite', 'np.isfinite', (['X[:, idx]'], {}), '(X[:, idx])\n', (12091, 12102), True, 'import numpy as np\n'), ((5311, 5340), 'numpy.mean', 'np.mean', (['fractions_individual'], {}), '(fractions_individual)\n', (5318, 5340), True, 'import numpy as np\n'), ((5362, 5386), 'numpy.mean', 'np.mean', (['fractions_total'], {}), '(fractions_total)\n', (5369, 5386), True, 'import numpy as np\n'), ((5408, 5436), 'numpy.std', 'np.std', (['fractions_individual'], {}), '(fractions_individual)\n', (5414, 5436), True, 'import numpy as np\n'), ((5458, 5481), 'numpy.std', 'np.std', (['fractions_total'], {}), '(fractions_total)\n', (5464, 5481), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
import functools
import os.path
import random
import sys
import xml.etree.ElementTree
import numpy as np
import matplotlib.pyplot as plt
import skimage.data
import cv2
import PIL.Image
import pickle
def load_pascal_occluder(pascal_voc_root_path):
occluders = []
structuring_element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (8, 8))
annotation_paths = list_filepaths(os.path.join(pascal_voc_root_path, 'Annotations'))
for annotation_path in annotation_paths:
xml_root = xml.etree.ElementTree.parse(annotation_path).getroot()
is_segmented = (xml_root.find('segmented').text != '0')
if not is_segmented:
continue
boxes = []
for i_obj, obj in enumerate(xml_root.findall('object')):
is_person = (obj.find('name').text == 'person')
is_difficult = (obj.find('difficult').text != '0')
is_truncated = (obj.find('truncated').text != '0')
if not is_difficult and not is_truncated:
bndbox = obj.find('bndbox')
box = [int(bndbox.find(s).text) for s in ['xmin', 'ymin', 'xmax', 'ymax']]
boxes.append((i_obj, box))
if not boxes:
continue
im_filename = xml_root.find('filename').text
seg_filename = im_filename.replace('jpg', 'png')
im_path = os.path.join(pascal_voc_root_path, 'JPEGImages', im_filename)
seg_path = os.path.join(pascal_voc_root_path, 'SegmentationObject', seg_filename)
im = np.asarray(PIL.Image.open(im_path))
labels = np.asarray(PIL.Image.open(seg_path))
for i_obj, (xmin, ymin, xmax, ymax) in boxes:
object_mask = (labels[ymin:ymax, xmin:xmax] == i_obj + 1).astype(np.uint8) * 255
object_image = im[ymin:ymax, xmin:xmax]
if cv2.countNonZero(object_mask) < 500:
# Ignore small objects
continue
# Reduce the opacity of the mask along the border for smoother blending
eroded = cv2.erode(object_mask, structuring_element)
object_mask[eroded < object_mask] = 192
object_with_mask = np.concatenate([object_image, object_mask[..., np.newaxis]], axis=-1)
if object_with_mask.size == 0:
continue
# Downscale for efficiency
object_with_mask = resize_by_factor(object_with_mask, 0.5)
occluders.append(object_with_mask)
print("total # of occluders: ", len(occluders))
return occluders
def load_coco_person_occluder(data_path, data_split):
img_dir_path = os.path.join(data_path, f'{data_split}2017')
part_seg_path = os.path.join(data_path, 'densepose_output', 'DensePose_maskRCNN_output')
dp_dict = load_dp_result(part_seg_path, data_split)
print("loaded dp result..., total imgs: ", len(dp_dict.keys()))
from densepose.data.structures import DensePoseResult
from timer import Timer
load_timer = Timer()
occluders = []
structuring_element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (8, 8))
for img_name in dp_dict.keys():
img_path = os.path.join(img_dir_path, img_name)
load_timer.tic()
img = cv2.imread(img_path, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
img = img[:, :, ::-1].copy()
# img = np.asarray(PIL.Image.open(img_path))
load_timer.toc()
dp_outputs = dp_dict[img_name]
for output in dp_outputs:
encoded_dp = output['dp']
iuv_arr = DensePoseResult.decode_png_data(*encoded_dp)
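            # iuv_arr has shape (3, H, W); channel 0 is the DensePose body-part index
            # (0 = background), so non-zero entries form the person mask used below.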
_, h, w = iuv_arr.shape
dp_bbox = output['bbox']
xmin, ymin = int(dp_bbox[0] + 0.5), int(dp_bbox[1] + 0.5)
xmax, ymax = xmin+w, ymin+h
object_mask = (iuv_arr[0] != 0).astype(np.uint8) * 255
object_image = img[ymin:ymax, xmin:xmax]
if cv2.countNonZero(object_mask) < 5000:
# Ignore small objects or low resolution objects
continue
# Reduce the opacity of the mask along the border for smoother blending
eroded = cv2.erode(object_mask, structuring_element)
object_mask[eroded < object_mask] = 192
object_with_mask = np.concatenate([object_image, object_mask[..., np.newaxis]], axis=-1)
if object_with_mask.size == 0:
continue
# Downscale for efficiency
object_with_mask = resize_by_factor(object_with_mask, 0.5)
occluders.append(object_with_mask)
            if len(occluders) > 5000:
                break
        # Also stop scanning further images once enough occluders have been collected.
        if len(occluders) > 5000:
            break
print("img load time: ", load_timer.total_time)
print("total # of occluders: ", len(occluders))
return occluders
def load_dp_result(part_seg_path, data_split):
print(f'Load DensePose Result of COCO {data_split} set')
data_path = os.path.join(part_seg_path, f'coco_{data_split}.pkl')
with open(data_path, 'rb') as f:
raw_data_list = pickle.load(f)
data_dict = {}
for rd in raw_data_list:
key = rd['file_name'].split('/')[-1]
scores = rd['scores']
pred_data_list = []
for idx in range(len(scores)):
if scores[idx] > 0.5:
pred_data = {}
pred_data['bbox'] = rd['pred_boxes_XYXY'][idx]
pred_data['dp'] = rd['pred_densepose'].results[idx]
pred_data_list.append(pred_data)
data_dict[key] = pred_data_list
return data_dict
def occlude_with_objects(im, occluders):
"""Returns an augmented version of `im`, containing some occluders from the Pascal VOC dataset."""
result = im.copy()
width_height = np.asarray([im.shape[1], im.shape[0]])
count = np.random.randint(1, 5)
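    # Paste between 1 and 4 randomly chosen occluders, each rescaled relative to the image size.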
for _ in range(count):
occluder = random.choice(occluders)
im_scale_factor = min(width_height) / max(occluder.shape[:2])
random_scale_factor = np.random.uniform(0.2, 0.5)
scale_factor = random_scale_factor * im_scale_factor
try:
occluder = resize_by_factor(occluder, scale_factor)
except Exception as e:
print("error")
continue
# center = np.random.uniform([0, 0], width_height)
center = np.random.uniform(width_height/8, width_height/8*7)
paste_over(im_src=occluder, im_dst=result, center=center)
return result
def paste_over(im_src, im_dst, center):
"""Pastes `im_src` onto `im_dst` at a specified position, with alpha blending, in place.
Locations outside the bounds of `im_dst` are handled as expected (only a part or none of
`im_src` becomes visible).
Args:
im_src: The RGBA image to be pasted onto `im_dst`. Its size can be arbitrary.
im_dst: The target image.
        The blending weights are taken from the alpha channel of `im_src` (scaled to 0.0-1.0);
            large values mean more visibility for `im_src`.
center: coordinates in `im_dst` where the center of `im_src` should be placed.
"""
width_height_src = np.asarray([im_src.shape[1], im_src.shape[0]])
width_height_dst = np.asarray([im_dst.shape[1], im_dst.shape[0]])
center = np.round(center).astype(np.int32)
raw_start_dst = center - width_height_src // 2
raw_end_dst = raw_start_dst + width_height_src
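    # Clip the paste region to the destination bounds and crop the source to match, so
    # occluders that fall partially outside the image are handled gracefully.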
start_dst = np.clip(raw_start_dst, 0, width_height_dst)
end_dst = np.clip(raw_end_dst, 0, width_height_dst)
region_dst = im_dst[start_dst[1]:end_dst[1], start_dst[0]:end_dst[0]]
start_src = start_dst - raw_start_dst
end_src = width_height_src + (end_dst - raw_end_dst)
region_src = im_src[start_src[1]:end_src[1], start_src[0]:end_src[0]]
color_src = region_src[..., 0:3]
alpha = region_src[..., 3:].astype(np.float32)/255
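    # The last channel is the soft occluder mask (255 inside, 192 along the eroded border),
    # so the blend fades out smoothly at the object's edges.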
im_dst[start_dst[1]:end_dst[1], start_dst[0]:end_dst[0]] = (
alpha * color_src + (1 - alpha) * region_dst)
return im_dst
def resize_by_factor(im, factor):
"""Returns a copy of `im` resized by `factor`, using bilinear interp for up and area interp
for downscaling.
"""
new_size = tuple(np.round(np.array([im.shape[1], im.shape[0]]) * factor).astype(int))
interp = cv2.INTER_LINEAR if factor > 1.0 else cv2.INTER_AREA
return cv2.resize(im, new_size, fx=factor, fy=factor, interpolation=interp)
def list_filepaths(dirpath):
names = os.listdir(dirpath)
paths = [os.path.join(dirpath, name) for name in names]
return sorted(filter(os.path.isfile, paths))
def main():
"""Demo of how to use the code"""
# path = 'something/something/VOCtrainval_11-May-2012/VOCdevkit/VOC2012'
path = sys.argv[1]
print('Loading occluders from Pascal VOC dataset...')
occluders = load_pascal_occluder(pascal_voc_root_path=path)
print('Found {} suitable objects'.format(len(occluders)))
original_im = cv2.resize(skimage.data.astronaut(), (256, 256))
fig, axarr = plt.subplots(3, 3, figsize=(7, 7))
for ax in axarr.ravel():
occluded_im = occlude_with_objects(original_im, occluders)
ax.imshow(occluded_im, interpolation="none")
ax.axis('off')
fig.tight_layout(h_pad=0)
# plt.savefig('examples.jpg', dpi=150, bbox_inches='tight')
plt.show()
if __name__ == '__main__':
dp_path = '/home/redarknight/projects/detectron2/projects/DensePose/'
sys.path.insert(0, dp_path)
occluder = load_coco_person_occluder('/media/disk2/hongsuk/data/COCO/2017/', data_split='train')
# img = occlude_with_objects(dummy, occluder)
|
[
"numpy.clip",
"numpy.random.randint",
"pickle.load",
"cv2.erode",
"numpy.round",
"timer.Timer",
"matplotlib.pyplot.subplots",
"cv2.resize",
"matplotlib.pyplot.show",
"cv2.countNonZero",
"numpy.asarray",
"numpy.concatenate",
"numpy.random.uniform",
"densepose.data.structures.DensePoseResult.decode_png_data",
"cv2.getStructuringElement",
"sys.path.insert",
"random.choice",
"cv2.imread",
"numpy.array"
] |
[((319, 371), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', '(8, 8)'], {}), '(cv2.MORPH_ELLIPSE, (8, 8))\n', (344, 371), False, 'import cv2\n'), ((2989, 2996), 'timer.Timer', 'Timer', ([], {}), '()\n', (2994, 2996), False, 'from timer import Timer\n'), ((3043, 3095), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', '(8, 8)'], {}), '(cv2.MORPH_ELLIPSE, (8, 8))\n', (3068, 3095), False, 'import cv2\n'), ((5689, 5727), 'numpy.asarray', 'np.asarray', (['[im.shape[1], im.shape[0]]'], {}), '([im.shape[1], im.shape[0]])\n', (5699, 5727), True, 'import numpy as np\n'), ((5740, 5763), 'numpy.random.randint', 'np.random.randint', (['(1)', '(5)'], {}), '(1, 5)\n', (5757, 5763), True, 'import numpy as np\n'), ((7079, 7125), 'numpy.asarray', 'np.asarray', (['[im_src.shape[1], im_src.shape[0]]'], {}), '([im_src.shape[1], im_src.shape[0]])\n', (7089, 7125), True, 'import numpy as np\n'), ((7149, 7195), 'numpy.asarray', 'np.asarray', (['[im_dst.shape[1], im_dst.shape[0]]'], {}), '([im_dst.shape[1], im_dst.shape[0]])\n', (7159, 7195), True, 'import numpy as np\n'), ((7363, 7406), 'numpy.clip', 'np.clip', (['raw_start_dst', '(0)', 'width_height_dst'], {}), '(raw_start_dst, 0, width_height_dst)\n', (7370, 7406), True, 'import numpy as np\n'), ((7421, 7462), 'numpy.clip', 'np.clip', (['raw_end_dst', '(0)', 'width_height_dst'], {}), '(raw_end_dst, 0, width_height_dst)\n', (7428, 7462), True, 'import numpy as np\n'), ((8274, 8342), 'cv2.resize', 'cv2.resize', (['im', 'new_size'], {'fx': 'factor', 'fy': 'factor', 'interpolation': 'interp'}), '(im, new_size, fx=factor, fy=factor, interpolation=interp)\n', (8284, 8342), False, 'import cv2\n'), ((8939, 8973), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(3)'], {'figsize': '(7, 7)'}), '(3, 3, figsize=(7, 7))\n', (8951, 8973), True, 'import matplotlib.pyplot as plt\n'), ((9245, 9255), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9253, 9255), True, 'import matplotlib.pyplot as plt\n'), ((9363, 9390), 'sys.path.insert', 'sys.path.insert', (['(0)', 'dp_path'], {}), '(0, dp_path)\n', (9378, 9390), False, 'import sys\n'), ((3227, 3297), 'cv2.imread', 'cv2.imread', (['img_path', '(cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)'], {}), '(img_path, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)\n', (3237, 3297), False, 'import cv2\n'), ((4989, 5003), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5000, 5003), False, 'import pickle\n'), ((5811, 5835), 'random.choice', 'random.choice', (['occluders'], {}), '(occluders)\n', (5824, 5835), False, 'import random\n'), ((5937, 5964), 'numpy.random.uniform', 'np.random.uniform', (['(0.2)', '(0.5)'], {}), '(0.2, 0.5)\n', (5954, 5964), True, 'import numpy as np\n'), ((6260, 6317), 'numpy.random.uniform', 'np.random.uniform', (['(width_height / 8)', '(width_height / 8 * 7)'], {}), '(width_height / 8, width_height / 8 * 7)\n', (6277, 6317), True, 'import numpy as np\n'), ((2051, 2094), 'cv2.erode', 'cv2.erode', (['object_mask', 'structuring_element'], {}), '(object_mask, structuring_element)\n', (2060, 2094), False, 'import cv2\n'), ((2178, 2247), 'numpy.concatenate', 'np.concatenate', (['[object_image, object_mask[..., np.newaxis]]'], {'axis': '(-1)'}), '([object_image, object_mask[..., np.newaxis]], axis=-1)\n', (2192, 2247), True, 'import numpy as np\n'), ((3548, 3592), 'densepose.data.structures.DensePoseResult.decode_png_data', 'DensePoseResult.decode_png_data', (['*encoded_dp'], {}), '(*encoded_dp)\n', (3579, 3592), False, 'from 
densepose.data.structures import DensePoseResult\n'), ((4146, 4189), 'cv2.erode', 'cv2.erode', (['object_mask', 'structuring_element'], {}), '(object_mask, structuring_element)\n', (4155, 4189), False, 'import cv2\n'), ((4273, 4342), 'numpy.concatenate', 'np.concatenate', (['[object_image, object_mask[..., np.newaxis]]'], {'axis': '(-1)'}), '([object_image, object_mask[..., np.newaxis]], axis=-1)\n', (4287, 4342), True, 'import numpy as np\n'), ((7210, 7226), 'numpy.round', 'np.round', (['center'], {}), '(center)\n', (7218, 7226), True, 'import numpy as np\n'), ((1844, 1873), 'cv2.countNonZero', 'cv2.countNonZero', (['object_mask'], {}), '(object_mask)\n', (1860, 1873), False, 'import cv2\n'), ((3912, 3941), 'cv2.countNonZero', 'cv2.countNonZero', (['object_mask'], {}), '(object_mask)\n', (3928, 3941), False, 'import cv2\n'), ((8137, 8173), 'numpy.array', 'np.array', (['[im.shape[1], im.shape[0]]'], {}), '([im.shape[1], im.shape[0]])\n', (8145, 8173), True, 'import numpy as np\n')]
|
from __future__ import print_function
import numpy as np
import pandas as pd
import numpy.testing as npt
import pytest
import os
from collections import OrderedDict
import lifetimes.estimation as estimation
import lifetimes.utils as utils
from lifetimes.datasets import load_cdnow_summary, load_cdnow_summary_data_with_monetary_value, load_donations,\
load_transaction_data
@pytest.fixture
def cdnow_customers():
return load_cdnow_summary()
PATH_SAVE_MODEL = './base_fitter.pkl'
PATH_SAVE_BGNBD_MODEL = './betageo_fitter.pkl'
class TestBaseFitter():
def test_repr(self):
base_fitter = estimation.BaseFitter()
assert repr(base_fitter) == '<lifetimes.BaseFitter>'
base_fitter.params_ = dict(x=12.3, y=42)
base_fitter.data = np.array([1, 2, 3])
assert repr(base_fitter) == '<lifetimes.BaseFitter: fitted with 3 subjects, x: 12.30, y: 42.00>'
base_fitter.data = None
assert repr(base_fitter) == '<lifetimes.BaseFitter: x: 12.30, y: 42.00>'
def test_unload_params(self):
base_fitter = estimation.BaseFitter()
with pytest.raises(ValueError):
base_fitter._unload_params()
base_fitter.params_ = dict(x=12.3, y=42)
npt.assert_array_almost_equal([12.3, 42], base_fitter._unload_params('x', 'y'))
def test_save_load_model(self):
base_fitter = estimation.BaseFitter()
base_fitter.save_model(PATH_SAVE_MODEL)
assert os.path.exists(PATH_SAVE_MODEL) == True
base_fitter_saved = estimation.BaseFitter()
base_fitter_saved.load_model(PATH_SAVE_MODEL)
assert repr(base_fitter) == repr(base_fitter_saved)
os.remove(PATH_SAVE_MODEL)
class TestBetaGeoBetaBinomFitter():
@pytest.fixture()
def donations(self):
return load_donations()
def test_params_out_is_close_to_Hardie_paper(self, donations):
donations = donations
bbtf = estimation.BetaGeoBetaBinomFitter()
bbtf.fit(
donations['frequency'],
donations['recency'],
donations['periods'],
donations['weights'],
)
expected = np.array([1.204, 0.750, 0.657, 2.783])
npt.assert_array_almost_equal(expected, np.array(bbtf._unload_params('alpha','beta','gamma','delta')),
decimal=2)
def test_prob_alive_is_close_to_Hardie_paper_table_6(self, donations):
"""Table 6: P(Alive in 2002) as a Function of Recency and Frequency"""
bbtf = estimation.BetaGeoBetaBinomFitter()
bbtf.fit(
donations['frequency'],
donations['recency'],
donations['periods'],
donations['weights'],
)
bbtf.data['prob_alive'] = bbtf.conditional_probability_alive(1, donations['frequency'], donations['recency'], donations['periods'])
# Expected probabilities for last year 1995-0 repeat, 1999-2 repeat, 2001-6 repeat
expected = np.array([0.11, 0.59, 0.93])
prob_list = np.zeros(3)
prob_list[0] = (bbtf.data[(bbtf.data['frequency'] == 0) & (bbtf.data['recency'] == 0)]['prob_alive'])
prob_list[1] = (bbtf.data[(bbtf.data['frequency'] == 2) & (bbtf.data['recency'] == 4)]['prob_alive'])
prob_list[2] = (bbtf.data[(bbtf.data['frequency'] == 6) & (bbtf.data['recency'] == 6)]['prob_alive'])
npt.assert_array_almost_equal(expected, prob_list, decimal=2)
def test_conditional_expectation_returns_same_value_as_Hardie_excel_sheet(self, donations):
"""
Total from Hardie's Conditional Expectations (II) sheet.
http://brucehardie.com/notes/010/BGBB_2011-01-20_XLSX.zip
"""
bbtf = estimation.BetaGeoBetaBinomFitter()
bbtf.fit(
donations['frequency'],
donations['recency'],
donations['periods'],
donations['weights'],
)
pred_purchases = bbtf.conditional_expected_number_of_purchases_up_to_time(5, donations['frequency'], donations['recency'], donations['periods']) * donations['weights']
expected = 12884.2 # Sum of column F Exp Tot
npt.assert_almost_equal(expected, pred_purchases.sum(), decimal=0)
def test_expected_purchases_in_n_periods_returns_same_value_as_Hardie_excel_sheet(self, donations):
"""Total expected from Hardie's In-Sample Fit sheet."""
bbtf = estimation.BetaGeoBetaBinomFitter()
bbtf.fit(
donations['frequency'],
donations['recency'],
donations['periods'],
donations['weights'],
)
expected = np.array([3454.9, 1253.1]) # Cells C18 and C24
estimated = bbtf.expected_number_of_transactions_in_first_n_periods(6).loc[[0,6]].values.flatten()
npt.assert_almost_equal(expected, estimated, decimal=0)
def test_fit_with_index(self, donations):
bbtf = estimation.BetaGeoBetaBinomFitter()
index = range(len(donations), 0, -1)
bbtf.fit(
donations['frequency'],
donations['recency'],
donations['periods'],
donations['weights'],
index=index
)
assert (bbtf.data.index == index).all() == True
bbtf = estimation.BetaGeoBetaBinomFitter()
bbtf.fit(
donations['frequency'],
donations['recency'],
donations['periods'],
donations['weights'],
index=None
)
assert (bbtf.data.index == index).all() == False
def test_fit_with_and_without_weights(self, donations):
exploded_dataset = pd.DataFrame(columns=['frequency', 'recency', 'periods'])
for _, row in donations.iterrows():
exploded_dataset = exploded_dataset.append(
pd.DataFrame(
[[row['frequency'], row['recency'], row['periods']]] * row['weights'],
columns = ['frequency', 'recency', 'periods']
))
exploded_dataset = exploded_dataset.astype(np.int64)
assert exploded_dataset.shape[0] == donations['weights'].sum()
bbtf_noweights = estimation.BetaGeoBetaBinomFitter()
bbtf_noweights.fit(
exploded_dataset['frequency'],
exploded_dataset['recency'],
exploded_dataset['periods'],
)
bbtf = estimation.BetaGeoBetaBinomFitter()
bbtf.fit(
donations['frequency'],
donations['recency'],
donations['periods'],
donations['weights'],
)
npt.assert_array_almost_equal(
np.array(bbtf_noweights._unload_params('alpha','beta','gamma','delta')),
np.array(bbtf._unload_params('alpha','beta','gamma','delta')),
decimal=4
)
class TestGammaGammaFitter():
@pytest.fixture()
def cdnow_customers_with_monetary_value(self):
return load_cdnow_summary_data_with_monetary_value()
def test_params_out_is_close_to_Hardie_paper(self, cdnow_customers_with_monetary_value):
returning_cdnow_customers_with_monetary_value = cdnow_customers_with_monetary_value[
cdnow_customers_with_monetary_value['frequency'] > 0
]
ggf = estimation.GammaGammaFitter()
ggf.fit(
returning_cdnow_customers_with_monetary_value['frequency'],
returning_cdnow_customers_with_monetary_value['monetary_value'],
iterative_fitting=3
)
expected = np.array([6.25, 3.74, 15.44])
npt.assert_array_almost_equal(expected, np.array(ggf._unload_params('p', 'q', 'v')), decimal=2)
def test_conditional_expected_average_profit(self, cdnow_customers_with_monetary_value):
ggf = estimation.GammaGammaFitter()
ggf.params_ = OrderedDict({'p':6.25, 'q':3.74, 'v':15.44})
summary = cdnow_customers_with_monetary_value.head(10)
estimates = ggf.conditional_expected_average_profit(summary['frequency'], summary['monetary_value'])
expected = np.array([24.65, 18.91, 35.17, 35.17, 35.17, 71.46, 18.91, 35.17, 27.28, 35.17]) # from Hardie spreadsheet http://brucehardie.com/notes/025/
npt.assert_allclose(estimates.values, expected, atol=0.1)
def test_customer_lifetime_value_with_bgf(self, cdnow_customers_with_monetary_value):
ggf = estimation.GammaGammaFitter()
ggf.params_ = OrderedDict({'p':6.25, 'q':3.74, 'v':15.44})
bgf = estimation.BetaGeoFitter()
bgf.fit(cdnow_customers_with_monetary_value['frequency'],
cdnow_customers_with_monetary_value['recency'],
cdnow_customers_with_monetary_value['T'],
iterative_fitting=3)
ggf_clv = ggf.customer_lifetime_value(
bgf,
cdnow_customers_with_monetary_value['frequency'],
cdnow_customers_with_monetary_value['recency'],
cdnow_customers_with_monetary_value['T'],
cdnow_customers_with_monetary_value['monetary_value']
)
utils_clv = utils._customer_lifetime_value(
bgf,
cdnow_customers_with_monetary_value['frequency'],
cdnow_customers_with_monetary_value['recency'],
cdnow_customers_with_monetary_value['T'],
ggf.conditional_expected_average_profit(cdnow_customers_with_monetary_value['frequency'],
cdnow_customers_with_monetary_value['monetary_value'])
)
npt.assert_equal(ggf_clv.values, utils_clv.values)
def test_fit_with_index(self, cdnow_customers_with_monetary_value):
returning_cdnow_customers_with_monetary_value = cdnow_customers_with_monetary_value[
cdnow_customers_with_monetary_value['frequency'] > 0
]
ggf = estimation.GammaGammaFitter()
index = range(len(returning_cdnow_customers_with_monetary_value), 0, -1)
ggf.fit(
returning_cdnow_customers_with_monetary_value['frequency'],
returning_cdnow_customers_with_monetary_value['monetary_value'],
iterative_fitting=1,
index=index
)
assert (ggf.data.index == index).all() == True
ggf = estimation.GammaGammaFitter()
ggf.fit(
returning_cdnow_customers_with_monetary_value['frequency'],
returning_cdnow_customers_with_monetary_value['monetary_value'],
iterative_fitting=1,
index=None
)
assert (ggf.data.index == index).all() == False
def test_params_out_is_close_to_Hardie_paper_with_q_constraint(self, cdnow_customers_with_monetary_value):
returning_cdnow_customers_with_monetary_value = cdnow_customers_with_monetary_value[
cdnow_customers_with_monetary_value['frequency'] > 0
]
ggf = estimation.GammaGammaFitter()
ggf.fit(
returning_cdnow_customers_with_monetary_value['frequency'],
returning_cdnow_customers_with_monetary_value['monetary_value'],
iterative_fitting=3,
q_constraint=True
)
expected = np.array([6.25, 3.74, 15.44])
npt.assert_array_almost_equal(expected, np.array(ggf._unload_params('p', 'q', 'v')), decimal=2)
def test_negative_log_likelihood_is_inf_when_q_constraint_true_and_q_lt_one(self):
frequency = 25
avg_monetary_value = 100
ggf = estimation.GammaGammaFitter()
assert np.isinf(ggf._negative_log_likelihood([6.25, -3.75, 15.44], frequency, avg_monetary_value, q_constraint=True))
class TestParetoNBDFitter():
def test_overflow_error(self):
ptf = estimation.ParetoNBDFitter()
params = np.array([10.465, 7.98565181e-03, 3.0516, 2.820])
freq = np.array([400., 500., 500.])
rec = np.array([5., 1., 4.])
age = np.array([6., 37., 37.])
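        # The log_A_0 term must stay finite, non-NaN and negative even for these extreme
        # parameter/frequency combinations that previously triggered overflow.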
assert all([r < 0 and not np.isinf(r) and not pd.isnull(r)
for r in ptf._log_A_0(params, freq, rec, age)])
def test_sum_of_scalar_inputs_to_negative_log_likelihood_is_equal_to_array(self):
ptf = estimation.ParetoNBDFitter
x = np.array([1, 3])
t_x = np.array([2, 2])
weights = np.array([1., 1.])
t = np.array([5, 6])
params = [1, 1, 1, 1]
assert ptf()._negative_log_likelihood(params, np.array([x[0]]), np.array([t_x[0]]), np.array([t[0]]), weights[0], 0) \
+ ptf()._negative_log_likelihood(params, np.array([x[1]]), np.array([t_x[1]]), np.array([t[1]]), weights[0], 0) \
== 2 * ptf()._negative_log_likelihood(params, x, t_x, t, weights, 0)
def test_params_out_is_close_to_Hardie_paper(self, cdnow_customers):
ptf = estimation.ParetoNBDFitter()
ptf.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'], iterative_fitting=3)
expected = np.array([ 0.553, 10.578, 0.606, 11.669])
npt.assert_array_almost_equal(expected, np.array(ptf._unload_params('r', 'alpha', 's', 'beta')), decimal=2)
def test_expectation_returns_same_value_as_R_BTYD(self, cdnow_customers):
""" From https://cran.r-project.org/web/packages/BTYD/BTYD.pdf """
ptf = estimation.ParetoNBDFitter()
ptf.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'], tol=1e-6)
expected = np.array([0.00000000, 0.05077821, 0.09916088, 0.14542507, 0.18979930,
0.23247466, 0.27361274, 0.31335159, 0.35181024, 0.38909211])
actual = ptf.expected_number_of_purchases_up_to_time(range(10))
npt.assert_allclose(expected, actual, atol=0.01)
def test_conditional_expectation_returns_same_value_as_R_BTYD(self, cdnow_customers):
""" From https://cran.r-project.org/web/packages/BTYD/vignettes/BTYD-walkthrough.pdf """
ptf = estimation.ParetoNBDFitter()
ptf.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
x = 26.00
t_x = 30.86
T = 31
t = 52
expected = 25.46
actual = ptf.conditional_expected_number_of_purchases_up_to_time(t, x, t_x, T)
assert abs(expected - actual) < 0.01
def test_conditional_expectation_underflow(self):
""" Test a pair of inputs for the ParetoNBD ptf.conditional_expected_number_of_purchases_up_to_time().
For a small change in the input, the result shouldn't change dramatically -- however, if the
function doesn't guard against numeric underflow, this change in input will result in an
underflow error.
"""
ptf = estimation.ParetoNBDFitter()
alpha = 10.58
beta = 11.67
r = 0.55
s = 0.61
ptf.params_ = OrderedDict({'alpha':alpha, 'beta':beta, 'r':r, 's':s})
# small change in inputs
left = ptf.conditional_expected_number_of_purchases_up_to_time(10, 132, 200, 200) # 6.2060517889632418
right = ptf.conditional_expected_number_of_purchases_up_to_time(10, 133, 200, 200) # 6.2528722475748113
assert abs(left - right) < 0.05
def test_conditional_probability_alive_is_between_0_and_1(self, cdnow_customers):
ptf = estimation.ParetoNBDFitter()
ptf.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
for freq in np.arange(0, 100, 10.):
for recency in np.arange(0, 100, 10.):
for t in np.arange(recency, 100, 10.):
assert 0.0 <= ptf.conditional_probability_alive(freq, recency, t) <= 1.0
def test_conditional_probability_alive(self, cdnow_customers):
"""
Target taken from page 8,
https://cran.r-project.org/web/packages/BTYD/vignettes/BTYD-walkthrough.pdf
"""
ptf = estimation.ParetoNBDFitter()
ptf.params_ = OrderedDict(
zip(['r', 'alpha', 's', 'beta'],
[0.5534, 10.5802, 0.6061, 11.6562]))
p_alive = ptf.conditional_probability_alive(26.00, 30.86, 31.00)
assert abs(p_alive - 0.9979) < 0.001
def test_conditional_probability_alive_overflow_error(self):
ptf = estimation.ParetoNBDFitter()
ptf.params_ = OrderedDict(
zip(['r', 'alpha', 's', 'beta'],
[10.465, 7.98565181e-03, 3.0516, 2.820]))
freq = np.array([40., 50., 50.])
rec = np.array([5., 1., 4.])
age = np.array([6., 37., 37.])
assert all([r <= 1 and r >= 0 and not np.isinf(r) and not pd.isnull(r)
for r in ptf.conditional_probability_alive(freq, rec, age)])
def test_conditional_probability_alive_matrix(self, cdnow_customers):
ptf = estimation.ParetoNBDFitter()
ptf.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
Z = ptf.conditional_probability_alive_matrix()
max_t = int(ptf.data['T'].max())
for t_x in range(Z.shape[0]):
for x in range(Z.shape[1]):
assert Z[t_x][x] == ptf.conditional_probability_alive(x, t_x, max_t)
def test_fit_with_index(self, cdnow_customers):
ptf = estimation.ParetoNBDFitter()
index = range(len(cdnow_customers), 0, -1)
ptf.fit(
cdnow_customers['frequency'],
cdnow_customers['recency'],
cdnow_customers['T'],
index=index
)
assert (ptf.data.index == index).all() == True
ptf = estimation.ParetoNBDFitter()
ptf.fit(
cdnow_customers['frequency'],
cdnow_customers['recency'],
cdnow_customers['T'],
index=None
)
assert (ptf.data.index == index).all() == False
def test_conditional_probability_of_n_purchases_up_to_time_is_between_0_and_1(self, cdnow_customers):
"""
Due to the large parameter space we take a random subset.
"""
ptf = estimation.ParetoNBDFitter()
ptf.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
for freq in np.random.choice(100, 5):
for recency in np.random.choice(100, 5):
for age in recency + np.random.choice(100, 5):
for t in np.random.choice(100, 5):
for n in np.random.choice(10, 5):
assert (
0.0
<= ptf.conditional_probability_of_n_purchases_up_to_time(n, t, freq, recency, age)
<= 1.0
)
def test_conditional_probability_of_n_purchases_up_to_time_adds_up_to_1(self, cdnow_customers):
"""
Due to the large parameter space we take a random subset. We also restrict our limits to keep the number of
values of n for which the probability needs to be calculated to a sane level.
"""
ptf = estimation.ParetoNBDFitter()
ptf.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
for freq in np.random.choice(10, 5):
for recency in np.random.choice(9, 5):
for age in np.random.choice(np.arange(recency, 10, 1), 5):
for t in 1 + np.random.choice(9, 5):
npt.assert_almost_equal(
np.sum([
ptf.conditional_probability_of_n_purchases_up_to_time(n, t, freq, recency, age)
for n in np.arange(0, 20, 1)
]),
1.0,
decimal=2
)
def test_fit_with_and_without_weights(self, cdnow_customers):
original_dataset_with_weights = cdnow_customers.copy()
original_dataset_with_weights = original_dataset_with_weights.groupby(['frequency', 'recency', 'T']).size()
original_dataset_with_weights = original_dataset_with_weights.reset_index()
original_dataset_with_weights = original_dataset_with_weights.rename(columns={0:'weights'})
pnbd_noweights = estimation.ParetoNBDFitter()
pnbd_noweights.fit(
cdnow_customers['frequency'],
cdnow_customers['recency'],
cdnow_customers['T'],
)
pnbd = estimation.ParetoNBDFitter()
pnbd.fit(
original_dataset_with_weights['frequency'],
original_dataset_with_weights['recency'],
original_dataset_with_weights['T'],
original_dataset_with_weights['weights'],
)
npt.assert_array_almost_equal(
np.array(pnbd_noweights._unload_params('r', 'alpha', 's', 'beta')),
np.array(pnbd._unload_params('r', 'alpha', 's', 'beta')),
decimal=2
)
class TestBetaGeoFitter():
def test_sum_of_scalar_inputs_to_negative_log_likelihood_is_equal_to_array(self):
bgf = estimation.BetaGeoFitter
x = np.array([1, 3])
t_x = np.array([2, 2])
t = np.array([5, 6])
weights = np.array([1])
params = [1, 1, 1, 1]
assert bgf._negative_log_likelihood(params, x[0], np.array([t_x[0]]), np.array([t[0]]), weights[0], 0) \
+ bgf._negative_log_likelihood(params, x[1], np.array([t_x[1]]), np.array([t[1]]), weights[0], 0) \
== 2 * bgf._negative_log_likelihood(params, x, t_x, t, weights, 0)
def test_params_out_is_close_to_Hardie_paper(self, cdnow_customers):
bfg = estimation.BetaGeoFitter()
bfg.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'], iterative_fitting=3)
expected = np.array([0.243, 4.414, 0.793, 2.426])
npt.assert_array_almost_equal(expected, np.array(bfg._unload_params('r', 'alpha', 'a', 'b')), decimal=3)
def test_conditional_expectation_returns_same_value_as_Hardie_excel_sheet(self, cdnow_customers):
bfg = estimation.BetaGeoFitter()
bfg.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
x = 2
t_x = 30.43
T = 38.86
t = 39
expected = 1.226
actual = bfg.conditional_expected_number_of_purchases_up_to_time(t, x, t_x, T)
assert abs(expected - actual) < 0.001
def test_expectation_returns_same_value_Hardie_excel_sheet(self, cdnow_customers):
bfg = estimation.BetaGeoFitter()
bfg.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'], tol=1e-6)
times = np.array([0.1429, 1.0, 3.00, 31.8571, 32.00, 78.00])
expected = np.array([0.0078 ,0.0532 ,0.1506 ,1.0405,1.0437, 1.8576])
actual = bfg.expected_number_of_purchases_up_to_time(times)
npt.assert_array_almost_equal(actual, expected, decimal=3)
def test_conditional_probability_alive_returns_1_if_no_repeat_purchases(self, cdnow_customers):
bfg = estimation.BetaGeoFitter()
bfg.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
assert bfg.conditional_probability_alive(0, 1, 1) == 1.0
def test_conditional_probability_alive_is_between_0_and_1(self, cdnow_customers):
bfg = estimation.BetaGeoFitter()
bfg.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
for i in range(0, 100, 10):
for j in range(0, 100, 10):
for k in range(j, 100, 10):
assert 0 <= bfg.conditional_probability_alive(i, j, k) <= 1.0
def test_fit_method_allows_for_better_accuracy_by_using_iterative_fitting(self, cdnow_customers):
bfg1 = estimation.BetaGeoFitter()
bfg2 = estimation.BetaGeoFitter()
np.random.seed(0)
bfg1.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
np.random.seed(0)
bfg2.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'], iterative_fitting=3)
assert bfg1._negative_log_likelihood_ >= bfg2._negative_log_likelihood_
def test_penalizer_term_will_shrink_coefs_to_0(self, cdnow_customers):
bfg_no_penalizer = estimation.BetaGeoFitter()
bfg_no_penalizer.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
params_1 = np.array(list(bfg_no_penalizer.params_.values()))
bfg_with_penalizer = estimation.BetaGeoFitter(penalizer_coef=0.1)
bfg_with_penalizer.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
params_2 = np.array(list(bfg_with_penalizer.params_.values()))
assert np.all(params_2 < params_1)
bfg_with_more_penalizer = estimation.BetaGeoFitter(penalizer_coef=10)
bfg_with_more_penalizer.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
params_3 = np.array(list(bfg_with_more_penalizer.params_.values()))
assert np.all(params_3 < params_2)
def test_conditional_probability_alive_matrix(self, cdnow_customers):
bfg = estimation.BetaGeoFitter()
bfg.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
Z = bfg.conditional_probability_alive_matrix()
max_t = int(bfg.data['T'].max())
assert Z[0][0] == 1
for t_x in range(Z.shape[0]):
for x in range(Z.shape[1]):
assert Z[t_x][x] == bfg.conditional_probability_alive(x, t_x, max_t)
def test_probability_of_n_purchases_up_to_time_same_as_R_BTYD(self):
""" See https://cran.r-project.org/web/packages/BTYD/BTYD.pdf """
bgf = estimation.BetaGeoFitter()
bgf.params_ = OrderedDict({'r':0.243, 'alpha':4.414, 'a':0.793, 'b':2.426})
# probability that a customer will make 10 repeat transactions in the
# time interval (0,2]
expected = 1.07869e-07
actual = bgf.probability_of_n_purchases_up_to_time(2,10)
assert abs(expected - actual) < 10e-5
# probability that a customer will make no repeat transactions in the
# time interval (0,39]
expected = 0.5737864
actual = bgf.probability_of_n_purchases_up_to_time(39,0)
assert abs(expected - actual) < 10e-5
# PMF
expected = np.array([0.0019995214, 0.0015170236, 0.0011633150, 0.0009003148, 0.0007023638,
0.0005517902, 0.0004361913, 0.0003467171, 0.0002769613, 0.0002222260])
actual = np.array([bgf.probability_of_n_purchases_up_to_time(30,n) for n in range(11,21)])
npt.assert_array_almost_equal(expected, actual, decimal=5)
def test_scaling_inputs_gives_same_or_similar_results(self, cdnow_customers):
bgf = estimation.BetaGeoFitter()
bgf.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
scale = 10
bgf_with_large_inputs = estimation.BetaGeoFitter()
bgf_with_large_inputs.fit(cdnow_customers['frequency'], scale * cdnow_customers['recency'], scale * cdnow_customers['T'], iterative_fitting=2)
assert bgf_with_large_inputs._scale < 1.
assert abs(bgf_with_large_inputs.conditional_probability_alive(1, scale * 1, scale * 2) - bgf.conditional_probability_alive(1, 1, 2)) < 10e-5
assert abs(bgf_with_large_inputs.conditional_probability_alive(1, scale * 2, scale * 10) - bgf.conditional_probability_alive(1, 2, 10)) < 10e-5
def test_save_load_bgnbd(self, cdnow_customers):
"""Test saving and loading model for BG/NBD."""
bgf = estimation.BetaGeoFitter(penalizer_coef=0.0)
bgf.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
bgf.save_model(PATH_SAVE_BGNBD_MODEL)
bgf_new = estimation.BetaGeoFitter()
bgf_new.load_model(PATH_SAVE_BGNBD_MODEL)
assert bgf_new.__dict__['penalizer_coef'] == bgf.__dict__['penalizer_coef']
assert bgf_new.__dict__['_scale'] == bgf.__dict__['_scale']
assert bgf_new.__dict__['params_'] == bgf.__dict__['params_']
assert bgf_new.__dict__['_negative_log_likelihood_'] == bgf.__dict__['_negative_log_likelihood_']
assert (bgf_new.__dict__['data'] == bgf.__dict__['data']).all().all()
assert bgf_new.__dict__['predict'](1, 1, 2, 5) == bgf.__dict__['predict'](1, 1, 2, 5)
assert bgf_new.expected_number_of_purchases_up_to_time(1) == bgf.expected_number_of_purchases_up_to_time(1)
# remove saved model
os.remove(PATH_SAVE_BGNBD_MODEL)
def test_save_load_bgnbd_no_data(self, cdnow_customers):
"""Test saving and loading model for BG/NBD without data."""
bgf = estimation.BetaGeoFitter(penalizer_coef=0.0)
bgf.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
bgf.save_model(PATH_SAVE_BGNBD_MODEL, save_data=False)
bgf_new = estimation.BetaGeoFitter()
bgf_new.load_model(PATH_SAVE_BGNBD_MODEL)
assert bgf_new.__dict__['penalizer_coef'] == bgf.__dict__['penalizer_coef']
assert bgf_new.__dict__['_scale'] == bgf.__dict__['_scale']
assert bgf_new.__dict__['params_'] == bgf.__dict__['params_']
assert bgf_new.__dict__['_negative_log_likelihood_'] == bgf.__dict__['_negative_log_likelihood_']
assert bgf_new.__dict__['predict'](1, 1, 2, 5) == bgf.__dict__['predict'](1, 1, 2, 5)
assert bgf_new.expected_number_of_purchases_up_to_time(1) == bgf.expected_number_of_purchases_up_to_time(1)
assert bgf_new.__dict__['data'] is None
# remove saved model
os.remove(PATH_SAVE_BGNBD_MODEL)
def test_save_load_bgnbd_no_generate_data(self, cdnow_customers):
"""Test saving and loading model for BG/NBD without generate_new_data method."""
bgf = estimation.BetaGeoFitter(penalizer_coef=0.0)
bgf.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
bgf.save_model(PATH_SAVE_BGNBD_MODEL, save_generate_data_method=False)
bgf_new = estimation.BetaGeoFitter()
bgf_new.load_model(PATH_SAVE_BGNBD_MODEL)
assert bgf_new.__dict__['penalizer_coef'] == bgf.__dict__['penalizer_coef']
assert bgf_new.__dict__['_scale'] == bgf.__dict__['_scale']
assert bgf_new.__dict__['params_'] == bgf.__dict__['params_']
assert bgf_new.__dict__['_negative_log_likelihood_'] == bgf.__dict__['_negative_log_likelihood_']
assert bgf_new.__dict__['predict'](1, 1, 2, 5) == bgf.__dict__['predict'](1, 1, 2, 5)
assert bgf_new.expected_number_of_purchases_up_to_time(1) == bgf.expected_number_of_purchases_up_to_time(1)
assert bgf_new.__dict__['generate_new_data'] is None
# remove saved model
os.remove(PATH_SAVE_BGNBD_MODEL)
def test_save_load_bgnbd_no_data_replace_with_empty_str(self, cdnow_customers):
"""Test saving and loading model for BG/NBD without data with replaced value empty str."""
bgf = estimation.BetaGeoFitter(penalizer_coef=0.0)
bgf.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
bgf.save_model(PATH_SAVE_BGNBD_MODEL, save_data=False, values_to_save=[''])
bgf_new = estimation.BetaGeoFitter()
bgf_new.load_model(PATH_SAVE_BGNBD_MODEL)
assert bgf_new.__dict__['penalizer_coef'] == bgf.__dict__['penalizer_coef']
assert bgf_new.__dict__['_scale'] == bgf.__dict__['_scale']
assert bgf_new.__dict__['params_'] == bgf.__dict__['params_']
assert bgf_new.__dict__['_negative_log_likelihood_'] == bgf.__dict__['_negative_log_likelihood_']
assert bgf_new.__dict__['predict'](1, 1, 2, 5) == bgf.__dict__['predict'](1, 1, 2, 5)
assert bgf_new.expected_number_of_purchases_up_to_time(1) == bgf.expected_number_of_purchases_up_to_time(1)
        assert bgf_new.__dict__['data'] == ''
# remove saved model
os.remove(PATH_SAVE_BGNBD_MODEL)
def test_fit_with_index(self, cdnow_customers):
bgf = estimation.BetaGeoFitter(penalizer_coef=0.0)
index = range(len(cdnow_customers), 0, -1)
bgf.fit(
cdnow_customers['frequency'],
cdnow_customers['recency'],
cdnow_customers['T'],
index=index
)
assert (bgf.data.index == index).all() == True
bgf = estimation.BetaGeoFitter(penalizer_coef=0.0)
bgf.fit(
cdnow_customers['frequency'],
cdnow_customers['recency'],
cdnow_customers['T'],
index=None
)
assert (bgf.data.index == index).all() == False
def test_no_runtime_warnings_high_frequency(self, cdnow_customers):
old_settings = np.seterr(all='raise')
bgf = estimation.BetaGeoFitter(penalizer_coef=0.0)
bgf.fit(
cdnow_customers['frequency'],
cdnow_customers['recency'],
cdnow_customers['T'],
index=None
)
p_alive = bgf.conditional_probability_alive(frequency=1000, recency=10, T=100)
np.seterr(**old_settings)
assert p_alive == 0.
def test_using_weights_col_gives_correct_results(self, cdnow_customers):
cdnow_customers_weights = cdnow_customers.copy()
cdnow_customers_weights['weights'] = 1.0
cdnow_customers_weights = cdnow_customers_weights.groupby(['frequency', 'recency', 'T'])['weights'].sum()
cdnow_customers_weights = cdnow_customers_weights.reset_index()
assert (cdnow_customers_weights['weights'] > 1).any()
bgf_weights = estimation.BetaGeoFitter(penalizer_coef=0.0)
bgf_weights.fit(
cdnow_customers_weights['frequency'],
cdnow_customers_weights['recency'],
cdnow_customers_weights['T'],
weights=cdnow_customers_weights['weights']
)
bgf_no_weights = estimation.BetaGeoFitter(penalizer_coef=0.0)
bgf_no_weights.fit(
cdnow_customers['frequency'],
cdnow_customers['recency'],
cdnow_customers['T']
)
npt.assert_almost_equal(
np.array(bgf_no_weights._unload_params('r', 'alpha', 'a', 'b')),
np.array(bgf_weights._unload_params('r', 'alpha', 'a', 'b')),
decimal=4)
class TestModifiedBetaGeoFitter():
def test_sum_of_scalar_inputs_to_negative_log_likelihood_is_equal_to_array(self):
mbgf = estimation.ModifiedBetaGeoFitter
x = np.array([1, 3])
t_x = np.array([2, 2])
t = np.array([5, 6])
weights=np.array([1, 1])
params = [1, 1, 1, 1]
assert mbgf._negative_log_likelihood(params, np.array([x[0]]), np.array([t_x[0]]), np.array([t[0]]), weights[0], 0) \
+ mbgf._negative_log_likelihood(params, np.array([x[1]]), np.array([t_x[1]]), np.array([t[1]]), weights[0], 0) \
== 2 * mbgf._negative_log_likelihood(params, x, t_x, t, weights, 0)
def test_params_out_is_close_to_BTYDplus(self, cdnow_customers):
""" See https://github.com/mplatzer/BTYDplus """
mbfg = estimation.ModifiedBetaGeoFitter()
mbfg.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'], iterative_fitting=3)
expected = np.array([0.525, 6.183, 0.891, 1.614])
npt.assert_array_almost_equal(expected, np.array(mbfg._unload_params('r', 'alpha', 'a', 'b')), decimal=3)
def test_conditional_expectation_returns_same_value_as_Hardie_excel_sheet(self, cdnow_customers):
mbfg = estimation.ModifiedBetaGeoFitter()
mbfg.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
x = 2
t_x = 30.43
T = 38.86
t = 39
expected = 1.226
actual = mbfg.conditional_expected_number_of_purchases_up_to_time(t, x, t_x, T)
assert abs(expected - actual) < 0.05
def test_expectation_returns_same_value_Hardie_excel_sheet(self, cdnow_customers):
mbfg = estimation.ModifiedBetaGeoFitter()
mbfg.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'], tol=1e-6, iterative_fitting=3)
times = np.array([0.1429, 1.0, 3.00, 31.8571, 32.00, 78.00])
expected = np.array([0.0078, 0.0532, 0.1506, 1.0405, 1.0437, 1.8576])
actual = mbfg.expected_number_of_purchases_up_to_time(times)
npt.assert_allclose(actual, expected, rtol=0.05)
def test_conditional_probability_alive_returns_lessthan_1_if_no_repeat_purchases(self, cdnow_customers):
mbfg = estimation.ModifiedBetaGeoFitter()
mbfg.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
assert mbfg.conditional_probability_alive(0, 1, 1) < 1.0
def test_conditional_probability_alive_is_between_0_and_1(self, cdnow_customers):
mbfg = estimation.ModifiedBetaGeoFitter()
mbfg.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
for i in range(0, 100, 10):
for j in range(0, 100, 10):
for k in range(j, 100, 10):
assert 0 <= mbfg.conditional_probability_alive(i, j, k) <= 1.0
def test_fit_method_allows_for_better_accuracy_by_using_iterative_fitting(self, cdnow_customers):
mbfg1 = estimation.ModifiedBetaGeoFitter()
mbfg2 = estimation.ModifiedBetaGeoFitter()
np.random.seed(0)
mbfg1.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
np.random.seed(0)
mbfg2.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'], iterative_fitting=5)
assert mbfg1._negative_log_likelihood_ >= mbfg2._negative_log_likelihood_
def test_penalizer_term_will_shrink_coefs_to_0(self, cdnow_customers):
mbfg_no_penalizer = estimation.ModifiedBetaGeoFitter()
mbfg_no_penalizer.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
params_1 = np.array(list(mbfg_no_penalizer.params_.values()))
mbfg_with_penalizer = estimation.ModifiedBetaGeoFitter(penalizer_coef=0.1)
mbfg_with_penalizer.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'], iterative_fitting=3)
params_2 = np.array(list(mbfg_with_penalizer.params_.values()))
assert params_2.sum() < params_1.sum()
mbfg_with_more_penalizer = estimation.ModifiedBetaGeoFitter(penalizer_coef=1.)
mbfg_with_more_penalizer.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'], iterative_fitting=5)
params_3 = np.array(list(mbfg_with_more_penalizer.params_.values()))
assert params_3.sum() < params_2.sum()
def test_conditional_probability_alive_matrix(self, cdnow_customers):
mbfg = estimation.ModifiedBetaGeoFitter()
mbfg.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
Z = mbfg.conditional_probability_alive_matrix()
max_t = int(mbfg.data['T'].max())
for t_x in range(Z.shape[0]):
for x in range(Z.shape[1]):
assert Z[t_x][x] == mbfg.conditional_probability_alive(x, t_x, max_t)
def test_probability_of_n_purchases_up_to_time_same_as_R_BTYD(self):
""" See https://cran.r-project.org/web/packages/BTYD/BTYD.pdf """
mbgf = estimation.ModifiedBetaGeoFitter()
mbgf.params_ = OrderedDict({'r':0.243, 'alpha':4.414, 'a':0.793, 'b':2.426})
# probability that a customer will make 10 repeat transactions in the
# time interval (0,2]
expected = 1.07869e-07
actual = mbgf.probability_of_n_purchases_up_to_time(2, 10)
assert abs(expected - actual) < 10e-5
# PMF
expected = np.array([0.0019995214, 0.0015170236, 0.0011633150, 0.0009003148, 0.0007023638,
0.0005517902, 0.0004361913, 0.0003467171, 0.0002769613, 0.0002222260])
actual = np.array([mbgf.probability_of_n_purchases_up_to_time(30, n) for n in range(11, 21)])
npt.assert_allclose(expected, actual, rtol=0.5)
def test_scaling_inputs_gives_same_or_similar_results(self, cdnow_customers):
mbgf = estimation.ModifiedBetaGeoFitter()
mbgf.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
scale = 10.
mbgf_with_large_inputs = estimation.ModifiedBetaGeoFitter()
mbgf_with_large_inputs.fit(cdnow_customers['frequency'], scale * cdnow_customers['recency'], scale * cdnow_customers['T'], iterative_fitting=2)
assert mbgf_with_large_inputs._scale < 1.
assert abs(mbgf_with_large_inputs.conditional_probability_alive(1, scale * 1, scale * 2) - mbgf.conditional_probability_alive(1, 1, 2)) < 10e-2
assert abs(mbgf_with_large_inputs.conditional_probability_alive(1, scale * 2, scale * 10) - mbgf.conditional_probability_alive(1, 2, 10)) < 10e-2
def test_mgbf_does_not_hang_for_small_datasets_but_can_be_improved_with_iterative_fitting(self, cdnow_customers):
reduced_dataset = cdnow_customers.iloc[:2]
mbfg1 = estimation.ModifiedBetaGeoFitter()
mbfg2 = estimation.ModifiedBetaGeoFitter()
np.random.seed(0)
mbfg1.fit(reduced_dataset['frequency'], reduced_dataset['recency'], reduced_dataset['T'])
np.random.seed(0)
mbfg2.fit(reduced_dataset['frequency'], reduced_dataset['recency'], reduced_dataset['T'], iterative_fitting=10)
assert mbfg1._negative_log_likelihood_ >= mbfg2._negative_log_likelihood_
def test_purchase_predictions_do_not_differ_much_if_looking_at_hourly_or_daily_frequencies(self):
transaction_data = load_transaction_data(parse_dates=['date'])
daily_summary = utils.summary_data_from_transaction_data(transaction_data, 'id', 'date', observation_period_end=max(transaction_data.date), freq='D')
hourly_summary = utils.summary_data_from_transaction_data(transaction_data, 'id', 'date', observation_period_end=max(transaction_data.date), freq='h')
thirty_days = 30
hours_in_day = 24
mbfg = estimation.ModifiedBetaGeoFitter()
np.random.seed(0)
mbfg.fit(daily_summary['frequency'], daily_summary['recency'], daily_summary['T'])
thirty_day_prediction_from_daily_data = mbfg.expected_number_of_purchases_up_to_time(thirty_days)
np.random.seed(0)
mbfg.fit(hourly_summary['frequency'], hourly_summary['recency'], hourly_summary['T'])
thirty_day_prediction_from_hourly_data = mbfg.expected_number_of_purchases_up_to_time(thirty_days * hours_in_day)
npt.assert_almost_equal(thirty_day_prediction_from_daily_data, thirty_day_prediction_from_hourly_data)
def test_fit_with_index(self, cdnow_customers):
mbgf = estimation.ModifiedBetaGeoFitter()
index = range(len(cdnow_customers), 0, -1)
mbgf.fit(
cdnow_customers['frequency'],
cdnow_customers['recency'],
cdnow_customers['T'],
index=index
)
assert (mbgf.data.index == index).all() == True
mbgf = estimation.ModifiedBetaGeoFitter()
mbgf.fit(
cdnow_customers['frequency'],
cdnow_customers['recency'],
cdnow_customers['T'],
index=None
)
assert (mbgf.data.index == index).all() == False
|
[
"lifetimes.datasets.load_cdnow_summary",
"os.remove",
"numpy.random.seed",
"numpy.arange",
"numpy.testing.assert_array_almost_equal",
"lifetimes.datasets.load_cdnow_summary_data_with_monetary_value",
"pandas.DataFrame",
"numpy.testing.assert_almost_equal",
"lifetimes.estimation.ModifiedBetaGeoFitter",
"os.path.exists",
"pytest.raises",
"numpy.testing.assert_equal",
"numpy.random.choice",
"numpy.testing.assert_allclose",
"lifetimes.estimation.GammaGammaFitter",
"lifetimes.datasets.load_donations",
"pytest.fixture",
"numpy.isinf",
"lifetimes.estimation.BaseFitter",
"lifetimes.datasets.load_transaction_data",
"numpy.all",
"numpy.seterr",
"numpy.zeros",
"lifetimes.estimation.BetaGeoBetaBinomFitter",
"lifetimes.estimation.BetaGeoFitter",
"pandas.isnull",
"numpy.array",
"collections.OrderedDict",
"lifetimes.estimation.ParetoNBDFitter"
] |
[((433, 453), 'lifetimes.datasets.load_cdnow_summary', 'load_cdnow_summary', ([], {}), '()\n', (451, 453), False, 'from lifetimes.datasets import load_cdnow_summary, load_cdnow_summary_data_with_monetary_value, load_donations, load_transaction_data\n'), ((1745, 1761), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1759, 1761), False, 'import pytest\n'), ((6830, 6846), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (6844, 6846), False, 'import pytest\n'), ((614, 637), 'lifetimes.estimation.BaseFitter', 'estimation.BaseFitter', ([], {}), '()\n', (635, 637), True, 'import lifetimes.estimation as estimation\n'), ((775, 794), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (783, 794), True, 'import numpy as np\n'), ((1070, 1093), 'lifetimes.estimation.BaseFitter', 'estimation.BaseFitter', ([], {}), '()\n', (1091, 1093), True, 'import lifetimes.estimation as estimation\n'), ((1371, 1394), 'lifetimes.estimation.BaseFitter', 'estimation.BaseFitter', ([], {}), '()\n', (1392, 1394), True, 'import lifetimes.estimation as estimation\n'), ((1527, 1550), 'lifetimes.estimation.BaseFitter', 'estimation.BaseFitter', ([], {}), '()\n', (1548, 1550), True, 'import lifetimes.estimation as estimation\n'), ((1674, 1700), 'os.remove', 'os.remove', (['PATH_SAVE_MODEL'], {}), '(PATH_SAVE_MODEL)\n', (1683, 1700), False, 'import os\n'), ((1802, 1818), 'lifetimes.datasets.load_donations', 'load_donations', ([], {}), '()\n', (1816, 1818), False, 'from lifetimes.datasets import load_cdnow_summary, load_cdnow_summary_data_with_monetary_value, load_donations, load_transaction_data\n'), ((1932, 1967), 'lifetimes.estimation.BetaGeoBetaBinomFitter', 'estimation.BetaGeoBetaBinomFitter', ([], {}), '()\n', (1965, 1967), True, 'import lifetimes.estimation as estimation\n'), ((2153, 2190), 'numpy.array', 'np.array', (['[1.204, 0.75, 0.657, 2.783]'], {}), '([1.204, 0.75, 0.657, 2.783])\n', (2161, 2190), True, 'import numpy as np\n'), ((2524, 2559), 'lifetimes.estimation.BetaGeoBetaBinomFitter', 'estimation.BetaGeoBetaBinomFitter', ([], {}), '()\n', (2557, 2559), True, 'import lifetimes.estimation as estimation\n'), ((2978, 3006), 'numpy.array', 'np.array', (['[0.11, 0.59, 0.93]'], {}), '([0.11, 0.59, 0.93])\n', (2986, 3006), True, 'import numpy as np\n'), ((3027, 3038), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3035, 3038), True, 'import numpy as np\n'), ((3377, 3438), 'numpy.testing.assert_array_almost_equal', 'npt.assert_array_almost_equal', (['expected', 'prob_list'], {'decimal': '(2)'}), '(expected, prob_list, decimal=2)\n', (3406, 3438), True, 'import numpy.testing as npt\n'), ((3707, 3742), 'lifetimes.estimation.BetaGeoBetaBinomFitter', 'estimation.BetaGeoBetaBinomFitter', ([], {}), '()\n', (3740, 3742), True, 'import lifetimes.estimation as estimation\n'), ((4398, 4433), 'lifetimes.estimation.BetaGeoBetaBinomFitter', 'estimation.BetaGeoBetaBinomFitter', ([], {}), '()\n', (4431, 4433), True, 'import lifetimes.estimation as estimation\n'), ((4619, 4645), 'numpy.array', 'np.array', (['[3454.9, 1253.1]'], {}), '([3454.9, 1253.1])\n', (4627, 4645), True, 'import numpy as np\n'), ((4781, 4836), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['expected', 'estimated'], {'decimal': '(0)'}), '(expected, estimated, decimal=0)\n', (4804, 4836), True, 'import numpy.testing as npt\n'), ((4900, 4935), 'lifetimes.estimation.BetaGeoBetaBinomFitter', 'estimation.BetaGeoBetaBinomFitter', ([], {}), '()\n', (4933, 4935), True, 'import lifetimes.estimation as estimation\n'), ((5243, 
5278), 'lifetimes.estimation.BetaGeoBetaBinomFitter', 'estimation.BetaGeoBetaBinomFitter', ([], {}), '()\n', (5276, 5278), True, 'import lifetimes.estimation as estimation\n'), ((5615, 5672), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['frequency', 'recency', 'periods']"}), "(columns=['frequency', 'recency', 'periods'])\n", (5627, 5672), True, 'import pandas as pd\n'), ((6147, 6182), 'lifetimes.estimation.BetaGeoBetaBinomFitter', 'estimation.BetaGeoBetaBinomFitter', ([], {}), '()\n', (6180, 6182), True, 'import lifetimes.estimation as estimation\n'), ((6362, 6397), 'lifetimes.estimation.BetaGeoBetaBinomFitter', 'estimation.BetaGeoBetaBinomFitter', ([], {}), '()\n', (6395, 6397), True, 'import lifetimes.estimation as estimation\n'), ((6913, 6958), 'lifetimes.datasets.load_cdnow_summary_data_with_monetary_value', 'load_cdnow_summary_data_with_monetary_value', ([], {}), '()\n', (6956, 6958), False, 'from lifetimes.datasets import load_cdnow_summary, load_cdnow_summary_data_with_monetary_value, load_donations, load_transaction_data\n'), ((7235, 7264), 'lifetimes.estimation.GammaGammaFitter', 'estimation.GammaGammaFitter', ([], {}), '()\n', (7262, 7264), True, 'import lifetimes.estimation as estimation\n'), ((7492, 7521), 'numpy.array', 'np.array', (['[6.25, 3.74, 15.44]'], {}), '([6.25, 3.74, 15.44])\n', (7500, 7521), True, 'import numpy as np\n'), ((7735, 7764), 'lifetimes.estimation.GammaGammaFitter', 'estimation.GammaGammaFitter', ([], {}), '()\n', (7762, 7764), True, 'import lifetimes.estimation as estimation\n'), ((7787, 7834), 'collections.OrderedDict', 'OrderedDict', (["{'p': 6.25, 'q': 3.74, 'v': 15.44}"], {}), "({'p': 6.25, 'q': 3.74, 'v': 15.44})\n", (7798, 7834), False, 'from collections import OrderedDict\n'), ((8024, 8109), 'numpy.array', 'np.array', (['[24.65, 18.91, 35.17, 35.17, 35.17, 71.46, 18.91, 35.17, 27.28, 35.17]'], {}), '([24.65, 18.91, 35.17, 35.17, 35.17, 71.46, 18.91, 35.17, 27.28, 35.17]\n )\n', (8032, 8109), True, 'import numpy as np\n'), ((8174, 8231), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['estimates.values', 'expected'], {'atol': '(0.1)'}), '(estimates.values, expected, atol=0.1)\n', (8193, 8231), True, 'import numpy.testing as npt\n'), ((8338, 8367), 'lifetimes.estimation.GammaGammaFitter', 'estimation.GammaGammaFitter', ([], {}), '()\n', (8365, 8367), True, 'import lifetimes.estimation as estimation\n'), ((8390, 8437), 'collections.OrderedDict', 'OrderedDict', (["{'p': 6.25, 'q': 3.74, 'v': 15.44}"], {}), "({'p': 6.25, 'q': 3.74, 'v': 15.44})\n", (8401, 8437), False, 'from collections import OrderedDict\n'), ((8450, 8476), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (8474, 8476), True, 'import lifetimes.estimation as estimation\n'), ((9536, 9586), 'numpy.testing.assert_equal', 'npt.assert_equal', (['ggf_clv.values', 'utils_clv.values'], {}), '(ggf_clv.values, utils_clv.values)\n', (9552, 9586), True, 'import numpy.testing as npt\n'), ((9843, 9872), 'lifetimes.estimation.GammaGammaFitter', 'estimation.GammaGammaFitter', ([], {}), '()\n', (9870, 9872), True, 'import lifetimes.estimation as estimation\n'), ((10257, 10286), 'lifetimes.estimation.GammaGammaFitter', 'estimation.GammaGammaFitter', ([], {}), '()\n', (10284, 10286), True, 'import lifetimes.estimation as estimation\n'), ((10869, 10898), 'lifetimes.estimation.GammaGammaFitter', 'estimation.GammaGammaFitter', ([], {}), '()\n', (10896, 10898), True, 'import lifetimes.estimation as estimation\n'), ((11157, 11186), 'numpy.array', 
'np.array', (['[6.25, 3.74, 15.44]'], {}), '([6.25, 3.74, 15.44])\n', (11165, 11186), True, 'import numpy as np\n'), ((11449, 11478), 'lifetimes.estimation.GammaGammaFitter', 'estimation.GammaGammaFitter', ([], {}), '()\n', (11476, 11478), True, 'import lifetimes.estimation as estimation\n'), ((11688, 11716), 'lifetimes.estimation.ParetoNBDFitter', 'estimation.ParetoNBDFitter', ([], {}), '()\n', (11714, 11716), True, 'import lifetimes.estimation as estimation\n'), ((11734, 11781), 'numpy.array', 'np.array', (['[10.465, 0.00798565181, 3.0516, 2.82]'], {}), '([10.465, 0.00798565181, 3.0516, 2.82])\n', (11742, 11781), True, 'import numpy as np\n'), ((11799, 11830), 'numpy.array', 'np.array', (['[400.0, 500.0, 500.0]'], {}), '([400.0, 500.0, 500.0])\n', (11807, 11830), True, 'import numpy as np\n'), ((11842, 11867), 'numpy.array', 'np.array', (['[5.0, 1.0, 4.0]'], {}), '([5.0, 1.0, 4.0])\n', (11850, 11867), True, 'import numpy as np\n'), ((11879, 11906), 'numpy.array', 'np.array', (['[6.0, 37.0, 37.0]'], {}), '([6.0, 37.0, 37.0])\n', (11887, 11906), True, 'import numpy as np\n'), ((12179, 12195), 'numpy.array', 'np.array', (['[1, 3]'], {}), '([1, 3])\n', (12187, 12195), True, 'import numpy as np\n'), ((12210, 12226), 'numpy.array', 'np.array', (['[2, 2]'], {}), '([2, 2])\n', (12218, 12226), True, 'import numpy as np\n'), ((12245, 12265), 'numpy.array', 'np.array', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (12253, 12265), True, 'import numpy as np\n'), ((12276, 12292), 'numpy.array', 'np.array', (['[5, 6]'], {}), '([5, 6])\n', (12284, 12292), True, 'import numpy as np\n'), ((12745, 12773), 'lifetimes.estimation.ParetoNBDFitter', 'estimation.ParetoNBDFitter', ([], {}), '()\n', (12771, 12773), True, 'import lifetimes.estimation as estimation\n'), ((12910, 12950), 'numpy.array', 'np.array', (['[0.553, 10.578, 0.606, 11.669]'], {}), '([0.553, 10.578, 0.606, 11.669])\n', (12918, 12950), True, 'import numpy as np\n'), ((13236, 13264), 'lifetimes.estimation.ParetoNBDFitter', 'estimation.ParetoNBDFitter', ([], {}), '()\n', (13262, 13264), True, 'import lifetimes.estimation as estimation\n'), ((13391, 13518), 'numpy.array', 'np.array', (['[0.0, 0.05077821, 0.09916088, 0.14542507, 0.1897993, 0.23247466, 0.27361274,\n 0.31335159, 0.35181024, 0.38909211]'], {}), '([0.0, 0.05077821, 0.09916088, 0.14542507, 0.1897993, 0.23247466, \n 0.27361274, 0.31335159, 0.35181024, 0.38909211])\n', (13399, 13518), True, 'import numpy as np\n'), ((13614, 13662), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {'atol': '(0.01)'}), '(expected, actual, atol=0.01)\n', (13633, 13662), True, 'import numpy.testing as npt\n'), ((13865, 13893), 'lifetimes.estimation.ParetoNBDFitter', 'estimation.ParetoNBDFitter', ([], {}), '()\n', (13891, 13893), True, 'import lifetimes.estimation as estimation\n'), ((14643, 14671), 'lifetimes.estimation.ParetoNBDFitter', 'estimation.ParetoNBDFitter', ([], {}), '()\n', (14669, 14671), True, 'import lifetimes.estimation as estimation\n'), ((14771, 14830), 'collections.OrderedDict', 'OrderedDict', (["{'alpha': alpha, 'beta': beta, 'r': r, 's': s}"], {}), "({'alpha': alpha, 'beta': beta, 'r': r, 's': s})\n", (14782, 14830), False, 'from collections import OrderedDict\n'), ((15225, 15253), 'lifetimes.estimation.ParetoNBDFitter', 'estimation.ParetoNBDFitter', ([], {}), '()\n', (15251, 15253), True, 'import lifetimes.estimation as estimation\n'), ((15371, 15394), 'numpy.arange', 'np.arange', (['(0)', '(100)', '(10.0)'], {}), '(0, 100, 10.0)\n', (15380, 15394), True, 'import 
numpy as np\n'), ((15818, 15846), 'lifetimes.estimation.ParetoNBDFitter', 'estimation.ParetoNBDFitter', ([], {}), '()\n', (15844, 15846), True, 'import lifetimes.estimation as estimation\n'), ((16178, 16206), 'lifetimes.estimation.ParetoNBDFitter', 'estimation.ParetoNBDFitter', ([], {}), '()\n', (16204, 16206), True, 'import lifetimes.estimation as estimation\n'), ((16360, 16388), 'numpy.array', 'np.array', (['[40.0, 50.0, 50.0]'], {}), '([40.0, 50.0, 50.0])\n', (16368, 16388), True, 'import numpy as np\n'), ((16400, 16425), 'numpy.array', 'np.array', (['[5.0, 1.0, 4.0]'], {}), '([5.0, 1.0, 4.0])\n', (16408, 16425), True, 'import numpy as np\n'), ((16437, 16464), 'numpy.array', 'np.array', (['[6.0, 37.0, 37.0]'], {}), '([6.0, 37.0, 37.0])\n', (16445, 16464), True, 'import numpy as np\n'), ((16711, 16739), 'lifetimes.estimation.ParetoNBDFitter', 'estimation.ParetoNBDFitter', ([], {}), '()\n', (16737, 16739), True, 'import lifetimes.estimation as estimation\n'), ((17163, 17191), 'lifetimes.estimation.ParetoNBDFitter', 'estimation.ParetoNBDFitter', ([], {}), '()\n', (17189, 17191), True, 'import lifetimes.estimation as estimation\n'), ((17480, 17508), 'lifetimes.estimation.ParetoNBDFitter', 'estimation.ParetoNBDFitter', ([], {}), '()\n', (17506, 17508), True, 'import lifetimes.estimation as estimation\n'), ((17942, 17970), 'lifetimes.estimation.ParetoNBDFitter', 'estimation.ParetoNBDFitter', ([], {}), '()\n', (17968, 17970), True, 'import lifetimes.estimation as estimation\n'), ((18088, 18112), 'numpy.random.choice', 'np.random.choice', (['(100)', '(5)'], {}), '(100, 5)\n', (18104, 18112), True, 'import numpy as np\n'), ((18941, 18969), 'lifetimes.estimation.ParetoNBDFitter', 'estimation.ParetoNBDFitter', ([], {}), '()\n', (18967, 18969), True, 'import lifetimes.estimation as estimation\n'), ((19087, 19110), 'numpy.random.choice', 'np.random.choice', (['(10)', '(5)'], {}), '(10, 5)\n', (19103, 19110), True, 'import numpy as np\n'), ((20140, 20168), 'lifetimes.estimation.ParetoNBDFitter', 'estimation.ParetoNBDFitter', ([], {}), '()\n', (20166, 20168), True, 'import lifetimes.estimation as estimation\n'), ((20339, 20367), 'lifetimes.estimation.ParetoNBDFitter', 'estimation.ParetoNBDFitter', ([], {}), '()\n', (20365, 20367), True, 'import lifetimes.estimation as estimation\n'), ((20992, 21008), 'numpy.array', 'np.array', (['[1, 3]'], {}), '([1, 3])\n', (21000, 21008), True, 'import numpy as np\n'), ((21023, 21039), 'numpy.array', 'np.array', (['[2, 2]'], {}), '([2, 2])\n', (21031, 21039), True, 'import numpy as np\n'), ((21052, 21068), 'numpy.array', 'np.array', (['[5, 6]'], {}), '([5, 6])\n', (21060, 21068), True, 'import numpy as np\n'), ((21087, 21100), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (21095, 21100), True, 'import numpy as np\n'), ((21523, 21549), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (21547, 21549), True, 'import lifetimes.estimation as estimation\n'), ((21686, 21724), 'numpy.array', 'np.array', (['[0.243, 4.414, 0.793, 2.426]'], {}), '([0.243, 4.414, 0.793, 2.426])\n', (21694, 21724), True, 'import numpy as np\n'), ((21955, 21981), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (21979, 21981), True, 'import lifetimes.estimation as estimation\n'), ((22405, 22431), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (22429, 22431), True, 'import lifetimes.estimation as estimation\n'), ((22555, 22604), 'numpy.array', 'np.array', (['[0.1429, 1.0, 3.0, 31.8571, 
32.0, 78.0]'], {}), '([0.1429, 1.0, 3.0, 31.8571, 32.0, 78.0])\n', (22563, 22604), True, 'import numpy as np\n'), ((22627, 22685), 'numpy.array', 'np.array', (['[0.0078, 0.0532, 0.1506, 1.0405, 1.0437, 1.8576]'], {}), '([0.0078, 0.0532, 0.1506, 1.0405, 1.0437, 1.8576])\n', (22635, 22685), True, 'import numpy as np\n'), ((22761, 22819), 'numpy.testing.assert_array_almost_equal', 'npt.assert_array_almost_equal', (['actual', 'expected'], {'decimal': '(3)'}), '(actual, expected, decimal=3)\n', (22790, 22819), True, 'import numpy.testing as npt\n'), ((22935, 22961), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (22959, 22961), True, 'import lifetimes.estimation as estimation\n'), ((23226, 23252), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (23250, 23252), True, 'import lifetimes.estimation as estimation\n'), ((23671, 23697), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (23695, 23697), True, 'import lifetimes.estimation as estimation\n'), ((23713, 23739), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (23737, 23739), True, 'import lifetimes.estimation as estimation\n'), ((23749, 23766), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (23763, 23766), True, 'import numpy as np\n'), ((23873, 23890), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (23887, 23890), True, 'import numpy as np\n'), ((24193, 24219), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (24217, 24219), True, 'import lifetimes.estimation as estimation\n'), ((24428, 24472), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {'penalizer_coef': '(0.1)'}), '(penalizer_coef=0.1)\n', (24452, 24472), True, 'import lifetimes.estimation as estimation\n'), ((24670, 24697), 'numpy.all', 'np.all', (['(params_2 < params_1)'], {}), '(params_2 < params_1)\n', (24676, 24697), True, 'import numpy as np\n'), ((24733, 24776), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {'penalizer_coef': '(10)'}), '(penalizer_coef=10)\n', (24757, 24776), True, 'import lifetimes.estimation as estimation\n'), ((24984, 25011), 'numpy.all', 'np.all', (['(params_3 < params_2)'], {}), '(params_3 < params_2)\n', (24990, 25011), True, 'import numpy as np\n'), ((25102, 25128), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (25126, 25128), True, 'import lifetimes.estimation as estimation\n'), ((25676, 25702), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (25700, 25702), True, 'import lifetimes.estimation as estimation\n'), ((25725, 25790), 'collections.OrderedDict', 'OrderedDict', (["{'r': 0.243, 'alpha': 4.414, 'a': 0.793, 'b': 2.426}"], {}), "({'r': 0.243, 'alpha': 4.414, 'a': 0.793, 'b': 2.426})\n", (25736, 25790), False, 'from collections import OrderedDict\n'), ((26319, 26477), 'numpy.array', 'np.array', (['[0.0019995214, 0.0015170236, 0.001163315, 0.0009003148, 0.0007023638, \n 0.0005517902, 0.0004361913, 0.0003467171, 0.0002769613, 0.000222226]'], {}), '([0.0019995214, 0.0015170236, 0.001163315, 0.0009003148, \n 0.0007023638, 0.0005517902, 0.0004361913, 0.0003467171, 0.0002769613, \n 0.000222226])\n', (26327, 26477), True, 'import numpy as np\n'), ((26606, 26664), 'numpy.testing.assert_array_almost_equal', 'npt.assert_array_almost_equal', (['expected', 'actual'], {'decimal': '(5)'}), '(expected, actual, decimal=5)\n', (26635, 
26664), True, 'import numpy.testing as npt\n'), ((26762, 26788), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (26786, 26788), True, 'import lifetimes.estimation as estimation\n'), ((26936, 26962), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (26960, 26962), True, 'import lifetimes.estimation as estimation\n'), ((27590, 27634), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {'penalizer_coef': '(0.0)'}), '(penalizer_coef=0.0)\n', (27614, 27634), True, 'import lifetimes.estimation as estimation\n'), ((27796, 27822), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (27820, 27822), True, 'import lifetimes.estimation as estimation\n'), ((28526, 28558), 'os.remove', 'os.remove', (['PATH_SAVE_BGNBD_MODEL'], {}), '(PATH_SAVE_BGNBD_MODEL)\n', (28535, 28558), False, 'import os\n'), ((28704, 28748), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {'penalizer_coef': '(0.0)'}), '(penalizer_coef=0.0)\n', (28728, 28748), True, 'import lifetimes.estimation as estimation\n'), ((28927, 28953), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (28951, 28953), True, 'import lifetimes.estimation as estimation\n'), ((29628, 29660), 'os.remove', 'os.remove', (['PATH_SAVE_BGNBD_MODEL'], {}), '(PATH_SAVE_BGNBD_MODEL)\n', (29637, 29660), False, 'import os\n'), ((29835, 29879), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {'penalizer_coef': '(0.0)'}), '(penalizer_coef=0.0)\n', (29859, 29879), True, 'import lifetimes.estimation as estimation\n'), ((30074, 30100), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (30098, 30100), True, 'import lifetimes.estimation as estimation\n'), ((30788, 30820), 'os.remove', 'os.remove', (['PATH_SAVE_BGNBD_MODEL'], {}), '(PATH_SAVE_BGNBD_MODEL)\n', (30797, 30820), False, 'import os\n'), ((31019, 31063), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {'penalizer_coef': '(0.0)'}), '(penalizer_coef=0.0)\n', (31043, 31063), True, 'import lifetimes.estimation as estimation\n'), ((31263, 31289), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (31287, 31289), True, 'import lifetimes.estimation as estimation\n'), ((31962, 31994), 'os.remove', 'os.remove', (['PATH_SAVE_BGNBD_MODEL'], {}), '(PATH_SAVE_BGNBD_MODEL)\n', (31971, 31994), False, 'import os\n'), ((32062, 32106), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {'penalizer_coef': '(0.0)'}), '(penalizer_coef=0.0)\n', (32086, 32106), True, 'import lifetimes.estimation as estimation\n'), ((32395, 32439), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {'penalizer_coef': '(0.0)'}), '(penalizer_coef=0.0)\n', (32419, 32439), True, 'import lifetimes.estimation as estimation\n'), ((32758, 32780), 'numpy.seterr', 'np.seterr', ([], {'all': '"""raise"""'}), "(all='raise')\n", (32767, 32780), True, 'import numpy as np\n'), ((32795, 32839), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {'penalizer_coef': '(0.0)'}), '(penalizer_coef=0.0)\n', (32819, 32839), True, 'import lifetimes.estimation as estimation\n'), ((33102, 33127), 'numpy.seterr', 'np.seterr', ([], {}), '(**old_settings)\n', (33111, 33127), True, 'import numpy as np\n'), ((33612, 33656), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {'penalizer_coef': '(0.0)'}), 
'(penalizer_coef=0.0)\n', (33636, 33656), True, 'import lifetimes.estimation as estimation\n'), ((33914, 33958), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {'penalizer_coef': '(0.0)'}), '(penalizer_coef=0.0)\n', (33938, 33958), True, 'import lifetimes.estimation as estimation\n'), ((34501, 34517), 'numpy.array', 'np.array', (['[1, 3]'], {}), '([1, 3])\n', (34509, 34517), True, 'import numpy as np\n'), ((34532, 34548), 'numpy.array', 'np.array', (['[2, 2]'], {}), '([2, 2])\n', (34540, 34548), True, 'import numpy as np\n'), ((34561, 34577), 'numpy.array', 'np.array', (['[5, 6]'], {}), '([5, 6])\n', (34569, 34577), True, 'import numpy as np\n'), ((34594, 34610), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (34602, 34610), True, 'import numpy as np\n'), ((35114, 35148), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (35146, 35148), True, 'import lifetimes.estimation as estimation\n'), ((35286, 35324), 'numpy.array', 'np.array', (['[0.525, 6.183, 0.891, 1.614]'], {}), '([0.525, 6.183, 0.891, 1.614])\n', (35294, 35324), True, 'import numpy as np\n'), ((35557, 35591), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (35589, 35591), True, 'import lifetimes.estimation as estimation\n'), ((36017, 36051), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (36049, 36051), True, 'import lifetimes.estimation as estimation\n'), ((36197, 36246), 'numpy.array', 'np.array', (['[0.1429, 1.0, 3.0, 31.8571, 32.0, 78.0]'], {}), '([0.1429, 1.0, 3.0, 31.8571, 32.0, 78.0])\n', (36205, 36246), True, 'import numpy as np\n'), ((36269, 36327), 'numpy.array', 'np.array', (['[0.0078, 0.0532, 0.1506, 1.0405, 1.0437, 1.8576]'], {}), '([0.0078, 0.0532, 0.1506, 1.0405, 1.0437, 1.8576])\n', (36277, 36327), True, 'import numpy as np\n'), ((36405, 36453), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['actual', 'expected'], {'rtol': '(0.05)'}), '(actual, expected, rtol=0.05)\n', (36424, 36453), True, 'import numpy.testing as npt\n'), ((36579, 36613), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (36611, 36613), True, 'import lifetimes.estimation as estimation\n'), ((36880, 36914), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (36912, 36914), True, 'import lifetimes.estimation as estimation\n'), ((37335, 37369), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (37367, 37369), True, 'import lifetimes.estimation as estimation\n'), ((37386, 37420), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (37418, 37420), True, 'import lifetimes.estimation as estimation\n'), ((37430, 37447), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (37444, 37447), True, 'import numpy as np\n'), ((37555, 37572), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (37569, 37572), True, 'import numpy as np\n'), ((37878, 37912), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (37910, 37912), True, 'import lifetimes.estimation as estimation\n'), ((38124, 38176), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {'penalizer_coef': '(0.1)'}), '(penalizer_coef=0.1)\n', (38156, 38176), True, 'import lifetimes.estimation as estimation\n'), 
((38465, 38517), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {'penalizer_coef': '(1.0)'}), '(penalizer_coef=1.0)\n', (38497, 38517), True, 'import lifetimes.estimation as estimation\n'), ((38869, 38903), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (38901, 38903), True, 'import lifetimes.estimation as estimation\n'), ((39427, 39461), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (39459, 39461), True, 'import lifetimes.estimation as estimation\n'), ((39485, 39550), 'collections.OrderedDict', 'OrderedDict', (["{'r': 0.243, 'alpha': 4.414, 'a': 0.793, 'b': 2.426}"], {}), "({'r': 0.243, 'alpha': 4.414, 'a': 0.793, 'b': 2.426})\n", (39496, 39550), False, 'from collections import OrderedDict\n'), ((39832, 39990), 'numpy.array', 'np.array', (['[0.0019995214, 0.0015170236, 0.001163315, 0.0009003148, 0.0007023638, \n 0.0005517902, 0.0004361913, 0.0003467171, 0.0002769613, 0.000222226]'], {}), '([0.0019995214, 0.0015170236, 0.001163315, 0.0009003148, \n 0.0007023638, 0.0005517902, 0.0004361913, 0.0003467171, 0.0002769613, \n 0.000222226])\n', (39840, 39990), True, 'import numpy as np\n'), ((40122, 40169), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {'rtol': '(0.5)'}), '(expected, actual, rtol=0.5)\n', (40141, 40169), True, 'import numpy.testing as npt\n'), ((40268, 40302), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (40300, 40302), True, 'import lifetimes.estimation as estimation\n'), ((40453, 40487), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (40485, 40487), True, 'import lifetimes.estimation as estimation\n'), ((41183, 41217), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (41215, 41217), True, 'import lifetimes.estimation as estimation\n'), ((41234, 41268), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (41266, 41268), True, 'import lifetimes.estimation as estimation\n'), ((41278, 41295), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (41292, 41295), True, 'import numpy as np\n'), ((41403, 41420), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (41417, 41420), True, 'import numpy as np\n'), ((41753, 41796), 'lifetimes.datasets.load_transaction_data', 'load_transaction_data', ([], {'parse_dates': "['date']"}), "(parse_dates=['date'])\n", (41774, 41796), False, 'from lifetimes.datasets import load_cdnow_summary, load_cdnow_summary_data_with_monetary_value, load_donations, load_transaction_data\n'), ((42180, 42214), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (42212, 42214), True, 'import lifetimes.estimation as estimation\n'), ((42224, 42241), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (42238, 42241), True, 'import numpy as np\n'), ((42448, 42465), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (42462, 42465), True, 'import numpy as np\n'), ((42691, 42797), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['thirty_day_prediction_from_daily_data', 'thirty_day_prediction_from_hourly_data'], {}), '(thirty_day_prediction_from_daily_data,\n thirty_day_prediction_from_hourly_data)\n', (42714, 42797), True, 'import numpy.testing as npt\n'), ((42862, 42896), 
'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (42894, 42896), True, 'import lifetimes.estimation as estimation\n'), ((43188, 43222), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (43220, 43222), True, 'import lifetimes.estimation as estimation\n'), ((1107, 1132), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1120, 1132), False, 'import pytest\n'), ((1458, 1489), 'os.path.exists', 'os.path.exists', (['PATH_SAVE_MODEL'], {}), '(PATH_SAVE_MODEL)\n', (1472, 1489), False, 'import os\n'), ((15422, 15445), 'numpy.arange', 'np.arange', (['(0)', '(100)', '(10.0)'], {}), '(0, 100, 10.0)\n', (15431, 15445), True, 'import numpy as np\n'), ((18141, 18165), 'numpy.random.choice', 'np.random.choice', (['(100)', '(5)'], {}), '(100, 5)\n', (18157, 18165), True, 'import numpy as np\n'), ((19139, 19161), 'numpy.random.choice', 'np.random.choice', (['(9)', '(5)'], {}), '(9, 5)\n', (19155, 19161), True, 'import numpy as np\n'), ((5790, 5923), 'pandas.DataFrame', 'pd.DataFrame', (["([[row['frequency'], row['recency'], row['periods']]] * row['weights'])"], {'columns': "['frequency', 'recency', 'periods']"}), "([[row['frequency'], row['recency'], row['periods']]] * row[\n 'weights'], columns=['frequency', 'recency', 'periods'])\n", (5802, 5923), True, 'import pandas as pd\n'), ((15471, 15500), 'numpy.arange', 'np.arange', (['recency', '(100)', '(10.0)'], {}), '(recency, 100, 10.0)\n', (15480, 15500), True, 'import numpy as np\n'), ((12377, 12393), 'numpy.array', 'np.array', (['[x[0]]'], {}), '([x[0]])\n', (12385, 12393), True, 'import numpy as np\n'), ((12395, 12413), 'numpy.array', 'np.array', (['[t_x[0]]'], {}), '([t_x[0]])\n', (12403, 12413), True, 'import numpy as np\n'), ((12415, 12431), 'numpy.array', 'np.array', (['[t[0]]'], {}), '([t[0]])\n', (12423, 12431), True, 'import numpy as np\n'), ((12503, 12519), 'numpy.array', 'np.array', (['[x[1]]'], {}), '([x[1]])\n', (12511, 12519), True, 'import numpy as np\n'), ((12521, 12539), 'numpy.array', 'np.array', (['[t_x[1]]'], {}), '([t_x[1]])\n', (12529, 12539), True, 'import numpy as np\n'), ((12541, 12557), 'numpy.array', 'np.array', (['[t[1]]'], {}), '([t[1]])\n', (12549, 12557), True, 'import numpy as np\n'), ((18204, 18228), 'numpy.random.choice', 'np.random.choice', (['(100)', '(5)'], {}), '(100, 5)\n', (18220, 18228), True, 'import numpy as np\n'), ((18259, 18283), 'numpy.random.choice', 'np.random.choice', (['(100)', '(5)'], {}), '(100, 5)\n', (18275, 18283), True, 'import numpy as np\n'), ((19207, 19232), 'numpy.arange', 'np.arange', (['recency', '(10)', '(1)'], {}), '(recency, 10, 1)\n', (19216, 19232), True, 'import numpy as np\n'), ((21189, 21207), 'numpy.array', 'np.array', (['[t_x[0]]'], {}), '([t_x[0]])\n', (21197, 21207), True, 'import numpy as np\n'), ((21209, 21225), 'numpy.array', 'np.array', (['[t[0]]'], {}), '([t[0]])\n', (21217, 21225), True, 'import numpy as np\n'), ((21301, 21319), 'numpy.array', 'np.array', (['[t_x[1]]'], {}), '([t_x[1]])\n', (21309, 21319), True, 'import numpy as np\n'), ((21321, 21337), 'numpy.array', 'np.array', (['[t[1]]'], {}), '([t[1]])\n', (21329, 21337), True, 'import numpy as np\n'), ((34694, 34710), 'numpy.array', 'np.array', (['[x[0]]'], {}), '([x[0]])\n', (34702, 34710), True, 'import numpy as np\n'), ((34712, 34730), 'numpy.array', 'np.array', (['[t_x[0]]'], {}), '([t_x[0]])\n', (34720, 34730), True, 'import numpy as np\n'), ((34732, 34748), 'numpy.array', 'np.array', 
(['[t[0]]'], {}), '([t[0]])\n', (34740, 34748), True, 'import numpy as np\n'), ((34819, 34835), 'numpy.array', 'np.array', (['[x[1]]'], {}), '([x[1]])\n', (34827, 34835), True, 'import numpy as np\n'), ((34837, 34855), 'numpy.array', 'np.array', (['[t_x[1]]'], {}), '([t_x[1]])\n', (34845, 34855), True, 'import numpy as np\n'), ((34857, 34873), 'numpy.array', 'np.array', (['[t[1]]'], {}), '([t[1]])\n', (34865, 34873), True, 'import numpy as np\n'), ((11938, 11949), 'numpy.isinf', 'np.isinf', (['r'], {}), '(r)\n', (11946, 11949), True, 'import numpy as np\n'), ((11958, 11970), 'pandas.isnull', 'pd.isnull', (['r'], {}), '(r)\n', (11967, 11970), True, 'import pandas as pd\n'), ((16508, 16519), 'numpy.isinf', 'np.isinf', (['r'], {}), '(r)\n', (16516, 16519), True, 'import numpy as np\n'), ((16528, 16540), 'pandas.isnull', 'pd.isnull', (['r'], {}), '(r)\n', (16537, 16540), True, 'import pandas as pd\n'), ((18318, 18341), 'numpy.random.choice', 'np.random.choice', (['(10)', '(5)'], {}), '(10, 5)\n', (18334, 18341), True, 'import numpy as np\n'), ((19271, 19293), 'numpy.random.choice', 'np.random.choice', (['(9)', '(5)'], {}), '(9, 5)\n', (19287, 19293), True, 'import numpy as np\n'), ((19534, 19553), 'numpy.arange', 'np.arange', (['(0)', '(20)', '(1)'], {}), '(0, 20, 1)\n', (19543, 19553), True, 'import numpy as np\n')]
|
# Copyright (c) 2019, CNRS-LAAS
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
import typing as ty
import numpy as np
import fire_rs.firemodel.propagation
import fire_rs.geodata.environment
import fire_rs.geodata.geo_data
import fire_rs.geodata.wildfire
class RealWildfire:
"""Generate wildfire maps by combining ignition, propagation and weather changes operations"""
def __init__(self, start_timestamp: datetime.datetime,
environment: fire_rs.firemodel.propagation.Environment):
self._environment = environment
self.start_timestamp = start_timestamp
self._current_time = self.start_timestamp
        # Pending ignitions, mapping a raster cell (row, col) to its ignition timestamp
        # type: ty.MutableMapping[ty.Tuple[int, int], float]
self._pending_ignitions = {}
self._perimeter = None
self._fire_map = fire_rs.firemodel.propagation.empty_firemap(self._environment.raster)
self._action_log = []
def ignite(self, position: ty.Union[ty.Tuple[float, float], fire_rs.geodata.geo_data.Point]):
"""Set some location on fire"""
c = self._environment.raster.array_index(position)
self._pending_ignitions[c] = self._current_time.timestamp()
self._fire_map["ignition"][c] = self._pending_ignitions[c]
self._action_log.append(
(self._current_time, "{} position {} ".format("Ignite", str(position))))
def change_wind(self, speed, direction):
self._environment.update_area_wind(speed, direction)
self._action_log.append(
(self._current_time,
"{} to {} km/h {} °".format("Set Wind", str(speed), str(direction / np.pi * 180))))
def propagate(self, duration: datetime.timedelta):
if self._perimeter:
self._pending_ignitions = {**self._pending_ignitions, **self._perimeter.cells}
old_fire_map = self._fire_map.clone()
# First propagation
fireprop = fire_rs.firemodel.propagation.FirePropagation(self._environment)
        # Mark burnt cells so the fire does not propagate over them again
mask = np.where(
(old_fire_map.data["ignition"] > 0) & (old_fire_map.data["ignition"] < np.inf))
if self._perimeter:
mask = np.where(self._perimeter.area_array | np.isfinite(self._perimeter.array))
fireprop.prop_data.data["ignition"][mask] = np.NaN
for k, v in self._pending_ignitions.items():
fireprop.set_ignition_cell((k[0], k[1], v))
fireprop.propagate((self._current_time + duration).timestamp())
# remove pending ignitions
self._pending_ignitions = {}
# Store firemap
self._fire_map = fireprop.ignitions()
        # Fuse the old fire map with the new one
self._fire_map.data["ignition"][mask] = old_fire_map["ignition"][mask]
# Advance time
self._current_time += duration
# Calculate perimeter
self._perimeter = fire_rs.geodata.wildfire.Perimeter(self._fire_map,
self.current_time.timestamp())
self._action_log.append(
(self._current_time,
"{} for {}".format("Propagate", str(duration))))
@property
def action_log(self) -> ty.Sequence[ty.Tuple[datetime.datetime, str]]:
return self._action_log
@property
def current_time(self) -> datetime.datetime:
return self._current_time
@property
def current_perimeter(self) -> ty.Optional[fire_rs.geodata.wildfire.Perimeter]:
return self._perimeter
@property
def fire_map(self) -> fire_rs.geodata.geo_data.GeoData:
return self._fire_map
def perimeter(self, threshold_time: ty.Union[datetime.datetime, float]):
t = threshold_time.timestamp() if isinstance(threshold_time,
datetime.datetime) else threshold_time
return fire_rs.geodata.wildfire.Perimeter(self._fire_map, t)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import fire_rs.geodata.display
# Test realwildfire
time = datetime.datetime.now()
area = [[480060.0, 485060.0], [6210074.0, 6215074.0]]
speed = 1.
direction = 0.
world = fire_rs.geodata.environment.World()
env = fire_rs.firemodel.propagation.Environment(area, speed, direction, world=world)
rw = RealWildfire(time, env)
actions = [(rw.ignite, ((area[0][0] + 1000.0, area[1][0] + 1000.),)),
(rw.propagate, (datetime.timedelta(minutes=30.),)),
(rw.change_wind, (3, np.pi / 4)),
(rw.propagate, (datetime.timedelta(minutes=31.),)),
(rw.change_wind, (3, np.pi / 2)),
(rw.ignite, ((area[0][0] + 3000.0, area[1][0] + 3000.),)),
(rw.propagate, (datetime.timedelta(minutes=32.),)),
(rw.change_wind, (3, 0.)),
(rw.propagate, (datetime.timedelta(minutes=33.),)),
(rw.change_wind, (3, np.pi / 4)),
(rw.propagate, (datetime.timedelta(minutes=34.),)),
(rw.change_wind, (3, np.pi / 2)),
(rw.propagate, (datetime.timedelta(minutes=35.),))
]
fig = plt.figure()
ax = fig.gca()
for action in actions:
fig.clear()
ax = fig.gca()
if len(action[1]) == 0:
action[0]()
else:
action[0](*action[1])
v_min = np.nanmin(rw.fire_map.data["ignition"][np.isfinite(rw.fire_map.data["ignition"])])
v_max = np.nanmax(rw.fire_map.data["ignition"][np.isfinite(rw.fire_map.data["ignition"])])
fig.colorbar(ax.matshow(rw.fire_map.data["ignition"], vmin=v_min, vmax=v_max),
format=fire_rs.geodata.display.SecondDateFormatter('%d/%m/%y %H:%M'), )
if rw.current_perimeter:
# if rw.current_perimeter.area_array is not None:
# ax.matshow(rw.current_perimeter.area_array)
fig.colorbar(
ax.matshow(rw.current_perimeter.array, cmap="Reds", vmin=v_min, vmax=v_max),
format=fire_rs.geodata.display.SecondDateFormatter('%d/%m/%y %H:%M'))
fig.show()
print("bye")
|
[
"numpy.isfinite",
"matplotlib.pyplot.figure",
"numpy.where",
"datetime.timedelta",
"datetime.datetime.now"
] |
[((5389, 5412), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5410, 5412), False, 'import datetime\n'), ((6491, 6503), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6501, 6503), True, 'import matplotlib.pyplot as plt\n'), ((3361, 3454), 'numpy.where', 'np.where', (["((old_fire_map.data['ignition'] > 0) & (old_fire_map.data['ignition'] < np.inf)\n )"], {}), "((old_fire_map.data['ignition'] > 0) & (old_fire_map.data[\n 'ignition'] < np.inf))\n", (3369, 3454), True, 'import numpy as np\n'), ((5781, 5813), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(30.0)'}), '(minutes=30.0)\n', (5799, 5813), False, 'import datetime\n'), ((5897, 5929), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(31.0)'}), '(minutes=31.0)\n', (5915, 5929), False, 'import datetime\n'), ((6087, 6119), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(32.0)'}), '(minutes=32.0)\n', (6105, 6119), False, 'import datetime\n'), ((6196, 6228), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(33.0)'}), '(minutes=33.0)\n', (6214, 6228), False, 'import datetime\n'), ((6312, 6344), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(34.0)'}), '(minutes=34.0)\n', (6330, 6344), False, 'import datetime\n'), ((6428, 6460), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(35.0)'}), '(minutes=35.0)\n', (6446, 6460), False, 'import datetime\n'), ((6755, 6796), 'numpy.isfinite', 'np.isfinite', (["rw.fire_map.data['ignition']"], {}), "(rw.fire_map.data['ignition'])\n", (6766, 6796), True, 'import numpy as np\n'), ((6854, 6895), 'numpy.isfinite', 'np.isfinite', (["rw.fire_map.data['ignition']"], {}), "(rw.fire_map.data['ignition'])\n", (6865, 6895), True, 'import numpy as np\n'), ((3548, 3582), 'numpy.isfinite', 'np.isfinite', (['self._perimeter.array'], {}), '(self._perimeter.array)\n', (3559, 3582), True, 'import numpy as np\n')]
|
#runas import numpy as np; n = 20; a = np.arange(n*n*n).reshape((n,n,n)).astype(np.uint8); b = 2. ; goodExpoMeasure(a, b)
#pythran export goodExpoMeasure(uint8[][][], float)
import numpy
def goodExpoMeasure(inRGB, sigma):
'''
    Compute the good-exposure (well-exposedness) image quality measure on a single input image.
'''
R = inRGB[0,:,:].astype(numpy.float64)
G = inRGB[1,:,:].astype(numpy.float64)
B = inRGB[2,:,:].astype(numpy.float64)
goodExpoR = numpy.exp(- ((R - 128)**2) / sigma)
goodExpoG = numpy.exp(- ((G - 128)**2) / sigma)
goodExpoB = numpy.exp(- ((B - 128)**2) / sigma)
goodExpo = goodExpoR * goodExpoG * goodExpoB
goodExpo = (numpy.round(goodExpo, 2) * (2**8-1)).astype(numpy.uint8)
return goodExpo
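
# Minimal usage sketch (hypothetical image and sigma value; mirrors the #runas line at the top):
#
#     rgb = numpy.random.randint(0, 256, size=(3, 64, 64)).astype(numpy.uint8)
#     weight = goodExpoMeasure(rgb, 200.)   # per-pixel weights in [0, 255], dtype uint8, shape (64, 64)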
|
[
"numpy.round",
"numpy.exp"
] |
[((455, 489), 'numpy.exp', 'numpy.exp', (['(-(R - 128) ** 2 / sigma)'], {}), '(-(R - 128) ** 2 / sigma)\n', (464, 489), False, 'import numpy\n'), ((507, 541), 'numpy.exp', 'numpy.exp', (['(-(G - 128) ** 2 / sigma)'], {}), '(-(G - 128) ** 2 / sigma)\n', (516, 541), False, 'import numpy\n'), ((559, 593), 'numpy.exp', 'numpy.exp', (['(-(B - 128) ** 2 / sigma)'], {}), '(-(B - 128) ** 2 / sigma)\n', (568, 593), False, 'import numpy\n'), ((662, 686), 'numpy.round', 'numpy.round', (['goodExpo', '(2)'], {}), '(goodExpo, 2)\n', (673, 686), False, 'import numpy\n')]
|
from __future__ import division, print_function
import numpy as np
from dipy.denoise.nlmeans_block import nlmeans_block
def non_local_means(arr, sigma, mask=None, patch_radius=1, block_radius=5,
rician=True):
r""" Non-local means for denoising 3D and 4D images, using
blockwise averaging approach
Parameters
----------
arr : 3D or 4D ndarray
The array to be denoised
    sigma : float
        standard deviation of the noise estimated from the data
    mask : 3D ndarray, optional
        mask restricting where the filter is applied; if None, the whole volume is denoised
patch_radius : int
patch size is ``2 x patch_radius + 1``. Default is 1.
block_radius : int
block size is ``2 x block_radius + 1``. Default is 5.
rician : boolean
If True the noise is estimated as Rician, otherwise Gaussian noise
is assumed.
Returns
-------
denoised_arr : ndarray
the denoised ``arr`` which has the same shape as ``arr``.
References
----------
.. [Coupe08] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, C.
Barillot, An Optimized Blockwise Non Local Means Denoising
Filter for 3D Magnetic Resonance Images, IEEE Transactions on
Medical Imaging, 27(4):425-441, 2008
.. [Coupe11] <NAME>, <NAME>, <NAME>, <NAME>.
Adaptive Multiresolution Non-Local Means Filter for 3D MR Image
Denoising IET Image Processing, Institution of Engineering and
Technology, 2011
"""
if not np.isscalar(sigma) and not sigma.shape == (1, ):
raise ValueError("Sigma input needs to be of type float", sigma)
if mask is None and arr.ndim > 2:
mask = np.ones((arr.shape[0], arr.shape[1], arr.shape[2]), dtype='f8')
else:
mask = np.ascontiguousarray(mask, dtype='f8')
if mask.ndim != 3:
raise ValueError('mask needs to be a 3D ndarray', mask.shape)
if arr.ndim == 3:
return np.array(nlmeans_block(
np.double(arr),
mask,
patch_radius,
block_radius,
sigma,
np.int(rician))).astype(arr.dtype)
elif arr.ndim == 4:
denoised_arr = np.zeros_like(arr)
for i in range(arr.shape[-1]):
denoised_arr[..., i] = np.array(nlmeans_block(np.double(
arr[..., i]), mask, patch_radius, block_radius, sigma,
np.int(rician))).astype(arr.dtype)
return denoised_arr
else:
raise ValueError("Only 3D or 4D array are supported!", arr.shape)
|
[
"numpy.zeros_like",
"numpy.double",
"numpy.isscalar",
"numpy.ones",
"numpy.int",
"numpy.ascontiguousarray"
] |
[((1669, 1732), 'numpy.ones', 'np.ones', (['(arr.shape[0], arr.shape[1], arr.shape[2])'], {'dtype': '"""f8"""'}), "((arr.shape[0], arr.shape[1], arr.shape[2]), dtype='f8')\n", (1676, 1732), True, 'import numpy as np\n'), ((1758, 1796), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['mask'], {'dtype': '"""f8"""'}), "(mask, dtype='f8')\n", (1778, 1796), True, 'import numpy as np\n'), ((1494, 1512), 'numpy.isscalar', 'np.isscalar', (['sigma'], {}), '(sigma)\n', (1505, 1512), True, 'import numpy as np\n'), ((2164, 2182), 'numpy.zeros_like', 'np.zeros_like', (['arr'], {}), '(arr)\n', (2177, 2182), True, 'import numpy as np\n'), ((1965, 1979), 'numpy.double', 'np.double', (['arr'], {}), '(arr)\n', (1974, 1979), True, 'import numpy as np\n'), ((2082, 2096), 'numpy.int', 'np.int', (['rician'], {}), '(rician)\n', (2088, 2096), True, 'import numpy as np\n'), ((2280, 2302), 'numpy.double', 'np.double', (['arr[..., i]'], {}), '(arr[..., i])\n', (2289, 2302), True, 'import numpy as np\n'), ((2378, 2392), 'numpy.int', 'np.int', (['rician'], {}), '(rician)\n', (2384, 2392), True, 'import numpy as np\n')]
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
from typing import Optional
import mxnet as mx
import numpy as np
import sockeye.constants as C
logger = logging.getLogger(__name__)
def get_initializer(default_init_type: str, default_init_scale: float, default_init_xavier_rand_type: str,
default_init_xavier_factor_type: str, embed_init_type: str, embed_init_sigma: float,
rnn_init_type: str) -> mx.initializer.Initializer:
"""
Returns a mixed MXNet initializer.
:param default_init_type: The default weight initializer type.
    :param default_init_scale: The scale used for default weight initialization (uniform scale or Xavier magnitude).
:param default_init_xavier_rand_type: Xavier random number generator type.
:param default_init_xavier_factor_type: Xavier factor type.
:param embed_init_type: Embedding matrix initialization type.
:param embed_init_sigma: Sigma for normal initialization of embedding matrix.
:param rnn_init_type: Initialization type for RNN h2h matrices.
:return: Mixed initializer.
"""
# default initializer
if default_init_type == C.INIT_XAVIER:
default_init = [(C.DEFAULT_INIT_PATTERN,
mx.init.Xavier(rnd_type=default_init_xavier_rand_type,
factor_type=default_init_xavier_factor_type,
magnitude=default_init_scale))]
elif default_init_type == C.INIT_UNIFORM:
default_init = [(C.DEFAULT_INIT_PATTERN, mx.init.Uniform(scale=default_init_scale))]
else:
raise ValueError("Unknown default initializer %s." % default_init_type)
# embedding initializer
if embed_init_type == C.EMBED_INIT_NORMAL:
embed_init = [(C.EMBED_INIT_PATTERN, mx.init.Normal(sigma=embed_init_sigma))]
elif embed_init_type == C.EMBED_INIT_DEFAULT:
embed_init = []
else:
raise ValueError('Unknown embedding initializer: %s' % embed_init_type)
# rnn initializer
if rnn_init_type == C.RNN_INIT_ORTHOGONAL:
rnn_init = [(C.RNN_INIT_PATTERN, mx.initializer.Orthogonal())]
elif rnn_init_type == C.RNN_INIT_ORTHOGONAL_STACKED:
rnn_init = [(C.RNN_INIT_PATTERN, StackedOrthogonalInit(scale=1.0, rand_type="eye"))]
elif rnn_init_type == C.RNN_INIT_DEFAULT:
rnn_init = []
else:
raise ValueError('Unknown RNN initializer: %s' % rnn_init_type)
params_init_pairs = embed_init + rnn_init + default_init
return mx.initializer.Mixed(*zip(*params_init_pairs))
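
# Minimal usage sketch (hypothetical parameter values; the C.* constants are the ones the
# function body above dispatches on):
#
#     initializer = get_initializer(default_init_type=C.INIT_XAVIER,
#                                   default_init_scale=2.34,
#                                   default_init_xavier_rand_type='uniform',
#                                   default_init_xavier_factor_type='avg',
#                                   embed_init_type=C.EMBED_INIT_DEFAULT,
#                                   embed_init_sigma=0.01,
#                                   rnn_init_type=C.RNN_INIT_DEFAULT)
#     # `initializer` is an mx.initializer.Mixed and can be passed to e.g. Module.init_params().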
@mx.init.register
class StackedOrthogonalInit(mx.initializer.Initializer):
"""
Initializes weight as Orthogonal matrix. Here we assume that the weight consists of stacked square matrices of
the same size.
For example one could have 3 (2,2) matrices resulting in a (6,2) matrix. This situation arises in RNNs when one
wants to perform multiple h2h transformations in a single matrix multiplication.
Reference:
Exact solutions to the nonlinear dynamics of learning in deep linear neural networks
arXiv preprint arXiv:1312.6120 (2013).
:param scale: Scaling factor of weight.
:param rand_type: use "uniform" or "normal" random number to initialize weight.
"eye" simply sets the matrix to an identity matrix.
"""
def __init__(self, scale=1.414, rand_type="uniform"):
super().__init__()
self.scale = scale
self.rand_type = rand_type
def _init_weight(self, sym_name, arr):
assert len(arr.shape) == 2, "Only 2d weight matrices supported."
base_dim = arr.shape[1]
stacked_dim = arr.shape[0] # base_dim * num_sub_matrices
assert stacked_dim % base_dim == 0, \
"Dim1 must be a multiple of dim2 (as weight = stacked square matrices)."
num_sub_matrices = stacked_dim // base_dim
logger.info("Initializing weight %s (shape=%s, num_sub_matrices=%d) with an orthogonal weight matrix.",
sym_name, arr.shape, num_sub_matrices)
for mat_idx in range(0, num_sub_matrices):
if self.rand_type == "uniform":
tmp = np.random.uniform(-1.0, 1.0, (base_dim, base_dim))
_, __, q = np.linalg.svd(tmp)
elif self.rand_type == "normal":
tmp = np.random.normal(0.0, 1.0, (base_dim, base_dim))
_, __, q = np.linalg.svd(tmp)
elif self.rand_type == "eye":
q = np.eye(base_dim)
else:
raise ValueError("unknown rand_type %s" % self.rand_type)
q = self.scale * q
arr[mat_idx * base_dim:mat_idx * base_dim + base_dim] = q
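

# Minimal sketch of the stacking convention (hypothetical shapes): a (6, 2) weight holds three
# stacked (2, 2) sub-matrices, each of which is overwritten with an orthogonal (here: identity) matrix.
if __name__ == '__main__':
    weight = mx.nd.zeros((6, 2))
    StackedOrthogonalInit(scale=1.0, rand_type="eye")._init_weight('rnn_h2h_weight', weight)
    print(weight.asnumpy())  # three stacked 2x2 identity matrices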
|
[
"mxnet.init.Uniform",
"numpy.random.uniform",
"mxnet.initializer.Orthogonal",
"numpy.linalg.svd",
"mxnet.init.Xavier",
"numpy.random.normal",
"numpy.eye",
"mxnet.init.Normal",
"logging.getLogger"
] |
[((689, 716), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (706, 716), False, 'import logging\n'), ((1780, 1914), 'mxnet.init.Xavier', 'mx.init.Xavier', ([], {'rnd_type': 'default_init_xavier_rand_type', 'factor_type': 'default_init_xavier_factor_type', 'magnitude': 'default_init_scale'}), '(rnd_type=default_init_xavier_rand_type, factor_type=\n default_init_xavier_factor_type, magnitude=default_init_scale)\n', (1794, 1914), True, 'import mxnet as mx\n'), ((2342, 2380), 'mxnet.init.Normal', 'mx.init.Normal', ([], {'sigma': 'embed_init_sigma'}), '(sigma=embed_init_sigma)\n', (2356, 2380), True, 'import mxnet as mx\n'), ((2658, 2685), 'mxnet.initializer.Orthogonal', 'mx.initializer.Orthogonal', ([], {}), '()\n', (2683, 2685), True, 'import mxnet as mx\n'), ((4712, 4762), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)', '(base_dim, base_dim)'], {}), '(-1.0, 1.0, (base_dim, base_dim))\n', (4729, 4762), True, 'import numpy as np\n'), ((4790, 4808), 'numpy.linalg.svd', 'np.linalg.svd', (['tmp'], {}), '(tmp)\n', (4803, 4808), True, 'import numpy as np\n'), ((2087, 2128), 'mxnet.init.Uniform', 'mx.init.Uniform', ([], {'scale': 'default_init_scale'}), '(scale=default_init_scale)\n', (2102, 2128), True, 'import mxnet as mx\n'), ((4876, 4924), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(1.0)', '(base_dim, base_dim)'], {}), '(0.0, 1.0, (base_dim, base_dim))\n', (4892, 4924), True, 'import numpy as np\n'), ((4952, 4970), 'numpy.linalg.svd', 'np.linalg.svd', (['tmp'], {}), '(tmp)\n', (4965, 4970), True, 'import numpy as np\n'), ((5033, 5049), 'numpy.eye', 'np.eye', (['base_dim'], {}), '(base_dim)\n', (5039, 5049), True, 'import numpy as np\n')]
|
"""
Algorithm entry point.
Methods of the APPO class initiate all other components (rollout & policy workers and learners) in the main thread,
and then fork their separate processes.
All data structures that are shared between processes are also created during the construction of APPO.
This class contains the algorithm main loop. All the actual work is done in separate worker processes, so
the only task of the main loop is to collect summaries and stats from the workers and log/save them to disk.
Hyperparameters specific to policy gradient algorithms are defined in this file. See also algorithm.py.
"""
import json
import math
import multiprocessing
import os
import time
from collections import deque
from os.path import join
from queue import Empty
import numpy as np
import torch
from tensorboardX import SummaryWriter
from torch.multiprocessing import JoinableQueue as TorchJoinableQueue
from multi_sample_factory.algorithms.algorithm import ReinforcementLearningAlgorithm
from multi_sample_factory.algorithms.appo.actor_worker import ActorWorker
from multi_sample_factory.algorithms.appo.appo_utils import make_env_func, iterate_recursively, set_global_cuda_envvars
from multi_sample_factory.algorithms.appo.learner import LearnerWorker
from multi_sample_factory.algorithms.appo.policy_worker import PolicyWorker
from multi_sample_factory.algorithms.appo.population_based_training import PopulationBasedTraining
from multi_sample_factory.algorithms.appo.shared_buffers import SharedBuffers
from multi_sample_factory.algorithms.utils.algo_utils import EXTRA_PER_POLICY_SUMMARIES, EXTRA_EPISODIC_STATS_PROCESSING, \
ExperimentStatus
from multi_sample_factory.envs.env_utils import get_default_reward_shaping
from multi_sample_factory.utils.timing import Timing
from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, \
ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger
from multi_sample_factory.algorithms.utils.action_distributions import transform_action_space
if os.name == 'nt':
from multi_sample_factory.utils import Queue as MpQueue
else:
from faster_fifo import Queue as MpQueue
# noinspection PyUnresolvedReferences
import faster_fifo_reduction
torch.multiprocessing.set_sharing_strategy('file_system')
class APPO(ReinforcementLearningAlgorithm):
"""Async PPO."""
@classmethod
def add_cli_args(cls, parser):
p = parser
super().add_cli_args(p)
p.add_argument('--experiment_summaries_interval', default=20, type=int, help='How often in seconds we write avg. statistics about the experiment (reward, episode length, extra stats...)')
        p.add_argument('--adam_eps', default=1e-6, type=float, help='Adam epsilon parameter (values between 1e-8 and 1e-5 seem to work reliably, while 1e-3 and above do not)')
p.add_argument('--adam_beta1', default=0.9, type=float, help='Adam momentum decay coefficient')
p.add_argument('--adam_beta2', default=0.999, type=float, help='Adam second momentum decay coefficient')
        p.add_argument('--gae_lambda', default=0.95, type=float, help='Generalized Advantage Estimation discounting (only used when V-trace is False)')
p.add_argument(
'--rollout', default=32, type=int,
help='Length of the rollout from each environment in timesteps.'
'Once we collect this many timesteps on actor worker, we send this trajectory to the learner.'
'The length of the rollout will determine how many timesteps are used to calculate bootstrapped'
'Monte-Carlo estimates of discounted rewards, advantages, GAE, or V-trace targets. Shorter rollouts'
'reduce variance, but the estimates are less precise (bias vs variance tradeoff).'
'For RNN policies, this should be a multiple of --recurrence, so every rollout will be split'
'into (n = rollout / recurrence) segments for backpropagation. V-trace algorithm currently requires that'
                 'rollout == recurrence, which is what you want most of the time anyway.'
'Rollout length is independent from the episode length. Episode length can be both shorter or longer than'
'rollout, although for PBT training it is currently recommended that rollout << episode_len'
'(see function finalize_trajectory in actor_worker.py)',
)
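        # Example (hypothetical numbers): with --rollout 32 and --recurrence 16, each rollout is
        # split into 32 / 16 = 2 segments for backpropagation through time; with --with_vtrace
        # enabled the two values are expected to be equal.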
p.add_argument('--num_workers', default=multiprocessing.cpu_count(), type=int, help='Number of parallel environment workers. Should be less than num_envs and should divide num_envs')
p.add_argument(
'--recurrence', default=32, type=int,
help='Trajectory length for backpropagation through time. If recurrence=1 there is no backpropagation through time, and experience is shuffled completely randomly'
'For V-trace recurrence should be equal to rollout length.',
)
p.add_argument('--use_rnn', default=True, type=str2bool, help='Whether to use RNN core in a policy or not')
p.add_argument('--rnn_type', default='gru', choices=['gru', 'lstm'], type=str, help='Type of RNN cell to use if use_rnn is True')
p.add_argument('--rnn_num_layers', default=1, type=int, help='Number of RNN layers to use if use_rnn is True')
        p.add_argument('--ppo_clip_ratio', default=0.1, type=float, help='We use the unbiased clip(x, 1/(1+e), 1+e) instead of the clip(x, 1-e, 1+e) from the paper')
p.add_argument('--ppo_clip_value', default=1.0, type=float, help='Maximum absolute change in value estimate until it is clipped. Sensitive to value magnitude')
p.add_argument('--batch_size', default=1024, type=int, help='Minibatch size for SGD')
p.add_argument(
'--num_batches_per_iteration', default=1, type=int,
help='How many minibatches we collect before training on the collected experience. It is generally recommended to set this to 1 for most experiments, because any higher value will increase the policy lag.'
'But in some specific circumstances it can be beneficial to have a larger macro-batch in order to shuffle and decorrelate the minibatches.'
'Here and throughout the codebase: macro batch is the portion of experience that learner processes per iteration (consisting of 1 or several minibatches)',
)
p.add_argument('--ppo_epochs', default=1, type=int, help='Number of training epochs before a new batch of experience is collected')
p.add_argument(
'--num_minibatches_to_accumulate', default=-1, type=int,
help='This parameter governs the maximum number of minibatches the learner can accumulate before further experience collection is stopped.'
'The default value (-1) will set this to 2 * num_batches_per_iteration, so if the experience collection is faster than the training,'
'the learner will accumulate enough minibatches for 2 iterations of training (but no more). This is a good balance between policy-lag and throughput.'
'When the limit is reached, the learner will notify the actor workers that they ought to stop the experience collection until accumulated minibatches'
'are processed. Set this parameter to 1 * num_batches_per_iteration to further reduce policy-lag.'
'If the experience collection is very non-uniform, increasing this parameter can increase overall throughput, at the cost of increased policy-lag.'
'A value of 0 is treated specially. This means the experience accumulation is turned off, and all experience collection will be halted during training.'
'This is the regime with potentially lowest policy-lag.'
'When this parameter is 0 and num_workers * num_envs_per_worker * rollout == num_batches_per_iteration * batch_size, the algorithm is similar to'
'regular synchronous PPO.',
)
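        # Worked example (hypothetical numbers): with num_workers=16, num_envs_per_worker=2,
        # rollout=32, batch_size=1024 and num_batches_per_iteration=1 we get
        # 16 * 2 * 32 = 1024 = 1 * 1024, so together with num_minibatches_to_accumulate=0 the
        # setup behaves much like regular synchronous PPO, as described in the help text above.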
p.add_argument('--max_grad_norm', default=4.0, type=float, help='Max L2 norm of the gradient vector')
# components of the loss function
p.add_argument('--exploration_loss_coeff', default=0.003, type=float,
help='Coefficient for the exploration component of the loss function.')
p.add_argument('--value_loss_coeff', default=0.5, type=float, help='Coefficient for the critic loss')
p.add_argument('--kl_loss_coeff', default=0.0, type=float,
help='Coefficient for fixed KL loss (as used by Schulman et al. in https://arxiv.org/pdf/1707.06347.pdf). '
'Highly recommended for environments with continuous action spaces.',
)
p.add_argument('--exploration_loss', default='entropy', type=str, choices=['entropy', 'symmetric_kl'],
help='Usually the exploration loss is based on maximizing the entropy of the probability'
' distribution. Note that mathematically maximizing entropy of the categorical probability '
'distribution is exactly the same as minimizing the (regular) KL-divergence between'
' this distribution and a uniform prior. The downside of using the entropy term '
                        '(or regular asymmetric KL-divergence) is the fact that the penalty does not increase as '
'probabilities of some actions approach zero. I.e. numerically, there is almost '
'no difference between an action distribution with a probability epsilon > 0 for '
'some action and an action distribution with a probability = zero for this action.'
                        ' For many tasks the first (epsilon) distribution is preferable because we keep some '
'(albeit small) amount of exploration, while the second distribution will never explore '
'this action ever again.'
'Unlike the entropy term, symmetric KL divergence between the action distribution '
'and a uniform prior approaches infinity when entropy of the distribution approaches zero,'
' so it can prevent the pathological situations where the agent stops exploring. '
'Empirically, symmetric KL-divergence yielded slightly better results on some problems.',
)
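        # Worked example (hypothetical two-action policy): for p = (1 - eps, eps) and a uniform
        # prior u = (0.5, 0.5), the entropy bonus tends to zero as eps -> 0, while the symmetric
        # KL term KL(p||u) + KL(u||p) contains a -0.5 * log(eps) contribution and therefore grows
        # without bound, which is the property the help text above relies on to keep exploration alive.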
# APPO-specific
p.add_argument(
'--num_envs_per_worker', default=2, type=int,
            help='Number of envs on a single CPU actor; in high-throughput configurations this should be in the 10-30 range for Atari/VizDoom'
'Must be even for double-buffered sampling!',
)
p.add_argument(
'--worker_num_splits', default=2, type=int,
help='Typically we split a vector of envs into two parts for "double buffered" experience collection'
'Set this to 1 to disable double buffering. Set this to 3 for triple buffering!',
)
p.add_argument('--num_policies', default=1, type=int, help='Number of policies to train jointly')
p.add_argument('--policy_workers_per_policy', default=1, type=int, help='Number of policy workers that compute forward pass (per policy)')
p.add_argument(
'--max_policy_lag', default=10000, type=int,
help='Max policy lag in policy versions. Discard all experience that is older than this. This should be increased for configurations with multiple epochs of SGD because naturally'
'policy-lag may exceed this value.',
)
p.add_argument(
'--traj_buffers_excess_ratio', default=1.3, type=float,
help='Increase this value to make sure the system always has enough free trajectory buffers (can be useful when i.e. a lot of inactive agents in multi-agent envs)'
'Decrease this to 1.0 to save as much RAM as possible.',
)
p.add_argument(
'--decorrelate_experience_max_seconds', default=10, type=int,
help='Decorrelating experience serves two benefits. First: this is better for learning because samples from workers come from random moments in the episode, becoming more "i.i.d".'
                 'Second, and more importantly: this is good for environments with highly non-uniform one-step times, including long and expensive episode resets. If experience is not decorrelated'
'then training batches will come in bursts e.g. after a bunch of environments finished resets and many iterations on the learner might be required,'
'which will increase the policy-lag of the new experience collected. The performance of the Sample Factory is best when experience is generated as more-or-less'
'uniform stream. Try increasing this to 100-200 seconds to smoothen the experience distribution in time right from the beginning (it will eventually spread out and settle anyway)',
)
p.add_argument(
'--decorrelate_envs_on_one_worker', default=True, type=str2bool,
help='In addition to temporal decorrelation of worker processes, also decorrelate envs within one worker process'
'For environments with a fixed episode length it can prevent the reset from happening in the same rollout for all envs simultaneously, which makes experience collection more uniform.',
)
p.add_argument('--with_vtrace', default=True, type=str2bool, help='Enables V-trace off-policy correction. If this is True, then GAE is not used')
p.add_argument('--vtrace_rho', default=1.0, type=float, help='rho_hat clipping parameter of the V-trace algorithm (importance sampling truncation)')
p.add_argument('--vtrace_c', default=1.0, type=float, help='c_hat clipping parameter of the V-trace algorithm. Low values for c_hat can reduce variance of the advantage estimates (similar to GAE lambda < 1)')
p.add_argument(
'--set_workers_cpu_affinity', default=True, type=str2bool,
            help='Whether to assign workers to specific CPU cores or not. The logic is beneficial for most workloads because it prevents a lot of context switching.'
'However for some environments it can be better to disable it, to allow one worker to use all cores some of the time. This can be the case for some DMLab environments with very expensive episode reset'
'that can use parallel CPU cores for level generation.',
)
p.add_argument(
'--force_envs_single_thread', default=True, type=str2bool,
            help='Some environments may themselves use parallel libraries such as OpenMP or MKL. Since we parallelize environments on the level of workers, there is no need for this internal parallelism.'
'This flag uses threadpoolctl to force libraries such as OpenMP and MKL to use only a single thread within the environment.'
'Default value (True) is recommended unless you are running fewer workers than CPU cores.',
)
p.add_argument('--reset_timeout_seconds', default=120, type=int, help='Fail worker on initialization if not a single environment was reset in this time (worker probably got stuck)')
p.add_argument('--default_niceness', default=0, type=int, help='Niceness of the highest priority process (the learner). Values below zero require elevated privileges.')
p.add_argument(
'--train_in_background_thread', default=True, type=str2bool,
help='Using background thread for training is faster and allows preparing the next batch while training is in progress.'
'Unfortunately debugging can become very tricky in this case. So there is an option to use only a single thread on the learner to simplify the debugging.',
)
p.add_argument('--learner_main_loop_num_cores', default=1, type=int, help='When batching on the learner is the bottleneck, increasing the number of cores PyTorch uses can improve the performance')
        p.add_argument('--actor_worker_gpus', default=[], type=int, nargs='*', help='By default, actor workers only use CPUs. Change this if, e.g., you need GPU-based rendering on the actors')
# PBT stuff
p.add_argument('--with_pbt', default=False, type=str2bool, help='Enables population-based training basic features')
p.add_argument('--pbt_mix_policies_in_one_env', default=True, type=str2bool, help='For multi-agent envs, whether we mix different policies in one env.')
p.add_argument('--pbt_period_env_steps', default=int(5e6), type=int, help='Periodically replace the worst policies with the best ones and perturb the hyperparameters')
p.add_argument('--pbt_start_mutation', default=int(2e7), type=int, help='Allow initial diversification, start PBT after this many env steps')
p.add_argument('--pbt_replace_fraction', default=0.3, type=float, help='A portion of policies performing worst to be replace by better policies (rounded up)')
p.add_argument('--pbt_mutation_rate', default=0.15, type=float, help='Probability that a parameter mutates')
p.add_argument('--pbt_replace_reward_gap', default=0.1, type=float, help='Relative gap in true reward when replacing weights of the policy with a better performing one')
p.add_argument('--pbt_replace_reward_gap_absolute', default=1e-6, type=float, help='Absolute gap in true reward when replacing weights of the policy with a better performing one')
p.add_argument('--pbt_optimize_batch_size', default=False, type=str2bool, help='Whether to optimize batch size or not (experimental)')
p.add_argument(
'--pbt_target_objective', default='true_reward', type=str,
help='Policy stat to optimize with PBT. true_reward (default) is equal to raw env reward if not specified, but can also be any other per-policy stat.'
'For DMlab-30 use value "dmlab_target_objective" (which is capped human normalized score)',
)
# CPC|A options
p.add_argument('--use_cpc', default=False, type=str2bool, help='Use CPC|A as an auxiliary loss durning learning')
p.add_argument('--cpc_forward_steps', default=8, type=int, help='Number of forward prediction steps for CPC')
p.add_argument('--cpc_time_subsample', default=6, type=int, help='Number of timesteps to sample from each batch. This should be less than recurrence to decorrelate experience.')
p.add_argument('--cpc_forward_subsample', default=2, type=int, help='Number of forward steps to sample for loss computation. This should be less than cpc_forward_steps to decorrelate gradients.')
# debugging options
p.add_argument('--benchmark', default=False, type=str2bool, help='Benchmark mode')
p.add_argument('--sampler_only', default=False, type=str2bool, help='Do not send experience to the learner, measuring sampling throughput')
def __init__(self, cfg):
super().__init__(cfg)
# we should not use CUDA in the main thread, only on the workers
set_global_cuda_envvars(cfg)
tmp_env = make_env_func(self.cfg, env_config=None)
self.obs_space = tmp_env.observation_space
self.action_space = transform_action_space(tmp_env.action_space)
self.num_agents = tmp_env.num_agents
self.reward_shaping_scheme = None
if self.cfg.with_pbt:
self.reward_shaping_scheme = get_default_reward_shaping(tmp_env)
tmp_env.close()
# shared memory allocation
self.traj_buffers = SharedBuffers(self.cfg, self.num_agents, self.obs_space, self.action_space)
self.actor_workers = None
self.report_queue = MpQueue(40 * 1000 * 1000)
self.policy_workers = dict()
self.policy_queues = dict()
self.learner_workers = dict()
self.workers_by_handle = None
self.policy_inputs = [[] for _ in range(self.cfg.num_policies)]
self.policy_outputs = dict()
for worker_idx in range(self.cfg.num_workers):
for split_idx in range(self.cfg.worker_num_splits):
self.policy_outputs[(worker_idx, split_idx)] = dict()
self.policy_avg_stats = dict()
self.policy_lag = [dict() for _ in range(self.cfg.num_policies)]
self.last_timing = dict()
self.env_steps = dict()
self.samples_collected = [0 for _ in range(self.cfg.num_policies)]
self.total_env_steps_since_resume = 0
# currently this applies only to the current run, not experiment as a whole
# to change this behavior we'd need to save the state of the main loop to a filesystem
self.total_train_seconds = 0
self.last_report = time.time()
self.last_experiment_summaries = 0
self.report_interval = 5.0 # sec
self.experiment_summaries_interval = self.cfg.experiment_summaries_interval # sec
self.avg_stats_intervals = (2, 12, 60) # 10 seconds, 1 minute, 5 minutes
self.fps_stats = deque([], maxlen=max(self.avg_stats_intervals))
self.throughput_stats = [deque([], maxlen=5) for _ in range(self.cfg.num_policies)]
self.avg_stats = dict()
self.stats = dict() # regular (non-averaged) stats
self.writers = dict()
writer_keys = list(range(self.cfg.num_policies))
for key in writer_keys:
summary_dir = join(summaries_dir(experiment_dir(cfg=self.cfg)), str(key))
summary_dir = ensure_dir_exists(summary_dir)
self.writers[key] = SummaryWriter(summary_dir, flush_secs=20)
self.pbt = PopulationBasedTraining(self.cfg, self.reward_shaping_scheme, self.writers)
def _cfg_dict(self):
if isinstance(self.cfg, dict):
return self.cfg
else:
return vars(self.cfg)
def _save_cfg(self):
cfg_dict = self._cfg_dict()
with open(cfg_file(self.cfg), 'w') as json_file:
json.dump(cfg_dict, json_file, indent=2)
def initialize(self):
self._save_cfg()
save_git_diff(experiment_dir(cfg=self.cfg))
init_file_logger(experiment_dir(self.cfg))
def finalize(self):
pass
def create_actor_worker(self, idx, actor_queue):
learner_queues = {p: w.task_queue for p, w in self.learner_workers.items()}
return ActorWorker(
self.cfg, self.obs_space, self.action_space, self.num_agents, idx, self.traj_buffers,
task_queue=actor_queue, policy_queues=self.policy_queues,
report_queue=self.report_queue, learner_queues=learner_queues,
)
# noinspection PyProtectedMember
def init_subset(self, indices, actor_queues):
"""
Initialize a subset of actor workers (rollout workers) and wait until the first reset() is completed for all
envs on these workers.
This function will retry if the worker process crashes during the initial reset.
:param indices: indices of actor workers to initialize
:param actor_queues: task queues corresponding to these workers
:return: initialized workers
"""
reset_timelimit_seconds = self.cfg.reset_timeout_seconds # fail worker if not a single env was reset in that time
workers = dict()
last_env_initialized = dict()
for i in indices:
w = self.create_actor_worker(i, actor_queues[i])
w.init()
w.request_reset()
workers[i] = w
last_env_initialized[i] = time.time()
total_num_envs = self.cfg.num_workers * self.cfg.num_envs_per_worker
envs_initialized = [0] * self.cfg.num_workers
workers_finished = set()
while len(workers_finished) < len(workers):
failed_worker = -1
try:
report = self.report_queue.get(timeout=1.0)
if 'initialized_env' in report:
worker_idx, split_idx, env_i = report['initialized_env']
last_env_initialized[worker_idx] = time.time()
envs_initialized[worker_idx] += 1
log.debug(
'Progress for %d workers: %d/%d envs initialized...',
len(indices), sum(envs_initialized), total_num_envs,
)
elif 'finished_reset' in report:
workers_finished.add(report['finished_reset'])
elif 'critical_error' in report:
failed_worker = report['critical_error']
except Empty:
pass
for worker_idx, w in workers.items():
if worker_idx in workers_finished:
continue
time_passed = time.time() - last_env_initialized[worker_idx]
timeout = time_passed > reset_timelimit_seconds
if timeout or failed_worker == worker_idx or not w.process.is_alive():
envs_initialized[worker_idx] = 0
log.error('Worker %d is stuck or failed (%.3f). Reset!', w.worker_idx, time_passed)
log.debug('Status: %r', w.process.is_alive())
stuck_worker = w
stuck_worker.process.kill()
new_worker = self.create_actor_worker(worker_idx, actor_queues[worker_idx])
new_worker.init()
new_worker.request_reset()
last_env_initialized[worker_idx] = time.time()
workers[worker_idx] = new_worker
del stuck_worker
return workers.values()
# noinspection PyUnresolvedReferences
def init_workers(self):
"""
Initialize all types of workers and start their worker processes.
"""
actor_queues = [MpQueue(2 * 1000 * 1000) for _ in range(self.cfg.num_workers)]
policy_worker_queues = dict()
for policy_id in range(self.cfg.num_policies):
policy_worker_queues[policy_id] = []
for i in range(self.cfg.policy_workers_per_policy):
policy_worker_queues[policy_id].append(TorchJoinableQueue())
log.info('Initializing learners...')
policy_locks = [multiprocessing.Lock() for _ in range(self.cfg.num_policies)]
resume_experience_collection_cv = [multiprocessing.Condition() for _ in range(self.cfg.num_policies)]
learner_idx = 0
for policy_id in range(self.cfg.num_policies):
learner_worker = LearnerWorker(
learner_idx, policy_id, self.cfg, self.obs_space, self.action_space,
self.report_queue, policy_worker_queues[policy_id], self.traj_buffers,
policy_locks[policy_id], resume_experience_collection_cv[policy_id],
)
learner_worker.start_process()
learner_worker.init()
self.learner_workers[policy_id] = learner_worker
learner_idx += 1
log.info('Initializing policy workers...')
for policy_id in range(self.cfg.num_policies):
self.policy_workers[policy_id] = []
policy_queue = MpQueue()
self.policy_queues[policy_id] = policy_queue
for i in range(self.cfg.policy_workers_per_policy):
policy_worker = PolicyWorker(
i, policy_id, self.cfg, self.obs_space, self.action_space, self.traj_buffers,
policy_queue, actor_queues, self.report_queue, policy_worker_queues[policy_id][i],
policy_locks[policy_id], resume_experience_collection_cv[policy_id],
)
self.policy_workers[policy_id].append(policy_worker)
policy_worker.start_process()
log.info('Initializing actors...')
# We support actor worker initialization in groups, which can be useful for some envs that
# e.g. crash when too many environments are being initialized in parallel.
# Currently the limit is not used since it is not required for any envs supported out of the box,
# so we parallelize initialization as hard as we can.
# If this is required for your environment, perhaps a better solution would be to use global locks,
# like FileLock (see doom_gym.py)
self.actor_workers = []
max_parallel_init = int(1e9) # might be useful to limit this for some envs
worker_indices = list(range(self.cfg.num_workers))
for i in range(0, self.cfg.num_workers, max_parallel_init):
workers = self.init_subset(worker_indices[i:i + max_parallel_init], actor_queues)
self.actor_workers.extend(workers)
def init_pbt(self):
if self.cfg.with_pbt:
self.pbt.init(self.learner_workers, self.actor_workers)
def finish_initialization(self):
"""Wait until policy workers are fully initialized."""
for policy_id, workers in self.policy_workers.items():
for w in workers:
log.debug('Waiting for policy worker %d-%d to finish initialization...', policy_id, w.worker_idx)
w.init()
log.debug('Policy worker %d-%d initialized!', policy_id, w.worker_idx)
def update_env_steps_actor(self):
for w in self.actor_workers:
w.update_env_steps(self.env_steps)
def process_report(self, report):
"""Process stats from various types of workers."""
if 'policy_id' in report:
policy_id = report['policy_id']
if 'learner_env_steps' in report:
if policy_id in self.env_steps:
delta = report['learner_env_steps'] - self.env_steps[policy_id]
self.total_env_steps_since_resume += delta
self.env_steps[policy_id] = report['learner_env_steps']
if 'episodic' in report:
s = report['episodic']
for _, key, value in iterate_recursively(s):
if key not in self.policy_avg_stats:
self.policy_avg_stats[key] = [deque(maxlen=self.cfg.stats_avg) for _ in range(self.cfg.num_policies)]
self.policy_avg_stats[key][policy_id].append(value)
for extra_stat_func in EXTRA_EPISODIC_STATS_PROCESSING:
extra_stat_func(policy_id, key, value, self.cfg)
if 'train' in report:
self.report_train_summaries(report['train'], policy_id)
if 'samples' in report:
self.samples_collected[policy_id] += report['samples']
if 'timing' in report:
for k, v in report['timing'].items():
if k not in self.avg_stats:
self.avg_stats[k] = deque([], maxlen=50)
self.avg_stats[k].append(v)
if 'stats' in report:
self.stats.update(report['stats'])
def report(self):
"""
Called periodically (every X seconds, see report_interval).
Print experiment stats (FPS, avg rewards) to console and dump TF summaries collected from workers to disk.
"""
if len(self.env_steps) < self.cfg.num_policies:
return
now = time.time()
self.fps_stats.append((now, self.total_env_steps_since_resume))
if len(self.fps_stats) <= 1:
return
fps = []
for avg_interval in self.avg_stats_intervals:
past_moment, past_frames = self.fps_stats[max(0, len(self.fps_stats) - 1 - avg_interval)]
fps.append((self.total_env_steps_since_resume - past_frames) / (now - past_moment))
sample_throughput = dict()
for policy_id in range(self.cfg.num_policies):
self.throughput_stats[policy_id].append((now, self.samples_collected[policy_id]))
if len(self.throughput_stats[policy_id]) > 1:
past_moment, past_samples = self.throughput_stats[policy_id][0]
sample_throughput[policy_id] = (self.samples_collected[policy_id] - past_samples) / (now - past_moment)
else:
sample_throughput[policy_id] = math.nan
total_env_steps = sum(self.env_steps.values())
self.print_stats(fps, sample_throughput, total_env_steps)
if time.time() - self.last_experiment_summaries > self.experiment_summaries_interval:
self.report_experiment_summaries(fps[0], sample_throughput)
self.last_experiment_summaries = time.time()
def print_stats(self, fps, sample_throughput, total_env_steps):
fps_str = []
for interval, fps_value in zip(self.avg_stats_intervals, fps):
fps_str.append(f'{int(interval * self.report_interval)} sec: {fps_value:.1f}')
fps_str = f'({", ".join(fps_str)})'
samples_per_policy = ', '.join([f'{p}: {s:.1f}' for p, s in sample_throughput.items()])
lag_stats = self.policy_lag[0]
lag = AttrDict()
for key in ['min', 'avg', 'max']:
lag[key] = lag_stats.get(f'version_diff_{key}', -1)
policy_lag_str = f'min: {lag.min:.1f}, avg: {lag.avg:.1f}, max: {lag.max:.1f}'
log.debug(
'Fps is %s. Total num frames: %d. Throughput: %s. Samples: %d. Policy #0 lag: (%s)',
fps_str, total_env_steps, samples_per_policy, sum(self.samples_collected), policy_lag_str,
)
if 'reward' in self.policy_avg_stats:
policy_reward_stats = []
for policy_id in range(self.cfg.num_policies):
reward_stats = self.policy_avg_stats['reward'][policy_id]
if len(reward_stats) > 0:
policy_reward_stats.append((policy_id, f'{np.mean(reward_stats):.3f}'))
log.debug('Avg episode reward: %r', policy_reward_stats)
def report_train_summaries(self, stats, policy_id):
#if(self.learner_workers[0].get_rank != 0):
# return
for key, scalar in stats.items():
self.writers[policy_id].add_scalar(f'train/{key}', scalar, self.env_steps[policy_id])
if 'version_diff' in key:
self.policy_lag[policy_id][key] = scalar
def report_experiment_summaries(self, fps, sample_throughput):
#if(self.learner_workers[0].get_rank != 0):
# return
memory_mb = memory_consumption_mb()
default_policy = 0
for policy_id, env_steps in self.env_steps.items():
if policy_id == default_policy:
self.writers[policy_id].add_scalar('0_aux/_fps', fps, env_steps)
self.writers[policy_id].add_scalar('0_aux/master_process_memory_mb', float(memory_mb), env_steps)
for key, value in self.avg_stats.items():
if len(value) >= value.maxlen or (len(value) > 10 and self.total_train_seconds > 300):
self.writers[policy_id].add_scalar(f'stats/{key}', np.mean(value), env_steps)
for key, value in self.stats.items():
self.writers[policy_id].add_scalar(f'stats/{key}', value, env_steps)
if not math.isnan(sample_throughput[policy_id]):
self.writers[policy_id].add_scalar('0_aux/_sample_throughput', sample_throughput[policy_id], env_steps)
for key, stat in self.policy_avg_stats.items():
if len(stat[policy_id]) >= stat[policy_id].maxlen or (len(stat[policy_id]) > 10 and self.total_train_seconds > 300):
stat_value = np.mean(stat[policy_id])
writer = self.writers[policy_id]
# custom summaries have their own sections in tensorboard
if '/' in key:
avg_tag = key
min_tag = f'{key}_min'
max_tag = f'{key}_max'
else:
avg_tag = f'0_aux/avg_{key}'
min_tag = f'0_aux/avg_{key}_min'
max_tag = f'0_aux/avg_{key}_max'
writer.add_scalar(avg_tag, float(stat_value), env_steps)
# for key stats report min/max as well
if key in ('reward', 'true_reward', 'len'):
writer.add_scalar(min_tag, float(min(stat[policy_id])), env_steps)
writer.add_scalar(max_tag, float(max(stat[policy_id])), env_steps)
for extra_summaries_func in EXTRA_PER_POLICY_SUMMARIES:
extra_summaries_func(policy_id, self.policy_avg_stats, env_steps, self.writers[policy_id], self.cfg)
def _should_end_training(self):
end = len(self.env_steps) > 0 and all(s > self.cfg.train_for_env_steps for s in self.env_steps.values())
end |= self.total_train_seconds > self.cfg.train_for_seconds
if self.cfg.benchmark:
end |= self.total_env_steps_since_resume >= int(2e6)
end |= sum(self.samples_collected) >= int(1e6)
return end
def run(self):
"""
This function contains the main loop of the algorithm, as well as initialization/cleanup code.
:return: ExperimentStatus (SUCCESS, FAILURE, INTERRUPTED). Useful in testing.
"""
status = ExperimentStatus.SUCCESS
if os.path.isfile(done_filename(self.cfg)):
log.warning('Training already finished! Remove "done" file to continue training')
return status
self.init_workers()
self.init_pbt()
self.finish_initialization()
log.info('Collecting experience...')
timing = Timing()
with timing.timeit('experience'):
# noinspection PyBroadException
try:
while not self._should_end_training():
try:
reports = self.report_queue.get_many(timeout=0.1)
for report in reports:
self.process_report(report)
except Empty:
pass
if time.time() - self.last_report > self.report_interval:
self.report()
now = time.time()
self.total_train_seconds += now - self.last_report
self.last_report = now
self.update_env_steps_actor()
self.pbt.update(self.env_steps, self.policy_avg_stats)
except Exception:
log.exception('Exception in driver loop')
status = ExperimentStatus.FAILURE
except KeyboardInterrupt:
log.warning('Keyboard interrupt detected in driver loop, exiting...')
status = ExperimentStatus.INTERRUPTED
for learner in self.learner_workers.values():
# timeout is needed here because some environments may crash on KeyboardInterrupt (e.g. VizDoom)
# Therefore the learner train loop will never do another iteration and will never save the model.
# This is not an issue with normal exit, e.g. due to desired number of frames reached.
learner.save_model(timeout=5.0)
all_workers = self.actor_workers
for workers in self.policy_workers.values():
all_workers.extend(workers)
all_workers.extend(self.learner_workers.values())
child_processes = list_child_processes()
time.sleep(0.1)
log.debug('Closing workers...')
for i, w in enumerate(all_workers):
w.close()
time.sleep(0.01)
for i, w in enumerate(all_workers):
w.join()
log.debug('Workers joined!')
# VizDoom processes often refuse to die for an unidentified reason, so we're force killing them with a hack
kill_processes(child_processes)
fps = self.total_env_steps_since_resume / timing.experience
log.info('Collected %r, FPS: %.1f', self.env_steps, fps)
log.info('Timing: %s', timing)
if self._should_end_training():
with open(done_filename(self.cfg), 'w') as fobj:
fobj.write(f'{self.env_steps}')
time.sleep(0.5)
log.info('Done!')
return status
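
# Minimal driver sketch (hypothetical: assumes `cfg` is an argparse namespace populated with the
# CLI arguments registered in add_cli_args above, e.g. by the surrounding framework's entry point):
#
#     algo = APPO(cfg)
#     algo.initialize()
#     status = algo.run()    # blocks until training finishes, then joins and cleans up the workers
#     algo.finalize()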
|
[
"multi_sample_factory.utils.utils.log.error",
"multi_sample_factory.utils.utils.cfg_file",
"multiprocessing.Lock",
"numpy.mean",
"multi_sample_factory.utils.utils.list_child_processes",
"collections.deque",
"multiprocessing.cpu_count",
"multi_sample_factory.algorithms.appo.population_based_training.PopulationBasedTraining",
"multi_sample_factory.algorithms.appo.appo_utils.set_global_cuda_envvars",
"multi_sample_factory.algorithms.utils.action_distributions.transform_action_space",
"torch.multiprocessing.set_sharing_strategy",
"multi_sample_factory.utils.utils.memory_consumption_mb",
"multi_sample_factory.utils.utils.experiment_dir",
"multiprocessing.Condition",
"multi_sample_factory.envs.env_utils.get_default_reward_shaping",
"multi_sample_factory.utils.utils.log.exception",
"multi_sample_factory.utils.utils.ensure_dir_exists",
"json.dump",
"multi_sample_factory.algorithms.appo.actor_worker.ActorWorker",
"multi_sample_factory.algorithms.appo.appo_utils.make_env_func",
"multi_sample_factory.utils.utils.log.info",
"multi_sample_factory.algorithms.appo.appo_utils.iterate_recursively",
"math.isnan",
"multi_sample_factory.algorithms.appo.policy_worker.PolicyWorker",
"multi_sample_factory.algorithms.appo.shared_buffers.SharedBuffers",
"time.sleep",
"multi_sample_factory.utils.utils.kill_processes",
"multi_sample_factory.utils.utils.done_filename",
"multi_sample_factory.algorithms.appo.learner.LearnerWorker",
"tensorboardX.SummaryWriter",
"multi_sample_factory.utils.utils.log.warning",
"faster_fifo.Queue",
"multi_sample_factory.utils.timing.Timing",
"time.time",
"torch.multiprocessing.JoinableQueue",
"multi_sample_factory.utils.utils.log.debug",
"multi_sample_factory.utils.utils.AttrDict"
] |
[((2328, 2385), 'torch.multiprocessing.set_sharing_strategy', 'torch.multiprocessing.set_sharing_strategy', (['"""file_system"""'], {}), "('file_system')\n", (2370, 2385), False, 'import torch\n'), ((19399, 19427), 'multi_sample_factory.algorithms.appo.appo_utils.set_global_cuda_envvars', 'set_global_cuda_envvars', (['cfg'], {}), '(cfg)\n', (19422, 19427), False, 'from multi_sample_factory.algorithms.appo.appo_utils import make_env_func, iterate_recursively, set_global_cuda_envvars\n'), ((19447, 19487), 'multi_sample_factory.algorithms.appo.appo_utils.make_env_func', 'make_env_func', (['self.cfg'], {'env_config': 'None'}), '(self.cfg, env_config=None)\n', (19460, 19487), False, 'from multi_sample_factory.algorithms.appo.appo_utils import make_env_func, iterate_recursively, set_global_cuda_envvars\n'), ((19567, 19611), 'multi_sample_factory.algorithms.utils.action_distributions.transform_action_space', 'transform_action_space', (['tmp_env.action_space'], {}), '(tmp_env.action_space)\n', (19589, 19611), False, 'from multi_sample_factory.algorithms.utils.action_distributions import transform_action_space\n'), ((19896, 19971), 'multi_sample_factory.algorithms.appo.shared_buffers.SharedBuffers', 'SharedBuffers', (['self.cfg', 'self.num_agents', 'self.obs_space', 'self.action_space'], {}), '(self.cfg, self.num_agents, self.obs_space, self.action_space)\n', (19909, 19971), False, 'from multi_sample_factory.algorithms.appo.shared_buffers import SharedBuffers\n'), ((20036, 20061), 'faster_fifo.Queue', 'MpQueue', (['(40 * 1000 * 1000)'], {}), '(40 * 1000 * 1000)\n', (20043, 20061), True, 'from faster_fifo import Queue as MpQueue\n'), ((21058, 21069), 'time.time', 'time.time', ([], {}), '()\n', (21067, 21069), False, 'import time\n'), ((21945, 22020), 'multi_sample_factory.algorithms.appo.population_based_training.PopulationBasedTraining', 'PopulationBasedTraining', (['self.cfg', 'self.reward_shaping_scheme', 'self.writers'], {}), '(self.cfg, self.reward_shaping_scheme, self.writers)\n', (21968, 22020), False, 'from multi_sample_factory.algorithms.appo.population_based_training import PopulationBasedTraining\n'), ((22681, 22913), 'multi_sample_factory.algorithms.appo.actor_worker.ActorWorker', 'ActorWorker', (['self.cfg', 'self.obs_space', 'self.action_space', 'self.num_agents', 'idx', 'self.traj_buffers'], {'task_queue': 'actor_queue', 'policy_queues': 'self.policy_queues', 'report_queue': 'self.report_queue', 'learner_queues': 'learner_queues'}), '(self.cfg, self.obs_space, self.action_space, self.num_agents,\n idx, self.traj_buffers, task_queue=actor_queue, policy_queues=self.\n policy_queues, report_queue=self.report_queue, learner_queues=\n learner_queues)\n', (22692, 22913), False, 'from multi_sample_factory.algorithms.appo.actor_worker import ActorWorker\n'), ((26522, 26558), 'multi_sample_factory.utils.utils.log.info', 'log.info', (['"""Initializing learners..."""'], {}), "('Initializing learners...')\n", (26530, 26558), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((27327, 27369), 'multi_sample_factory.utils.utils.log.info', 'log.info', (['"""Initializing policy workers..."""'], {}), "('Initializing policy workers...')\n", (27335, 27369), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, 
list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((28111, 28145), 'multi_sample_factory.utils.utils.log.info', 'log.info', (['"""Initializing actors..."""'], {}), "('Initializing actors...')\n", (28119, 28145), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((31577, 31588), 'time.time', 'time.time', ([], {}), '()\n', (31586, 31588), False, 'import time\n'), ((33297, 33307), 'multi_sample_factory.utils.utils.AttrDict', 'AttrDict', ([], {}), '()\n', (33305, 33307), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((34675, 34698), 'multi_sample_factory.utils.utils.memory_consumption_mb', 'memory_consumption_mb', ([], {}), '()\n', (34696, 34698), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((37877, 37913), 'multi_sample_factory.utils.utils.log.info', 'log.info', (['"""Collecting experience..."""'], {}), "('Collecting experience...')\n", (37885, 37913), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((37932, 37940), 'multi_sample_factory.utils.timing.Timing', 'Timing', ([], {}), '()\n', (37938, 37940), False, 'from multi_sample_factory.utils.timing import Timing\n'), ((39731, 39753), 'multi_sample_factory.utils.utils.list_child_processes', 'list_child_processes', ([], {}), '()\n', (39751, 39753), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((39763, 39778), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (39773, 39778), False, 'import time\n'), ((39787, 39818), 'multi_sample_factory.utils.utils.log.debug', 'log.debug', (['"""Closing workers..."""'], {}), "('Closing workers...')\n", (39796, 39818), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((39987, 40015), 'multi_sample_factory.utils.utils.log.debug', 'log.debug', (['"""Workers joined!"""'], {}), "('Workers joined!')\n", (39996, 40015), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((40141, 40172), 'multi_sample_factory.utils.utils.kill_processes', 'kill_processes', (['child_processes'], {}), '(child_processes)\n', (40155, 40172), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, 
list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((40250, 40306), 'multi_sample_factory.utils.utils.log.info', 'log.info', (['"""Collected %r, FPS: %.1f"""', 'self.env_steps', 'fps'], {}), "('Collected %r, FPS: %.1f', self.env_steps, fps)\n", (40258, 40306), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((40315, 40345), 'multi_sample_factory.utils.utils.log.info', 'log.info', (['"""Timing: %s"""', 'timing'], {}), "('Timing: %s', timing)\n", (40323, 40345), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((40505, 40520), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (40515, 40520), False, 'import time\n'), ((40529, 40546), 'multi_sample_factory.utils.utils.log.info', 'log.info', (['"""Done!"""'], {}), "('Done!')\n", (40537, 40546), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((19771, 19806), 'multi_sample_factory.envs.env_utils.get_default_reward_shaping', 'get_default_reward_shaping', (['tmp_env'], {}), '(tmp_env)\n', (19797, 19806), False, 'from multi_sample_factory.envs.env_utils import get_default_reward_shaping\n'), ((21437, 21456), 'collections.deque', 'deque', (['[]'], {'maxlen': '(5)'}), '([], maxlen=5)\n', (21442, 21456), False, 'from collections import deque\n'), ((21820, 21850), 'multi_sample_factory.utils.utils.ensure_dir_exists', 'ensure_dir_exists', (['summary_dir'], {}), '(summary_dir)\n', (21837, 21850), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((21883, 21924), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['summary_dir'], {'flush_secs': '(20)'}), '(summary_dir, flush_secs=20)\n', (21896, 21924), False, 'from tensorboardX import SummaryWriter\n'), ((22293, 22333), 'json.dump', 'json.dump', (['cfg_dict', 'json_file'], {'indent': '(2)'}), '(cfg_dict, json_file, indent=2)\n', (22302, 22333), False, 'import json\n'), ((22408, 22436), 'multi_sample_factory.utils.utils.experiment_dir', 'experiment_dir', ([], {'cfg': 'self.cfg'}), '(cfg=self.cfg)\n', (22422, 22436), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((22463, 22487), 'multi_sample_factory.utils.utils.experiment_dir', 'experiment_dir', (['self.cfg'], {}), '(self.cfg)\n', (22477, 22487), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((23861, 23872), 'time.time', 'time.time', ([], {}), '()\n', (23870, 23872), False, 'import time\n'), ((26166, 26190), 
'faster_fifo.Queue', 'MpQueue', (['(2 * 1000 * 1000)'], {}), '(2 * 1000 * 1000)\n', (26173, 26190), True, 'from faster_fifo import Queue as MpQueue\n'), ((26583, 26605), 'multiprocessing.Lock', 'multiprocessing.Lock', ([], {}), '()\n', (26603, 26605), False, 'import multiprocessing\n'), ((26688, 26715), 'multiprocessing.Condition', 'multiprocessing.Condition', ([], {}), '()\n', (26713, 26715), False, 'import multiprocessing\n'), ((26864, 27101), 'multi_sample_factory.algorithms.appo.learner.LearnerWorker', 'LearnerWorker', (['learner_idx', 'policy_id', 'self.cfg', 'self.obs_space', 'self.action_space', 'self.report_queue', 'policy_worker_queues[policy_id]', 'self.traj_buffers', 'policy_locks[policy_id]', 'resume_experience_collection_cv[policy_id]'], {}), '(learner_idx, policy_id, self.cfg, self.obs_space, self.\n action_space, self.report_queue, policy_worker_queues[policy_id], self.\n traj_buffers, policy_locks[policy_id], resume_experience_collection_cv[\n policy_id])\n', (26877, 27101), False, 'from multi_sample_factory.algorithms.appo.learner import LearnerWorker\n'), ((27501, 27510), 'faster_fifo.Queue', 'MpQueue', ([], {}), '()\n', (27508, 27510), True, 'from faster_fifo import Queue as MpQueue\n'), ((32838, 32849), 'time.time', 'time.time', ([], {}), '()\n', (32847, 32849), False, 'import time\n'), ((34094, 34150), 'multi_sample_factory.utils.utils.log.debug', 'log.debug', (['"""Avg episode reward: %r"""', 'policy_reward_stats'], {}), "('Avg episode reward: %r', policy_reward_stats)\n", (34103, 34150), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((37632, 37655), 'multi_sample_factory.utils.utils.done_filename', 'done_filename', (['self.cfg'], {}), '(self.cfg)\n', (37645, 37655), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((37670, 37756), 'multi_sample_factory.utils.utils.log.warning', 'log.warning', (['"""Training already finished! Remove "done" file to continue training"""'], {}), '(\n \'Training already finished! 
Remove "done" file to continue training\')\n', (37681, 37756), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((39897, 39913), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (39907, 39913), False, 'import time\n'), ((4567, 4594), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (4592, 4594), False, 'import multiprocessing\n'), ((22242, 22260), 'multi_sample_factory.utils.utils.cfg_file', 'cfg_file', (['self.cfg'], {}), '(self.cfg)\n', (22250, 22260), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((27665, 27919), 'multi_sample_factory.algorithms.appo.policy_worker.PolicyWorker', 'PolicyWorker', (['i', 'policy_id', 'self.cfg', 'self.obs_space', 'self.action_space', 'self.traj_buffers', 'policy_queue', 'actor_queues', 'self.report_queue', 'policy_worker_queues[policy_id][i]', 'policy_locks[policy_id]', 'resume_experience_collection_cv[policy_id]'], {}), '(i, policy_id, self.cfg, self.obs_space, self.action_space,\n self.traj_buffers, policy_queue, actor_queues, self.report_queue,\n policy_worker_queues[policy_id][i], policy_locks[policy_id],\n resume_experience_collection_cv[policy_id])\n', (27677, 27919), False, 'from multi_sample_factory.algorithms.appo.policy_worker import PolicyWorker\n'), ((29364, 29465), 'multi_sample_factory.utils.utils.log.debug', 'log.debug', (['"""Waiting for policy worker %d-%d to finish initialization..."""', 'policy_id', 'w.worker_idx'], {}), "('Waiting for policy worker %d-%d to finish initialization...',\n policy_id, w.worker_idx)\n", (29373, 29465), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((29503, 29573), 'multi_sample_factory.utils.utils.log.debug', 'log.debug', (['"""Policy worker %d-%d initialized!"""', 'policy_id', 'w.worker_idx'], {}), "('Policy worker %d-%d initialized!', policy_id, w.worker_idx)\n", (29512, 29573), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((30302, 30324), 'multi_sample_factory.algorithms.appo.appo_utils.iterate_recursively', 'iterate_recursively', (['s'], {}), '(s)\n', (30321, 30324), False, 'from multi_sample_factory.algorithms.appo.appo_utils import make_env_func, iterate_recursively, set_global_cuda_envvars\n'), ((32638, 32649), 'time.time', 'time.time', ([], {}), '()\n', (32647, 32649), False, 'import time\n'), ((35457, 35497), 'math.isnan', 'math.isnan', (['sample_throughput[policy_id]'], {}), '(sample_throughput[policy_id])\n', (35467, 35497), False, 'import math\n'), ((21753, 21781), 'multi_sample_factory.utils.utils.experiment_dir', 'experiment_dir', ([], {'cfg': 'self.cfg'}), '(cfg=self.cfg)\n', (21767, 21781), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, 
list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((24381, 24392), 'time.time', 'time.time', ([], {}), '()\n', (24390, 24392), False, 'import time\n'), ((25091, 25102), 'time.time', 'time.time', ([], {}), '()\n', (25100, 25102), False, 'import time\n'), ((25364, 25451), 'multi_sample_factory.utils.utils.log.error', 'log.error', (['"""Worker %d is stuck or failed (%.3f). Reset!"""', 'w.worker_idx', 'time_passed'], {}), "('Worker %d is stuck or failed (%.3f). Reset!', w.worker_idx,\n time_passed)\n", (25373, 25451), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((25837, 25848), 'time.time', 'time.time', ([], {}), '()\n', (25846, 25848), False, 'import time\n'), ((26491, 26511), 'torch.multiprocessing.JoinableQueue', 'TorchJoinableQueue', ([], {}), '()\n', (26509, 26511), True, 'from torch.multiprocessing import JoinableQueue as TorchJoinableQueue\n'), ((31113, 31133), 'collections.deque', 'deque', (['[]'], {'maxlen': '(50)'}), '([], maxlen=50)\n', (31118, 31133), False, 'from collections import deque\n'), ((35846, 35870), 'numpy.mean', 'np.mean', (['stat[policy_id]'], {}), '(stat[policy_id])\n', (35853, 35870), True, 'import numpy as np\n'), ((38824, 38865), 'multi_sample_factory.utils.utils.log.exception', 'log.exception', (['"""Exception in driver loop"""'], {}), "('Exception in driver loop')\n", (38837, 38865), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((38970, 39039), 'multi_sample_factory.utils.utils.log.warning', 'log.warning', (['"""Keyboard interrupt detected in driver loop, exiting..."""'], {}), "('Keyboard interrupt detected in driver loop, exiting...')\n", (38981, 39039), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((40409, 40432), 'multi_sample_factory.utils.utils.done_filename', 'done_filename', (['self.cfg'], {}), '(self.cfg)\n', (40422, 40432), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((38512, 38523), 'time.time', 'time.time', ([], {}), '()\n', (38521, 38523), False, 'import time\n'), ((30437, 30469), 'collections.deque', 'deque', ([], {'maxlen': 'self.cfg.stats_avg'}), '(maxlen=self.cfg.stats_avg)\n', (30442, 30469), False, 'from collections import deque\n'), ((35266, 35280), 'numpy.mean', 'np.mean', (['value'], {}), '(value)\n', (35273, 35280), True, 'import numpy as np\n'), ((38388, 38399), 'time.time', 'time.time', ([], {}), '()\n', (38397, 38399), False, 'import time\n'), ((34052, 34073), 'numpy.mean', 'np.mean', (['reward_stats'], {}), '(reward_stats)\n', (34059, 34073), True, 'import numpy as np\n')]
|
"""
Module description:
"""
__version__ = '0.3.1'
__author__ = '<NAME>, <NAME>'
__email__ = '<EMAIL>, <EMAIL>'
from types import SimpleNamespace
import typing as t
import numpy as np
import logging as pylog
from elliot.utils import logging
from hyperopt import STATUS_OK
class ModelCoordinator(object):
"""
This class handles the selection of hyperparameters for the hyperparameter tuning realized with HyperOpt.
"""
def __init__(self, data_objs, base: SimpleNamespace, params, model_class: t.ClassVar, test_fold_index: int):
"""
        The constructor creates a placeholder for the recommender model.
        :param data_objs: list of data objects, one per train/validation fold, used during the exploration
        :param base: a SimpleNamespace that contains the configuration (main level) options
        :param params: a SimpleNamespace that contains the hyper-parameters of the model
        :param model_class: the class of the recommendation model
        :param test_fold_index: index of the outer test fold under evaluation
"""
self.logger = logging.get_logger(self.__class__.__name__, pylog.CRITICAL if base.config_test else pylog.DEBUG)
self.data_objs = data_objs
self.base = base
self.params = params
self.model_class = model_class
self.test_fold_index = test_fold_index
self.model_config_index = 0
def objective(self, args):
"""
        This function respects the signature and the return format required for HyperOpt optimization.
        :param args: a Dictionary that contains the new hyper-parameter values that will be used in the current run
        :return: a Dictionary with loss and status (required by HyperOpt), plus params and results (required by the framework)
"""
sampled_namespace = SimpleNamespace(**args)
model_params = SimpleNamespace(**self.params[0].__dict__)
self.logger.info("Hyperparameter tuning exploration:")
for (k, v) in sampled_namespace.__dict__.items():
model_params.__setattr__(k, v)
self.logger.info(f"{k} set to {model_params.__getattribute__(k)}")
losses = []
results = []
for trainval_index, data_obj in enumerate(self.data_objs):
self.logger.info(f"Exploration: Hyperparameter exploration number {self.model_config_index+1}")
self.logger.info(f"Exploration: Test Fold exploration number {self.test_fold_index+1}")
self.logger.info(f"Exploration: Train-Validation Fold exploration number {trainval_index+1}")
model = self.model_class(data=data_obj, config=self.base, params=model_params)
model.train()
losses.append(model.get_loss())
results.append(model.get_results())
self.model_config_index += 1
loss = np.average(losses)
results = self._average_results(results)
return {
'loss': loss,
'status': STATUS_OK,
'params': model.get_params(),
'val_results': {k: result_dict["val_results"] for k, result_dict in results.items()},
'val_statistical_results': {k: result_dict["val_statistical_results"] for k, result_dict in model.get_results().items()},
'test_results': {k: result_dict["test_results"] for k, result_dict in results.items()},
'test_statistical_results': {k: result_dict["test_statistical_results"] for k, result_dict in model.get_results().items()},
'name': model.name
}
def single(self):
"""
        This function evaluates the single (fixed) hyper-parameter configuration passed at construction time,
        while respecting the return format required for HyperOpt optimization.
        :return: a Dictionary with loss and status (required by HyperOpt), plus params and results (required by the framework)
"""
self.logger.info("Hyperparameters:")
for k, v in self.params.__dict__.items():
self.logger.info(f"{k} set to {v}")
losses = []
results = []
for trainval_index, data_obj in enumerate(self.data_objs):
self.logger.info(f"Exploration: Test Fold exploration number {self.test_fold_index+1}")
self.logger.info(f"Exploration: Train-Validation Fold exploration number {trainval_index+1}")
model = self.model_class(data=data_obj, config=self.base, params=self.params)
model.train()
losses.append(model.get_loss())
results.append(model.get_results())
loss = np.average(losses)
results = self._average_results(results)
return {
'loss': loss,
'status': STATUS_OK,
'params': model.get_params(),
'val_results': {k: result_dict["val_results"] for k, result_dict in results.items()},
'val_statistical_results': {k: result_dict["val_statistical_results"] for k, result_dict in model.get_results().items()},
'test_results': {k: result_dict["test_results"] for k, result_dict in results.items()},
'test_statistical_results': {k: result_dict["test_statistical_results"] for k, result_dict in model.get_results().items()},
'name': model.name
}
@staticmethod
def _average_results(results_list):
ks = list(results_list[0].keys())
eval_result_types = ["val_results", "test_results"]
metrics = list(results_list[0][ks[0]]["val_results"].keys())
return {k: {type_: {metric: np.average([fold_result[k][type_][metric]
for fold_result in results_list])
for metric in metrics}
for type_ in eval_result_types}
for k in ks}
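

def _example_tuning_run(coordinator, max_evals=10):
    # Editor's hedged sketch (not part of the original framework): shows how a
    # ModelCoordinator instance could be handed to HyperOpt's fmin. The search
    # space below ("lr") is a hypothetical example, not something defined here.
    from hyperopt import fmin, tpe, hp, Trials
    space = {"lr": hp.loguniform("lr", -7, -2)}
    trials = Trials()
    best = fmin(
        fn=coordinator.objective,
        space=space,
        algo=tpe.suggest,
        max_evals=max_evals,
        trials=trials,
    )
    return best, trials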
|
[
"numpy.average",
"types.SimpleNamespace",
"elliot.utils.logging.get_logger"
] |
[((916, 1017), 'elliot.utils.logging.get_logger', 'logging.get_logger', (['self.__class__.__name__', '(pylog.CRITICAL if base.config_test else pylog.DEBUG)'], {}), '(self.__class__.__name__, pylog.CRITICAL if base.\n config_test else pylog.DEBUG)\n', (934, 1017), False, 'from elliot.utils import logging\n'), ((1681, 1704), 'types.SimpleNamespace', 'SimpleNamespace', ([], {}), '(**args)\n', (1696, 1704), False, 'from types import SimpleNamespace\n'), ((1728, 1770), 'types.SimpleNamespace', 'SimpleNamespace', ([], {}), '(**self.params[0].__dict__)\n', (1743, 1770), False, 'from types import SimpleNamespace\n'), ((2701, 2719), 'numpy.average', 'np.average', (['losses'], {}), '(losses)\n', (2711, 2719), True, 'import numpy as np\n'), ((4500, 4518), 'numpy.average', 'np.average', (['losses'], {}), '(losses)\n', (4510, 4518), True, 'import numpy as np\n'), ((5462, 5537), 'numpy.average', 'np.average', (['[fold_result[k][type_][metric] for fold_result in results_list]'], {}), '([fold_result[k][type_][metric] for fold_result in results_list])\n', (5472, 5537), True, 'import numpy as np\n')]
|
import os, sys
import torch
from torch.utils.data import Dataset
import imageio as io
import cv2
from sklearn.model_selection import StratifiedKFold
import numpy as np
from numpy.lib.stride_tricks import as_strided
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
def imread(p):
img = io.imread(p)
# opencv faster
img = cv2.resize(img, (224, 224),
interpolation=cv2.INTER_CUBIC)
#img = imresize(img, (224, 224), 3)
img = img.astype('float32')/255.0
img -= mean
img /= std
return np.transpose(img, (2, 0, 1))
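

def _example_preprocess(path):
    # Editor's hedged sketch (not part of the original script): preprocess a single
    # frame; `path` is a placeholder for any RGB image file readable by imageio.
    x = imread(path)             # float32 array, shape (3, 224, 224), ImageNet-normalized
    return torch.from_numpy(x)  # CHW tensor ready to stack into a clip

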
class ImageList(Dataset):
def __init__(self, root, videos):
self.root = root
self.videos = videos
def __getitem__(self, index):
vid = self.videos[index] # path to video folder (of images)
path = os.path.join(self.root, vid)
img_list = os.listdir(path)
img_list = [os.path.join(path, name)
for name in sorted(img_list)]
video = np.stack([imread(p) for p in img_list])
return torch.from_numpy(video)
def __len__(self):
return len(self.videos)
class VideoList(Dataset):
def __init__(self, root, videos, for_train=False, seq_length=16):
self.root = root
self.videos = videos
self.for_train = for_train
self.seq_length = seq_length
# pick randomly 1 sequence per video to train,
# pick evenly 20 sequences per video to validate/test
def __getitem__(self, index):
name, c = self.videos[index]
path = os.path.join(self.root, name + '.npy')
feat = np.load(path)
n, d = feat.shape # d=2048
if self.for_train:
start = np.random.randint(0, n-self.seq_length)
feat = feat[start:start+self.seq_length]
feat = feat[None, ...] # RxLxD, R = 1
frame_indexes = np.array(range(start, start+self.seq_length))
frame_indexes = frame_indexes[None, ...]
else:
R = 20 # Sample the 20 sequences
S = (n-self.seq_length) // (R-1)
indexes = np.array(range(n))
sn, sd = feat.strides
i_sn, = indexes.strides
feat = as_strided(feat, shape=(R, self.seq_length, d), strides=(S*sn, sn, sd))
indexes = as_strided(indexes, shape=(R, self.seq_length), strides=(S*i_sn, i_sn))
feat = np.ascontiguousarray(feat) # RxLxD, R = 20
frame_indexes = np.ascontiguousarray(indexes) # RxL
return feat, c, name, frame_indexes
def __len__(self):
return len(self.videos)
def collate(batch):
x, y, z, w = zip(*batch)
x = torch.cat([torch.from_numpy(a) for a in x]) # (bR)xLxD
w = torch.cat([torch.from_numpy(a) for a in w]) # (bR)xL
x = x.permute(1, 0, 2).contiguous() # Lx(bR)xD
y = torch.LongTensor(y)
return x, y, z, w
def class_dict(ids_file):
class2idx = {}
with open(ids_file) as f:
for line in f:
c, name = line.split()
class2idx[name] = int(c) - 1
return class2idx
def video_list(data_file, class2idx):
data = []
with open(data_file) as f:
for line in f:
name = line.split()[0]
name = os.path.splitext(name)[0]
c = name.split('/')[0]
c = class2idx[c]
data.append((name, c))
return data
def train_split(data, n_splits=5, select=0, seed=2017):
labels = np.array([d[1] for d in data])
rng = np.random.RandomState(seed)
skf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=rng)
cv = list(skf.split(labels, labels))
train_index, valid_index = cv[select]
train_data = [data[idx] for idx in train_index]
valid_data = [data[idx] for idx in valid_index]
return train_data, valid_data
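

def _example_loader(root, videos, batch_size=4):
    # Editor's hedged sketch (not part of the original script): wrap the pre-extracted
    # features in a DataLoader using the custom collate above. `root` and `videos` are
    # placeholders for a feature directory and a list of (name, class) tuples.
    from torch.utils.data import DataLoader
    dataset = VideoList(root, videos, for_train=False, seq_length=16)
    return DataLoader(dataset, batch_size=batch_size, shuffle=False,
                      collate_fn=collate, num_workers=2)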
|
[
"numpy.load",
"torch.LongTensor",
"imageio.imread",
"numpy.ascontiguousarray",
"numpy.transpose",
"numpy.random.RandomState",
"numpy.random.randint",
"numpy.array",
"sklearn.model_selection.StratifiedKFold",
"numpy.lib.stride_tricks.as_strided",
"os.path.splitext",
"os.path.join",
"os.listdir",
"cv2.resize",
"torch.from_numpy"
] |
[((300, 312), 'imageio.imread', 'io.imread', (['p'], {}), '(p)\n', (309, 312), True, 'import imageio as io\n'), ((343, 401), 'cv2.resize', 'cv2.resize', (['img', '(224, 224)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(img, (224, 224), interpolation=cv2.INTER_CUBIC)\n', (353, 401), False, 'import cv2\n'), ((534, 562), 'numpy.transpose', 'np.transpose', (['img', '(2, 0, 1)'], {}), '(img, (2, 0, 1))\n', (546, 562), True, 'import numpy as np\n'), ((2859, 2878), 'torch.LongTensor', 'torch.LongTensor', (['y'], {}), '(y)\n', (2875, 2878), False, 'import torch\n'), ((3473, 3503), 'numpy.array', 'np.array', (['[d[1] for d in data]'], {}), '([d[1] for d in data])\n', (3481, 3503), True, 'import numpy as np\n'), ((3514, 3541), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (3535, 3541), True, 'import numpy as np\n'), ((3552, 3618), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'n_splits', 'shuffle': '(True)', 'random_state': 'rng'}), '(n_splits=n_splits, shuffle=True, random_state=rng)\n', (3567, 3618), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((800, 828), 'os.path.join', 'os.path.join', (['self.root', 'vid'], {}), '(self.root, vid)\n', (812, 828), False, 'import os, sys\n'), ((848, 864), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (858, 864), False, 'import os, sys\n'), ((1027, 1050), 'torch.from_numpy', 'torch.from_numpy', (['video'], {}), '(video)\n', (1043, 1050), False, 'import torch\n'), ((1526, 1564), 'os.path.join', 'os.path.join', (['self.root', "(name + '.npy')"], {}), "(self.root, name + '.npy')\n", (1538, 1564), False, 'import os, sys\n'), ((1581, 1594), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (1588, 1594), True, 'import numpy as np\n'), ((885, 909), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (897, 909), False, 'import os, sys\n'), ((1680, 1721), 'numpy.random.randint', 'np.random.randint', (['(0)', '(n - self.seq_length)'], {}), '(0, n - self.seq_length)\n', (1697, 1721), True, 'import numpy as np\n'), ((2210, 2283), 'numpy.lib.stride_tricks.as_strided', 'as_strided', (['feat'], {'shape': '(R, self.seq_length, d)', 'strides': '(S * sn, sn, sd)'}), '(feat, shape=(R, self.seq_length, d), strides=(S * sn, sn, sd))\n', (2220, 2283), False, 'from numpy.lib.stride_tricks import as_strided\n'), ((2304, 2377), 'numpy.lib.stride_tricks.as_strided', 'as_strided', (['indexes'], {'shape': '(R, self.seq_length)', 'strides': '(S * i_sn, i_sn)'}), '(indexes, shape=(R, self.seq_length), strides=(S * i_sn, i_sn))\n', (2314, 2377), False, 'from numpy.lib.stride_tricks import as_strided\n'), ((2396, 2422), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['feat'], {}), '(feat)\n', (2416, 2422), True, 'import numpy as np\n'), ((2467, 2496), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['indexes'], {}), '(indexes)\n', (2487, 2496), True, 'import numpy as np\n'), ((2674, 2693), 'torch.from_numpy', 'torch.from_numpy', (['a'], {}), '(a)\n', (2690, 2693), False, 'import torch\n'), ((2740, 2759), 'torch.from_numpy', 'torch.from_numpy', (['a'], {}), '(a)\n', (2756, 2759), False, 'import torch\n'), ((3261, 3283), 'os.path.splitext', 'os.path.splitext', (['name'], {}), '(name)\n', (3277, 3283), False, 'import os, sys\n')]
|
import matplotlib
matplotlib.use('Agg')
from Swing.util.BoxPlot import BoxPlot
from matplotlib.backends.backend_pdf import PdfPages
from scipy import stats
import pdb
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import sys
import os
import time
from Swing.util.mplstyle import style1
import seaborn as sns
from palettable.colorbrewer.qualitative import Set1_3
def get_df(df, fp, min_lag, max_lag, td_window):
new_df = df[(df['file_path'] == fp) & (df['min_lag'] == min_lag) & (df['max_lag'] == max_lag) & (df['td_window'] == td_window)]
return(new_df)
def read_tdr_results(folder_list, folder_str):
agg_df = pd.DataFrame()
for input_folder in folder_list:
for file_path in os.listdir(input_folder):
if folder_str in file_path:
df = pd.read_csv(input_folder+file_path,sep='\t', engine='python')
# check if the columns are misaligned.
if type(df['permutation_n'].iloc[0]) is str:
new_col = df.columns.tolist()
new_col.pop(0)
new_df = df.iloc[:,0:len(df.iloc[0])-1]
new_df.columns = new_col
df=new_df
agg_df = agg_df.append(df)
return(agg_df)
input_folder_list = ["/projects/p20519/roller_output/gnw/RandomForest/"]
test_statistic = ['aupr', 'auroc']
save_tag = "window_scan"
n_trials = 100
start = time.time()
agg_df = read_tdr_results(input_folder_list, folder_str = "2017-09")
#agg_df.to_pickle("Dionesus_window_scan.pkl")
#agg_df = pd.read_pickle("Dionesus_window_scan.pkl")
end = time.time()
stat = 'aupr'
network_list = agg_df['file_path'].unique().tolist()
window_sizes = range(1,22)
outer_list = []
overall_df = pd.DataFrame()
for td_window in window_sizes:
inner_list = []
for network in network_list:
baseline = get_df(agg_df, network, 0, 0, 21)
if len(baseline) == 0:
continue
if 21-td_window > 2:
max_lag = 3
else:
max_lag = 21-td_window
if (td_window == 21):
min_lag = 0
max_lag = 0
else:
min_lag = 1
comparisons = get_df(agg_df, network, min_lag, max_lag, td_window)
if len(comparisons) == 0:
continue
stat = 'aupr'
baseline_mean=baseline[stat].mean()
comparisons['percent_{}'.format(stat)] = ((comparisons[stat]-baseline_mean)/baseline_mean)*100
stat = 'auroc'
baseline_mean=baseline[stat].mean()
comparisons['percent_{}'.format(stat)] = ((comparisons[stat]-baseline_mean)/baseline_mean)*100
overall_df = overall_df.append(comparisons.iloc[0:50,:], ignore_index = True)
outer_list.append(inner_list)
stat = 'percent_aupr'
colors = []
for w in range(1, 22):  # one color per td_window category (1..21) drawn in the boxplots below
test_data = overall_df[overall_df.td_window == w]
baseline = overall_df[overall_df.td_window == 21]
baseline_mean = baseline[stat].mean()
diff = np.mean(test_data[stat])-baseline_mean
if stats.ttest_ind(test_data[stat], baseline[stat])[1] < 0.05:
if diff > 0:
colors.append(Set1_3.mpl_colors[0])
else:
colors.append(Set1_3.mpl_colors[1])
else:
colors.append('grey')
fig, ax = plt.subplots(figsize=(11,7))
sns.boxplot(ax = ax, data = overall_df, x = 'td_window', y = 'percent_aupr', palette=colors)
xlabs = ax.get_xticks()
ax.set_xticklabels(['{:d}'.format(x+1) for x in xlabs])
ax.set_ylabel('Percent Difference AUPR')
ax.set_xlabel('Window Size')
fig.savefig('RandomForest_10_AUPR_window_scan.png')
fig, ax = plt.subplots(figsize=(11,7))
sns.boxplot(ax = ax, data = overall_df, x = 'td_window', y = 'percent_auroc', palette=colors)
xlabs = ax.get_xticks()
ax.set_xticklabels(['{:d}'.format(x+1) for x in xlabs])
ax.set_ylabel('Percent Difference AUROC')
ax.set_xlabel('Window Size')
fig.savefig('RandomForest_10_AUROC_window_scan.png')
|
[
"pandas.DataFrame",
"pandas.read_csv",
"scipy.stats.ttest_ind",
"time.time",
"seaborn.boxplot",
"matplotlib.use",
"numpy.mean",
"matplotlib.pyplot.subplots",
"os.listdir"
] |
[((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((1417, 1428), 'time.time', 'time.time', ([], {}), '()\n', (1426, 1428), False, 'import time\n'), ((1603, 1614), 'time.time', 'time.time', ([], {}), '()\n', (1612, 1614), False, 'import time\n'), ((1740, 1754), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1752, 1754), True, 'import pandas as pd\n'), ((3270, 3299), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(11, 7)'}), '(figsize=(11, 7))\n', (3282, 3299), True, 'import matplotlib.pyplot as plt\n'), ((3299, 3387), 'seaborn.boxplot', 'sns.boxplot', ([], {'ax': 'ax', 'data': 'overall_df', 'x': '"""td_window"""', 'y': '"""percent_aupr"""', 'palette': 'colors'}), "(ax=ax, data=overall_df, x='td_window', y='percent_aupr',\n palette=colors)\n", (3310, 3387), True, 'import seaborn as sns\n'), ((3605, 3634), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(11, 7)'}), '(figsize=(11, 7))\n', (3617, 3634), True, 'import matplotlib.pyplot as plt\n'), ((3634, 3723), 'seaborn.boxplot', 'sns.boxplot', ([], {'ax': 'ax', 'data': 'overall_df', 'x': '"""td_window"""', 'y': '"""percent_auroc"""', 'palette': 'colors'}), "(ax=ax, data=overall_df, x='td_window', y='percent_auroc',\n palette=colors)\n", (3645, 3723), True, 'import seaborn as sns\n'), ((650, 664), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (662, 664), True, 'import pandas as pd\n'), ((727, 751), 'os.listdir', 'os.listdir', (['input_folder'], {}), '(input_folder)\n', (737, 751), False, 'import os\n'), ((2982, 3006), 'numpy.mean', 'np.mean', (['test_data[stat]'], {}), '(test_data[stat])\n', (2989, 3006), True, 'import numpy as np\n'), ((3028, 3076), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (['test_data[stat]', 'baseline[stat]'], {}), '(test_data[stat], baseline[stat])\n', (3043, 3076), False, 'from scipy import stats\n'), ((812, 876), 'pandas.read_csv', 'pd.read_csv', (['(input_folder + file_path)'], {'sep': '"""\t"""', 'engine': '"""python"""'}), "(input_folder + file_path, sep='\\t', engine='python')\n", (823, 876), True, 'import pandas as pd\n')]
|
import numpy as np
from .wavelength import wave_log10
def center2edge(x):
x = np.asarray(x)
dx = np.diff(x)
return np.hstack((x[0] - .5 * dx[0], x[:-1] + .5 * dx, x[-1] + .5 * dx[-1]))
def rebin(wave, flux=None, flux_err=None, mask=None, wave_new=None):
""" Rebin spectrum to a new wavelength grid
Parameters
----------
wave: array
old wavelength
flux: array
old flux
flux_err: array (optional)
old flux error
mask: array (optional)
old mask, True for bad.
wave_new:
new wavelength. if None, use log10 wavelength.
Return
------
re-binned (flux, [flux_err], [mask])
"""
wave = np.asarray(wave)
if wave_new is None:
wave_new = wave_log10(wave)
else:
wave_new = np.asarray(wave_new)
wave_edge = center2edge(wave)
wave_new_edge = center2edge(wave_new)
# I = interp1d(wave_edge[:-1], np.arange(len(wave)), kind="linear",
# bounds_error=False)
# wave_new_edge_pos = I(wave_new_edge) # accurate position projected to old
wave_new_edge_pos = np.interp(wave_new_edge,
wave_edge[:-1], np.arange(len(wave)),
left=np.nan, right=np.nan)
wave_new_edge_pos2 = np.array(
[wave_new_edge_pos[:-1], wave_new_edge_pos[1:]]).T # slipt to lo & hi
wave_new_ipix = np.floor(wave_new_edge_pos2).astype(int) # integer part
wave_new_frac = wave_new_edge_pos2 - wave_new_ipix # fraction part
flags = np.any(np.isnan(wave_new_edge_pos2), axis=1)
result = []
# rebin flux
if flux is not None:
flux = np.asarray(flux)
assert flux.shape == wave.shape
flux_new = np.zeros_like(wave_new, dtype=float)
for ipix, this_flag in enumerate(flags):
if not this_flag:
flux_new[ipix] = np.sum(
flux[wave_new_ipix[ipix, 0]:wave_new_ipix[ipix, 1]]) \
- flux[wave_new_ipix[ipix, 0]] * wave_new_frac[ipix, 0] \
+ flux[wave_new_ipix[ipix, 1]] * wave_new_frac[ipix, 1]
else:
flux_new[ipix] = np.nan
result.append(flux_new)
# rebin flux_err
if flux_err is not None:
flux_err2 = np.square(np.asarray(flux_err, dtype=float))
assert flux_err2.shape == wave.shape
flux_err2_new = np.zeros_like(wave_new, dtype=float)
for ipix, this_flag in enumerate(flags):
if not this_flag:
flux_err2_new[ipix] = np.sum(
flux_err2[wave_new_ipix[ipix, 0]:wave_new_ipix[ipix, 1]]) \
- flux_err2[wave_new_ipix[ipix, 0]] * wave_new_frac[ipix, 0] \
+ flux_err2[wave_new_ipix[ipix, 1]] * wave_new_frac[ipix, 1]
else:
flux_err2_new[ipix] = np.nan
result.append(np.sqrt(flux_err2_new))
# rebin mask
if mask is not None:
mask = np.asarray(mask)
assert mask.shape == wave.shape
mask_new = np.ones_like(wave_new, dtype=bool)
for ipix, this_flag in enumerate(flags):
if not this_flag:
mask_new[ipix] = np.any(
mask[wave_new_ipix[ipix, 0]:wave_new_ipix[ipix, 1] + 1])
result.append(mask_new)
if len(result) == 1:
return result[0]
elif len(result) > 1:
return result
else:
raise ValueError("@rebin: what to rebin?")
def _test():
wave, flux, wave_new = np.arange(10), np.ones(10), np.arange(0, 10, 2) + 0.5
flux[5] += 1
flux_err = flux
mask = ~ (flux > 0)
mask[5] = True
print("========================")
print(wave, flux)
print("========================")
print(wave, rebin(wave, flux, wave_new=wave_new))
print("========================")
print(wave_new, rebin(
wave, flux=flux, flux_err=flux_err, mask=mask, wave_new=wave_new))
print("========================")
# figure()
# plot(wave, flux, 'x-')
# plot(wave_new, rebin(wave, flux, wave_new), 's-')
return
if __name__ == "__main__":
_test()
|
[
"numpy.zeros_like",
"numpy.ones_like",
"numpy.sum",
"numpy.asarray",
"numpy.floor",
"numpy.ones",
"numpy.isnan",
"numpy.hstack",
"numpy.any",
"numpy.diff",
"numpy.array",
"numpy.arange",
"numpy.sqrt"
] |
[((85, 98), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (95, 98), True, 'import numpy as np\n'), ((108, 118), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (115, 118), True, 'import numpy as np\n'), ((130, 202), 'numpy.hstack', 'np.hstack', (['(x[0] - 0.5 * dx[0], x[:-1] + 0.5 * dx, x[-1] + 0.5 * dx[-1])'], {}), '((x[0] - 0.5 * dx[0], x[:-1] + 0.5 * dx, x[-1] + 0.5 * dx[-1]))\n', (139, 202), True, 'import numpy as np\n'), ((687, 703), 'numpy.asarray', 'np.asarray', (['wave'], {}), '(wave)\n', (697, 703), True, 'import numpy as np\n'), ((794, 814), 'numpy.asarray', 'np.asarray', (['wave_new'], {}), '(wave_new)\n', (804, 814), True, 'import numpy as np\n'), ((1292, 1349), 'numpy.array', 'np.array', (['[wave_new_edge_pos[:-1], wave_new_edge_pos[1:]]'], {}), '([wave_new_edge_pos[:-1], wave_new_edge_pos[1:]])\n', (1300, 1349), True, 'import numpy as np\n'), ((1550, 1578), 'numpy.isnan', 'np.isnan', (['wave_new_edge_pos2'], {}), '(wave_new_edge_pos2)\n', (1558, 1578), True, 'import numpy as np\n'), ((1663, 1679), 'numpy.asarray', 'np.asarray', (['flux'], {}), '(flux)\n', (1673, 1679), True, 'import numpy as np\n'), ((1739, 1775), 'numpy.zeros_like', 'np.zeros_like', (['wave_new'], {'dtype': 'float'}), '(wave_new, dtype=float)\n', (1752, 1775), True, 'import numpy as np\n'), ((2400, 2436), 'numpy.zeros_like', 'np.zeros_like', (['wave_new'], {'dtype': 'float'}), '(wave_new, dtype=float)\n', (2413, 2436), True, 'import numpy as np\n'), ((2973, 2989), 'numpy.asarray', 'np.asarray', (['mask'], {}), '(mask)\n', (2983, 2989), True, 'import numpy as np\n'), ((3049, 3083), 'numpy.ones_like', 'np.ones_like', (['wave_new'], {'dtype': 'bool'}), '(wave_new, dtype=bool)\n', (3061, 3083), True, 'import numpy as np\n'), ((3515, 3528), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (3524, 3528), True, 'import numpy as np\n'), ((3530, 3541), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (3537, 3541), True, 'import numpy as np\n'), ((1402, 1430), 'numpy.floor', 'np.floor', (['wave_new_edge_pos2'], {}), '(wave_new_edge_pos2)\n', (1410, 1430), True, 'import numpy as np\n'), ((2296, 2329), 'numpy.asarray', 'np.asarray', (['flux_err'], {'dtype': 'float'}), '(flux_err, dtype=float)\n', (2306, 2329), True, 'import numpy as np\n'), ((2891, 2913), 'numpy.sqrt', 'np.sqrt', (['flux_err2_new'], {}), '(flux_err2_new)\n', (2898, 2913), True, 'import numpy as np\n'), ((3543, 3562), 'numpy.arange', 'np.arange', (['(0)', '(10)', '(2)'], {}), '(0, 10, 2)\n', (3552, 3562), True, 'import numpy as np\n'), ((3196, 3259), 'numpy.any', 'np.any', (['mask[wave_new_ipix[ipix, 0]:wave_new_ipix[ipix, 1] + 1]'], {}), '(mask[wave_new_ipix[ipix, 0]:wave_new_ipix[ipix, 1] + 1])\n', (3202, 3259), True, 'import numpy as np\n'), ((1888, 1947), 'numpy.sum', 'np.sum', (['flux[wave_new_ipix[ipix, 0]:wave_new_ipix[ipix, 1]]'], {}), '(flux[wave_new_ipix[ipix, 0]:wave_new_ipix[ipix, 1]])\n', (1894, 1947), True, 'import numpy as np\n'), ((2554, 2618), 'numpy.sum', 'np.sum', (['flux_err2[wave_new_ipix[ipix, 0]:wave_new_ipix[ipix, 1]]'], {}), '(flux_err2[wave_new_ipix[ipix, 0]:wave_new_ipix[ipix, 1]])\n', (2560, 2618), True, 'import numpy as np\n')]
|
import math
import numpy as np
import openmdao.api as om
from wisdem.commonse.akima import Akima
from wisdem.commonse.csystem import DirectionVector
from wisdem.commonse.utilities import cosd, sind # , linspace_with_deriv, interp_with_deriv, hstack, vstack
from wisdem.commonse.environment import LogWind, PowerWind, LinearWaves
# -----------------
# Helper Functions
# -----------------
# "Experiments on the Flow Past a Circular Cylinder at Very High Reynolds Numbers", Roshko
Re_pt = [
0.00001,
0.0001,
0.0010,
0.0100,
0.0200,
0.1220,
0.2000,
0.3000,
0.4000,
0.5000,
1.0000,
1.5000,
2.0000,
2.5000,
3.0000,
3.5000,
4.0000,
5.0000,
10.0000,
]
cd_pt = [
4.0000,
2.0000,
1.1100,
1.1100,
1.2000,
1.2000,
1.1700,
0.9000,
0.5400,
0.3100,
0.3800,
0.4600,
0.5300,
0.5700,
0.6100,
0.6400,
0.6700,
0.7000,
0.7000,
]
drag_spline = Akima(np.log10(Re_pt), cd_pt, delta_x=0.0) # exact akima because control points do not change
def cylinderDrag(Re):
"""Drag coefficient for a smooth circular cylinder.
Parameters
----------
Re : array_like
Reynolds number
Returns
-------
cd : array_like
drag coefficient (normalized by cylinder diameter)
"""
ReN = Re / 1.0e6
cd = np.zeros_like(Re)
dcd_dRe = np.zeros_like(Re)
idx = ReN > 0
cd[idx], dcd_dRe[idx], _, _ = drag_spline.interp(np.log10(ReN[idx]))
dcd_dRe[idx] /= Re[idx] * math.log(10) # chain rule
return cd, dcd_dRe
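

def _example_drag_check():
    # Editor's hedged example (not part of the original module): evaluate the
    # Roshko-based drag fit at a few Reynolds numbers spanning the drag crisis.
    Re = np.array([1.0e5, 3.0e5, 1.0e6, 5.0e6])
    cd, dcd_dRe = cylinderDrag(Re)
    print("Re:", Re)
    print("cd:", cd)
    return cd, dcd_dRe
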
# -----------------
# Components
# -----------------
class AeroHydroLoads(om.ExplicitComponent):
"""
Compute summed forces due to wind and wave loads.
Parameters
----------
windLoads_Px : numpy array[nPoints], [N/m]
distributed loads, force per unit length in x-direction
windLoads_Py : numpy array[nPoints], [N/m]
distributed loads, force per unit length in y-direction
windLoads_Pz : numpy array[nPoints], [N/m]
distributed loads, force per unit length in z-direction
windLoads_qdyn : numpy array[nPoints], [N/m**2]
dynamic pressure
windLoads_z : numpy array[nPoints], [m]
corresponding heights
windLoads_beta : float, [deg]
wind/wave angle relative to inertia c.s.
waveLoads_Px : numpy array[nPoints], [N/m]
distributed loads, force per unit length in x-direction
waveLoads_Py : numpy array[nPoints], [N/m]
distributed loads, force per unit length in y-direction
waveLoads_Pz : numpy array[nPoints], [N/m]
distributed loads, force per unit length in z-direction
waveLoads_qdyn : numpy array[nPoints], [N/m**2]
dynamic pressure
waveLoads_z : numpy array[nPoints], [m]
corresponding heights
waveLoads_beta : float, [deg]
wind/wave angle relative to inertia c.s.
z : numpy array[nPoints], [m]
locations along cylinder
yaw : float, [deg]
yaw angle
Returns
-------
Px : numpy array[nPoints], [N/m]
force per unit length in x-direction
Py : numpy array[nPoints], [N/m]
force per unit length in y-direction
Pz : numpy array[nPoints], [N/m]
force per unit length in z-direction
qdyn : numpy array[nPoints], [N/m**2]
dynamic pressure
"""
def initialize(self):
self.options.declare("nPoints")
def setup(self):
nPoints = self.options["nPoints"]
self.add_input("windLoads_Px", np.zeros(nPoints), units="N/m")
self.add_input("windLoads_Py", np.zeros(nPoints), units="N/m")
self.add_input("windLoads_Pz", np.zeros(nPoints), units="N/m")
self.add_input("windLoads_qdyn", np.zeros(nPoints), units="N/m**2")
self.add_input("windLoads_z", np.zeros(nPoints), units="m")
self.add_input("windLoads_beta", 0.0, units="deg")
self.add_input("waveLoads_Px", np.zeros(nPoints), units="N/m")
self.add_input("waveLoads_Py", np.zeros(nPoints), units="N/m")
self.add_input("waveLoads_Pz", np.zeros(nPoints), units="N/m")
self.add_input("waveLoads_qdyn", np.zeros(nPoints), units="N/m**2")
self.add_input("waveLoads_z", np.zeros(nPoints), units="m")
self.add_input("waveLoads_beta", 0.0, units="deg")
self.add_input("z", np.zeros(nPoints), units="m")
self.add_input("yaw", 0.0, units="deg")
self.add_output("Px", np.zeros(nPoints), units="N/m")
self.add_output("Py", np.zeros(nPoints), units="N/m")
self.add_output("Pz", np.zeros(nPoints), units="N/m")
self.add_output("qdyn", np.zeros(nPoints), units="N/m**2")
def compute(self, inputs, outputs):
z = inputs["z"]
windLoads = (
DirectionVector(inputs["windLoads_Px"], inputs["windLoads_Py"], inputs["windLoads_Pz"])
.inertialToWind(inputs["windLoads_beta"])
.windToYaw(inputs["yaw"])
)
waveLoads = (
DirectionVector(inputs["waveLoads_Px"], inputs["waveLoads_Py"], inputs["waveLoads_Pz"])
.inertialToWind(inputs["waveLoads_beta"])
.windToYaw(inputs["yaw"])
)
Px = np.interp(z, inputs["windLoads_z"], windLoads.x) + np.interp(z, inputs["waveLoads_z"], waveLoads.x)
Py = np.interp(z, inputs["windLoads_z"], windLoads.y) + np.interp(z, inputs["waveLoads_z"], waveLoads.y)
Pz = np.interp(z, inputs["windLoads_z"], windLoads.z) + np.interp(z, inputs["waveLoads_z"], waveLoads.z)
qdyn = np.interp(z, inputs["windLoads_z"], inputs["windLoads_qdyn"]) + np.interp(
z, inputs["waveLoads_z"], inputs["waveLoads_qdyn"]
)
# The following are redundant, at one point we will consolidate them to something that works for both cylinder (not using vartrees) and jacket (still using vartrees)
outputs["Px"] = Px
outputs["Py"] = Py
outputs["Pz"] = Pz
outputs["qdyn"] = qdyn
# -----------------
class CylinderWindDrag(om.ExplicitComponent):
"""
Compute drag forces on a cylindrical cylinder due to wind.
Parameters
----------
U : numpy array[nPoints], [m/s]
magnitude of wind speed
z : numpy array[nPoints], [m]
heights where wind speed was computed
d : numpy array[nPoints], [m]
corresponding diameter of cylinder section
beta_wind : float, [deg]
corresponding wind angles relative to inertial coordinate system
rho_air : float, [kg/m**3]
air density
    mu_air : float, [kg/(m*s)]
dynamic viscosity of air
cd_usr : float
User input drag coefficient to override Reynolds number based one
Returns
-------
windLoads_Px : numpy array[nPoints], [N/m]
distributed loads, force per unit length in x-direction
windLoads_Py : numpy array[nPoints], [N/m]
distributed loads, force per unit length in y-direction
windLoads_Pz : numpy array[nPoints], [N/m]
distributed loads, force per unit length in z-direction
windLoads_qdyn : numpy array[nPoints], [N/m**2]
dynamic pressure
windLoads_z : numpy array[nPoints], [m]
corresponding heights
windLoads_beta : float, [deg]
wind/wave angle relative to inertia c.s.
"""
def initialize(self):
self.options.declare("nPoints")
def setup(self):
nPoints = self.options["nPoints"]
# variables
self.add_input("U", np.zeros(nPoints), units="m/s")
self.add_input("z", np.zeros(nPoints), units="m")
self.add_input("d", np.zeros(nPoints), units="m")
self.add_input("beta_wind", 0.0, units="deg")
self.add_input("rho_air", 0.0, units="kg/m**3")
self.add_input("mu_air", 0.0, units="kg/(m*s)")
self.add_input("cd_usr", -1.0)
self.add_output("windLoads_Px", np.zeros(nPoints), units="N/m")
self.add_output("windLoads_Py", np.zeros(nPoints), units="N/m")
self.add_output("windLoads_Pz", np.zeros(nPoints), units="N/m")
self.add_output("windLoads_qdyn", np.zeros(nPoints), units="N/m**2")
self.add_output("windLoads_z", np.zeros(nPoints), units="m")
self.add_output("windLoads_beta", 0.0, units="deg")
arange = np.arange(nPoints)
self.declare_partials("windLoads_Px", "U", rows=arange, cols=arange)
self.declare_partials("windLoads_Px", "d", rows=arange, cols=arange)
self.declare_partials("windLoads_Py", "U", rows=arange, cols=arange)
self.declare_partials("windLoads_Py", "d", rows=arange, cols=arange)
self.declare_partials(["windLoads_Px", "windLoads_Py"], "cd_usr", method="fd")
self.declare_partials("windLoads_qdyn", "U", rows=arange, cols=arange)
self.declare_partials("windLoads_qdyn", "rho_air", method="fd")
self.declare_partials("windLoads_z", "z", rows=arange, cols=arange, val=1.0)
self.declare_partials("windLoads_beta", "beta_wind", val=1.0)
def compute(self, inputs, outputs):
rho = inputs["rho_air"]
U = inputs["U"]
d = inputs["d"]
mu = inputs["mu_air"]
beta = inputs["beta_wind"]
# dynamic pressure
q = 0.5 * rho * U ** 2
# Reynolds number and drag
if float(inputs["cd_usr"]) < 0.0:
Re = rho * U * d / mu
cd, dcd_dRe = cylinderDrag(Re)
else:
cd = inputs["cd_usr"]
Re = 1.0
dcd_dRe = 0.0
Fp = q * cd * d
# components of distributed loads
Px = Fp * cosd(beta)
Py = Fp * sind(beta)
Pz = 0 * Fp
# pack data
outputs["windLoads_Px"] = Px
outputs["windLoads_Py"] = Py
outputs["windLoads_Pz"] = Pz
outputs["windLoads_qdyn"] = q
outputs["windLoads_z"] = inputs["z"]
outputs["windLoads_beta"] = beta
def compute_partials(self, inputs, J):
# rename
rho = inputs["rho_air"]
U = inputs["U"]
d = inputs["d"]
mu = inputs["mu_air"]
beta = inputs["beta_wind"]
# dynamic pressure
q = 0.5 * rho * U ** 2
# Reynolds number and drag
if float(inputs["cd_usr"]) < 0.0:
Re = rho * U * d / mu
cd, dcd_dRe = cylinderDrag(Re)
else:
cd = inputs["cd_usr"]
Re = 1.0
dcd_dRe = 0.0
# derivatives
dq_dU = rho * U
const = (dq_dU * cd + q * dcd_dRe * rho * d / mu) * d
dPx_dU = const * cosd(beta)
dPy_dU = const * sind(beta)
const = (cd + dcd_dRe * Re) * q
dPx_dd = const * cosd(beta)
dPy_dd = const * sind(beta)
J["windLoads_Px", "U"] = dPx_dU
J["windLoads_Px", "d"] = dPx_dd
J["windLoads_Py", "U"] = dPy_dU
J["windLoads_Py", "d"] = dPy_dd
J["windLoads_qdyn", "U"] = dq_dU
# -----------------
class CylinderWaveDrag(om.ExplicitComponent):
"""
Compute drag forces on a cylindrical cylinder due to waves.
Parameters
----------
U : numpy array[nPoints], [m/s]
magnitude of wave speed
A : numpy array[nPoints], [m/s**2]
magnitude of wave acceleration
p : numpy array[nPoints], [N/m**2]
pressure oscillation
z : numpy array[nPoints], [m]
heights where wave speed was computed
d : numpy array[nPoints], [m]
corresponding diameter of cylinder section
beta_wave : float, [deg]
corresponding wave angles relative to inertial coordinate system
rho_water : float, [kg/m**3]
water density
    mu_water : float, [kg/(m*s)]
dynamic viscosity of water
cm : float
mass coefficient
cd_usr : float
User input drag coefficient to override Reynolds number based one
Returns
-------
waveLoads_Px : numpy array[nPoints], [N/m]
distributed loads, force per unit length in x-direction
waveLoads_Py : numpy array[nPoints], [N/m]
distributed loads, force per unit length in y-direction
waveLoads_Pz : numpy array[nPoints], [N/m]
distributed loads, force per unit length in z-direction
waveLoads_qdyn : numpy array[nPoints], [N/m**2]
dynamic pressure
waveLoads_pt : numpy array[nPoints], [N/m**2]
total (static+dynamic) pressure
waveLoads_z : numpy array[nPoints], [m]
corresponding heights
waveLoads_beta : float, [deg]
wind/wave angle relative to inertia c.s.
"""
def initialize(self):
self.options.declare("nPoints")
def setup(self):
nPoints = self.options["nPoints"]
# variables
self.add_input("U", np.zeros(nPoints), units="m/s")
self.add_input("A", np.zeros(nPoints), units="m/s**2")
self.add_input("p", np.zeros(nPoints), units="N/m**2")
self.add_input("z", np.zeros(nPoints), units="m")
self.add_input("d", np.zeros(nPoints), units="m")
self.add_input("beta_wave", 0.0, units="deg")
self.add_input("rho_water", 0.0, units="kg/m**3")
self.add_input("mu_water", 0.0, units="kg/(m*s)")
self.add_input("cm", 0.0)
self.add_input("cd_usr", -1.0)
self.add_output("waveLoads_Px", np.zeros(nPoints), units="N/m")
self.add_output("waveLoads_Py", np.zeros(nPoints), units="N/m")
self.add_output("waveLoads_Pz", np.zeros(nPoints), units="N/m")
self.add_output("waveLoads_qdyn", np.zeros(nPoints), units="N/m**2")
self.add_output("waveLoads_pt", np.zeros(nPoints), units="N/m**2")
self.add_output("waveLoads_z", np.zeros(nPoints), units="m")
self.add_output("waveLoads_beta", 0.0, units="deg")
self.declare_partials("*", "rho_water", method="fd")
arange = np.arange(nPoints)
self.declare_partials(["waveLoads_Px", "waveLoads_Py"], ["U", "d", "cm", "cd_usr", "beta_wave"], method="fd")
self.declare_partials("waveLoads_Px", "A", rows=arange, cols=arange)
self.declare_partials("waveLoads_Py", "A", rows=arange, cols=arange)
self.declare_partials("waveLoads_qdyn", "U", rows=arange, cols=arange)
self.declare_partials("waveLoads_pt", "U", rows=arange, cols=arange)
self.declare_partials("waveLoads_pt", "p", rows=arange, cols=arange, val=1.0)
self.declare_partials("waveLoads_z", "z", rows=arange, cols=arange, val=1.0)
self.declare_partials("waveLoads_beta", "beta_wave", val=1.0)
def compute(self, inputs, outputs):
# wlevel = inputs['wlevel']
# if wlevel > 0.0: wlevel *= -1.0
rho = inputs["rho_water"]
U = inputs["U"]
# U0 = inputs['U0']
d = inputs["d"]
# zrel= inputs['z']-wlevel
mu = inputs["mu_water"]
beta = inputs["beta_wave"]
# beta0 = inputs['beta0']
# dynamic pressure
q = 0.5 * rho * U * np.abs(U)
# q0= 0.5*rho*U0**2
# Reynolds number and drag
if float(inputs["cd_usr"]) < 0.0:
Re = rho * U * d / mu
cd, dcd_dRe = cylinderDrag(Re)
else:
cd = inputs["cd_usr"] * np.ones_like(d)
Re = 1.0
dcd_dRe = 0.0
# inertial and drag forces
        Fi = rho * inputs["cm"] * math.pi / 4.0 * d ** 2 * inputs["A"]  # Morison's equation (inertia term)
Fd = q * cd * d
Fp = Fi + Fd
# components of distributed loads
Px = Fp * cosd(beta)
Py = Fp * sind(beta)
Pz = 0.0 * Fp
# FORCES [N/m] AT z=0 m
# idx0 = np.abs(zrel).argmin() # closest index to z=0, used to find d at z=0
# d0 = d[idx0] # initialize
# cd0 = cd[idx0] # initialize
# if (zrel[idx0]<0.) and (idx0< (zrel.size-1)): # point below water
# d0 = np.mean(d[idx0:idx0+2])
# cd0 = np.mean(cd[idx0:idx0+2])
# elif (zrel[idx0]>0.) and (idx0>0): # point above water
# d0 = np.mean(d[idx0-1:idx0+1])
# cd0 = np.mean(cd[idx0-1:idx0+1])
# Fi0 = rho*inputs['cm']*math.pi/4.0*d0**2*inputs['A0'] # Morrison's equation
# Fd0 = q0*cd0*d0
# Fp0 = Fi0 + Fd0
# Px0 = Fp0*cosd(beta0)
# Py0 = Fp0*sind(beta0)
# Pz0 = 0.*Fp0
# Store qties at z=0 MSL
# outputs['waveLoads_Px0'] = Px0
# outputs['waveLoads_Py0'] = Py0
# outputs['waveLoads_Pz0'] = Pz0
# outputs['waveLoads_qdyn0'] = q0
# outputs['waveLoads_beta0'] = beta0
# pack data
outputs["waveLoads_Px"] = Px
outputs["waveLoads_Py"] = Py
outputs["waveLoads_Pz"] = Pz
outputs["waveLoads_qdyn"] = q
outputs["waveLoads_pt"] = q + inputs["p"]
outputs["waveLoads_z"] = inputs["z"]
outputs["waveLoads_beta"] = beta
def compute_partials(self, inputs, J):
# wlevel = inputs['wlevel']
# if wlevel > 0.0: wlevel *= -1.0
rho = inputs["rho_water"]
U = inputs["U"]
# U0 = inputs['U0']
d = inputs["d"]
# zrel= inputs['z']-wlevel
mu = inputs["mu_water"]
beta = inputs["beta_wave"]
# beta0 = inputs['beta0']
# dynamic pressure
q = 0.5 * rho * U ** 2
# q0= 0.5*rho*U0**2
# Reynolds number and drag
        if float(inputs["cd_usr"]) < 0.0:
            # match compute(): a negative cd_usr means "use the Reynolds-number-based drag"
            Re = rho * U * d / mu
            cd, dcd_dRe = cylinderDrag(Re)
        else:
            cd = inputs["cd_usr"] * np.ones_like(d)
            Re = 1.0
            dcd_dRe = 0.0
# derivatives
dq_dU = rho * U
const = (dq_dU * cd + q * dcd_dRe * rho * d / mu) * d
dPx_dU = const * cosd(beta)
dPy_dU = const * sind(beta)
const = (cd + dcd_dRe * Re) * q + rho * inputs["cm"] * math.pi / 4.0 * 2 * d * inputs["A"]
dPx_dd = const * cosd(beta)
dPy_dd = const * sind(beta)
const = rho * inputs["cm"] * math.pi / 4.0 * d ** 2
dPx_dA = const * cosd(beta)
dPy_dA = const * sind(beta)
J["waveLoads_Px", "A"] = dPx_dA
J["waveLoads_Py", "A"] = dPy_dA
J["waveLoads_qdyn", "U"] = dq_dU
J["waveLoads_pt", "U"] = dq_dU
# ___________________________________________#
class CylinderEnvironment(om.Group):
def initialize(self):
self.options.declare("wind", default="power")
self.options.declare("nPoints")
self.options.declare("water_flag", default=True)
def setup(self):
nPoints = self.options["nPoints"]
wind = self.options["wind"]
water_flag = self.options["water_flag"]
self.set_input_defaults("z0", 0.0)
self.set_input_defaults("cd_usr", -1.0)
self.set_input_defaults("yaw", 0.0, units="deg")
self.set_input_defaults("beta_wind", 0.0, units="deg")
self.set_input_defaults("rho_air", 1.225, units="kg/m**3")
self.set_input_defaults("mu_air", 1.81206e-5, units="kg/m/s")
self.set_input_defaults("shearExp", 0.2)
if water_flag:
self.set_input_defaults("beta_wave", 0.0, units="deg")
self.set_input_defaults("rho_water", 1025.0, units="kg/m**3")
self.set_input_defaults("mu_water", 1.08e-3, units="kg/m/s")
# Wind profile and loads
promwind = ["Uref", "zref", "z", "z0"]
if wind is None or wind.lower() in ["power", "powerwind", ""]:
self.add_subsystem("wind", PowerWind(nPoints=nPoints), promotes=promwind + ["shearExp"])
elif wind.lower() == "logwind":
self.add_subsystem("wind", LogWind(nPoints=nPoints), promotes=promwind)
else:
raise ValueError("Unknown wind type, " + wind)
self.add_subsystem(
"windLoads",
CylinderWindDrag(nPoints=nPoints),
promotes=["cd_usr", "beta_wind", "rho_air", "mu_air", "z", "d"],
)
# Wave profile and loads
if water_flag:
self.add_subsystem(
"wave",
LinearWaves(nPoints=nPoints),
promotes=[
"z",
"Uc",
"Hsig_wave",
"Tsig_wave",
"rho_water",
("z_floor", "water_depth"),
("z_surface", "z0"),
],
)
self.add_subsystem(
"waveLoads",
CylinderWaveDrag(nPoints=nPoints),
promotes=["cm", "cd_usr", "beta_wave", "rho_water", "mu_water", "z", "d"],
)
# Combine all loads
self.add_subsystem(
"distLoads", AeroHydroLoads(nPoints=nPoints), promotes=["Px", "Py", "Pz", "qdyn", "yaw", "z"]
)
# Connections
self.connect("wind.U", "windLoads.U")
if water_flag:
self.connect("wave.U", "waveLoads.U")
self.connect("wave.A", "waveLoads.A")
self.connect("wave.p", "waveLoads.p")
self.connect("windLoads.windLoads_Px", "distLoads.windLoads_Px")
self.connect("windLoads.windLoads_Py", "distLoads.windLoads_Py")
self.connect("windLoads.windLoads_Pz", "distLoads.windLoads_Pz")
self.connect("windLoads.windLoads_qdyn", "distLoads.windLoads_qdyn")
self.connect("windLoads.windLoads_beta", "distLoads.windLoads_beta")
self.connect("windLoads.windLoads_z", "distLoads.windLoads_z")
if water_flag:
self.connect("waveLoads.waveLoads_Px", "distLoads.waveLoads_Px")
self.connect("waveLoads.waveLoads_Py", "distLoads.waveLoads_Py")
self.connect("waveLoads.waveLoads_Pz", "distLoads.waveLoads_Pz")
self.connect("waveLoads.waveLoads_pt", "distLoads.waveLoads_qdyn")
self.connect("waveLoads.waveLoads_beta", "distLoads.waveLoads_beta")
self.connect("waveLoads.waveLoads_z", "distLoads.waveLoads_z")
def main():
# initialize problem
U = np.array([20.0, 25.0, 30.0])
z = np.array([10.0, 30.0, 80.0])
d = np.array([5.5, 4.0, 3.0])
    beta = 45.0  # beta_wind is declared as a scalar input on the component
rho = 1.225
mu = 1.7934e-5
# cd_usr = 0.7
nPoints = len(z)
prob = om.Problem()
root = prob.model = om.Group()
    root.add_subsystem("p1", CylinderWindDrag(nPoints=nPoints))
prob.setup()
prob["p1.U"] = U
prob["p1.z"] = z
prob["p1.d"] = d
prob["p1.beta"] = beta
prob["p1.rho"] = rho
prob["p1.mu"] = mu
# prob['p1.cd_usr'] = cd_usr
# run
    prob.run_model()
# out
Re = prob["p1.rho"] * prob["p1.U"] * prob["p1.d"] / prob["p1.mu"]
cd, dcd_dRe = cylinderDrag(Re)
print(cd)
import matplotlib.pyplot as plt
plt.plot(prob["p1.windLoads_Px"], prob["p1.windLoads_z"])
plt.plot(prob["p1.windLoads_Py"], prob["p1.windLoads_z"])
plt.plot(prob["p1.windLoads_qdyn"], prob["p1.windLoads_z"])
plt.show()
if __name__ == "__main__":
main()
|
[
"numpy.abs",
"wisdem.commonse.environment.PowerWind",
"numpy.arange",
"numpy.interp",
"openmdao.api.Group",
"numpy.zeros_like",
"math.log",
"numpy.log10",
"openmdao.api.Problem",
"matplotlib.pyplot.show",
"numpy.ones_like",
"wisdem.commonse.environment.LinearWaves",
"wisdem.commonse.csystem.DirectionVector",
"wisdem.commonse.utilities.cosd",
"matplotlib.pyplot.plot",
"numpy.zeros",
"numpy.array",
"wisdem.commonse.environment.LogWind",
"wisdem.commonse.utilities.sind"
] |
[((986, 1001), 'numpy.log10', 'np.log10', (['Re_pt'], {}), '(Re_pt)\n', (994, 1001), True, 'import numpy as np\n'), ((1375, 1392), 'numpy.zeros_like', 'np.zeros_like', (['Re'], {}), '(Re)\n', (1388, 1392), True, 'import numpy as np\n'), ((1407, 1424), 'numpy.zeros_like', 'np.zeros_like', (['Re'], {}), '(Re)\n', (1420, 1424), True, 'import numpy as np\n'), ((21955, 21983), 'numpy.array', 'np.array', (['[20.0, 25.0, 30.0]'], {}), '([20.0, 25.0, 30.0])\n', (21963, 21983), True, 'import numpy as np\n'), ((21992, 22020), 'numpy.array', 'np.array', (['[10.0, 30.0, 80.0]'], {}), '([10.0, 30.0, 80.0])\n', (22000, 22020), True, 'import numpy as np\n'), ((22029, 22054), 'numpy.array', 'np.array', (['[5.5, 4.0, 3.0]'], {}), '([5.5, 4.0, 3.0])\n', (22037, 22054), True, 'import numpy as np\n'), ((22067, 22095), 'numpy.array', 'np.array', (['[45.0, 45.0, 45.0]'], {}), '([45.0, 45.0, 45.0])\n', (22075, 22095), True, 'import numpy as np\n'), ((22184, 22196), 'openmdao.api.Problem', 'om.Problem', ([], {}), '()\n', (22194, 22196), True, 'import openmdao.api as om\n'), ((22222, 22232), 'openmdao.api.Group', 'om.Group', ([], {}), '()\n', (22230, 22232), True, 'import openmdao.api as om\n'), ((22672, 22729), 'matplotlib.pyplot.plot', 'plt.plot', (["prob['p1.windLoads_Px']", "prob['p1.windLoads_z']"], {}), "(prob['p1.windLoads_Px'], prob['p1.windLoads_z'])\n", (22680, 22729), True, 'import matplotlib.pyplot as plt\n'), ((22734, 22791), 'matplotlib.pyplot.plot', 'plt.plot', (["prob['p1.windLoads_Py']", "prob['p1.windLoads_z']"], {}), "(prob['p1.windLoads_Py'], prob['p1.windLoads_z'])\n", (22742, 22791), True, 'import matplotlib.pyplot as plt\n'), ((22796, 22855), 'matplotlib.pyplot.plot', 'plt.plot', (["prob['p1.windLoads_qdyn']", "prob['p1.windLoads_z']"], {}), "(prob['p1.windLoads_qdyn'], prob['p1.windLoads_z'])\n", (22804, 22855), True, 'import matplotlib.pyplot as plt\n'), ((22860, 22870), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22868, 22870), True, 'import matplotlib.pyplot as plt\n'), ((1496, 1514), 'numpy.log10', 'np.log10', (['ReN[idx]'], {}), '(ReN[idx])\n', (1504, 1514), True, 'import numpy as np\n'), ((1546, 1558), 'math.log', 'math.log', (['(10)'], {}), '(10)\n', (1554, 1558), False, 'import math\n'), ((8296, 8314), 'numpy.arange', 'np.arange', (['nPoints'], {}), '(nPoints)\n', (8305, 8314), True, 'import numpy as np\n'), ((13804, 13822), 'numpy.arange', 'np.arange', (['nPoints'], {}), '(nPoints)\n', (13813, 13822), True, 'import numpy as np\n'), ((3552, 3569), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (3560, 3569), True, 'import numpy as np\n'), ((3623, 3640), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (3631, 3640), True, 'import numpy as np\n'), ((3694, 3711), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (3702, 3711), True, 'import numpy as np\n'), ((3767, 3784), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (3775, 3784), True, 'import numpy as np\n'), ((3840, 3857), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (3848, 3857), True, 'import numpy as np\n'), ((3968, 3985), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (3976, 3985), True, 'import numpy as np\n'), ((4039, 4056), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (4047, 4056), True, 'import numpy as np\n'), ((4110, 4127), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (4118, 4127), True, 'import numpy as np\n'), ((4183, 4200), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (4191, 4200), 
True, 'import numpy as np\n'), ((4256, 4273), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (4264, 4273), True, 'import numpy as np\n'), ((4373, 4390), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (4381, 4390), True, 'import numpy as np\n'), ((4482, 4499), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (4490, 4499), True, 'import numpy as np\n'), ((4544, 4561), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (4552, 4561), True, 'import numpy as np\n'), ((4606, 4623), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (4614, 4623), True, 'import numpy as np\n'), ((4670, 4687), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (4678, 4687), True, 'import numpy as np\n'), ((5232, 5280), 'numpy.interp', 'np.interp', (['z', "inputs['windLoads_z']", 'windLoads.x'], {}), "(z, inputs['windLoads_z'], windLoads.x)\n", (5241, 5280), True, 'import numpy as np\n'), ((5283, 5331), 'numpy.interp', 'np.interp', (['z', "inputs['waveLoads_z']", 'waveLoads.x'], {}), "(z, inputs['waveLoads_z'], waveLoads.x)\n", (5292, 5331), True, 'import numpy as np\n'), ((5345, 5393), 'numpy.interp', 'np.interp', (['z', "inputs['windLoads_z']", 'windLoads.y'], {}), "(z, inputs['windLoads_z'], windLoads.y)\n", (5354, 5393), True, 'import numpy as np\n'), ((5396, 5444), 'numpy.interp', 'np.interp', (['z', "inputs['waveLoads_z']", 'waveLoads.y'], {}), "(z, inputs['waveLoads_z'], waveLoads.y)\n", (5405, 5444), True, 'import numpy as np\n'), ((5458, 5506), 'numpy.interp', 'np.interp', (['z', "inputs['windLoads_z']", 'windLoads.z'], {}), "(z, inputs['windLoads_z'], windLoads.z)\n", (5467, 5506), True, 'import numpy as np\n'), ((5509, 5557), 'numpy.interp', 'np.interp', (['z', "inputs['waveLoads_z']", 'waveLoads.z'], {}), "(z, inputs['waveLoads_z'], waveLoads.z)\n", (5518, 5557), True, 'import numpy as np\n'), ((5573, 5634), 'numpy.interp', 'np.interp', (['z', "inputs['windLoads_z']", "inputs['windLoads_qdyn']"], {}), "(z, inputs['windLoads_z'], inputs['windLoads_qdyn'])\n", (5582, 5634), True, 'import numpy as np\n'), ((5637, 5698), 'numpy.interp', 'np.interp', (['z', "inputs['waveLoads_z']", "inputs['waveLoads_qdyn']"], {}), "(z, inputs['waveLoads_z'], inputs['waveLoads_qdyn'])\n", (5646, 5698), True, 'import numpy as np\n'), ((7502, 7519), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (7510, 7519), True, 'import numpy as np\n'), ((7562, 7579), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (7570, 7579), True, 'import numpy as np\n'), ((7620, 7637), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (7628, 7637), True, 'import numpy as np\n'), ((7896, 7913), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (7904, 7913), True, 'import numpy as np\n'), ((7968, 7985), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (7976, 7985), True, 'import numpy as np\n'), ((8040, 8057), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (8048, 8057), True, 'import numpy as np\n'), ((8114, 8131), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (8122, 8131), True, 'import numpy as np\n'), ((8188, 8205), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (8196, 8205), True, 'import numpy as np\n'), ((9600, 9610), 'wisdem.commonse.utilities.cosd', 'cosd', (['beta'], {}), '(beta)\n', (9604, 9610), False, 'from wisdem.commonse.utilities import cosd, sind\n'), ((9629, 9639), 'wisdem.commonse.utilities.sind', 'sind', (['beta'], {}), '(beta)\n', (9633, 9639), False, 'from 
wisdem.commonse.utilities import cosd, sind\n'), ((10566, 10576), 'wisdem.commonse.utilities.cosd', 'cosd', (['beta'], {}), '(beta)\n', (10570, 10576), False, 'from wisdem.commonse.utilities import cosd, sind\n'), ((10602, 10612), 'wisdem.commonse.utilities.sind', 'sind', (['beta'], {}), '(beta)\n', (10606, 10612), False, 'from wisdem.commonse.utilities import cosd, sind\n'), ((10679, 10689), 'wisdem.commonse.utilities.cosd', 'cosd', (['beta'], {}), '(beta)\n', (10683, 10689), False, 'from wisdem.commonse.utilities import cosd, sind\n'), ((10715, 10725), 'wisdem.commonse.utilities.sind', 'sind', (['beta'], {}), '(beta)\n', (10719, 10725), False, 'from wisdem.commonse.utilities import cosd, sind\n'), ((12709, 12726), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (12717, 12726), True, 'import numpy as np\n'), ((12769, 12786), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (12777, 12786), True, 'import numpy as np\n'), ((12832, 12849), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (12840, 12849), True, 'import numpy as np\n'), ((12895, 12912), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (12903, 12912), True, 'import numpy as np\n'), ((12953, 12970), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (12961, 12970), True, 'import numpy as np\n'), ((13267, 13284), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (13275, 13284), True, 'import numpy as np\n'), ((13339, 13356), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (13347, 13356), True, 'import numpy as np\n'), ((13411, 13428), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (13419, 13428), True, 'import numpy as np\n'), ((13485, 13502), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (13493, 13502), True, 'import numpy as np\n'), ((13560, 13577), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (13568, 13577), True, 'import numpy as np\n'), ((13634, 13651), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (13642, 13651), True, 'import numpy as np\n'), ((14917, 14926), 'numpy.abs', 'np.abs', (['U'], {}), '(U)\n', (14923, 14926), True, 'import numpy as np\n'), ((15459, 15469), 'wisdem.commonse.utilities.cosd', 'cosd', (['beta'], {}), '(beta)\n', (15463, 15469), False, 'from wisdem.commonse.utilities import cosd, sind\n'), ((15488, 15498), 'wisdem.commonse.utilities.sind', 'sind', (['beta'], {}), '(beta)\n', (15492, 15498), False, 'from wisdem.commonse.utilities import cosd, sind\n'), ((17679, 17689), 'wisdem.commonse.utilities.cosd', 'cosd', (['beta'], {}), '(beta)\n', (17683, 17689), False, 'from wisdem.commonse.utilities import cosd, sind\n'), ((17715, 17725), 'wisdem.commonse.utilities.sind', 'sind', (['beta'], {}), '(beta)\n', (17719, 17725), False, 'from wisdem.commonse.utilities import cosd, sind\n'), ((17851, 17861), 'wisdem.commonse.utilities.cosd', 'cosd', (['beta'], {}), '(beta)\n', (17855, 17861), False, 'from wisdem.commonse.utilities import cosd, sind\n'), ((17887, 17897), 'wisdem.commonse.utilities.sind', 'sind', (['beta'], {}), '(beta)\n', (17891, 17897), False, 'from wisdem.commonse.utilities import cosd, sind\n'), ((17984, 17994), 'wisdem.commonse.utilities.cosd', 'cosd', (['beta'], {}), '(beta)\n', (17988, 17994), False, 'from wisdem.commonse.utilities import cosd, sind\n'), ((18020, 18030), 'wisdem.commonse.utilities.sind', 'sind', (['beta'], {}), '(beta)\n', (18024, 18030), False, 'from wisdem.commonse.utilities import cosd, sind\n'), ((15160, 15175), 'numpy.ones_like', 
'np.ones_like', (['d'], {}), '(d)\n', (15172, 15175), True, 'import numpy as np\n'), ((17391, 17406), 'numpy.ones_like', 'np.ones_like', (['d'], {}), '(d)\n', (17403, 17406), True, 'import numpy as np\n'), ((19433, 19459), 'wisdem.commonse.environment.PowerWind', 'PowerWind', ([], {'nPoints': 'nPoints'}), '(nPoints=nPoints)\n', (19442, 19459), False, 'from wisdem.commonse.environment import LogWind, PowerWind, LinearWaves\n'), ((20011, 20039), 'wisdem.commonse.environment.LinearWaves', 'LinearWaves', ([], {'nPoints': 'nPoints'}), '(nPoints=nPoints)\n', (20022, 20039), False, 'from wisdem.commonse.environment import LogWind, PowerWind, LinearWaves\n'), ((19575, 19599), 'wisdem.commonse.environment.LogWind', 'LogWind', ([], {'nPoints': 'nPoints'}), '(nPoints=nPoints)\n', (19582, 19599), False, 'from wisdem.commonse.environment import LogWind, PowerWind, LinearWaves\n'), ((4804, 4896), 'wisdem.commonse.csystem.DirectionVector', 'DirectionVector', (["inputs['windLoads_Px']", "inputs['windLoads_Py']", "inputs['windLoads_Pz']"], {}), "(inputs['windLoads_Px'], inputs['windLoads_Py'], inputs[\n 'windLoads_Pz'])\n", (4819, 4896), False, 'from wisdem.commonse.csystem import DirectionVector\n'), ((5028, 5120), 'wisdem.commonse.csystem.DirectionVector', 'DirectionVector', (["inputs['waveLoads_Px']", "inputs['waveLoads_Py']", "inputs['waveLoads_Pz']"], {}), "(inputs['waveLoads_Px'], inputs['waveLoads_Py'], inputs[\n 'waveLoads_Pz'])\n", (5043, 5120), False, 'from wisdem.commonse.csystem import DirectionVector\n')]
|
import sys
import numpy as np
import os
import random
import time
import pygame
import math
from constants import *
from chromosome import Chromosome
from bird import Bird
from selection import *
from pillar import Pillar
from pygame.locals import *
# data = np.zeros((580,600,2))
# y: [-280,300]
# x: [600,0]
"""Agent for the Q-learning flappym with the matrices and matrix updates
"""
class QAgent:
def __init__(self, discount=0.9):
"""Initialize matrix
Args:
discount (float): discount parameter value
"""
        # Q matrix indexed by (action, binned y, x); y offsets are stored in bins of 5 pixels
        self.QMatrix = np.zeros((2, 118, 600))  # action, y, x
self.discount = discount
self.stateCount = np.zeros((580, 600))
def updateQMatrix(self, action, state, value):
"""Update the q matrix based on the parameters
Args:
action (bool): action boolean
state (list): list depicting the state
value (float): current state q value
"""
y = int(state[0] / 5)
x = state[1]
self.QMatrix[action, y, x] = value
def updateStateCount(self, state):
"""Update the times one has been on this state
Args:
state (list): list depicting the current state
Returns:
int: statecount
"""
self.stateCount[state[0], state[1]] += 1
return self.stateCount[state[0], state[1]]
def loadMatrix(self, name):
"""Load a q-matrix from the file
Args:
name (str): filename for the matrix to load to the object
"""
self.QMatrix = np.load("flappyQData//" + name + ".npy")
def getFromQ(self, action, state):
"""Get any item from the matrix
Args:
action (bool): boolean action
state (state): state of the game
Returns:
            float: Q value stored for the given action and state
"""
y = int(state[0] / 5)
x = state[1]
return self.QMatrix.item((action, y, x))
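# The class above only stores and serves Q values; the learning update itself lives in the
# game loop elsewhere in the project. A minimal sketch of how it could be driven is shown
# below; the `alpha`, `reward`, `state`, `next_state` and `action` names are illustrative
# assumptions, not part of the original code.
#
# agent = QAgent(discount=0.9)
# q_old = agent.getFromQ(action, state)
# q_next = max(agent.getFromQ(a, next_state) for a in (0, 1))
# agent.updateQMatrix(action, state, q_old + alpha * (reward + agent.discount * q_next - q_old))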
|
[
"numpy.load",
"numpy.zeros"
] |
[((606, 629), 'numpy.zeros', 'np.zeros', (['(2, 118, 600)'], {}), '((2, 118, 600))\n', (614, 629), True, 'import numpy as np\n'), ((707, 727), 'numpy.zeros', 'np.zeros', (['(580, 600)'], {}), '((580, 600))\n', (715, 727), True, 'import numpy as np\n'), ((1619, 1659), 'numpy.load', 'np.load', (["('flappyQData//' + name + '.npy')"], {}), "('flappyQData//' + name + '.npy')\n", (1626, 1659), True, 'import numpy as np\n')]
|
import numpy as np
from deepspeaker.audio_ds import read_mfcc
from deepspeaker.batcher import sample_from_mfcc
from deepspeaker.constants import SAMPLE_RATE, NUM_FRAMES, WIN_LENGTH
from deepspeaker.conv_models import DeepSpeakerModel
import tensorflow as tf
def build_model(ckpt_path):
model = DeepSpeakerModel()
model.m.load_weights(ckpt_path, by_name=True)
return model
def predict_embedding(model, audio, sr=SAMPLE_RATE, win_length=WIN_LENGTH, cuda=True):
mfcc = sample_from_mfcc(read_mfcc(audio, sr, win_length), NUM_FRAMES)
# Call the model to get the embeddings of shape (1, 512) for each file.
gpus = tf.config.experimental.list_physical_devices('GPU') if cuda else 0
if gpus:
try:
tf.config.experimental.set_visible_devices(gpus[0], 'GPU')
except RuntimeError as e:
print(e)
with tf.device('/device:GPU:0'):
            embedding = model.m.predict(np.expand_dims(mfcc, axis=0))
else:
with tf.device('device:cpu:0'):
            embedding = model.m.predict(np.expand_dims(mfcc, axis=0))
return embedding
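# A hypothetical usage sketch (the checkpoint and wav paths are placeholders, not files that
# ship with this module): build the model once, embed two utterances, and score them with
# cosine similarity, which is the usual way deep-speaker embeddings are compared.
#
# model = build_model('deepspeaker_checkpoint.h5')
# emb_a = predict_embedding(model, 'speaker_a.wav', cuda=False)
# emb_b = predict_embedding(model, 'speaker_b.wav', cuda=False)
# score = float(np.dot(emb_a[0], emb_b[0]) / (np.linalg.norm(emb_a) * np.linalg.norm(emb_b)))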
|
[
"deepspeaker.audio_ds.read_mfcc",
"tensorflow.config.experimental.set_visible_devices",
"tensorflow.device",
"numpy.expand_dims",
"deepspeaker.conv_models.DeepSpeakerModel",
"tensorflow.config.experimental.list_physical_devices"
] |
[((299, 317), 'deepspeaker.conv_models.DeepSpeakerModel', 'DeepSpeakerModel', ([], {}), '()\n', (315, 317), False, 'from deepspeaker.conv_models import DeepSpeakerModel\n'), ((501, 533), 'deepspeaker.audio_ds.read_mfcc', 'read_mfcc', (['audio', 'sr', 'win_length'], {}), '(audio, sr, win_length)\n', (510, 533), False, 'from deepspeaker.audio_ds import read_mfcc\n'), ((634, 685), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (678, 685), True, 'import tensorflow as tf\n'), ((739, 797), 'tensorflow.config.experimental.set_visible_devices', 'tf.config.experimental.set_visible_devices', (['gpus[0]', '"""GPU"""'], {}), "(gpus[0], 'GPU')\n", (781, 797), True, 'import tensorflow as tf\n'), ((866, 892), 'tensorflow.device', 'tf.device', (['"""/device:GPU:0"""'], {}), "('/device:GPU:0')\n", (875, 892), True, 'import tensorflow as tf\n'), ((996, 1021), 'tensorflow.device', 'tf.device', (['"""device:cpu:0"""'], {}), "('device:cpu:0')\n", (1005, 1021), True, 'import tensorflow as tf\n'), ((934, 962), 'numpy.expand_dims', 'np.expand_dims', (['mfcc'], {'axis': '(0)'}), '(mfcc, axis=0)\n', (948, 962), True, 'import numpy as np\n'), ((1063, 1091), 'numpy.expand_dims', 'np.expand_dims', (['mfcc'], {'axis': '(0)'}), '(mfcc, axis=0)\n', (1077, 1091), True, 'import numpy as np\n')]
|
import numpy as np
import scipy.stats as st
import cv2
import time
import os
import glob
def gauss_kernel(size=21, sigma=3, inchannels=3, outchannels=3):
interval = (2 * sigma + 1.0) / size
x = np.linspace(-sigma-interval/2,sigma+interval/2,size+1)
ker1d = np.diff(st.norm.cdf(x))
kernel_raw = np.sqrt(np.outer(ker1d, ker1d))
kernel = kernel_raw / kernel_raw.sum()
out_filter = np.array(kernel, dtype=np.float32)
out_filter = out_filter.reshape((1, 1, size, size))
out_filter = np.tile(out_filter, [outchannels, inchannels, 1, 1])
return out_filter
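# Quick sanity check (illustrative only, not part of the original module): each
# output/input-channel slice of the returned filter should sum to roughly 1, because the raw
# Gaussian kernel is normalised before being tiled across channels.
#
# k = gauss_kernel(size=21, sigma=3)
# assert k.shape == (3, 3, 21, 21)
# assert abs(k[0, 0].sum() - 1.0) < 1e-5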
def np_free_form_mask(maxVertex, maxLength, maxBrushWidth, maxAngle, h, w):
mask = np.zeros((h, w, 1), np.float32)
numVertex = np.random.randint(maxVertex + 1)
startY = np.random.randint(h)
startX = np.random.randint(w)
brushWidth = 0
for i in range(numVertex):
angle = np.random.randint(maxAngle + 1)
angle = angle / 360.0 * 2 * np.pi
if i % 2 == 0:
angle = 2 * np.pi - angle
length = np.random.randint(maxLength + 1)
brushWidth = np.random.randint(10, maxBrushWidth + 1) // 2 * 2
nextY = startY + length * np.cos(angle)
nextX = startX + length * np.sin(angle)
nextY = np.maximum(np.minimum(nextY, h - 1), 0).astype(np.int)
nextX = np.maximum(np.minimum(nextX, w - 1), 0).astype(np.int)
cv2.line(mask, (startY, startX), (nextY, nextX), 1, brushWidth)
cv2.circle(mask, (startY, startX), brushWidth // 2, 2)
startY, startX = nextY, nextX
cv2.circle(mask, (startY, startX), brushWidth // 2, 2)
return mask
def generate_rect_mask(im_size, mask_size, margin=8, rand_mask=True):
mask = np.zeros((im_size[0], im_size[1])).astype(np.float32)
if rand_mask:
sz0, sz1 = mask_size[0], mask_size[1]
of0 = np.random.randint(margin, im_size[0] - sz0 - margin)
of1 = np.random.randint(margin, im_size[1] - sz1 - margin)
else:
sz0, sz1 = mask_size[0], mask_size[1]
of0 = (im_size[0] - sz0) // 2
of1 = (im_size[1] - sz1) // 2
mask[of0:of0+sz0, of1:of1+sz1] = 1
mask = np.expand_dims(mask, axis=0)
mask = np.expand_dims(mask, axis=0)
rect = np.array([[of0, sz0, of1, sz1]], dtype=int)
return mask, rect
def generate_stroke_mask(im_size, parts=10, maxVertex=20, maxLength=100, maxBrushWidth=24, maxAngle=360):
mask = np.zeros((im_size[0], im_size[1], 1), dtype=np.float32)
for i in range(parts):
mask = mask + np_free_form_mask(maxVertex, maxLength, maxBrushWidth, maxAngle, im_size[0], im_size[1])
mask = np.minimum(mask, 1.0)
mask = np.transpose(mask, [2, 0, 1])
mask = np.expand_dims(mask, 0)
return mask
def generate_mask(type, im_size, mask_size):
if type == 'rect':
return generate_rect_mask(im_size, mask_size)
else:
return generate_stroke_mask(im_size), None
def getLatest(folder_path):
files = glob.glob(folder_path)
file_times = list(map(lambda x: time.ctime(os.path.getctime(x)), files))
return files[sorted(range(len(file_times)), key=lambda x: file_times[x])[-1]]
def get_file_mask(filepath):
mask = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
mask = np.minimum(mask, 1.0)
mask=np.expand_dims(mask, axis=0)
mask=np.expand_dims(mask, axis=0)
return mask
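# Hypothetical usage sketch (the 256x256 size and the output filename are arbitrary choices):
# draw one random free-form mask and one random rectangular mask for a 256x256 image, then
# save the free-form mask for visual inspection.
#
# stroke_mask = generate_mask('stroke', (256, 256), None)[0]       # shape (1, 1, 256, 256)
# rect_mask, rect = generate_mask('rect', (256, 256), (128, 128))  # shape (1, 1, 256, 256)
# cv2.imwrite('stroke_mask.png', (stroke_mask[0, 0] * 255).astype(np.uint8))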
|
[
"cv2.line",
"cv2.circle",
"numpy.minimum",
"numpy.outer",
"numpy.zeros",
"numpy.expand_dims",
"numpy.transpose",
"os.path.getctime",
"scipy.stats.norm.cdf",
"cv2.imread",
"numpy.random.randint",
"numpy.array",
"numpy.tile",
"numpy.linspace",
"glob.glob",
"numpy.cos",
"numpy.sin"
] |
[((203, 269), 'numpy.linspace', 'np.linspace', (['(-sigma - interval / 2)', '(sigma + interval / 2)', '(size + 1)'], {}), '(-sigma - interval / 2, sigma + interval / 2, size + 1)\n', (214, 269), True, 'import numpy as np\n'), ((403, 437), 'numpy.array', 'np.array', (['kernel'], {'dtype': 'np.float32'}), '(kernel, dtype=np.float32)\n', (411, 437), True, 'import numpy as np\n'), ((511, 563), 'numpy.tile', 'np.tile', (['out_filter', '[outchannels, inchannels, 1, 1]'], {}), '(out_filter, [outchannels, inchannels, 1, 1])\n', (518, 563), True, 'import numpy as np\n'), ((675, 706), 'numpy.zeros', 'np.zeros', (['(h, w, 1)', 'np.float32'], {}), '((h, w, 1), np.float32)\n', (683, 706), True, 'import numpy as np\n'), ((723, 755), 'numpy.random.randint', 'np.random.randint', (['(maxVertex + 1)'], {}), '(maxVertex + 1)\n', (740, 755), True, 'import numpy as np\n'), ((769, 789), 'numpy.random.randint', 'np.random.randint', (['h'], {}), '(h)\n', (786, 789), True, 'import numpy as np\n'), ((803, 823), 'numpy.random.randint', 'np.random.randint', (['w'], {}), '(w)\n', (820, 823), True, 'import numpy as np\n'), ((1564, 1618), 'cv2.circle', 'cv2.circle', (['mask', '(startY, startX)', '(brushWidth // 2)', '(2)'], {}), '(mask, (startY, startX), brushWidth // 2, 2)\n', (1574, 1618), False, 'import cv2\n'), ((2152, 2180), 'numpy.expand_dims', 'np.expand_dims', (['mask'], {'axis': '(0)'}), '(mask, axis=0)\n', (2166, 2180), True, 'import numpy as np\n'), ((2192, 2220), 'numpy.expand_dims', 'np.expand_dims', (['mask'], {'axis': '(0)'}), '(mask, axis=0)\n', (2206, 2220), True, 'import numpy as np\n'), ((2232, 2275), 'numpy.array', 'np.array', (['[[of0, sz0, of1, sz1]]'], {'dtype': 'int'}), '([[of0, sz0, of1, sz1]], dtype=int)\n', (2240, 2275), True, 'import numpy as np\n'), ((2417, 2472), 'numpy.zeros', 'np.zeros', (['(im_size[0], im_size[1], 1)'], {'dtype': 'np.float32'}), '((im_size[0], im_size[1], 1), dtype=np.float32)\n', (2425, 2472), True, 'import numpy as np\n'), ((2622, 2643), 'numpy.minimum', 'np.minimum', (['mask', '(1.0)'], {}), '(mask, 1.0)\n', (2632, 2643), True, 'import numpy as np\n'), ((2655, 2684), 'numpy.transpose', 'np.transpose', (['mask', '[2, 0, 1]'], {}), '(mask, [2, 0, 1])\n', (2667, 2684), True, 'import numpy as np\n'), ((2696, 2719), 'numpy.expand_dims', 'np.expand_dims', (['mask', '(0)'], {}), '(mask, 0)\n', (2710, 2719), True, 'import numpy as np\n'), ((2963, 2985), 'glob.glob', 'glob.glob', (['folder_path'], {}), '(folder_path)\n', (2972, 2985), False, 'import glob\n'), ((3186, 3228), 'cv2.imread', 'cv2.imread', (['filepath', 'cv2.IMREAD_GRAYSCALE'], {}), '(filepath, cv2.IMREAD_GRAYSCALE)\n', (3196, 3228), False, 'import cv2\n'), ((3240, 3261), 'numpy.minimum', 'np.minimum', (['mask', '(1.0)'], {}), '(mask, 1.0)\n', (3250, 3261), True, 'import numpy as np\n'), ((3271, 3299), 'numpy.expand_dims', 'np.expand_dims', (['mask'], {'axis': '(0)'}), '(mask, axis=0)\n', (3285, 3299), True, 'import numpy as np\n'), ((3309, 3337), 'numpy.expand_dims', 'np.expand_dims', (['mask'], {'axis': '(0)'}), '(mask, axis=0)\n', (3323, 3337), True, 'import numpy as np\n'), ((278, 292), 'scipy.stats.norm.cdf', 'st.norm.cdf', (['x'], {}), '(x)\n', (289, 292), True, 'import scipy.stats as st\n'), ((319, 341), 'numpy.outer', 'np.outer', (['ker1d', 'ker1d'], {}), '(ker1d, ker1d)\n', (327, 341), True, 'import numpy as np\n'), ((890, 921), 'numpy.random.randint', 'np.random.randint', (['(maxAngle + 1)'], {}), '(maxAngle + 1)\n', (907, 921), True, 'import numpy as np\n'), ((1042, 1074), 'numpy.random.randint', 
'np.random.randint', (['(maxLength + 1)'], {}), '(maxLength + 1)\n', (1059, 1074), True, 'import numpy as np\n'), ((1394, 1457), 'cv2.line', 'cv2.line', (['mask', '(startY, startX)', '(nextY, nextX)', '(1)', 'brushWidth'], {}), '(mask, (startY, startX), (nextY, nextX), 1, brushWidth)\n', (1402, 1457), False, 'import cv2\n'), ((1466, 1520), 'cv2.circle', 'cv2.circle', (['mask', '(startY, startX)', '(brushWidth // 2)', '(2)'], {}), '(mask, (startY, startX), brushWidth // 2, 2)\n', (1476, 1520), False, 'import cv2\n'), ((1850, 1902), 'numpy.random.randint', 'np.random.randint', (['margin', '(im_size[0] - sz0 - margin)'], {}), '(margin, im_size[0] - sz0 - margin)\n', (1867, 1902), True, 'import numpy as np\n'), ((1917, 1969), 'numpy.random.randint', 'np.random.randint', (['margin', '(im_size[1] - sz1 - margin)'], {}), '(margin, im_size[1] - sz1 - margin)\n', (1934, 1969), True, 'import numpy as np\n'), ((1718, 1752), 'numpy.zeros', 'np.zeros', (['(im_size[0], im_size[1])'], {}), '((im_size[0], im_size[1]))\n', (1726, 1752), True, 'import numpy as np\n'), ((1096, 1136), 'numpy.random.randint', 'np.random.randint', (['(10)', '(maxBrushWidth + 1)'], {}), '(10, maxBrushWidth + 1)\n', (1113, 1136), True, 'import numpy as np\n'), ((1180, 1193), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1186, 1193), True, 'import numpy as np\n'), ((1228, 1241), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1234, 1241), True, 'import numpy as np\n'), ((1270, 1294), 'numpy.minimum', 'np.minimum', (['nextY', '(h - 1)'], {}), '(nextY, h - 1)\n', (1280, 1294), True, 'import numpy as np\n'), ((1341, 1365), 'numpy.minimum', 'np.minimum', (['nextX', '(w - 1)'], {}), '(nextX, w - 1)\n', (1351, 1365), True, 'import numpy as np\n'), ((3033, 3052), 'os.path.getctime', 'os.path.getctime', (['x'], {}), '(x)\n', (3049, 3052), False, 'import os\n')]
|
# GenerateSpectraPages.py
import argparse
import os
import time
from astropy.coordinates import SkyCoord
from astropy.io import ascii, votable
from astropy.io.votable import parse_single_table, from_table, writeto
from astropy.table import Column, Table
import astropy.units as u
import numpy as np
def parseargs():
"""
Parse the command line arguments
:return: An args map with the parsed arguments
"""
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Produce preview pages for a set of spectra")
parser.add_argument("-s", "--sbid", help="The id of the ASKAP scheduling block",
type=int, required=True)
parser.add_argument("-g", "--good", help="The sigma threshold for spectra to be included in the detections.html page",
type=float, default=3.0)
parser.add_argument("-b", "--best", help="The sigma threshold for spectra to be included in the best.html page",
type=float, default=5.0)
parser.add_argument("-p", "--parent", help="The parent folder for the processing, will default to sbnnn/ where nnn is the sbid.",
required=False)
args = parser.parse_args()
return args
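# Hypothetical invocation of this script (the sbid and thresholds are example values only):
#   python GenerateSpectraPages.py -s 10944 -g 3.0 -b 5.0
# This writes the HTML preview pages into sb10944/ unless -p points at a different parent folder.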
def output_header(f, title):
f.write('<!doctype html>\n<html lang="en">\n<head>\n<title>{}</title>'.format(title))
with open('style.html') as style:
f.write(style.read())
f.write('\n</head>\n<body>')
f.write('\n<div class="container-fluid">')
f.write('\n<h1 align="middle">{}</h1>'.format(title))
return
def output_location_plots(f, source_map='figures/source_loc.png'):
f.write('\n<div class="row px-3" id="maps">')
f.write('\n<div class="col-md-auto"><h2 class="d-inline font-weight-light text-center text-lg-left mt-4 mb-0">Location</hs></div>')
f.write('\n<div class="col-md-auto">')
f.write('\nField Location')
f.write('\n<a href="figures/field_loc.png" class="d-block mb-4 h-100" data-lightbox="maps">')
f.write('\n<img class="img-fluid img-thumbnail" style="height: 180px" src="figures/field_loc.png" alt="Map of the location of the field.">')
f.write('\n</a>\n</div>')
f.write('\n<div class="col-md-auto">')
has_mw_loc_plot = os.path.exists(os.path.dirname(f.name)+'/figures/source_loc_mw.png')
f.write('\n{}Absorption Locations'.format('Magellanic ' if has_mw_loc_plot else ''))
f.write('\n<a href="{}" class="d-block mb-4 h-100" data-lightbox="maps">'.format(source_map))
f.write('\n<img class="img-fluid img-thumbnail" style="height: 180px" src="{}" alt="Map of the location of the sources.">'.format(source_map))
f.write('\n</a>\n</div>')
if has_mw_loc_plot:
f.write('\n<div class="col-md-auto">')
f.write('\nMilky Way Absorption Locations')
f.write('\n<a href="{}" class="d-block mb-4 h-100" data-lightbox="maps">'.format('figures/source_loc_mw.png'))
f.write('\n<img class="img-fluid img-thumbnail" style="height: 180px" src="{}" alt="Map of the location of the Milky Way sources.">'.format('figures/source_loc_mw.png'))
f.write('\n</a>\n</div>')
print(os.path.dirname(f.name)+'/figures/long_vel.png')
if os.path.exists(os.path.dirname(f.name)+'/figures/long_vel.png'):
f.write('\n<div class="col-md-auto">')
f.write('\n<a href="figures/long_vel.png" class="d-block mb-4 h-100" data-lightbox="maps">')
f.write('\n<img class="img-fluid img-thumbnail" style="height: 180px" src="figures/long_vel.png" alt="Longitude-velocity plot of the spectra.">')
f.write('\n</a>\n</div>')
f.write('\n</div>')
def output_block_title(f, rating, first, count):
if not first:
f.write('\n\n</div><br/>\n')
spec = 'spectrum' if count == 1 else 'spectra'
title = '{} Rating {} {}'.format(count, rating, spec) if rating else '{} Missed {} (with closest source)'.format(count, spec)
f.write('\n<div>')
f.write('\n<div class="col-9 d-inline"><h2 class="d-inline font-weight-light text-center text-lg-left mt-4 mb-0">{}</h2></div>'.format(title))
f.write('\n<div class="col-3 pull-right d-inline"><a class="btn btn-primary" data-toggle="collapse" href="#spectra{0}" role="button" aria-expanded="false" aria-controls="spectra{0}" style="font-size: x-small;">Hide/Show</a></div>'.format(rating))
f.write('\n</div>')
f.write('\n<div class="row text-center text-lg-left collapse show" id="spectra{}">'.format(rating))
def output_img(f, comp_name, rating, id, comment, combined=False):
zoom_file_pattern = 'figures/{0}_combined.png' if combined else 'figures/{0}_spec_zoom.png'
zoom_filename = zoom_file_pattern.format(comp_name)
file_pattern = 'figures/{0}_combined.png' if combined else 'figures/{0}_spec.png'
filename = file_pattern.format(comp_name)
f.write('\n<div class="col-lg-3 col-md-4 col-6 px-2">')
f.write('<figure class="figure d-block">')
f.write('\n<a href="{0}" class="mb-4" data-lightbox="rating{1}">'.format(filename, rating))
f.write('\n<img class="img-fluid img-thumbnail" ')
f.write('src="{0}" alt="Zoomed preview of spectrum at {1}">'.format(zoom_filename, comp_name))
f.write('\n</a>')
f.write('<figcaption class="figure-caption text-right">Source #{} {} {}</figcaption>'.format(id, comp_name, comment))
f.write('\n</figure></div>')
return
def output_non_zoom_img(f, comp_name, rating, id):
file_pattern = 'figures/{0}_spec.png'
filename = file_pattern.format(comp_name)
f.write('\n<div class="col-lg-3 col-md-4 col-6 px-2">')
f.write('<figure class="figure d-block">')
f.write('\n<a href="{0}" class="d-block mb-4 h-100" data-lightbox="rating{1}">'.format(filename, rating))
f.write('\n<img class="img-fluid img-thumbnail" ')
f.write('src="{0}" alt="Preview of spectrum at {0}">'.format(filename))
f.write('\n</a>')
f.write('<figcaption class="figure-caption text-right">Source #{} {}</figcaption>'.format(id, comp_name))
f.write('\n</figure></div>')
return
def output_footer(f):
f.write('\n\n</div>\n</div>\n</body>\n</html>')
return
def output_j19_img(f, gaskap_name, j19_name, rating, sep=None):
name_text = gaskap_name
if sep:
name_text += ' at {:.1f} arcsec'.format(sep)
f.write('\n<div class="col-4">')
f.write('\n<a href="spectra/{0}_spec.png" class="d-block mb-4 h-100" data-lightbox="rating{1}">'.format(gaskap_name, rating))
f.write('\n<img class="img-fluid img-thumbnail" ')
f.write('src="spectra/{0}_spec_zoom.png" alt="Zoomed preview of spectrum at {0}">'.format(gaskap_name))
f.write('\n{0}</a>\n</div>'.format(name_text))
f.write('\n<div class="col-8">')
j19_filename = '../jameson2019figset2/{}_lr.jpg'.format(j19_name)
f.write('\n<a href="{0}" class="d-block mb-4 h-100" data-lightbox="rating{1}">'.format(j19_filename, rating))
f.write('\n<img class="img-fluid img-thumbnail" ')
f.write('src="{0}" alt="Zoomed preview of spectrum at {0}">'.format(j19_filename))
f.write('\n</a>\n</div>')
return
def output_spectra(sbid, table, title, filename, threshold=None, has_other_abs=False, has_mw_abs=False,
verbose=False, source_map=None, max_noise=None):
print (title, filename)
with open(filename, 'w') as f:
output_header(f, title)
if source_map:
output_location_plots(f, source_map=source_map)
else:
output_location_plots(f)
for rating in 'ABCDEF':
targets = table[table['rating']==rating]
if max_noise:
targets = targets[targets['sd_cont'] < max_noise]
if has_other_abs:
targets = targets[targets['has_other_abs'] == 1]
elif has_mw_abs:
targets = targets[targets['has_mw_abs'] == 1]
elif threshold:
targets = targets[(1-targets['min_opacity'])/targets['sd_cont'] > threshold]
sort_order = targets.argsort(['comp_name'])
sorted_targets = targets[sort_order]
comp_names = sorted_targets['comp_name']
ids = sorted_targets['id']
maj_axes = sorted_targets['semi_maj_axis']*2
min_axes = sorted_targets['semi_min_axis']*2
fluxes_int = sorted_targets['flux_int']
print('Rating {} has {} spectra'.format(rating, len(comp_names)))
if verbose:
print (comp_names)
output_block_title(f, rating, rating=='A', len(comp_names))
for idx, name in enumerate(comp_names):
comment = '{:.0f}x{:.0f}" {:.0f} mJy'.format(maj_axes[idx], min_axes[idx], fluxes_int[idx])
output_img(f, name, rating, ids[idx], comment, combined=True)
output_footer(f)
def output_listed_spectra(sbid, table, title, filename, comp_names_list, verbose=False, source_map=None, zoomed=True):
print (title, filename)
with open(filename, 'w') as f:
output_header(f, title)
if source_map:
output_location_plots(f, source_map=source_map)
else:
output_location_plots(f)
for rating in 'ABCDEF':
targets = table[table['rating']==rating]
targets = targets[np.in1d(targets['comp_name'], comp_names_list)]
sort_order = targets.argsort(['comp_name'])
sorted_targets = targets[sort_order]
comp_names = sorted_targets['comp_name']
ids = sorted_targets['id']
maj_axes = sorted_targets['semi_maj_axis']*2
min_axes = sorted_targets['semi_min_axis']*2
fluxes_int = sorted_targets['flux_int']
print('Rating {} has {} spectra'.format(rating, len(comp_names)))
if verbose:
print (comp_names)
output_block_title(f, rating, rating=='A', len(comp_names))
for idx, name in enumerate(comp_names):
comment = '{:.0f}x{:.0f}" {:.0f} mJy'.format(maj_axes[idx], min_axes[idx], fluxes_int[idx])
if zoomed:
output_img(f, name, rating, ids[idx], comment, combined=True)
else:
output_non_zoom_img(f, name, rating, ids[idx])
output_footer(f)
def output_diff_sigma_spectra(sbid, table, title, filename, verbose=False, source_map=None, zoomed=True):
print (title, filename)
with open(filename, 'w') as f:
output_header(f, title)
if source_map:
output_location_plots(f, source_map=source_map)
else:
output_location_plots(f)
sigma_name_map = {2.8: ['J005518-714450', 'J010401-720206', 'J005116-734000', 'J010431-720726', 'J011157-734129', 'J010532-721331', 'J002620-743741'],
2.7:['J011332-740758', 'J003037-742903', 'J013218-715348', 'J005448-725353', 'J010556-714607', 'J012924-733153', 'J003208-735038', 'J012037-703843', 'J004306-732828'],
2.6:['J011134-711414', 'J005715-704046', 'J003936-742018', 'J002411-735717', 'J012306-695600', 'J005014-730326', 'J002222-742825', 'J010932-713453'],
2.5:['J014924-730231', 'J012945-701803', 'J005141-725545', 'J002826-703501', 'J002034-705526'],
3: ['J010532-721331', 'J005448-725353', 'J010556-714607', 'J005715-704046']}
for k,v in sigma_name_map.items():
#v = sigma_name_map[k]
print (k, v)
comp_names_list = v
if k < 2.8:
f.write('\n</div>')
if k < 3:
f.write('\n<h2>Spectra included at 2+ channels of {} sigma cutoff</h2>'.format(k))
else:
f.write('\n</div>\n<h2>Spectra included at 3+ channels of 2.5 sigma cutoff</h2>')
first = True
# TODO: Switch to use source lists
for rating in 'ABCDEF':
targets = table[table['rating']==rating]
targets = targets[np.in1d(targets['comp_name'], comp_names_list)]
sort_order = targets.argsort(['comp_name'])
sorted_targets = targets[sort_order]
comp_names = sorted_targets['comp_name']
ids = sorted_targets['id']
maj_axes = sorted_targets['semi_maj_axis']*2
min_axes = sorted_targets['semi_min_axis']*2
fluxes_int = sorted_targets['flux_int']
print('Rating {} has {} spectra'.format(rating, len(comp_names)))
if len(comp_names) == 0:
continue
if verbose:
print (comp_names)
output_block_title(f, rating, first, len(comp_names))
first = False
for idx, name in enumerate(comp_names):
comment = '{:.0f}x{:.0f}" {:.0f} mJy'.format(maj_axes[idx], min_axes[idx], fluxes_int[idx])
if zoomed:
output_img(f, name, rating, ids[idx], comment, combined=True)
else:
output_non_zoom_img(f, name, rating, ids[idx])
output_footer(f)
def find_j19_matches(gaskap_table, no_match_cat=None):
print ('\nCross-matching with Jameson et al 2019', no_match_cat)
j19_table = ascii.read('jameson2019.csv', format='csv')
col_index = Column(name='index', data=1+np.arange(len(j19_table)))
j19_table.add_column(col_index)
coo_j19 = SkyCoord(j19_table['ra']*u.deg, j19_table['dec']*u.deg)
coo_gaskap = SkyCoord(gaskap_table['ra'], gaskap_table['dec'])
idx_j19, d2d_j19, d3d_j19 = coo_gaskap.match_to_catalog_sky(coo_j19)
matched = d2d_j19 <= 18.5*u.arcsec # This cutoff allows for the widest separation without adding duplicates
matched_j19_idx = idx_j19[matched]
un_matched_j19_idx = [i for i in np.arange(len(j19_table)) if i not in matched_j19_idx]
j19_unmatched = j19_table[un_matched_j19_idx]
print ("Found {} sources in Jameson et al 2019 not in GASKAP data.".format(len(j19_unmatched)))
coo_j19_unm = SkyCoord(j19_unmatched['ra']*u.deg, j19_unmatched['dec']*u.deg)
idx_gaskap, d2d_gaskap, d3d_gaskap = coo_j19_unm.match_to_catalog_sky(coo_gaskap)
close_gaskap_comp_names = gaskap_table[idx_gaskap]['comp_name']
col_closest = Column(name='closest_gaskap', data=close_gaskap_comp_names)
col_gaskap_ra = Column(name='gaskap_ra', data=gaskap_table[idx_gaskap]['ra'])
col_gaskap_dec = Column(name='gaskap_dec', data=gaskap_table[idx_gaskap]['dec'])
col_sep = Column(name='gaskap_sep', data=d2d_gaskap.to(u.arcsec))
j19_unmatched.add_columns([col_closest, col_gaskap_ra, col_gaskap_dec, col_sep])
if no_match_cat:
print (j19_unmatched)
j19_unm_vo_table = from_table(j19_unmatched)
writeto(j19_unm_vo_table, no_match_cat)
return j19_table, idx_j19, d2d_j19, matched, j19_unmatched
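# The cross-match above relies on astropy's nearest-neighbour matching. A minimal,
# self-contained sketch of that pattern, with made-up coordinates, looks like this:
#
# from astropy.coordinates import SkyCoord
# import astropy.units as u
# cat_a = SkyCoord([10.000, 10.500] * u.deg, [-72.000, -72.200] * u.deg)
# cat_b = SkyCoord([10.001, 55.000] * u.deg, [-72.001, -30.000] * u.deg)
# idx, d2d, _ = cat_a.match_to_catalog_sky(cat_b)  # nearest cat_b source for each cat_a source
# matched = d2d <= 18.5 * u.arcsec                 # same separation cutoff as used above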
def output_j19_comparison(sbid, gaskap_table, j19_table, idx_j19, d2d_j19, j19_match, j19_unmatched, title, filename, match_cat=None):
print (title, filename)
gaskap_targets = gaskap_table[j19_match]
j19_targets = j19_table[idx_j19]
j19_targets = j19_targets[j19_match]
sort_order = gaskap_targets.argsort(['comp_name'])
#comp_names = sorted(targets['comp_name'])
gaskap_tgt_ordered = gaskap_targets[sort_order]
j19_tgt_ordered = j19_targets[sort_order]
with open(filename, 'w') as f:
output_header(f, title)
for rating in 'ABCDEF':
mask = gaskap_tgt_ordered['rating']==rating
subset = gaskap_tgt_ordered[mask]
j19_subset = j19_tgt_ordered[mask]
print('Rating {} has {} spectra'.format(rating, len(subset)))
output_block_title(f, rating, rating=='A', len(subset))
for idx, gaskap_src in enumerate(subset):
gaskap_name = gaskap_src['comp_name']
j19_name = j19_subset[idx]['Source']
output_j19_img(f, gaskap_name, j19_name, rating)
# Add a section for missed spectra
output_block_title(f, None, False, len(j19_unmatched))
for row in j19_unmatched:
gaskap_name = row['closest_gaskap']
j19_name = row['Source']
output_j19_img(f, gaskap_name, j19_name, rating, sep=row['gaskap_sep'])
output_footer(f)
if match_cat:
augmented_table = Table(gaskap_tgt_ordered)
close_j19_comp_names = j19_tgt_ordered['Source']
col_closest = Column(name='closest_j19', data=close_j19_comp_names)
col_gaskap_ra = Column(name='j19_ra', data=j19_tgt_ordered['ra']*u.deg)
col_gaskap_dec = Column(name='j19_dec', data=j19_tgt_ordered['dec']*u.deg)
sep_vals = d2d_j19[j19_match]
sep_vals_sorted = sep_vals[sort_order]
col_sep = Column(name='j19_sep', data=sep_vals_sorted.to(u.arcsec))
augmented_table.add_columns([col_closest, col_gaskap_ra, col_gaskap_dec, col_sep])
#print (augmented_table)
j19_match_vo_table = from_table(augmented_table)
writeto(j19_match_vo_table, match_cat)
def main():
args = parseargs()
start = time.time()
print("#### Started generating spectra pages for sbid {} at {} ####".format(args.sbid,
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start))))
parent_folder = 'sb{}/'.format(args.sbid)
if args.parent:
parent_folder = args.parent
if not os.path.exists(parent_folder):
print("Error: Folder {} does not exist.".format(parent_folder))
return 1
spectra_votable = votable.parse('{}/askap_spectra.vot'.format(parent_folder), pedantic=False)
spectra_table = spectra_votable.get_first_table().to_table()
output_spectra(args.sbid, spectra_table, 'Absorption spectra for SBID {}'.format(
args.sbid), '{}/all.html'.format(parent_folder))
output_spectra(args.sbid, spectra_table, 'Absorption spectra for SBID {} with non MW absorption features'.format(
args.sbid, args.good), '{}/detections.html'.format(parent_folder), has_other_abs=True)
output_spectra(args.sbid, spectra_table, 'Absorption spectra for SBID {} with {}σ candidate detections'.format(
args.sbid, args.best), '{}/best.html'.format(parent_folder), threshold=args.best)
output_spectra(args.sbid, spectra_table, 'Absorption spectra for SBID {} with MW absorption features'.format(
args.sbid, args.best), '{}/mw_detections.html'.format(parent_folder), has_mw_abs=True)
max_noise=0.03
output_spectra(args.sbid, spectra_table, 'Absorption spectra for SBID {} with less than {} noise level'.format(
args.sbid, max_noise), '{}/quiet.html'.format(parent_folder), max_noise=max_noise)
if args.sbid == 10944:
output_diff_sigma_spectra(args.sbid, spectra_table, 'Comparison of sigma cutoffs', '{}/sigmacomp.html'.format(parent_folder))
missed_sources = ['J005448-725353', 'J010532-721331', 'J005014-730326', 'J012924-733153', 'J005217-730157', 'J010556-714607', 'J005141-725545', 'J004306-732828', 'J010401-720206',
'J010359-720144', 'J010404-720145', 'J013032-731741', 'J003524-732223', 'J010919-725600', 'J013218-715348', 'J004718-723947', 'J010431-720726', 'J005116-734000', 'J003037-742903',
'J003037-742901', 'J012733-713639', 'J010932-713453', 'J003936-742018', 'J004808-741206', 'J002411-735717', 'J002143-741500']
output_listed_spectra(args.sbid, spectra_table, 'Absorption spectra for SBID {} excluded by changed noise'.format(
args.sbid), '{}/excluded.html'.format(parent_folder), missed_sources)
wide_added = ['J012639-731502', 'J012639-731502', 'J005644-725200', 'J011408-732006', 'J005217-730157']
output_listed_spectra(args.sbid, spectra_table, 'Absorption spectra for SBID {} added by using 3 channels with 2.3 sigma match'.format(
args.sbid), '{}/wide.html'.format(parent_folder), wide_added)
bad_noise = ['J003749-735128',
'J010932-713453',
'J013134-700042',
'J013742-733050',
'J014105-722748']
output_listed_spectra(args.sbid, spectra_table, 'Absorption spectra for SBID {} with poor noise estimates'.format(
args.sbid), '{}/bad_noise.html'.format(parent_folder), bad_noise)
if args.sbid in (8906, 10941, 10944):
j19_table, idx_j19, d2d_j19, j19_match, j19_unmatched = find_j19_matches(spectra_table, no_match_cat='{}/j19_not_matched.vot'.format(parent_folder))
output_j19_comparison(args.sbid, spectra_table, j19_table, idx_j19, d2d_j19, j19_match, j19_unmatched,
'Absorption spectra for SBID {} also in Jameson 19'.format(args.sbid), '{}/j19.html'.format(parent_folder), match_cat='{}/askap_spectra_in_j19.vot'.format(parent_folder))
non_j19_table = gaskap_targets = spectra_table[~j19_match]
print (len(non_j19_table))
output_spectra(args.sbid, non_j19_table, 'Absorption spectra for SBID {} not in J19 with absorption features'.format(
args.sbid), '{}/non_j19_detections.html'.format(parent_folder), has_other_abs=True, source_map='figures/source_loc_nonj19.png')
output_spectra(args.sbid, non_j19_table, 'Absorption spectra for SBID {} not in J19 with {}σ candidate detections'.format(
args.sbid, args.best), '{}/non_j19_best.html'.format(parent_folder), threshold=args.best, source_map='figures/source_loc_nonj19.png')
# Report
end = time.time()
print('#### Processing completed at %s ####' %
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end)))
print('Reported %d spectra in %.02f s' %
(len(spectra_table), end - start))
return 0
if __name__ == '__main__':
exit(main())
|
[
"astropy.io.votable.writeto",
"astropy.io.ascii.read",
"argparse.ArgumentParser",
"astropy.table.Table",
"os.path.dirname",
"os.path.exists",
"time.time",
"astropy.io.votable.from_table",
"astropy.table.Column",
"astropy.coordinates.SkyCoord",
"time.localtime",
"numpy.in1d"
] |
[((437, 584), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter', 'description': '"""Produce preview pages for a set of spectra"""'}), "(formatter_class=argparse.\n ArgumentDefaultsHelpFormatter, description=\n 'Produce preview pages for a set of spectra')\n", (460, 584), False, 'import argparse\n'), ((13401, 13444), 'astropy.io.ascii.read', 'ascii.read', (['"""jameson2019.csv"""'], {'format': '"""csv"""'}), "('jameson2019.csv', format='csv')\n", (13411, 13444), False, 'from astropy.io import ascii, votable\n'), ((13567, 13626), 'astropy.coordinates.SkyCoord', 'SkyCoord', (["(j19_table['ra'] * u.deg)", "(j19_table['dec'] * u.deg)"], {}), "(j19_table['ra'] * u.deg, j19_table['dec'] * u.deg)\n", (13575, 13626), False, 'from astropy.coordinates import SkyCoord\n'), ((13640, 13689), 'astropy.coordinates.SkyCoord', 'SkyCoord', (["gaskap_table['ra']", "gaskap_table['dec']"], {}), "(gaskap_table['ra'], gaskap_table['dec'])\n", (13648, 13689), False, 'from astropy.coordinates import SkyCoord\n'), ((14176, 14243), 'astropy.coordinates.SkyCoord', 'SkyCoord', (["(j19_unmatched['ra'] * u.deg)", "(j19_unmatched['dec'] * u.deg)"], {}), "(j19_unmatched['ra'] * u.deg, j19_unmatched['dec'] * u.deg)\n", (14184, 14243), False, 'from astropy.coordinates import SkyCoord\n'), ((14412, 14471), 'astropy.table.Column', 'Column', ([], {'name': '"""closest_gaskap"""', 'data': 'close_gaskap_comp_names'}), "(name='closest_gaskap', data=close_gaskap_comp_names)\n", (14418, 14471), False, 'from astropy.table import Column, Table\n'), ((14492, 14553), 'astropy.table.Column', 'Column', ([], {'name': '"""gaskap_ra"""', 'data': "gaskap_table[idx_gaskap]['ra']"}), "(name='gaskap_ra', data=gaskap_table[idx_gaskap]['ra'])\n", (14498, 14553), False, 'from astropy.table import Column, Table\n'), ((14575, 14638), 'astropy.table.Column', 'Column', ([], {'name': '"""gaskap_dec"""', 'data': "gaskap_table[idx_gaskap]['dec']"}), "(name='gaskap_dec', data=gaskap_table[idx_gaskap]['dec'])\n", (14581, 14638), False, 'from astropy.table import Column, Table\n'), ((17264, 17275), 'time.time', 'time.time', ([], {}), '()\n', (17273, 17275), False, 'import time\n'), ((21578, 21589), 'time.time', 'time.time', ([], {}), '()\n', (21587, 21589), False, 'import time\n'), ((14872, 14897), 'astropy.io.votable.from_table', 'from_table', (['j19_unmatched'], {}), '(j19_unmatched)\n', (14882, 14897), False, 'from astropy.io.votable import parse_single_table, from_table, writeto\n'), ((14906, 14945), 'astropy.io.votable.writeto', 'writeto', (['j19_unm_vo_table', 'no_match_cat'], {}), '(j19_unm_vo_table, no_match_cat)\n', (14913, 14945), False, 'from astropy.io.votable import parse_single_table, from_table, writeto\n'), ((16503, 16528), 'astropy.table.Table', 'Table', (['gaskap_tgt_ordered'], {}), '(gaskap_tgt_ordered)\n', (16508, 16528), False, 'from astropy.table import Column, Table\n'), ((16608, 16661), 'astropy.table.Column', 'Column', ([], {'name': '"""closest_j19"""', 'data': 'close_j19_comp_names'}), "(name='closest_j19', data=close_j19_comp_names)\n", (16614, 16661), False, 'from astropy.table import Column, Table\n'), ((16686, 16743), 'astropy.table.Column', 'Column', ([], {'name': '"""j19_ra"""', 'data': "(j19_tgt_ordered['ra'] * u.deg)"}), "(name='j19_ra', data=j19_tgt_ordered['ra'] * u.deg)\n", (16692, 16743), False, 'from astropy.table import Column, Table\n'), ((16767, 16826), 'astropy.table.Column', 'Column', ([], {'name': '"""j19_dec"""', 'data': "(j19_tgt_ordered['dec'] 
* u.deg)"}), "(name='j19_dec', data=j19_tgt_ordered['dec'] * u.deg)\n", (16773, 16826), False, 'from astropy.table import Column, Table\n'), ((17139, 17166), 'astropy.io.votable.from_table', 'from_table', (['augmented_table'], {}), '(augmented_table)\n', (17149, 17166), False, 'from astropy.io.votable import parse_single_table, from_table, writeto\n'), ((17175, 17213), 'astropy.io.votable.writeto', 'writeto', (['j19_match_vo_table', 'match_cat'], {}), '(j19_match_vo_table, match_cat)\n', (17182, 17213), False, 'from astropy.io.votable import parse_single_table, from_table, writeto\n'), ((17551, 17580), 'os.path.exists', 'os.path.exists', (['parent_folder'], {}), '(parent_folder)\n', (17565, 17580), False, 'import os\n'), ((2325, 2348), 'os.path.dirname', 'os.path.dirname', (['f.name'], {}), '(f.name)\n', (2340, 2348), False, 'import os\n'), ((3209, 3232), 'os.path.dirname', 'os.path.dirname', (['f.name'], {}), '(f.name)\n', (3224, 3232), False, 'import os\n'), ((3280, 3303), 'os.path.dirname', 'os.path.dirname', (['f.name'], {}), '(f.name)\n', (3295, 3303), False, 'import os\n'), ((9357, 9403), 'numpy.in1d', 'np.in1d', (["targets['comp_name']", 'comp_names_list'], {}), "(targets['comp_name'], comp_names_list)\n", (9364, 9403), True, 'import numpy as np\n'), ((17412, 17433), 'time.localtime', 'time.localtime', (['start'], {}), '(start)\n', (17426, 17433), False, 'import time\n'), ((21686, 21705), 'time.localtime', 'time.localtime', (['end'], {}), '(end)\n', (21700, 21705), False, 'import time\n'), ((12054, 12100), 'numpy.in1d', 'np.in1d', (["targets['comp_name']", 'comp_names_list'], {}), "(targets['comp_name'], comp_names_list)\n", (12061, 12100), True, 'import numpy as np\n')]
|
import base64
import os
import sys
import threading
from collections import defaultdict
from functools import partial
from io import BytesIO
from mimetypes import guess_extension
from typing import Any
import numpy as np
import six
from PIL import Image
from ...debugging.log import LoggerRoot
from ..frameworks import _patched_call, WeightsFileHandler, _Empty
from ..import_bind import PostImportHookPatching
from ...config import running_remotely
from ...model import InputModel, OutputModel, Framework
try:
from google.protobuf.json_format import MessageToDict # noqa
except ImportError:
MessageToDict = None
class TensorflowBinding(object):
@classmethod
def update_current_task(cls, task, patch_reporting=True, patch_model_io=True):
if not task:
IsTensorboardInit.clear_tensorboard_used()
EventTrainsWriter.update_current_task(task)
if patch_reporting:
PatchSummaryToEventTransformer.update_current_task(task)
PatchTensorFlowEager.update_current_task(task)
if patch_model_io:
PatchKerasModelIO.update_current_task(task)
PatchTensorflowModelIO.update_current_task(task)
PatchTensorflow2ModelIO.update_current_task(task)
class IsTensorboardInit(object):
_tensorboard_initialized = False
@classmethod
def tensorboard_used(cls):
return cls._tensorboard_initialized
@classmethod
def set_tensorboard_used(cls):
cls._tensorboard_initialized = True
@classmethod
def clear_tensorboard_used(cls):
cls._tensorboard_initialized = False
@staticmethod
def _patched_tb__init__(original_init, self, *args, **kwargs):
IsTensorboardInit._tensorboard_initialized = True
return original_init(self, *args, **kwargs)
# noinspection PyProtectedMember
class WeightsGradientHistHelper(object):
def __init__(self, logger, report_freq=100, histogram_update_freq_multiplier=10, histogram_granularity=50):
self._logger = logger
self.report_freq = report_freq
self._histogram_granularity = histogram_granularity
self._histogram_update_freq_multiplier = histogram_update_freq_multiplier
self._histogram_update_call_counter = 0
self._hist_report_cache = {}
self._hist_x_granularity = 50
@staticmethod
def _sample_histograms(_hist_iters, _histogram_granularity):
# re-sample history based on distribution of samples across time (steps)
ratio = ((_hist_iters[-1] - _hist_iters[_histogram_granularity]) /
(_hist_iters[_histogram_granularity - 1] - _hist_iters[0])) if \
_hist_iters.size > _histogram_granularity else 0.
cur_idx_below = np.arange(0, min(_hist_iters.size, _histogram_granularity - 1))
np.random.shuffle(cur_idx_below)
cur_idx_below = cur_idx_below[:int(_histogram_granularity * (1.0 - ratio / (1 + ratio)) + 0.5)]
if ratio > 0.0:
cur_idx_above = np.arange(_histogram_granularity - 1, _hist_iters.size)
np.random.shuffle(cur_idx_above)
cur_idx_above = cur_idx_above[:int(_histogram_granularity * ratio / (1 + ratio))]
else:
cur_idx_above = np.array([])
_cur_idx = np.unique(np.sort(np.concatenate((cur_idx_below, cur_idx_above)).astype(np.int)))
return _cur_idx
def add_histogram(self, title, series, step, hist_data):
# only collect histogram every specific interval
self._histogram_update_call_counter += 1
if self._histogram_update_call_counter % self.report_freq != 0 or \
self._histogram_update_call_counter < self.report_freq - 1:
return None
if isinstance(hist_data, dict):
pass
elif isinstance(hist_data, np.ndarray) and len(hist_data.shape) == 2 and np.atleast_2d(hist_data).shape[1] == 3:
# prepare the dictionary, assume numpy
# hist_data['bucketLimit'] is the histogram bucket right side limit, meaning X axis
# hist_data['bucket'] is the histogram height, meaning the Y axis
# notice hist_data[:, 1] is the right side limit, for backwards compatibility we take the left side
hist_data = {'bucketLimit': hist_data[:, 0].tolist(), 'bucket': hist_data[:, 2].tolist()}
else:
# assume we have to do the histogram on the data
hist_data = np.histogram(hist_data, bins=32)
hist_data = {'bucketLimit': hist_data[1].tolist(), 'bucket': hist_data[0].tolist()}
self._add_histogram(title=title, series=series, step=step, hist_data=hist_data)
def _add_histogram(self, title, series, step, hist_data):
# only collect histogram every specific interval
self._histogram_update_call_counter += 1
if self._histogram_update_call_counter % self.report_freq != 0 or \
self._histogram_update_call_counter < self.report_freq - 1:
return None
# generate forward matrix of the histograms
# Y-axis (rows) is iteration (from 0 to current Step)
# X-axis averaged bins (conformed sample 'bucketLimit')
# Z-axis actual value (interpolated 'bucket')
step = EventTrainsWriter._fix_step_counter(title, series, step)
# get histograms from cache
hist_list, hist_iters, minmax = self._hist_report_cache.get((title, series), ([], np.array([]), None))
# resample data so we are always constrained in number of histogram we keep
if hist_iters.size >= self._histogram_granularity ** 2:
idx = self._sample_histograms(hist_iters, self._histogram_granularity)
hist_iters = hist_iters[idx]
hist_list = [hist_list[i] for i in idx]
# check if current sample is not already here (actually happens some times)
if step in hist_iters:
return None
# add current sample, if not already here
hist_iters = np.append(hist_iters, step)
# hist_data['bucketLimit'] is the histogram bucket right side limit, meaning X axis
# hist_data['bucket'] is the histogram height, meaning the Y axis
hist = np.array(list(zip(hist_data['bucketLimit'], hist_data['bucket'])), dtype=np.float32)
hist = hist[~np.isinf(hist[:, 0]), :]
hist_list.append(hist)
# keep track of min/max values of histograms (for later re-binning)
if minmax is None:
minmax = hist[:, 0].min(), hist[:, 0].max()
else:
# noinspection PyUnresolvedReferences
minmax = min(minmax[0], hist[:, 0].min()), max(minmax[1], hist[:, 0].max())
# update the cache
self._hist_report_cache[(title, series)] = hist_list, hist_iters, minmax
# only report histogram every specific interval, but do report the first few, so you know there are histograms
if hist_iters.size < 1 or (hist_iters.size >= self._histogram_update_freq_multiplier and
hist_iters.size % self._histogram_update_freq_multiplier != 0):
return None
# resample histograms on a unified bin axis +- epsilon
_epsilon = abs((minmax[1] - minmax[0])/float(self._hist_x_granularity))
if _epsilon == 0:
_epsilon = 0.01
_minmax = minmax[0] - _epsilon, minmax[1] + _epsilon
prev_xedge = np.arange(start=_minmax[0],
step=(_minmax[1] - _minmax[0]) / float(self._hist_x_granularity - 2), stop=_minmax[1])
# uniformly select histograms and the last one
cur_idx = self._sample_histograms(hist_iters, self._histogram_granularity)
report_hist = np.zeros(shape=(len(cur_idx), prev_xedge.size), dtype=np.float32)
for i, n in enumerate(cur_idx):
h = hist_list[n]
report_hist[i, :] = np.interp(prev_xedge, h[:, 0], h[:, 1], right=0, left=0)
yedges = hist_iters[cur_idx]
xedges = prev_xedge
        # if there is only a single line, add another zero line so the scatter plot has something to draw
if report_hist.shape[0] < 2:
report_hist = np.vstack((np.zeros_like(report_hist), report_hist))
# create 3d line (scatter) of histograms
skipx = max(1, int(xedges.size / 10))
skipy = max(1, int(yedges.size / 10))
xlabels = ['%.2f' % v if i % skipx == 0 else '' for i, v in enumerate(xedges[:-1])]
ylabels = [str(int(v)) if i % skipy == 0 else '' for i, v in enumerate(yedges)]
self._logger.report_surface(
title=title,
series=series,
iteration=0,
xaxis=' ',
yaxis='iteration',
xlabels=xlabels,
ylabels=ylabels,
matrix=report_hist,
camera=(-0.1, +1.3, 1.4))
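# A hypothetical driver for the helper above (the logger handle and the random weights are
# placeholders): every `report_freq`-th call converts the raw values into a 32-bin histogram
# and, once enough steps have accumulated, reports them as a single 3D surface
# (iteration vs. bin vs. count).
#
# helper = WeightsGradientHistHelper(logger=task.get_logger(), report_freq=1)
# for step in range(200):
#     helper.add_histogram(title='conv1', series='weights', step=step,
#                          hist_data=np.random.randn(10000))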
# noinspection PyMethodMayBeStatic,PyProtectedMember,SpellCheckingInspection
class EventTrainsWriter(object):
"""
    TF SummaryWriter implementation that converts TensorBoard summaries into
    ClearML events and reports those events (metrics) to a ClearML task (logger).
"""
__main_task = None
_add_lock = threading.RLock()
_series_name_lookup = {}
# store all the created tensorboard writers in the system
    # this allows us to ask whether a certain title/series already exists on some EventWriter
# and if it does, then we add to the series name the last token from the logdir
# (so we can differentiate between the two)
    # key, value: key=hash((title, series)), value=EventTrainsWriter._id
_title_series_writers_lookup = {}
_event_writers_id_to_logdir = {}
# Protect against step (iteration) reuse, for example,
# steps counter inside an epoch, but wrapping around when epoch ends
# i.e. step = 0..100 then epoch ends and again step = 0..100
# We store the first report per title/series combination, and if wraparound occurs
# we synthetically continue to increase the step/iteration based on the previous epoch counter
# example: _title_series_wraparound_counter[('title', 'series')] =
# {'first_step':None, 'last_step':None, 'adjust_counter':0,}
_title_series_wraparound_counter = {}
@property
def variants(self):
return self._variants
def prepare_report(self):
return self.variants.copy()
def tag_splitter(self, tag, num_split_parts, split_char='/', join_char='_', default_title='variant',
logdir_header='series', auto_reduce_num_split=False, force_add_prefix=None):
"""
Split a tf.summary tag line to variant and metric.
Variant is the first part of the split tag, metric is the second.
:param str tag:
:param int num_split_parts:
:param str split_char: a character to split the tag on
        :param str join_char: a character to join the splits
:param str default_title: variant to use in case no variant can be inferred automatically
        :param str logdir_header: if 'series_last' then series=header: series, if 'series' then series=series :header,
            if 'title_last' then title=header title, if 'title' then title=title header
        :param bool auto_reduce_num_split: if True and the tag splits into fewer parts than requested,
            the requested number of split parts is adjusted accordingly.
:param str force_add_prefix: always add the prefix to the series name
:return: (str, str) variant and metric
"""
splitted_tag = tag.split(split_char)
if auto_reduce_num_split and num_split_parts > len(splitted_tag) - 1:
num_split_parts = max(1, len(splitted_tag) - 1)
series = join_char.join(splitted_tag[-num_split_parts:])
title = join_char.join(splitted_tag[:-num_split_parts]) or default_title
if force_add_prefix:
series = str(force_add_prefix)+series
# check if we already decided that we need to change the title/series
graph_id = hash((title, series))
if graph_id in self._graph_name_lookup:
return self._graph_name_lookup[graph_id]
# check if someone other than us used this combination
with self._add_lock:
event_writer_id = self._title_series_writers_lookup.get(graph_id, None)
if not event_writer_id:
# put us there
self._title_series_writers_lookup[graph_id] = self._id
elif event_writer_id != self._id:
# if there is someone else, change our series name and store us
org_series = series
org_title = title
other_logdir = self._event_writers_id_to_logdir[event_writer_id]
                split_logdir = self._logdir.split('/')
                unique_logdir = set(split_logdir) - set(other_logdir.split('/'))
                header = '/'.join(s for s in split_logdir if s in unique_logdir)
if logdir_header == 'series_last':
series = header + ': ' + series
elif logdir_header == 'series':
series = series + ' :' + header
elif logdir_header == 'title':
title = title + ' ' + header
else: # logdir_header == 'title_last':
title = header + ' ' + title
graph_id = hash((title, series))
# check if for some reason the new series is already occupied
new_event_writer_id = self._title_series_writers_lookup.get(graph_id)
if new_event_writer_id is not None and new_event_writer_id != self._id:
# well that's about it, nothing else we could do
if logdir_header == 'series_last':
series = str(self._logdir) + ': ' + org_series
elif logdir_header == 'series':
series = org_series + ' :' + str(self._logdir)
elif logdir_header == 'title':
title = org_title + ' ' + str(self._logdir)
else: # logdir_header == 'title_last':
title = str(self._logdir) + ' ' + org_title
graph_id = hash((title, series))
self._title_series_writers_lookup[graph_id] = self._id
# store for next time
self._graph_name_lookup[graph_id] = (title, series)
return title, series
def __init__(self, logger, logdir=None, report_freq=100, image_report_freq=None,
histogram_update_freq_multiplier=10, histogram_granularity=50, max_keep_images=None):
"""
        Create a compatible ClearML backend for the TensorFlow SummaryToEventTransformer
Everything will be serialized directly to the ClearML backend, instead of to the standard TF FileWriter
:param logger: The task.logger to use for sending the metrics (def: task.get_logger())
:param report_freq: How often to update the statistics values
:param image_report_freq: How often to upload images (step % image_update_freq == 0)
:param histogram_update_freq_multiplier: How often to upload histogram
(step//update_freq) % histogram_update_freq_multiplier == 0
:param histogram_granularity: How many histograms (lines) to display in the 3d histogram plot
:param max_keep_images: Maximum number of images to save before starting to reuse files (per title/metric pair)
"""
# We are the events_writer, so that's what we'll pass
IsTensorboardInit.set_tensorboard_used()
self._logdir = logdir or ('unknown %d' % len(self._event_writers_id_to_logdir))
# conform directory structure to unix
if os.path.sep == '\\':
self._logdir = self._logdir.replace('\\', '/')
self._id = hash(self._logdir)
self._event_writers_id_to_logdir[self._id] = self._logdir
self.max_keep_images = max_keep_images
self.report_freq = report_freq
self.image_report_freq = image_report_freq if image_report_freq else report_freq
self.histogram_granularity = histogram_granularity
self.histogram_update_freq_multiplier = histogram_update_freq_multiplier
self._histogram_update_call_counter = 0
self._logger = logger
self._visualization_mode = 'RGB' # 'BGR'
self._variants = defaultdict(lambda: ())
self._scalar_report_cache = {}
self._hist_report_cache = {}
self._hist_x_granularity = 50
self._max_step = 0
self._graph_name_lookup = {}
self._generic_tensor_type_name_lookup = {}
self._grad_helper = WeightsGradientHistHelper(
logger=logger,
report_freq=report_freq,
histogram_update_freq_multiplier=histogram_update_freq_multiplier,
histogram_granularity=histogram_granularity
)
def _decode_image(self, img_str, width=None, height=None, color_channels=None):
# noinspection PyBroadException
try:
if isinstance(img_str, bytes):
imdata = img_str
else:
imdata = base64.b64decode(img_str)
output = BytesIO(imdata)
im = Image.open(output)
image = np.asarray(im)
output.close()
if height is not None and height > 0 and width is not None and width > 0:
# noinspection PyArgumentList
val = image.reshape(height, width, -1).astype(np.uint8)
else:
val = image.astype(np.uint8)
if val.ndim == 3 and val.shape[2] == 3:
if self._visualization_mode == 'BGR':
val = val[:, :, [2, 1, 0]]
else:
val = val
elif (val.ndim == 2) or (val.ndim == 3 and val.shape[2] == 1):
val = np.tile(np.atleast_3d(val), (1, 1, 3))
elif val.ndim == 3 and val.shape[2] == 4:
if self._visualization_mode == 'BGR':
val = val[:, :, [2, 1, 0]]
else:
val = val[:, :, [0, 1, 2]]
except KeyboardInterrupt:
raise
except Exception as e:
logger = LoggerRoot.get_base_logger(TensorflowBinding)
logger.warning('Failed decoding debug image [%s, %s, %s]' % (width, height, color_channels))
logger.warning('Error: %s' % e)
val = None
return val
def _add_image_numpy(self, tag, step, img_data_np, max_keep_images=None):
# type: (str, int, np.ndarray, int) -> ()
# only report images every specific interval
if step % self.image_report_freq != 0:
return None
if img_data_np is None:
return
# noinspection PyProtectedMember
title, series = self.tag_splitter(tag, num_split_parts=3, default_title='Images', logdir_header='title',
auto_reduce_num_split=True,
force_add_prefix=self._logger._get_tensorboard_series_prefix())
step = self._fix_step_counter(title, series, step)
if img_data_np.dtype != np.uint8:
# assume scale 0-1
img_data_np = (img_data_np * 255).astype(np.uint8)
# if 3d, pack into one big image
if img_data_np.ndim == 4:
dims = img_data_np.shape
stack_dim = int(np.sqrt(dims[0]))
# noinspection PyArgumentList
res = img_data_np.reshape(stack_dim, stack_dim, *dims[1:]).transpose((0, 2, 1, 3, 4))
tile_size_h = res.shape[0] * res.shape[1]
tile_size_w = res.shape[2] * res.shape[3]
img_data_np = res.reshape(tile_size_h, tile_size_w, -1)
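            # Illustrative example: a (16, 32, 32, 3) batch is packed into a 4x4 grid here,
            # producing a single (128, 128, 3) image for reporting.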
self._logger.report_image(
title=title,
series=series,
iteration=step,
image=img_data_np,
max_image_history=self.max_keep_images if max_keep_images is None else max_keep_images,
)
def _add_image(self, tag, step, img_data):
# only report images every specific interval
if step % self.image_report_freq != 0:
return None
width = img_data.get('width')
height = img_data.get('height')
colorspace = img_data.get('colorspace')
img_str = img_data['encodedImageString']
matrix = self._decode_image(img_str, width=width, height=height, color_channels=colorspace)
if matrix is None:
return
return self._add_image_numpy(tag=tag, step=step, img_data_np=matrix)
def _add_scalar(self, tag, step, scalar_data):
default_title = tag if not self._logger._get_tensorboard_auto_group_scalars() else 'Scalars'
series_per_graph = self._logger._get_tensorboard_single_series_per_graph()
# noinspection PyProtectedMember
title, series = self.tag_splitter(
tag, num_split_parts=1, default_title=default_title,
logdir_header='title' if series_per_graph else 'series_last',
force_add_prefix=self._logger._get_tensorboard_series_prefix()
)
step = self._fix_step_counter(title, series, step)
tag = self._get_add_scalars_event_tag(default_title)
possible_title = tag if series_per_graph else None
possible_tag = None if series_per_graph else tag
title = title + possible_title if possible_title else title
series = possible_tag or series
# update scalar cache
num, value = self._scalar_report_cache.get((title, series), (0, 0))
        # if the scalar value arrives as a string, it is probably a NaN serialized as text
if isinstance(scalar_data, six.string_types):
# noinspection PyBroadException
try:
scalar_data = float(scalar_data)
except Exception:
scalar_data = float('nan')
        # NaN != NaN, so a NaN value is stored as-is instead of being accumulated
self._scalar_report_cache[(title, series)] = \
(num + 1,
(value + scalar_data) if scalar_data == scalar_data else scalar_data)
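        # Illustrative example: with report_freq=5, four consecutive calls only accumulate
        # (count, running sum) in the cache; on a step divisible by 5 the mean is computed
        # below, reported, and the cache entry is reset to (0, 0).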
        # only report scalars every report_freq interval
if step % self.report_freq != 0:
return None
# calculate mean and zero cache
num, value = self._scalar_report_cache.get((title, series), (0, 0))
scalar_data = value / num
self._scalar_report_cache[(title, series)] = (0, 0)
self._logger.report_scalar(
title=title,
series=series,
iteration=step,
value=scalar_data,
)
def _add_histogram(self, tag, step, hist_data):
# noinspection PyProtectedMember
title, series = self.tag_splitter(tag, num_split_parts=1, default_title='Histograms',
logdir_header='series',
force_add_prefix=self._logger._get_tensorboard_series_prefix())
self._grad_helper.add_histogram(
title=title,
series=series,
step=step,
hist_data=hist_data
)
def _add_plot(self, tag, step, values, vdict):
# noinspection PyBroadException
try:
if values.get('floatVal'):
plot_values = np.array(values.get('floatVal'), dtype=np.float32)
else:
plot_values = np.frombuffer(base64.b64decode(values['tensorContent'].encode('utf-8')),
dtype=np.float32)
plot_values = plot_values.reshape((int(values['tensorShape']['dim'][0]['size']),
int(values['tensorShape']['dim'][1]['size'])))
if 'metadata' in vdict:
if tag not in self._series_name_lookup:
self._series_name_lookup[tag] = [(tag, vdict['metadata'].get('displayName', ''),
vdict['metadata']['pluginData']['pluginName'])]
else:
                    # this should not happen, maybe it's another run, let's increase the value
self._series_name_lookup[tag] += [(tag + '_%d' % (len(self._series_name_lookup[tag]) + 1),
vdict['metadata'].get('displayName', ''),
vdict['metadata']['pluginData']['pluginName'])]
tag, series, plugin_name = self._series_name_lookup.get(tag, [(tag, tag, '')])[-1]
if 'pr_curve' in plugin_name:
# our thresholds are evenly distributed, in that
# width = 1.0 / (num_thresholds - 1)
# thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]
num_thresholds = plot_values.shape[1]
width = 1.0 / num_thresholds
thresholds = np.arange(0.0, 1.0, width, dtype=plot_values.dtype)
data_points = ['Threshold ', 'TP ', 'FP ', 'TN ', 'FN ', 'Precision ', ' Recall']
series = [{'name': series, 'data': np.vstack((plot_values[-1], plot_values[-2])).T,
'labels': [''.join(data_points) + '<br> {:.3f} '.format(thresholds[j]) +
' '.join(['%-3.2f' % v for v in plot_values[:, j]]) for j in
range(num_thresholds)]}]
reverse_xaxis = False
else:
reverse_xaxis = False
series = [{'name': series, 'data': plot_values}]
self._logger.report_line_plot(title=tag, series=series, xaxis='', yaxis='',
iteration=step, reverse_xaxis=reverse_xaxis)
except Exception:
pass
def _add_audio(self, tag, step, values, audio_data=None):
        # only report audio every specific interval (reuses the image report frequency)
if step % self.image_report_freq != 0:
return None
if values:
audio_str = values['encodedAudioString']
audio_data = base64.b64decode(audio_str)
if audio_data is None:
return
# noinspection PyProtectedMember
title, series = self.tag_splitter(tag, num_split_parts=3, default_title='Audio', logdir_header='title',
auto_reduce_num_split=True,
force_add_prefix=self._logger._get_tensorboard_series_prefix())
step = self._fix_step_counter(title, series, step)
stream = BytesIO(audio_data)
if values:
file_extension = guess_extension(values['contentType']) or \
'.{}'.format(values['contentType'].split('/')[-1])
else:
# assume wav as default
file_extension = '.wav'
self._logger.report_media(
title=title,
series=series,
iteration=step,
stream=stream,
file_extension=file_extension,
max_history=self.max_keep_images,
)
def _add_text(self, tag, step, tensor_bytes):
# noinspection PyProtectedMember
title, series = self.tag_splitter(tag, num_split_parts=3, default_title='Text', logdir_header='title',
auto_reduce_num_split=True,
force_add_prefix=self._logger._get_tensorboard_series_prefix())
step = self._fix_step_counter(title, series, step)
text = tensor_bytes.decode('utf-8', errors='replace')
self._logger.report_media(
title=title,
series=series,
iteration=step,
stream=six.StringIO(text),
file_extension='.txt',
max_history=self.max_keep_images,
)
@staticmethod
def _fix_step_counter(title, series, step):
key = (title, series)
if key not in EventTrainsWriter._title_series_wraparound_counter:
EventTrainsWriter._title_series_wraparound_counter[key] = {'first_step': step, 'last_step': step,
'adjust_counter': 0}
return step
wraparound_counter = EventTrainsWriter._title_series_wraparound_counter[key]
        # we decide a wraparound occurred if the current step is less than 10% of the previous step
        # note: since the counter is an int and we want to avoid rounding errors, we double-check in the if below
if step < wraparound_counter['last_step'] and step < 0.9 * wraparound_counter['last_step']:
# adjust step base line
wraparound_counter['adjust_counter'] += wraparound_counter['last_step'] + (1 if step <= 0 else step)
# return adjusted step
wraparound_counter['last_step'] = step
return step + wraparound_counter['adjust_counter']
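    # Wraparound example (illustrative): if an epoch reported steps 0..99 and the next epoch
    # restarts at step 0, 'adjust_counter' becomes 100 and the following steps are reported
    # as 100, 101, ... so the iteration axis stays monotonic.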
def add_event(self, event, step=None, walltime=None, **_):
supported_metrics = {
'simpleValue', 'image', 'histo', 'tensor', 'audio'
}
def get_data(value_dict, metric_search_order):
data = None
metric_type = 'Unsupported'
for variant in metric_search_order:
data = value_dict.get(variant)
if data is not None:
metric_type = variant
break
return metric_type, data
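        # Illustrative example: get_data({'simpleValue': 0.5}, supported_metrics) returns
        # ('simpleValue', 0.5); a value dict with none of the supported keys returns ('Unsupported', None).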
# Support multiple threads accessing this instance (i.e. let TF/Keras do what they need)
with self._add_lock:
# TODO: add report frequency threshold (i.e. if we are sending too much data, increase the report_freq)
# we should measure reports per second and throttle back the reporting details accordingly
msg_dict = MessageToDict(event)
summary = msg_dict.get('summary')
if summary is None:
msg_dict.pop('step', None)
msg_dict.pop('wallTime', None)
keys_list = [key for key in msg_dict.keys() if len(key) > 0]
keys_list = ', '.join(keys_list)
LoggerRoot.get_base_logger(TensorflowBinding).debug(
'event summary not found, message type unsupported: %s' % keys_list)
return
value_dicts = summary.get('value')
# noinspection PyUnusedLocal
walltime = walltime or msg_dict.get('step')
step = step or msg_dict.get('step')
if step is None:
# when we start a new epoch there is no step in the msg_dict,
# we have to extract it manually
if hasattr(event, 'step'):
step = int(event.step)
else:
step = 0
LoggerRoot.get_base_logger(TensorflowBinding).debug(
'Received event without step, assuming step = {}'.format(step))
else:
step = int(step)
self._max_step = max(self._max_step, step)
if value_dicts is None:
LoggerRoot.get_base_logger(TensorflowBinding).debug("Summary arrived without 'value'")
return
for vdict in value_dicts:
tag = vdict.pop('tag', None)
if tag is None:
# we should not get here
LoggerRoot.get_base_logger(TensorflowBinding).debug(
'No tag for \'value\' existing keys %s' % ', '.join(vdict.keys()))
continue
metric, values = get_data(vdict, supported_metrics)
if metric == 'simpleValue':
self._add_scalar(tag=tag, step=step, scalar_data=values)
elif metric == 'histo':
self._add_histogram(tag=tag, step=step, hist_data=values)
elif metric == 'image':
self._add_image(tag=tag, step=step, img_data=values)
elif metric == 'audio':
self._add_audio(tag, step, values)
elif metric == 'tensor' and values.get('dtype') == 'DT_STRING':
# generic tensor
tensor_bytes = base64.b64decode('\n'.join(values['stringVal']))
plugin_type = self._generic_tensor_type_name_lookup.get(tag) or \
vdict.get('metadata', {}).get('pluginData', {}).get('pluginName', '').lower()
if plugin_type == 'audio':
self._generic_tensor_type_name_lookup[tag] = plugin_type
self._add_audio(tag, step, None, tensor_bytes)
elif plugin_type == 'text':
self._generic_tensor_type_name_lookup[tag] = plugin_type
self._add_text(tag, step, tensor_bytes)
else:
# we do not support it
pass
elif metric == 'tensor' and values.get('dtype') == 'DT_FLOAT':
self._add_plot(tag, step, values, vdict)
else:
LoggerRoot.get_base_logger(TensorflowBinding).debug(
'Event unsupported. tag = %s, vdict keys [%s]' % (tag, ', '.join(vdict.keys())))
continue
def get_logdir(self):
""" Returns a temporary directory name for compatibility with FileWriter. This directory is not actually used.
:return: '.'
"""
return '.'
def flush(self):
"""Flushes the event file to disk.
Call this method to make sure that all pending events have been written to
disk.
"""
self._logger.flush()
def close(self):
"""Flushes the event file to disk and close the file.
Call this method when you do not need the summary writer anymore.
"""
self._logger.flush()
def reopen(self):
"""Reopens the EventFileWriter.
Can be called after `close` to add more events in the same directory.
The events will go into a new events file.
Does nothing if the EventFileWriter was not closed.
"""
pass
def _get_add_scalars_event_tag(self, title_prefix):
"""
:param str title_prefix: the table title prefix that was added to the series.
:return: str same as tensorboard use
"""
# HACK - this is tensorboard Summary util function, original path:
# ~/torch/utils/tensorboard/summary.py
def _clean_tag(name):
import re as _re
# noinspection RegExpRedundantEscape
_INVALID_TAG_CHARACTERS = _re.compile(r'[^-/\w\.]')
if name is not None:
new_name = _INVALID_TAG_CHARACTERS.sub('_', name)
new_name = new_name.lstrip('/') # Remove leading slashes
if new_name != name:
LoggerRoot.get_base_logger(TensorflowBinding).debug(
'Summary name %s is illegal; using %s instead.' % (name, new_name))
name = new_name
return name
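        # Illustrative example: _clean_tag('runs/exp 1') returns 'runs/exp_1'
        # (invalid characters replaced with '_', leading slashes stripped).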
main_path = self._logdir
# noinspection PyBroadException
try:
main_path = _clean_tag(main_path)
origin_tag = main_path.rpartition("/")[2].replace(title_prefix, "", 1)
if title_prefix and origin_tag[0] == "_": # add_scalars tag
origin_tag = origin_tag[1:] # Remove the first "_" that was added by the main_tag in tensorboard
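                # Illustrative example (assumed logdir): 'runs/Scalars_loss' with title_prefix
                # 'Scalars' would yield origin_tag 'loss' here.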
else:
return ""
except Exception:
origin_tag = ""
return origin_tag
@classmethod
def update_current_task(cls, task):
if cls.__main_task != task:
with cls._add_lock:
cls._series_name_lookup = {}
cls._title_series_writers_lookup = {}
cls._event_writers_id_to_logdir = {}
cls._title_series_wraparound_counter = {}
cls.__main_task = task
# noinspection PyCallingNonCallable
class ProxyEventsWriter(object):
def __init__(self, events):
IsTensorboardInit.set_tensorboard_used()
self._events = events
def _get_sentinel_event(self):
ret = None
for ev in self._events:
if hasattr(ev, '_get_sentinel_event'):
# noinspection PyProtectedMember
ret = ev._get_sentinel_event()
return ret
def get_logdir(self):
ret = None
for ev in self._events:
if hasattr(ev, 'get_logdir'):
ret = ev.get_logdir()
return ret
def reopen(self):
ret = None
for ev in self._events:
if hasattr(ev, 'reopen'):
ret = ev.reopen()
return ret
def add_event(self, *args, **kwargs):
ret = None
for ev in self._events:
if hasattr(ev, 'add_event'):
ret = ev.add_event(*args, **kwargs)
return ret
def flush(self):
ret = None
for ev in self._events:
if hasattr(ev, 'flush'):
ret = ev.flush()
return ret
def close(self):
ret = None
for ev in self._events:
if hasattr(ev, 'close'):
ret = ev.close()
return ret
# noinspection PyPep8Naming
class PatchSummaryToEventTransformer(object):
__main_task = None
__original_getattribute = None
__original_getattributeX = None
_original_add_event = None
_original_add_eventT = None
_original_add_eventX = None
defaults_dict = dict(
report_freq=1, image_report_freq=1, histogram_update_freq_multiplier=5,
histogram_granularity=50)
@staticmethod
def trains_object(self):
if isinstance(self.event_writer, ProxyEventsWriter):
# noinspection PyProtectedMember
trains_writer = [e for e in self.event_writer._events if isinstance(e, EventTrainsWriter)]
return trains_writer[0] if trains_writer else None
elif isinstance(self.event_writer, EventTrainsWriter):
return self.event_writer
if not self.__dict__.get('_trains_defaults'):
self.__dict__['_trains_defaults'] = {}
return self.__dict__['_trains_defaults']
@staticmethod
def update_current_task(task, **kwargs):
PatchSummaryToEventTransformer.defaults_dict.update(kwargs)
PatchSummaryToEventTransformer.__main_task = task
# make sure we patched the SummaryToEventTransformer
PatchSummaryToEventTransformer._patch_summary_to_event_transformer()
PostImportHookPatching.add_on_import('tensorflow',
PatchSummaryToEventTransformer._patch_summary_to_event_transformer)
PostImportHookPatching.add_on_import('torch',
PatchSummaryToEventTransformer._patch_summary_to_event_transformer)
PostImportHookPatching.add_on_import('tensorboardX',
PatchSummaryToEventTransformer._patch_summary_to_event_transformer)
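    # Rough flow (as understood from the code below, not an authoritative description):
    # once a task is registered, __getattribute__ of the framework's SummaryToEventTransformer is
    # patched so that the first access to 'event_writer' wraps the original writer together with an
    # EventTrainsWriter inside a ProxyEventsWriter, duplicating every TensorBoard event to ClearML.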
@staticmethod
def _patch_summary_to_event_transformer():
if 'tensorflow' in sys.modules:
try:
from tensorflow.python.summary.writer.writer import SummaryToEventTransformer # noqa
# only patch once
if PatchSummaryToEventTransformer.__original_getattribute is None:
PatchSummaryToEventTransformer.__original_getattribute = SummaryToEventTransformer.__getattribute__
SummaryToEventTransformer.__getattribute__ = PatchSummaryToEventTransformer._patched_getattribute
setattr(SummaryToEventTransformer, 'clearml',
property(PatchSummaryToEventTransformer.trains_object))
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).debug(str(ex))
if 'torch' in sys.modules:
try:
# only patch once
if PatchSummaryToEventTransformer._original_add_eventT is None:
# noinspection PyUnresolvedReferences
from torch.utils.tensorboard.writer import FileWriter as FileWriterT # noqa
PatchSummaryToEventTransformer._original_add_eventT = FileWriterT.add_event
FileWriterT.add_event = PatchSummaryToEventTransformer._patched_add_eventT
setattr(FileWriterT, 'clearml', None)
except ImportError:
# this is a new version of TensorflowX
pass
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).debug(str(ex))
if 'tensorboardX' in sys.modules:
try:
# only patch once
if PatchSummaryToEventTransformer.__original_getattributeX is None:
# noinspection PyUnresolvedReferences
from tensorboardX.writer import SummaryToEventTransformer as SummaryToEventTransformerX # noqa
PatchSummaryToEventTransformer.__original_getattributeX = \
SummaryToEventTransformerX.__getattribute__
SummaryToEventTransformerX.__getattribute__ = PatchSummaryToEventTransformer._patched_getattributeX
setattr(SummaryToEventTransformerX, 'clearml',
property(PatchSummaryToEventTransformer.trains_object))
except ImportError:
                # this is a new version of tensorboardX
pass
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).debug(str(ex))
if PatchSummaryToEventTransformer.__original_getattributeX is None:
try:
# only patch once
if PatchSummaryToEventTransformer._original_add_eventX is None:
from tensorboardX.writer import FileWriter as FileWriterX # noqa
PatchSummaryToEventTransformer._original_add_eventX = FileWriterX.add_event
FileWriterX.add_event = PatchSummaryToEventTransformer._patched_add_eventX
setattr(FileWriterX, 'clearml', None)
except ImportError:
                # this is a new version of tensorboardX
pass
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).debug(str(ex))
@staticmethod
def _patched_add_eventT(self, *args, **kwargs):
if not hasattr(self, 'clearml') or not PatchSummaryToEventTransformer.__main_task:
return PatchSummaryToEventTransformer._original_add_eventT(self, *args, **kwargs)
if not self.clearml: # noqa
# noinspection PyBroadException
try:
logdir = self.get_logdir()
except Exception:
logdir = None
self.clearml = EventTrainsWriter(PatchSummaryToEventTransformer.__main_task.get_logger(),
logdir=logdir, **PatchSummaryToEventTransformer.defaults_dict)
# noinspection PyBroadException
try:
self.clearml.add_event(*args, **kwargs)
except Exception:
pass
return PatchSummaryToEventTransformer._original_add_eventT(self, *args, **kwargs)
@staticmethod
def _patched_add_eventX(self, *args, **kwargs):
if not hasattr(self, 'clearml') or not PatchSummaryToEventTransformer.__main_task:
return PatchSummaryToEventTransformer._original_add_eventX(self, *args, **kwargs)
if not self.clearml:
# noinspection PyBroadException
try:
logdir = self.get_logdir()
except Exception:
logdir = None
self.clearml = EventTrainsWriter(PatchSummaryToEventTransformer.__main_task.get_logger(),
logdir=logdir, **PatchSummaryToEventTransformer.defaults_dict)
# noinspection PyBroadException
try:
self.clearml.add_event(*args, **kwargs)
except Exception:
pass
return PatchSummaryToEventTransformer._original_add_eventX(self, *args, **kwargs)
@staticmethod
def _patched_getattribute(self, attr):
get_base = PatchSummaryToEventTransformer.__original_getattribute
return PatchSummaryToEventTransformer._patched_getattribute_(self, attr, get_base)
@staticmethod
def _patched_getattributeX(self, attr):
get_base = PatchSummaryToEventTransformer.__original_getattributeX
return PatchSummaryToEventTransformer._patched_getattribute_(self, attr, get_base)
@staticmethod
def _patched_getattribute_(self, attr, get_base):
        # no main task, zero chance we have a ClearML event logger
if PatchSummaryToEventTransformer.__main_task is None:
return get_base(self, attr)
        # check if we already have a ClearML event logger
__dict__ = get_base(self, '__dict__')
if 'event_writer' not in __dict__ or \
isinstance(__dict__['event_writer'], (ProxyEventsWriter, EventTrainsWriter)):
return get_base(self, attr)
# patch the events writer field, and add a double Event Logger (ClearML and original)
base_eventwriter = __dict__['event_writer']
# noinspection PyBroadException
try:
logdir = base_eventwriter.get_logdir()
except Exception:
logdir = None
defaults_dict = __dict__.get('_trains_defaults') or PatchSummaryToEventTransformer.defaults_dict
trains_event = EventTrainsWriter(PatchSummaryToEventTransformer.__main_task.get_logger(),
logdir=logdir, **defaults_dict)
# order is important, the return value of ProxyEventsWriter is the last object in the list
__dict__['event_writer'] = ProxyEventsWriter([trains_event, base_eventwriter])
return get_base(self, attr)
class _ModelAdapter(object):
""" Model adapter which extends the save and save_weights methods of a Keras Model instance """
_model = None # type: Any
_output_model = None # type: OutputModel
def __init__(self, model, output_model):
super(_ModelAdapter, self).__init__()
super(_ModelAdapter, self).__setattr__('_model', model)
super(_ModelAdapter, self).__setattr__('_output_model', output_model)
super(_ModelAdapter, self).__setattr__('_logger', LoggerRoot.get_base_logger(TensorflowBinding))
def __getattr__(self, attr):
return getattr(self._model, attr)
def __setattr__(self, key, value):
return setattr(self._model, key, value)
def save(self, filepath, overwrite=True, include_optimizer=True):
self._model.save(filepath=filepath, overwrite=overwrite, include_optimizer=include_optimizer)
        # TODO: auto generate new objects if the filename changes
try:
self._output_model.update_weights(weights_filename=filepath, auto_delete_file=True)
except Exception as ex:
self._logger.error(str(ex))
def save_weights(self, filepath, overwrite=True):
self._model.save_weights(filepath=filepath, overwrite=overwrite)
        # TODO: auto generate new objects if the filename changes
try:
self._output_model.update_weights(weights_filename=filepath, auto_delete_file=True)
except Exception as ex:
self._logger.error(str(ex))
class PatchModelCheckPointCallback(object):
__main_task = None
__original_getattribute = None
defaults_dict = dict(
config_text=None,
config_dict=None,
label_enumeration=None,
name=None,
comment=None)
@staticmethod
def trains_object(self):
if isinstance(self.model, _ModelAdapter):
# noinspection PyProtectedMember
return self.model._output_model
if not self.__dict__.get('_trains_defaults'):
self.__dict__['_trains_defaults'] = {}
return self.__dict__['_trains_defaults']
@staticmethod
def update_current_task(task, **kwargs):
PatchModelCheckPointCallback.defaults_dict.update(kwargs)
PatchModelCheckPointCallback.__main_task = task
        # make sure we patched the ModelCheckpoint callback
PatchModelCheckPointCallback._patch_model_checkpoint()
PostImportHookPatching.add_on_import('keras', PatchModelCheckPointCallback._patch_model_checkpoint)
PostImportHookPatching.add_on_import('tensorflow', PatchModelCheckPointCallback._patch_model_checkpoint)
@staticmethod
def _patch_model_checkpoint():
is_keras = 'keras' in sys.modules
is_tf_keras = 'tensorflow' in sys.modules
callbacks = None
if is_keras:
try:
import keras.callbacks as callbacks # noqa
except ImportError:
is_keras = False
if not is_keras and is_tf_keras:
try:
# hack: make sure tensorflow.__init__ is called
import tensorflow # noqa
import tensorflow.python.keras.callbacks as callbacks # noqa
except ImportError:
is_tf_keras = False
callbacks = None
# we have nothing, quit
if not is_keras and not is_tf_keras:
return
try:
# only patch once
if PatchModelCheckPointCallback.__original_getattribute is None and callbacks is not None:
PatchModelCheckPointCallback.__original_getattribute = callbacks.ModelCheckpoint.__getattribute__
callbacks.ModelCheckpoint.__getattribute__ = PatchModelCheckPointCallback._patched_getattribute
setattr(callbacks.ModelCheckpoint, 'clearml',
property(PatchModelCheckPointCallback.trains_object))
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
@staticmethod
def _patched_getattribute(self, attr):
get_base = PatchModelCheckPointCallback.__original_getattribute
        # no main task, zero chance we have a ClearML event logger
if PatchModelCheckPointCallback.__main_task is None:
return get_base(self, attr)
        # check if we already have a ClearML event logger
__dict__ = get_base(self, '__dict__')
if 'model' not in __dict__ or \
isinstance(__dict__['model'], _ModelAdapter):
return get_base(self, attr)
        # patch the model field, and wrap the model with a ClearML output-model adapter
base_model = __dict__['model']
defaults_dict = __dict__.get('_trains_defaults') or PatchModelCheckPointCallback.defaults_dict
output_model = OutputModel(
PatchModelCheckPointCallback.__main_task,
config_text=defaults_dict.get('config_text'),
config_dict=defaults_dict.get('config_dict'),
name=defaults_dict.get('name'),
comment=defaults_dict.get('comment'),
label_enumeration=defaults_dict.get('label_enumeration') or
PatchModelCheckPointCallback.__main_task.get_labels_enumeration(),
framework=Framework.keras,
)
output_model.set_upload_destination(
PatchModelCheckPointCallback.__main_task.get_output_destination(raise_on_error=False))
trains_model = _ModelAdapter(base_model, output_model)
        # replace the callback's model with the adapter so checkpoint saves are also reported
__dict__['model'] = trains_model
return get_base(self, attr)
# noinspection PyProtectedMember,PyUnresolvedReferences
class PatchTensorFlowEager(object):
__main_task = None
__original_fn_scalar = None
__original_fn_hist = None
__original_fn_image = None
__trains_event_writer = {}
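    # Presumably maps id(<TF summary writer resource>) -> EventTrainsWriter, so that each writer
    # created by tf.summary gets its own ClearML event writer (see _get_event_writer below).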
defaults_dict = dict(
report_freq=1, image_report_freq=1, histogram_update_freq_multiplier=5,
histogram_granularity=50)
@staticmethod
def update_current_task(task, **kwargs):
if task != PatchTensorFlowEager.__main_task:
PatchTensorFlowEager.__trains_event_writer = {}
PatchTensorFlowEager.defaults_dict.update(kwargs)
PatchTensorFlowEager.__main_task = task
        # make sure we patched the eager-mode summary ops
PatchTensorFlowEager._patch_summary_ops()
PostImportHookPatching.add_on_import('tensorflow', PatchTensorFlowEager._patch_summary_ops)
@staticmethod
def _patch_summary_ops():
if PatchTensorFlowEager.__original_fn_scalar is not None:
return
if 'tensorflow' in sys.modules:
try:
# hack: make sure tensorflow.__init__ is called
import tensorflow # noqa
from tensorflow.python.ops import gen_summary_ops # noqa
PatchTensorFlowEager.__original_fn_scalar = gen_summary_ops.write_scalar_summary
gen_summary_ops.write_scalar_summary = PatchTensorFlowEager._write_scalar_summary
PatchTensorFlowEager.__original_fn_image = gen_summary_ops.write_image_summary
gen_summary_ops.write_image_summary = PatchTensorFlowEager._write_image_summary
PatchTensorFlowEager.__original_fn_hist = gen_summary_ops.write_histogram_summary
gen_summary_ops.write_histogram_summary = PatchTensorFlowEager._write_hist_summary
PatchTensorFlowEager.__write_summary = gen_summary_ops.write_summary
gen_summary_ops.write_summary = PatchTensorFlowEager._write_summary
gen_summary_ops.create_summary_file_writer = partial(IsTensorboardInit._patched_tb__init__,
gen_summary_ops.create_summary_file_writer)
gen_summary_ops.create_summary_db_writer = partial(IsTensorboardInit._patched_tb__init__,
gen_summary_ops.create_summary_db_writer)
except ImportError:
pass
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).debug(str(ex))
@staticmethod
def _get_event_writer(writer):
if not PatchTensorFlowEager.__main_task:
return None
if not PatchTensorFlowEager.__trains_event_writer.get(id(writer)):
# noinspection PyBroadException
try:
logdir = writer.get_logdir()
except Exception:
                # check if we are in eager mode, let's get the global context logdir
# noinspection PyBroadException
try:
from tensorflow.python.eager import context # noqa
logdir = context.context().summary_writer._init_op_fn.keywords.get('logdir')
except Exception:
# noinspection PyBroadException
try:
from tensorflow.python.ops.summary_ops_v2 import _summary_state # noqa
logdir = _summary_state.writer._init_op_fn.keywords.get('logdir')
except Exception:
logdir = None
# noinspection PyBroadException
try:
if logdir is not None:
logdir = logdir.numpy().decode()
except Exception:
logdir = None
PatchTensorFlowEager.__trains_event_writer[id(writer)] = EventTrainsWriter(
logger=PatchTensorFlowEager.__main_task.get_logger(), logdir=logdir,
**PatchTensorFlowEager.defaults_dict)
return PatchTensorFlowEager.__trains_event_writer[id(writer)]
@staticmethod
def trains_object(self):
if not PatchTensorFlowEager.__trains_event_writer:
return None
return PatchTensorFlowEager.__trains_event_writer.get(
id(self), list(PatchTensorFlowEager.__trains_event_writer.values())[0])
@staticmethod
def _write_summary(writer, step, tensor, tag, summary_metadata, name=None, **kwargs):
event_writer = PatchTensorFlowEager._get_event_writer(writer)
        # make sure we can get the tensor values
if event_writer and isinstance(step, int) or hasattr(step, 'numpy'):
# noinspection PyBroadException
try:
plugin_type = summary_metadata.decode()
if plugin_type.endswith('scalars'):
event_writer._add_scalar(tag=str(tag),
step=int(step.numpy()) if not isinstance(step, int) else step,
scalar_data=tensor.numpy())
elif plugin_type.endswith('images'):
img_data_np = tensor.numpy()
PatchTensorFlowEager._add_image_event_helper(event_writer, img_data_np=img_data_np,
tag=tag, step=step, **kwargs)
elif plugin_type.endswith('histograms'):
event_writer._add_histogram(
tag=str(tag), step=int(step.numpy()) if not isinstance(step, int) else step,
hist_data=tensor.numpy()
)
elif plugin_type.endswith('text'):
event_writer._add_text(
tag=str(tag), step=int(step.numpy()) if not isinstance(step, int) else step,
tensor_bytes=tensor.numpy()
)
elif 'audio' in plugin_type:
audio_bytes_list = [a for a in tensor.numpy().flatten() if a]
for i, audio_bytes in enumerate(audio_bytes_list):
event_writer._add_audio(tag=str(tag) + ('/{}'.format(i) if len(audio_bytes_list) > 1 else ''),
step=int(step.numpy()) if not isinstance(step, int) else step,
values=None, audio_data=audio_bytes)
else:
pass # print('unsupported plugin_type', plugin_type)
except Exception:
pass
return PatchTensorFlowEager.__write_summary(writer, step, tensor, tag, summary_metadata, name, **kwargs)
@staticmethod
def _write_scalar_summary(writer, step, tag, value, name=None, **kwargs):
event_writer = PatchTensorFlowEager._get_event_writer(writer)
if event_writer and isinstance(step, int) or hasattr(step, 'numpy'):
try:
event_writer._add_scalar(tag=str(tag),
step=int(step.numpy()) if not isinstance(step, int) else step,
scalar_data=value.numpy())
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
elif event_writer:
def _report_summary_op(a_writer, a_step, a_tag, a_value, a_name=None, **_):
if isinstance(a_step, int) or hasattr(a_step, 'numpy'):
try:
str_tag = a_tag.numpy()
str_tag = str_tag.decode() if isinstance(str_tag, bytes) else str(str_tag)
event_writer._add_scalar(
tag=str_tag,
step=int(a_step.numpy()) if not isinstance(a_step, int) else a_step,
scalar_data=a_value.numpy())
except Exception as a_ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(
'_report_summary_op: {}'.format(str(a_ex)))
# this is a mix of eager and graph execution
try:
from tensorflow.python.eager import context as _context
if not _context.executing_eagerly():
from tensorflow import py_function
# just creating the operator is enough (for some reason)
# to make sure it is added into the execution tree.
                    # the operator itself will do the reporting to the backend
py_function(
_report_summary_op,
inp=[writer, step, tag, value, name], Tout=[])
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
return PatchTensorFlowEager.__original_fn_scalar(writer, step, tag, value, name, **kwargs)
@staticmethod
def _write_hist_summary(writer, step, tag, values, name, **kwargs):
event_writer = PatchTensorFlowEager._get_event_writer(writer)
if event_writer and isinstance(step, int) or hasattr(step, 'numpy'):
try:
event_writer._add_histogram(
tag=str(tag), step=int(step.numpy()) if not isinstance(step, int) else step,
hist_data=values.numpy()
)
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
elif event_writer:
def _report_summary_op(a_writer, a_step, a_tag, a_value, a_name=None, **_):
if isinstance(a_step, int) or hasattr(a_step, 'numpy'):
try:
str_tag = a_tag.numpy()
str_tag = str_tag.decode() if isinstance(str_tag, bytes) else str(str_tag)
event_writer._add_histogram(
tag=str_tag,
step=int(a_step.numpy()) if not isinstance(a_step, int) else a_step,
hist_data=a_value.numpy()
)
except Exception as a_ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(
'_report_summary_op: {}'.format(str(a_ex)))
# this is a mix of eager and graph execution
try:
from tensorflow.python.eager import context as _context
if not _context.executing_eagerly():
from tensorflow import py_function
# just creating the operator is enough (for some reason)
# to make sure it is added into the execution tree.
                    # the operator itself will do the reporting to the backend
py_function(
_report_summary_op,
inp=[writer, step, tag, values, name], Tout=[])
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
return PatchTensorFlowEager.__original_fn_hist(writer, step, tag, values, name, **kwargs)
@staticmethod
def _write_image_summary(writer, step, tag, tensor, bad_color, max_images, name, **kwargs):
event_writer = PatchTensorFlowEager._get_event_writer(writer)
if event_writer and isinstance(step, int) or hasattr(step, 'numpy'):
try:
PatchTensorFlowEager._add_image_event_helper(event_writer, img_data_np=tensor.numpy(),
tag=tag, step=step, **kwargs)
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
elif event_writer:
def _report_summary_op(a_writer, a_step, a_tag, a_tensor, a_bad_color, a_max_images, a_name=None, **_):
if isinstance(a_step, int) or hasattr(a_step, 'numpy'):
try:
str_tag = a_tag.numpy()
str_tag = str_tag.decode() if isinstance(str_tag, bytes) else str(str_tag)
PatchTensorFlowEager._add_image_event_helper(
event_writer, img_data_np=a_tensor.numpy(),
tag=str_tag, step=a_step, **kwargs)
except Exception as a_ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(
'_report_summary_op: {}'.format(str(a_ex)))
# this is a mix of eager and graph execution
try:
from tensorflow.python.eager import context as _context
if not _context.executing_eagerly():
from tensorflow import py_function
# just creating the operator is enough (for some reason)
# to make sure it is added into the execution tree.
                    # the operator itself will do the reporting to the backend
py_function(
_report_summary_op,
inp=[writer, step, tag, tensor, bad_color, max_images, name], Tout=[])
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
return PatchTensorFlowEager.__original_fn_image(
writer, step, tag, tensor, bad_color, max_images, name, **kwargs)
@staticmethod
def _add_image_event_helper(event_writer, img_data_np, tag, step, **kwargs):
if img_data_np.ndim == 1 and img_data_np.size >= 3 and \
(len(img_data_np[0]) < 10 and len(img_data_np[1]) < 10):
# this is just for making sure these are actually valid numbers
width = int(img_data_np[0].decode()) # noqa: F841
height = int(img_data_np[1].decode()) # noqa: F841
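            # Assumed layout of img_data_np in this branch:
            #   [b'<width>', b'<height>', b'<encoded image 1>', b'<encoded image 2>', ...]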
for i in range(2, img_data_np.size):
img_data = {'width': None, 'height': None,
'colorspace': 'RGB', 'encodedImageString': img_data_np[i]}
image_tag = str(tag) + '/sample_{}'.format(i - 2) if img_data_np.size > 3 else str(tag)
event_writer._add_image(tag=image_tag,
step=int(step.numpy()) if not isinstance(step, int) else step,
img_data=img_data)
else:
event_writer._add_image_numpy(tag=str(tag),
step=int(step.numpy()) if not isinstance(step, int) else step,
img_data_np=img_data_np,
max_keep_images=kwargs.get('max_images'))
@staticmethod
def _nothing_op(*_, **__):
"""Convenient else branch for when summaries do not record."""
from tensorflow.python.framework import constant_op
return constant_op.constant(False)
# noinspection PyPep8Naming,SpellCheckingInspection
class PatchKerasModelIO(object):
__main_task = None
__patched_keras = None
__patched_tensorflow = None
@staticmethod
def update_current_task(task, **_):
PatchKerasModelIO.__main_task = task
PatchKerasModelIO._patch_model_checkpoint()
PostImportHookPatching.add_on_import('tensorflow', PatchKerasModelIO._patch_model_checkpoint)
PostImportHookPatching.add_on_import('keras', PatchKerasModelIO._patch_model_checkpoint)
@staticmethod
def _patch_model_checkpoint():
if 'keras' in sys.modules and not PatchKerasModelIO.__patched_keras:
try:
from keras.engine.network import Network # noqa
except ImportError:
Network = None
try:
from keras.engine.functional import Functional # noqa
except ImportError:
Functional = None
try:
from keras.engine.sequential import Sequential # noqa
except ImportError:
Sequential = None
try:
from keras import models as keras_saving # noqa
except ImportError:
keras_saving = None
# check that we are not patching anything twice
if PatchKerasModelIO.__patched_tensorflow:
PatchKerasModelIO.__patched_keras = [
Network if PatchKerasModelIO.__patched_tensorflow[0] != Network else None,
Sequential if PatchKerasModelIO.__patched_tensorflow[1] != Sequential else None,
keras_saving if PatchKerasModelIO.__patched_tensorflow[2] != keras_saving else None,
Functional if PatchKerasModelIO.__patched_tensorflow[3] != Functional else None,
None,
None,
]
else:
PatchKerasModelIO.__patched_keras = [Network, Sequential, keras_saving, Functional, None, None]
PatchKerasModelIO._patch_io_calls(*PatchKerasModelIO.__patched_keras)
if 'tensorflow' in sys.modules and not PatchKerasModelIO.__patched_tensorflow:
try:
# hack: make sure tensorflow.__init__ is called
import tensorflow # noqa
from tensorflow.python.keras.engine.network import Network # noqa
except ImportError:
Network = None
try:
# hack: make sure tensorflow.__init__ is called
import tensorflow # noqa
from tensorflow.python.keras.engine.functional import Functional # noqa
except ImportError:
Functional = None
try:
# hack: make sure tensorflow.__init__ is called
import tensorflow # noqa
from tensorflow.python.keras.engine.sequential import Sequential # noqa
except ImportError:
Sequential = None
try:
# hack: make sure tensorflow.__init__ is called
import tensorflow # noqa
from tensorflow.python.keras import models as keras_saving_legacy # noqa
except ImportError:
keras_saving_legacy = None
try:
# hack: make sure tensorflow.__init__ is called
import tensorflow # noqa
from tensorflow.keras import models as keras_saving # noqa
except ImportError:
keras_saving = None
try:
# hack: make sure tensorflow.__init__ is called
import tensorflow # noqa
from tensorflow.python.keras.saving import hdf5_format as keras_hdf5 # noqa
except ImportError:
keras_hdf5 = None
if PatchKerasModelIO.__patched_keras:
PatchKerasModelIO.__patched_tensorflow = [
Network if PatchKerasModelIO.__patched_keras[0] != Network else None,
Sequential if PatchKerasModelIO.__patched_keras[1] != Sequential else None,
keras_saving if PatchKerasModelIO.__patched_keras[2] != keras_saving else None,
Functional if PatchKerasModelIO.__patched_keras[3] != Functional else None,
keras_saving_legacy if PatchKerasModelIO.__patched_keras[4] != keras_saving_legacy else None,
keras_hdf5 if PatchKerasModelIO.__patched_keras[5] != keras_hdf5 else None,
]
else:
PatchKerasModelIO.__patched_tensorflow = [
Network, Sequential, keras_saving, Functional, keras_saving_legacy, keras_hdf5]
PatchKerasModelIO._patch_io_calls(*PatchKerasModelIO.__patched_tensorflow)
@staticmethod
def _patch_io_calls(Network, Sequential, keras_saving, Functional, keras_saving_legacy=None, keras_hdf5=None):
try:
if Sequential is not None:
Sequential._updated_config = _patched_call(Sequential._updated_config,
PatchKerasModelIO._updated_config)
if hasattr(Sequential.from_config, '__func__'):
# noinspection PyUnresolvedReferences
Sequential.from_config = classmethod(_patched_call(Sequential.from_config.__func__,
PatchKerasModelIO._from_config))
else:
Sequential.from_config = _patched_call(Sequential.from_config, PatchKerasModelIO._from_config)
if Network is not None:
Network._updated_config = _patched_call(Network._updated_config, PatchKerasModelIO._updated_config)
if hasattr(Sequential.from_config, '__func__'):
# noinspection PyUnresolvedReferences
Network.from_config = classmethod(_patched_call(Network.from_config.__func__,
PatchKerasModelIO._from_config))
else:
Network.from_config = _patched_call(Network.from_config, PatchKerasModelIO._from_config)
Network.save = _patched_call(Network.save, PatchKerasModelIO._save)
Network.save_weights = _patched_call(Network.save_weights, PatchKerasModelIO._save_weights)
Network.load_weights = _patched_call(Network.load_weights, PatchKerasModelIO._load_weights)
elif Functional is not None:
Functional._updated_config = _patched_call(
Functional._updated_config, PatchKerasModelIO._updated_config)
if hasattr(Sequential.from_config, '__func__'):
# noinspection PyUnresolvedReferences
Functional.from_config = classmethod(_patched_call(Functional.from_config.__func__,
PatchKerasModelIO._from_config))
else:
Functional.from_config = _patched_call(Functional.from_config, PatchKerasModelIO._from_config)
Functional.save = _patched_call(Functional.save, PatchKerasModelIO._save)
Functional.save_weights = _patched_call(Functional.save_weights, PatchKerasModelIO._save_weights)
Functional.load_weights = _patched_call(Functional.load_weights, PatchKerasModelIO._load_weights)
if keras_saving is not None:
keras_saving.save_model = _patched_call(keras_saving.save_model, PatchKerasModelIO._save_model)
keras_saving.load_model = _patched_call(keras_saving.load_model, PatchKerasModelIO._load_model)
if keras_saving_legacy is not None:
keras_saving_legacy.save_model = _patched_call(
keras_saving_legacy.save_model, PatchKerasModelIO._save_model)
keras_saving_legacy.load_model = _patched_call(
keras_saving_legacy.load_model, PatchKerasModelIO._load_model)
if keras_hdf5 is not None:
keras_hdf5.save_weights_to_hdf5_group = _patched_call(
keras_hdf5.save_weights_to_hdf5_group, PatchKerasModelIO._save_weights)
keras_hdf5.load_weights_from_hdf5_group = _patched_call(
keras_hdf5.load_weights_from_hdf5_group, PatchKerasModelIO._load_weights)
keras_hdf5.load_weights_from_hdf5_group_by_name = _patched_call(
keras_hdf5.load_weights_from_hdf5_group_by_name, PatchKerasModelIO._load_weights)
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
@staticmethod
def _updated_config(original_fn, self):
config = original_fn(self)
# check if we have main task
if PatchKerasModelIO.__main_task is None:
return config
try:
# there is no actual file, so we create the OutputModel without one
            # check if the object already has an OutputModel
if not hasattr(self, 'trains_out_model'):
self.trains_out_model = []
            # derive a model name from the config (or fall back to the object name)
model_name_id = config.get('name', getattr(self, 'name', 'unknown'))
if self.trains_out_model:
self.trains_out_model[-1].config_dict = config
else:
# todo: support multiple models for the same task
self.trains_out_model.append(OutputModel(
task=PatchKerasModelIO.__main_task,
config_dict=config,
name=PatchKerasModelIO.__main_task.name + ' ' + model_name_id,
label_enumeration=PatchKerasModelIO.__main_task.get_labels_enumeration(),
framework=Framework.keras,
))
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
return config
@staticmethod
def _from_config(original_fn, *args, **kwargs):
try:
self = original_fn(*args, **kwargs)
except Exception as ex:
if not running_remotely():
raise ex
self = _Empty()
# check if we have main task
if PatchKerasModelIO.__main_task is None:
return self
try:
# check if object already has InputModel
if not hasattr(self, 'trains_in_model'):
self.trains_in_model = None
# get config
config_dict = kwargs['config'] if 'config' in kwargs else args[0]
# check if object already has InputModel
self.trains_in_model = InputModel.empty(
config_dict=config_dict,
label_enumeration=PatchKerasModelIO.__main_task.get_labels_enumeration(),
)
# todo: support multiple models for the same task
PatchKerasModelIO.__main_task.connect(self.trains_in_model)
# if we are running remotely we should deserialize the object
# because someone might have changed the configuration
# Hack: disabled
if False and running_remotely():
# reload the model
model_config = self.trains_in_model.config_dict
                # verify that this is the same model so we are not deserializing a different model
if (config_dict and config_dict.get('config') and model_config and model_config.get('config') and
config_dict.get('config').get('name') == model_config.get('config').get('name')) or \
(not config_dict and not model_config):
if 'config' in kwargs:
kwargs['config'] = model_config
else:
args = (model_config,) + args[1:]
model = original_fn(*args, **kwargs)
model.trains_in_model = self.trains_in_model
return model
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
return self
@staticmethod
def _load_weights(original_fn, self, *args, **kwargs):
# check if we have main task
if PatchKerasModelIO.__main_task is None:
return original_fn(self, *args, **kwargs)
# get filepath
if self and getattr(self, 'filename', None):
filepath = getattr(self, 'filename', None)
else:
filepath = kwargs['filepath'] if 'filepath' in kwargs else args[0]
# Hack: disabled
if False and running_remotely():
# register/load model weights
filepath = WeightsFileHandler.restore_weights_file(self, filepath, Framework.keras,
PatchKerasModelIO.__main_task)
if 'filepath' in kwargs:
kwargs['filepath'] = filepath
else:
args = (filepath,) + args[1:]
# load model
return original_fn(self, *args, **kwargs)
        # try to load the files; if something goes wrong, an exception will be raised before we register the file
model = original_fn(self, *args, **kwargs)
# register/load model weights
WeightsFileHandler.restore_weights_file(self, filepath, Framework.keras, PatchKerasModelIO.__main_task)
return model
@staticmethod
def _save(original_fn, self, *args, **kwargs):
if hasattr(self, 'trains_out_model') and self.trains_out_model:
# noinspection PyProtectedMember
self.trains_out_model[-1]._processed = False
original_fn(self, *args, **kwargs)
        # no need to call it explicitly, because the original save uses "save_model" which we overload
# noinspection PyProtectedMember
if not hasattr(self, 'trains_out_model') or not self.trains_out_model or \
not hasattr(self.trains_out_model[-1], '_processed') or not self.trains_out_model[-1]._processed:
PatchKerasModelIO._update_outputmodel(self, *args, **kwargs)
@staticmethod
def _save_weights(original_fn, self, *args, **kwargs):
original_fn(self, *args, **kwargs)
PatchKerasModelIO._update_outputmodel(self, *args, **kwargs)
@staticmethod
def _update_outputmodel(self, *args, **kwargs):
# check if we have main task
if PatchKerasModelIO.__main_task is None:
return
try:
# get filepath
if self and getattr(self, 'filename', None):
filepath = getattr(self, 'filename', None)
else:
filepath = kwargs['filepath'] if 'filepath' in kwargs else args[0]
# this will already generate an output model
# noinspection PyBroadException
try:
config = self._updated_config()
except Exception:
# we failed to convert the network to json, for some reason (most likely internal keras error)
config = {}
if filepath:
WeightsFileHandler.create_output_model(
self, filepath, Framework.keras, PatchKerasModelIO.__main_task,
config_obj=config or None, singlefile=True)
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
@staticmethod
def _save_model(original_fn, model, filepath, *args, **kwargs):
original_fn(model, filepath, *args, **kwargs)
if PatchKerasModelIO.__main_task:
PatchKerasModelIO._update_outputmodel(model, filepath)
@staticmethod
def _load_model(original_fn, filepath, *args, **kwargs):
if not PatchKerasModelIO.__main_task:
return original_fn(filepath, *args, **kwargs)
empty = _Empty()
# Hack: disabled
if False and running_remotely():
# register/load model weights
filepath = WeightsFileHandler.restore_weights_file(empty, filepath, Framework.keras,
PatchKerasModelIO.__main_task)
model = original_fn(filepath, *args, **kwargs)
else:
model = original_fn(filepath, *args, **kwargs)
# register/load model weights
WeightsFileHandler.restore_weights_file(empty, filepath, Framework.keras, PatchKerasModelIO.__main_task)
# update the input model object
if empty.trains_in_model:
# noinspection PyBroadException
try:
model.trains_in_model = empty.trains_in_model
except Exception:
pass
return model
class PatchTensorflowModelIO(object):
__main_task = None
__patched = None
@staticmethod
def update_current_task(task, **_):
PatchTensorflowModelIO.__main_task = task
PatchTensorflowModelIO._patch_model_checkpoint()
PostImportHookPatching.add_on_import('tensorflow', PatchTensorflowModelIO._patch_model_checkpoint)
@staticmethod
def _patch_model_checkpoint():
if PatchTensorflowModelIO.__patched:
return
if 'tensorflow' not in sys.modules:
return
PatchTensorflowModelIO.__patched = True
# noinspection PyBroadException
try:
# hack: make sure tensorflow.__init__ is called
import tensorflow # noqa
# noinspection PyUnresolvedReferences
from tensorflow.python.training.saver import Saver # noqa
# noinspection PyBroadException
try:
Saver.save = _patched_call(Saver.save, PatchTensorflowModelIO._save)
except Exception:
pass
# noinspection PyBroadException
try:
Saver.restore = _patched_call(Saver.restore, PatchTensorflowModelIO._restore)
except Exception:
pass
except ImportError:
pass
except Exception:
LoggerRoot.get_base_logger(TensorflowBinding).debug('Failed patching tensorflow')
# noinspection PyBroadException
try:
# make sure we import the correct version of save
import tensorflow # noqa
from tensorflow.saved_model import save # noqa
# actual import
from tensorflow.python.saved_model import save as saved_model # noqa
except ImportError:
# noinspection PyBroadException
try:
# make sure we import the correct version of save
import tensorflow # noqa
from tensorflow.saved_model.experimental import save # noqa
# actual import
import tensorflow.saved_model.experimental as saved_model # noqa
except ImportError:
saved_model = None
except Exception:
saved_model = None
except Exception:
saved_model = None
if saved_model is not None:
saved_model.save = _patched_call(saved_model.save, PatchTensorflowModelIO._save_model)
# noinspection PyBroadException
try:
# make sure we import the correct version of save
import tensorflow # noqa
# actual import
from tensorflow.saved_model import load # noqa
# noinspection PyUnresolvedReferences
import tensorflow.saved_model as saved_model_load # noqa
saved_model_load.load = _patched_call(saved_model_load.load, PatchTensorflowModelIO._load)
except ImportError:
pass
except Exception:
LoggerRoot.get_base_logger(TensorflowBinding).debug('Failed patching tensorflow')
# noinspection PyBroadException
try:
# make sure we import the correct version of save
import tensorflow # noqa
# actual import
# noinspection PyUnresolvedReferences
from tensorflow.saved_model import loader as loader1 # noqa
loader1.load = _patched_call(loader1.load, PatchTensorflowModelIO._load)
except ImportError:
pass
except Exception:
LoggerRoot.get_base_logger(TensorflowBinding).debug('Failed patching tensorflow')
# noinspection PyBroadException
try:
# make sure we import the correct version of save
import tensorflow # noqa
# actual import
# noinspection PyUnresolvedReferences
from tensorflow.compat.v1.saved_model import loader as loader2 # noqa
loader2.load = _patched_call(loader2.load, PatchTensorflowModelIO._load)
except ImportError:
pass
except Exception:
LoggerRoot.get_base_logger(TensorflowBinding).debug('Failed patching tensorflow')
# noinspection PyBroadException
try:
import tensorflow # noqa
from tensorflow.train import Checkpoint # noqa
# noinspection PyBroadException
try:
Checkpoint.save = _patched_call(Checkpoint.save, PatchTensorflowModelIO._ckpt_save)
except Exception:
pass
# noinspection PyBroadException
try:
Checkpoint.restore = _patched_call(Checkpoint.restore, PatchTensorflowModelIO._ckpt_restore)
except Exception:
pass
# noinspection PyBroadException
try:
Checkpoint.write = _patched_call(Checkpoint.write, PatchTensorflowModelIO._ckpt_write)
except Exception:
pass
except ImportError:
pass
except Exception:
LoggerRoot.get_base_logger(TensorflowBinding).debug('Failed patching tensorflow')
@staticmethod
def _save(original_fn, self, sess, save_path, *args, **kwargs):
saved_path = original_fn(self, sess, save_path, *args, **kwargs)
if not saved_path:
return saved_path
# store output Model
return WeightsFileHandler.create_output_model(self, saved_path, Framework.tensorflow,
PatchTensorflowModelIO.__main_task)
@staticmethod
def _save_model(original_fn, obj, export_dir, *args, **kwargs):
original_fn(obj, export_dir, *args, **kwargs)
# store output Model
WeightsFileHandler.create_output_model(obj, export_dir, Framework.tensorflow,
PatchTensorflowModelIO.__main_task)
@staticmethod
def _restore(original_fn, self, sess, save_path, *args, **kwargs):
if PatchTensorflowModelIO.__main_task is None:
return original_fn(self, sess, save_path, *args, **kwargs)
# Hack: disabled
if False and running_remotely():
# register/load model weights
save_path = WeightsFileHandler.restore_weights_file(self, save_path, Framework.tensorflow,
PatchTensorflowModelIO.__main_task)
# load model
return original_fn(self, sess, save_path, *args, **kwargs)
        # load the model; if something goes wrong, an exception will be raised before we register the input model
model = original_fn(self, sess, save_path, *args, **kwargs)
# register/load model weights
WeightsFileHandler.restore_weights_file(self, save_path, Framework.tensorflow,
PatchTensorflowModelIO.__main_task)
return model
@staticmethod
def _load(original_fn, sess, tags, export_dir, *args, **saver_kwargs):
if PatchTensorflowModelIO.__main_task is None:
return original_fn(sess, tags, export_dir, *args, **saver_kwargs)
# register input model
empty = _Empty()
# Hack: disabled
if False and running_remotely():
export_dir = WeightsFileHandler.restore_weights_file(empty, export_dir, Framework.tensorflow,
PatchTensorflowModelIO.__main_task)
model = original_fn(sess, tags, export_dir, *args, **saver_kwargs)
else:
# try to load model before registering, it might fail
model = original_fn(sess, tags, export_dir, *args, **saver_kwargs)
WeightsFileHandler.restore_weights_file(empty, export_dir, Framework.tensorflow,
PatchTensorflowModelIO.__main_task)
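        # if an input model was registered, keep a reference to it on the loaded object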
if empty.trains_in_model:
# noinspection PyBroadException
try:
model.trains_in_model = empty.trains_in_model
except Exception:
pass
return model
@staticmethod
def _ckpt_save(original_fn, self, file_prefix, *args, **kwargs):
checkpoint_path = original_fn(self, file_prefix, *args, **kwargs)
if PatchTensorflowModelIO.__main_task is None:
return checkpoint_path
WeightsFileHandler.create_output_model(self, checkpoint_path, Framework.tensorflow,
PatchTensorflowModelIO.__main_task)
return checkpoint_path
@staticmethod
def _ckpt_write(original_fn, self, file_prefix, *args, **kwargs):
checkpoint_path = original_fn(self, file_prefix, *args, **kwargs)
if PatchTensorflowModelIO.__main_task is None:
return checkpoint_path
WeightsFileHandler.create_output_model(self, checkpoint_path, Framework.tensorflow,
PatchTensorflowModelIO.__main_task)
return checkpoint_path
@staticmethod
def _ckpt_restore(original_fn, self, save_path, *args, **kwargs):
if PatchTensorflowModelIO.__main_task is None:
return original_fn(self, save_path, *args, **kwargs)
# register input model
empty = _Empty()
# Hack: disabled
if False and running_remotely():
save_path = WeightsFileHandler.restore_weights_file(empty, save_path, Framework.tensorflow,
PatchTensorflowModelIO.__main_task)
model = original_fn(self, save_path, *args, **kwargs)
else:
# try to load model before registering it, in case it fails.
model = original_fn(self, save_path, *args, **kwargs)
WeightsFileHandler.restore_weights_file(empty, save_path, Framework.tensorflow,
PatchTensorflowModelIO.__main_task)
if empty.trains_in_model:
# noinspection PyBroadException
try:
model.trains_in_model = empty.trains_in_model
except Exception:
pass
return model
class PatchTensorflow2ModelIO(object):
__main_task = None
__patched = None
@staticmethod
def update_current_task(task, **_):
PatchTensorflow2ModelIO.__main_task = task
PatchTensorflow2ModelIO._patch_model_checkpoint()
PostImportHookPatching.add_on_import('tensorflow', PatchTensorflow2ModelIO._patch_model_checkpoint)
@staticmethod
def _patch_model_checkpoint():
if PatchTensorflow2ModelIO.__patched:
return
if 'tensorflow' not in sys.modules:
return
PatchTensorflow2ModelIO.__patched = True
# noinspection PyBroadException
try:
# hack: make sure tensorflow.__init__ is called
import tensorflow # noqa
from tensorflow.python.training.tracking import util # noqa
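            # TF2 object-based checkpoints go through TrackableSaver; wrap its save and
            # restore so they are reported like the v1 Saver calls above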
# noinspection PyBroadException
try:
util.TrackableSaver.save = _patched_call(util.TrackableSaver.save,
PatchTensorflow2ModelIO._save)
except Exception:
pass
# noinspection PyBroadException
try:
util.TrackableSaver.restore = _patched_call(util.TrackableSaver.restore,
PatchTensorflow2ModelIO._restore)
except Exception:
pass
except ImportError:
pass
except Exception:
LoggerRoot.get_base_logger(TensorflowBinding).debug('Failed patching tensorflow v2')
@staticmethod
def _save(original_fn, self, file_prefix, *args, **kwargs):
model = original_fn(self, file_prefix, *args, **kwargs)
# store output Model
# noinspection PyBroadException
try:
WeightsFileHandler.create_output_model(self, file_prefix, Framework.tensorflow,
PatchTensorflow2ModelIO.__main_task)
except Exception:
pass
return model
@staticmethod
def _restore(original_fn, self, save_path, *args, **kwargs):
if PatchTensorflow2ModelIO.__main_task is None:
return original_fn(self, save_path, *args, **kwargs)
# Hack: disabled
if False and running_remotely():
# register/load model weights
# noinspection PyBroadException
try:
save_path = WeightsFileHandler.restore_weights_file(self, save_path, Framework.tensorflow,
PatchTensorflow2ModelIO.__main_task)
except Exception:
pass
# load model
return original_fn(self, save_path, *args, **kwargs)
        # load the model; if something goes wrong, an exception will be raised before we register the input model
model = original_fn(self, save_path, *args, **kwargs)
# register/load model weights
# noinspection PyBroadException
try:
WeightsFileHandler.restore_weights_file(self, save_path, Framework.tensorflow,
PatchTensorflow2ModelIO.__main_task)
except Exception:
pass
return model
|
[
"tensorflow.python.framework.constant_op.constant",
"base64.b64decode",
"collections.defaultdict",
"numpy.histogram",
"numpy.arange",
"numpy.interp",
"google.protobuf.json_format.MessageToDict",
"numpy.atleast_2d",
"tensorflow.py_function",
"numpy.zeros_like",
"tensorflow.python.ops.summary_ops_v2._summary_state.writer._init_op_fn.keywords.get",
"numpy.append",
"tensorflow.python.eager.context.executing_eagerly",
"numpy.random.shuffle",
"tensorflow.python.eager.context.context",
"functools.partial",
"io.BytesIO",
"threading.RLock",
"numpy.asarray",
"numpy.isinf",
"numpy.concatenate",
"re.compile",
"numpy.vstack",
"numpy.atleast_3d",
"PIL.Image.open",
"six.StringIO",
"numpy.array",
"mimetypes.guess_extension",
"numpy.sqrt"
] |
[((9151, 9168), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (9166, 9168), False, 'import threading\n'), ((2812, 2844), 'numpy.random.shuffle', 'np.random.shuffle', (['cur_idx_below'], {}), '(cur_idx_below)\n', (2829, 2844), True, 'import numpy as np\n'), ((5990, 6017), 'numpy.append', 'np.append', (['hist_iters', 'step'], {}), '(hist_iters, step)\n', (5999, 6017), True, 'import numpy as np\n'), ((16393, 16417), 'collections.defaultdict', 'defaultdict', (['(lambda : ())'], {}), '(lambda : ())\n', (16404, 16417), False, 'from collections import defaultdict\n'), ((26576, 26595), 'io.BytesIO', 'BytesIO', (['audio_data'], {}), '(audio_data)\n', (26583, 26595), False, 'from io import BytesIO\n'), ((66940, 66967), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(False)'], {}), '(False)\n', (66960, 66967), False, 'from tensorflow.python.framework import constant_op\n'), ((3001, 3056), 'numpy.arange', 'np.arange', (['(_histogram_granularity - 1)', '_hist_iters.size'], {}), '(_histogram_granularity - 1, _hist_iters.size)\n', (3010, 3056), True, 'import numpy as np\n'), ((3069, 3101), 'numpy.random.shuffle', 'np.random.shuffle', (['cur_idx_above'], {}), '(cur_idx_above)\n', (3086, 3101), True, 'import numpy as np\n'), ((3238, 3250), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3246, 3250), True, 'import numpy as np\n'), ((7874, 7930), 'numpy.interp', 'np.interp', (['prev_xedge', 'h[:, 0]', 'h[:, 1]'], {'right': '(0)', 'left': '(0)'}), '(prev_xedge, h[:, 0], h[:, 1], right=0, left=0)\n', (7883, 7930), True, 'import numpy as np\n'), ((17214, 17229), 'io.BytesIO', 'BytesIO', (['imdata'], {}), '(imdata)\n', (17221, 17229), False, 'from io import BytesIO\n'), ((17247, 17265), 'PIL.Image.open', 'Image.open', (['output'], {}), '(output)\n', (17257, 17265), False, 'from PIL import Image\n'), ((17286, 17300), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (17296, 17300), True, 'import numpy as np\n'), ((26091, 26118), 'base64.b64decode', 'base64.b64decode', (['audio_str'], {}), '(audio_str)\n', (26107, 26118), False, 'import base64\n'), ((29789, 29809), 'google.protobuf.json_format.MessageToDict', 'MessageToDict', (['event'], {}), '(event)\n', (29802, 29809), False, 'from google.protobuf.json_format import MessageToDict\n'), ((34682, 34708), 're.compile', '_re.compile', (['"""[^-/\\\\w\\\\.]"""'], {}), "('[^-/\\\\w\\\\.]')\n", (34693, 34708), True, 'import re as _re\n'), ((4437, 4469), 'numpy.histogram', 'np.histogram', (['hist_data'], {'bins': '(32)'}), '(hist_data, bins=32)\n', (4449, 4469), True, 'import numpy as np\n'), ((5432, 5444), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5440, 5444), True, 'import numpy as np\n'), ((17167, 17192), 'base64.b64decode', 'base64.b64decode', (['img_str'], {}), '(img_str)\n', (17183, 17192), False, 'import base64\n'), ((19474, 19490), 'numpy.sqrt', 'np.sqrt', (['dims[0]'], {}), '(dims[0])\n', (19481, 19490), True, 'import numpy as np\n'), ((24899, 24950), 'numpy.arange', 'np.arange', (['(0.0)', '(1.0)', 'width'], {'dtype': 'plot_values.dtype'}), '(0.0, 1.0, width, dtype=plot_values.dtype)\n', (24908, 24950), True, 'import numpy as np\n'), ((26644, 26682), 'mimetypes.guess_extension', 'guess_extension', (["values['contentType']"], {}), "(values['contentType'])\n", (26659, 26682), False, 'from mimetypes import guess_extension\n'), ((27717, 27735), 'six.StringIO', 'six.StringIO', (['text'], {}), '(text)\n', (27729, 27735), False, 'import six\n'), ((53950, 54045), 'functools.partial', 'partial', 
(['IsTensorboardInit._patched_tb__init__', 'gen_summary_ops.create_summary_file_writer'], {}), '(IsTensorboardInit._patched_tb__init__, gen_summary_ops.\n create_summary_file_writer)\n', (53957, 54045), False, 'from functools import partial\n'), ((54169, 54262), 'functools.partial', 'partial', (['IsTensorboardInit._patched_tb__init__', 'gen_summary_ops.create_summary_db_writer'], {}), '(IsTensorboardInit._patched_tb__init__, gen_summary_ops.\n create_summary_db_writer)\n', (54176, 54262), False, 'from functools import partial\n'), ((6305, 6325), 'numpy.isinf', 'np.isinf', (['hist[:, 0]'], {}), '(hist[:, 0])\n', (6313, 6325), True, 'import numpy as np\n'), ((8161, 8187), 'numpy.zeros_like', 'np.zeros_like', (['report_hist'], {}), '(report_hist)\n', (8174, 8187), True, 'import numpy as np\n'), ((3288, 3334), 'numpy.concatenate', 'np.concatenate', (['(cur_idx_below, cur_idx_above)'], {}), '((cur_idx_below, cur_idx_above))\n', (3302, 3334), True, 'import numpy as np\n'), ((17905, 17923), 'numpy.atleast_3d', 'np.atleast_3d', (['val'], {}), '(val)\n', (17918, 17923), True, 'import numpy as np\n'), ((60249, 60277), 'tensorflow.python.eager.context.executing_eagerly', '_context.executing_eagerly', ([], {}), '()\n', (60275, 60277), True, 'from tensorflow.python.eager import context as _context\n'), ((60583, 60661), 'tensorflow.py_function', 'py_function', (['_report_summary_op'], {'inp': '[writer, step, tag, value, name]', 'Tout': '[]'}), '(_report_summary_op, inp=[writer, step, tag, value, name], Tout=[])\n', (60594, 60661), False, 'from tensorflow import py_function\n'), ((62498, 62526), 'tensorflow.python.eager.context.executing_eagerly', '_context.executing_eagerly', ([], {}), '()\n', (62524, 62526), True, 'from tensorflow.python.eager import context as _context\n'), ((62832, 62911), 'tensorflow.py_function', 'py_function', (['_report_summary_op'], {'inp': '[writer, step, tag, values, name]', 'Tout': '[]'}), '(_report_summary_op, inp=[writer, step, tag, values, name], Tout=[])\n', (62843, 62911), False, 'from tensorflow import py_function\n'), ((64724, 64752), 'tensorflow.python.eager.context.executing_eagerly', '_context.executing_eagerly', ([], {}), '()\n', (64750, 64752), True, 'from tensorflow.python.eager import context as _context\n'), ((65058, 65164), 'tensorflow.py_function', 'py_function', (['_report_summary_op'], {'inp': '[writer, step, tag, tensor, bad_color, max_images, name]', 'Tout': '[]'}), '(_report_summary_op, inp=[writer, step, tag, tensor, bad_color,\n max_images, name], Tout=[])\n', (65069, 65164), False, 'from tensorflow import py_function\n'), ((3859, 3883), 'numpy.atleast_2d', 'np.atleast_2d', (['hist_data'], {}), '(hist_data)\n', (3872, 3883), True, 'import numpy as np\n'), ((25116, 25161), 'numpy.vstack', 'np.vstack', (['(plot_values[-1], plot_values[-2])'], {}), '((plot_values[-1], plot_values[-2]))\n', (25125, 25161), True, 'import numpy as np\n'), ((55392, 55448), 'tensorflow.python.ops.summary_ops_v2._summary_state.writer._init_op_fn.keywords.get', '_summary_state.writer._init_op_fn.keywords.get', (['"""logdir"""'], {}), "('logdir')\n", (55438, 55448), False, 'from tensorflow.python.ops.summary_ops_v2 import _summary_state\n'), ((55084, 55101), 'tensorflow.python.eager.context.context', 'context.context', ([], {}), '()\n', (55099, 55101), False, 'from tensorflow.python.eager import context\n')]
|
import pandas as pd
import hoki.hrdiagrams
import hoki.cmd
import hoki.load as load
from hoki.constants import BPASS_TIME_BINS
import warnings
from hoki.utils.exceptions import HokiFatalError, HokiUserWarning, HokiFormatError, HokiFormatWarning, HokiDeprecationWarning
from hoki.utils.hoki_object import HokiObject
from hoki.utils.hoki_dialogue import HokiDialogue
import numpy as np
Dialogue = HokiDialogue()
deprecation='\n\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!' \
'\nDeprecated since hoki v1.6 ' \
'\nPLEASE USE THE hoki.age SUBPACKAGE AND MODULES WITHIN. ' \
'\ne.g. from hoki.age.wizard import AgeWizard' \
'\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n'
warnings.warn(HokiDeprecationWarning(deprecation))
class AgeWizard(HokiObject):
"""
AgeWizard object
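
    Rough usage sketch (the model path below is a placeholder):

        wizard = AgeWizard(obs_df, model='path/to/hrs_file_or_pickled_cmd')
        wizard.calculate_sample_pdf()
        wizard.most_likely_age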
"""
def __init__(self, obs_df, model):
"""
Initialisation of the AgeWizard object
Parameters
----------
obs_df: pandas.DataFrame
Observational data. MUST contain a logT and logL column (for HRD comparison) or a col and mag column
(for CMD comparison)
        model: str or hoki.hrdiagrams.HRDiagrams() or hoki.cmd.CMD()
            Location of the modeled HRD or CMD. This can be an already instantiated HRDiagram or CMD() object, or a
path to an HR Diagram file or a pickled CMD.
"""
        # Making sure the observational properties are given in a format we can use.
if not isinstance(obs_df, pd.DataFrame):
raise HokiFormatError("Observations should be stored in a Data Frame")
if 'name' not in obs_df.columns:
warnings.warn("We expect the name of sources to be given in the 'name' column. "
"If I can't find names I'll make my own ;)", HokiFormatWarning)
        # Checking what format the model was given in:
if isinstance(model, hoki.hrdiagrams.HRDiagram):
self.model = model
elif isinstance(model, hoki.cmd.CMD):
self.model = model
elif isinstance(model, str) and 'hrs' in model:
self.model = load.model_output(model, hr_type='TL')
elif isinstance(model, str):
try:
self.model = load.unpickle(path=model)
except AssertionError:
print('-----------------')
print(
'HOKI DEBUGGER:\nThe model param should be a path to \na BPASS HRDiagram output file or pickled CMD,'
'or\na hoki.hrdiagrams.HRDiagram or a hoki.cmd.CMD')
print('-----------------')
raise HokiFatalError('model is ' + str(type(model)))
else:
print('-----------------')
print('HOKI DEBUGGER:\nThe model param should be a path to \na BPASS HRDiagram output file or pickled CMD,'
'or\na hoki.hrdiagrams.HRDiagram or a hoki.cmd.CMD')
print('-----------------')
raise HokiFatalError('model is ' + str(type(model)))
self.obs_df = obs_df
self.coordinates = find_coordinates(self.obs_df, self.model)
# This line is obsolete but might need revival if we ever want to add the not normalised distributions again
# self._distributions = calculate_distributions_normalised(self.obs_df, self.model)
self.pdfs = calculate_individual_pdfs(self.obs_df, self.model).fillna(0)
self.sources = self.pdfs.columns.to_list()
self.sample_pdf = None
self._most_likely_age = None
def calculate_sample_pdf(self, not_you=None, return_df=False):
self.sample_pdf = calculate_sample_pdf(self.pdfs, not_you=not_you)
if return_df: return self.sample_pdf
@property
def most_likely_age(self):
"""
Finds the most likely age by finding the max value in self.calculate_sample_pdf
"""
if self._most_likely_age is not None: return self._most_likely_age
if self.sample_pdf is None:
warnings.warn('self.multiplied_pdf is not yet defined -- running AgeWizard.combined_pdfs()',
HokiUserWarning)
self.calculate_sample_pdf()
index = self.sample_pdf.index[self.sample_pdf.pdf == max(self.sample_pdf.pdf)].tolist()
return self.t[index]
@property
def most_likely_ages(self):
"""
Finds the most likely ages for all the sources given in the obs_df DataFrame.
"""
# index = self.pdfs.drop('time_bins', axis=1).idxmax(axis=0).tolist()
index = self.pdfs.idxmax(axis=0).tolist()
return self.t[index]
def calculate_p_given_age_range(self, age_range):
"""
Calculates the probability that each source has age within age_range
Parameters
----------
age_range: list or tuple of 2 values
Minimum and Maximum age to consider (inclusive).
Returns
-------
numpy.array containing the probabilities.
"""
# Selects only the rows corresponding to the range age_range[0] to age_range[1] (inclusive)
# and then we sum the probabilities up for each column.
probability = calculate_p_given_age_range(self.pdfs, age_range)
return probability
def find_coordinates(obs_df, model):
"""
Finds the coordinates on a BPASS CMD or HRD that correspond to the given observations
Parameters
----------
obs_df: pandas.DataFrame
Observational data. MUST contain a logT and logL column (for HRD comparison) or a col and mag column
(for CMD comparison)
model: str or hoki.hrdiagrams.HRDiagrams() hoki.cmd.CMD()
        Location of the modeled HRD or CMD. This can be an already instantiated HRDiagram or CMD() object, or a
path to an HR Diagram file or a pickled CMD.
Returns
-------
"""
if isinstance(model, hoki.hrdiagrams.HRDiagram):
return _find_hrd_coordinates(obs_df, model)
elif isinstance(model, hoki.cmd.CMD):
return _find_cmd_coordinates(obs_df, model)
else:
raise HokiFormatError("The model should be an instance of hoki.hrdiagrams.HRDiagrams or hoki.cmd.CMD")
def _find_hrd_coordinates(obs_df, myhrd):
"""
Find the BPASS HRD coordinates that match the given observations
Parameters
----------
obs_df: pandas.DataFrame
Observational data. MUST contain a logT and logL column.
myhrd: hoki.hrdiagrams.HRDiagrams
BPASS HRDiagram
Returns
-------
Tuple of lists:(logT coordinates, logL coordinates)
"""
if not isinstance(obs_df, pd.DataFrame):
raise HokiFormatError("obs_df should be a pandas.DataFrame")
if not isinstance(myhrd, hoki.hrdiagrams.HRDiagram):
raise HokiFormatError("model should be an instance of hoki.hrdiagrams.HRDiagrams")
    # List of indices that locate the HRD position that most closely matches the observations
L_i = []
T_i = []
try:
logT, logL = obs_df.logT, obs_df.logL
except AttributeError:
raise HokiFormatError("obs_df should have a logT and a logL column")
# How this works:
# abs(model.L_coord-L)==abs(model.L_coord-L).min() *finds* the HRD location that most closely corresponds to obs.
# np.where(....)[0] *finds* the index of that location (which was originally in L or T space)
# int( ....) is juuust to make sure we get an integer because Python is a motherfucker and adds s.f. for no reason
# Then we append that index to our list.
for T, L in zip(logT, logL):
try:
T = float(T)
# Finds the index that is at the minimum distance in Temperature space and adds it to the list
T_i.append(int((np.where(abs(myhrd.T_coord - T) == abs(myhrd.T_coord - T).min()))[0]))
except TypeError:
T_i.append(int((np.where(abs(myhrd.T_coord - T) == abs(myhrd.T_coord - T).min()))[0][0]))
except ValueError:
warnings.warn("T=" + str(T) + " cannot be converted to a float", HokiUserWarning)
T_i.append(np.nan)
try:
L = float(L)
# Finds the index that is at the minimum distance in Luminosity space and adds it to the list
L_i.append(int((np.where(abs(myhrd.L_coord - L) == abs(myhrd.L_coord - L).min()))[0]))
except TypeError:
L_i.append(int((np.where(abs(myhrd.L_coord - L) == abs(myhrd.L_coord - L).min()))[0][0]))
except ValueError:
warnings.warn("L=" + str(L) + " cannot be converted to a float", HokiUserWarning)
L_i.append(np.nan)
return T_i, L_i
def _find_cmd_coordinates(obs_df, mycmd):
"""
    Find the BPASS CMD coordinates that match the given observations
Parameters
----------
obs_df: pandas.DataFrame
Observational data. MUST contain a col and mag column.
mycmd: hoki.cmd.CMD
BPASS CMD
Returns
-------
Tuple of lists:(colour coordinates, magnitude coordinates)
"""
if not isinstance(obs_df, pd.DataFrame):
raise HokiFormatError("obs_df should be a pandas.DataFrame")
if not isinstance(mycmd, hoki.cmd.CMD):
raise HokiFormatError("cmd should be an instance of hoki.cmd.CMD")
# List if indices that located the HRD location that most closely matches observations
col_i = []
mag_i = []
try:
colours, magnitudes = obs_df.col, obs_df.mag
except AttributeError:
raise HokiFormatError("obs_df should have a logT and a logL column")
# How this works:
# abs(model.L_coord-L)==abs(model.L_coord-L).min() *finds* the HRD location that most closely corresponds to obs.
# np.where(....)[0] *finds* the index
# of that location (which was originally in L or T space)
# int( ....) is juuust to make sure we get an integer because Python is a motherfucker and adds s.f. for no reason
# Then we append that index to our list.
for col, mag in zip(colours, magnitudes):
try:
col = float(col)
# Finds the index that is at the minimum distance in Colour space and adds it to the list
col_i.append(int((np.where(abs(mycmd.col_range - col) == abs(mycmd.col_range - col).min()))[0]))
except TypeError:
col_i.append(int((np.where(abs(mycmd.col_range - col) == abs(mycmd.col_range - col).min()))[0][0]))
except ValueError:
warnings.warn("Colour=" + str(col) + " cannot be converted to a float", HokiUserWarning)
col_i.append(np.nan)
try:
mag = float(mag)
# Finds the index that is at the minimum distance in Magnitude space and adds it to the list
mag_i.append(int((np.where(abs(mycmd.mag_range - mag) == abs(mycmd.mag_range - mag).min()))[0]))
except TypeError:
mag_i.append(int((np.where(abs(mycmd.mag_range - mag) == abs(mycmd.mag_range - mag).min()))[0][0]))
except ValueError:
warnings.warn("Magnitude=" + str(mag) + " cannot be converted to a float", HokiUserWarning)
mag_i.append(np.nan)
return col_i, mag_i
def normalise_1d(distribution, crop_the_future=False):
"""
    Simple function that divides by the sum of the 1D array or DataFrame given.
"""
if crop_the_future:
distribution = _crop_the_future(distribution)
area = np.sum([bin_t for bin_t in distribution])
return distribution / area
def _crop_the_future(distribution):
    # Anything above 10.1 is the future - time bin 42 and above must have proba == 0
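    # (BPASS has 51 log-age bins in total, so the mask below keeps 42 bins and zeroes the last 9)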
array_that_erases_the_future = np.array([1] * 42 + [0] * 9)
return np.array(distribution) * array_that_erases_the_future
def calculate_individual_pdfs(obs_df, model):
"""
Calculates the age pdfs of all the stars in the sample and returns them in a dataframe
Parameters
----------
obs_df: pandas.DataFrame
Dataframe containing the observational data
model: hoki.hrdiagrams.HRDiagrams or hoki.cmd.CMD
BPASS HRDiagram or CMD
Returns
-------
pandas.Dataframe containing the age pdfs of each star
"""
likelihoods = calculate_distributions_normalised(obs_df, model)
pdfs = []
for col in likelihoods.columns:
pdfs.append(normalise_1d(likelihoods[col].values))
return pd.DataFrame(np.array(pdfs).T, columns=likelihoods.columns)
def calculate_distributions(obs_df, model):
"""
Given observations and an HR Diagram, calculates the distribution across ages (not normalised)
Parameters
----------
obs_df: pandas.DataFrame
Observational data. MUST contain a logT and logL column.
model: hoki.hrdiagrams.HRDiagrams or hoki.cmd.CMD
BPASS HRDiagram or CMD
Returns
-------
Age Probability Distribution Functions in a pandas.DataFrame.
"""
    # Checking whether it's an HRD or a CMD
if isinstance(model, hoki.hrdiagrams.HRDiagram):
x_coord, y_coord = find_coordinates(obs_df, model)
if isinstance(model, hoki.cmd.CMD):
y_coord, x_coord = find_coordinates(obs_df, model) # yeah it's reversed... -_-
# If source names not given we make our own
try:
source_names = obs_df.name
except AttributeError:
warnings.warn("No source names given so I'll make my own", HokiUserWarning)
source_names = ["s" + str(i) for i in range(obs_df.shape[0])]
likelihoods = []
    # Time to calculate the pdfs
for i, name in zip(range(obs_df.shape[0]), source_names):
xi, yi = x_coord[i], y_coord[i] # just saving space
# Here we take care of the possibility that a coordinate is a NaN
if np.isnan(xi) or np.isnan(yi):
warnings.warn("NaN Value encountered in coordinates for source: " + name, HokiUserWarning)
likelihoods.append([0] * 51) # Probability is then 0 at all times - That star doesn't exist in our models
continue
        # Here we fill our not-yet-normalised distribution
distrib_i = []
for model_i in model:
# For each time step i, we retrieve the proba in CMD_i or HRD_i and fill our distribution element distrib_i
# with it. At the end of the for loop we have iterated over all 51 time bins
distrib_i.append(model_i[xi, yi])
# Then we normalise, so that we have proper probability distributions
# pdf_i = normalise_1d(distrib_i)
# finally our pdf is added to the list
likelihoods.append(distrib_i)
    # Our list of pdfs (which is a list of lists) is turned into a DataFrame with the source names as column names
likelihoods_df = pd.DataFrame((np.array(likelihoods)).T, columns=source_names)
# We add the time bins in there because it can make plotting extra convenient.
# distributions_df['time_bins'] = hoki.constants.BPASS_TIME_BINS
return likelihoods_df
def calculate_distributions_normalised(obs_df, model):
"""
Given observations and an HR Diagram, calculates the distribution across ages NORMALISED
Parameters
----------
obs_df: pandas.DataFrame
Observational data. MUST contain a logT and logL column.
model: hoki.hrdiagrams.HRDiagrams or hoki.cmd.CMD
BPASS HRDiagram or CMD
Returns
-------
Age Probability Distribution Functions in a pandas.DataFrame.
"""
    # Checking whether it's an HRD or a CMD
if isinstance(model, hoki.hrdiagrams.HRDiagram):
x_coord, y_coord = find_coordinates(obs_df, model)
if isinstance(model, hoki.cmd.CMD):
y_coord, x_coord = find_coordinates(obs_df, model) # yeah it's reversed... -_-
# If source names not given we make our own
try:
source_names = obs_df.name
except AttributeError:
warnings.warn("No source names given so I'll make my own", HokiUserWarning)
source_names = ["s" + str(i) for i in range(obs_df.shape[0])]
likelihoods = []
    # Time to calculate the pdfs
for i, name in zip(range(obs_df.shape[0]), source_names):
xi, yi = x_coord[i], y_coord[i] # just saving space
# Here we take care of the possibility that a coordinate is a NaN
if np.isnan(xi) or np.isnan(yi):
warnings.warn("NaN Value encountered in coordinates for source: " + name, HokiUserWarning)
likelihoods.append([0] * 51) # Probability is then 0 at all times - That star doesn't exist in our models
continue
        # Here we fill our not-yet-normalised distribution
distrib_i = []
for model_i in model:
# For each time step i, we retrieve the proba in CMD_i or HRD_i and fill our distribution element distrib_i
# with it. At the end of the for loop we have iterated over all 51 time bins
distrib_i.append(model_i[xi, yi])
# Then we normalise, so that we have proper probability distributions
# pdf_i = normalise_1d(distrib_i)
# finally our pdf is added to the list
likelihoods.append(normalise_1d(distrib_i, crop_the_future=True))
    # Our list of pdfs (which is a list of lists) is turned into a DataFrame with the source names as column names
likelihoods_df = pd.DataFrame((np.array(likelihoods)).T, columns=source_names)
# We add the time bins in there because it can make plotting extra convenient.
# distributions_df['time_bins'] = hoki.constants.BPASS_TIME_BINS
return likelihoods_df
def calculate_sample_pdf(distributions_df, not_you=None):
"""
    Adds together all the columns given in the DataFrame apart from the "time_bins" column
Parameters
----------
distributions_df: pandas.DataFrame
DataFrame containing probability distribution functions
not_you: list, optional
        List of the column names to ignore. Default is None so all the pdfs are combined
Returns
-------
Combined Probability Distribution Function in a pandas.DataFrame.
"""
assert isinstance(distributions_df, pd.DataFrame)
    # We start our combined pdf with a list of 0s, then add each pdf in sequence.
combined_pdf = [0] * distributions_df.shape[0]
# We want to allow the user to exclude certain columns -- we drop them here.
if not_you:
try:
distributions_df = distributions_df.drop(labels=not_you, axis=1)
except KeyError as e:
message = 'FEATURE DISABLED' + '\nKeyError' + str(
e) + '\nHOKI DIALOGUE: Your labels could not be dropped -- ' \
'all pdfs will be combined \nDEBUGGING ASSISTANT: ' \
'Make sure the labels you listed are spelled correctly:)'
warnings.warn(message, HokiUserWarning)
    # We also must be careful not to include the time bin column in the sum, so we build a list of the column names
# that remain after the "not_you" exclusion minus the time_bins column.
# columns = [col for col in distributions_df.columns if "time_bins" not in col]
columns = []
if "time_bins" not in distributions_df.columns:
for col in distributions_df.columns:
columns.append(col)
for col in columns:
# for col in distributions_df.columns:
combined_pdf += distributions_df[col].values
combined_df = pd.DataFrame(normalise_1d(combined_pdf) )
combined_df.columns = ['pdf']
return combined_df
def calculate_p_given_age_range(pdfs, age_range=None):
"""
Calculates the probability that each source has age within age_range
Parameters
----------
pdfs: pandas.DataFrame
Age Probability Distributions Functions
age_range: list or tuple of 2 values
Minimum and Maximum age to consider (inclusive).
Returns
-------
numpy.array containing the probabilities.
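
    For example, calculate_p_given_age_range(pdfs, [6.8, 7.2]) sums, for each
    source, the pdf values of the time bins whose log(age/yr) lies in [6.8, 7.2].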
"""
# Selects only the rows corresponding to the range age_range[0] to age_range[1] (inclusive)
# and then we sum the probabilities up for each column.
probability = pdfs[(np.round(BPASS_TIME_BINS, 2) >= min(age_range))
& (np.round(BPASS_TIME_BINS, 2) <= max(age_range))].sum()
return probability
|
[
"numpy.sum",
"hoki.utils.exceptions.HokiDeprecationWarning",
"numpy.isnan",
"hoki.utils.hoki_dialogue.HokiDialogue",
"hoki.utils.exceptions.HokiFormatError",
"numpy.array",
"hoki.load.model_output",
"warnings.warn",
"numpy.round",
"hoki.load.unpickle"
] |
[((412, 426), 'hoki.utils.hoki_dialogue.HokiDialogue', 'HokiDialogue', ([], {}), '()\n', (424, 426), False, 'from hoki.utils.hoki_dialogue import HokiDialogue\n'), ((792, 827), 'hoki.utils.exceptions.HokiDeprecationWarning', 'HokiDeprecationWarning', (['deprecation'], {}), '(deprecation)\n', (814, 827), False, 'from hoki.utils.exceptions import HokiFatalError, HokiUserWarning, HokiFormatError, HokiFormatWarning, HokiDeprecationWarning\n'), ((11476, 11517), 'numpy.sum', 'np.sum', (['[bin_t for bin_t in distribution]'], {}), '([bin_t for bin_t in distribution])\n', (11482, 11517), True, 'import numpy as np\n'), ((11708, 11736), 'numpy.array', 'np.array', (['([1] * 42 + [0] * 9)'], {}), '([1] * 42 + [0] * 9)\n', (11716, 11736), True, 'import numpy as np\n'), ((6740, 6794), 'hoki.utils.exceptions.HokiFormatError', 'HokiFormatError', (['"""obs_df should be a pandas.DataFrame"""'], {}), "('obs_df should be a pandas.DataFrame')\n", (6755, 6794), False, 'from hoki.utils.exceptions import HokiFatalError, HokiUserWarning, HokiFormatError, HokiFormatWarning, HokiDeprecationWarning\n'), ((6866, 6942), 'hoki.utils.exceptions.HokiFormatError', 'HokiFormatError', (['"""model should be an instance of hoki.hrdiagrams.HRDiagrams"""'], {}), "('model should be an instance of hoki.hrdiagrams.HRDiagrams')\n", (6881, 6942), False, 'from hoki.utils.exceptions import HokiFatalError, HokiUserWarning, HokiFormatError, HokiFormatWarning, HokiDeprecationWarning\n'), ((9173, 9227), 'hoki.utils.exceptions.HokiFormatError', 'HokiFormatError', (['"""obs_df should be a pandas.DataFrame"""'], {}), "('obs_df should be a pandas.DataFrame')\n", (9188, 9227), False, 'from hoki.utils.exceptions import HokiFatalError, HokiUserWarning, HokiFormatError, HokiFormatWarning, HokiDeprecationWarning\n'), ((9286, 9346), 'hoki.utils.exceptions.HokiFormatError', 'HokiFormatError', (['"""cmd should be an instance of hoki.cmd.CMD"""'], {}), "('cmd should be an instance of hoki.cmd.CMD')\n", (9301, 9346), False, 'from hoki.utils.exceptions import HokiFatalError, HokiUserWarning, HokiFormatError, HokiFormatWarning, HokiDeprecationWarning\n'), ((11748, 11770), 'numpy.array', 'np.array', (['distribution'], {}), '(distribution)\n', (11756, 11770), True, 'import numpy as np\n'), ((1618, 1682), 'hoki.utils.exceptions.HokiFormatError', 'HokiFormatError', (['"""Observations should be stored in a Data Frame"""'], {}), "('Observations should be stored in a Data Frame')\n", (1633, 1682), False, 'from hoki.utils.exceptions import HokiFatalError, HokiUserWarning, HokiFormatError, HokiFormatWarning, HokiDeprecationWarning\n'), ((1737, 1888), 'warnings.warn', 'warnings.warn', (['"""We expect the name of sources to be given in the \'name\' column. If I can\'t find names I\'ll make my own ;)"""', 'HokiFormatWarning'], {}), '(\n "We expect the name of sources to be given in the \'name\' column. 
If I can\'t find names I\'ll make my own ;)"\n , HokiFormatWarning)\n', (1750, 1888), False, 'import warnings\n'), ((4099, 4218), 'warnings.warn', 'warnings.warn', (['"""self.multiplied_pdf is not yet defined -- running AgeWizard.combined_pdfs()"""', 'HokiUserWarning'], {}), "(\n 'self.multiplied_pdf is not yet defined -- running AgeWizard.combined_pdfs()'\n , HokiUserWarning)\n", (4112, 4218), False, 'import warnings\n'), ((6187, 6293), 'hoki.utils.exceptions.HokiFormatError', 'HokiFormatError', (['"""The model should be an instance of hoki.hrdiagrams.HRDiagrams or hoki.cmd.CMD"""'], {}), "(\n 'The model should be an instance of hoki.hrdiagrams.HRDiagrams or hoki.cmd.CMD'\n )\n", (6202, 6293), False, 'from hoki.utils.exceptions import HokiFatalError, HokiUserWarning, HokiFormatError, HokiFormatWarning, HokiDeprecationWarning\n'), ((7158, 7220), 'hoki.utils.exceptions.HokiFormatError', 'HokiFormatError', (['"""obs_df should have a logT and a logL column"""'], {}), "('obs_df should have a logT and a logL column')\n", (7173, 7220), False, 'from hoki.utils.exceptions import HokiFatalError, HokiUserWarning, HokiFormatError, HokiFormatWarning, HokiDeprecationWarning\n'), ((9573, 9635), 'hoki.utils.exceptions.HokiFormatError', 'HokiFormatError', (['"""obs_df should have a logT and a logL column"""'], {}), "('obs_df should have a logT and a logL column')\n", (9588, 9635), False, 'from hoki.utils.exceptions import HokiFatalError, HokiUserWarning, HokiFormatError, HokiFormatWarning, HokiDeprecationWarning\n'), ((12449, 12463), 'numpy.array', 'np.array', (['pdfs'], {}), '(pdfs)\n', (12457, 12463), True, 'import numpy as np\n'), ((13366, 13441), 'warnings.warn', 'warnings.warn', (['"""No source names given so I\'ll make my own"""', 'HokiUserWarning'], {}), '("No source names given so I\'ll make my own", HokiUserWarning)\n', (13379, 13441), False, 'import warnings\n'), ((13777, 13789), 'numpy.isnan', 'np.isnan', (['xi'], {}), '(xi)\n', (13785, 13789), True, 'import numpy as np\n'), ((13793, 13805), 'numpy.isnan', 'np.isnan', (['yi'], {}), '(yi)\n', (13801, 13805), True, 'import numpy as np\n'), ((13819, 13913), 'warnings.warn', 'warnings.warn', (["('NaN Value encountered in coordinates for source: ' + name)", 'HokiUserWarning'], {}), "('NaN Value encountered in coordinates for source: ' + name,\n HokiUserWarning)\n", (13832, 13913), False, 'import warnings\n'), ((14770, 14791), 'numpy.array', 'np.array', (['likelihoods'], {}), '(likelihoods)\n', (14778, 14791), True, 'import numpy as np\n'), ((15872, 15947), 'warnings.warn', 'warnings.warn', (['"""No source names given so I\'ll make my own"""', 'HokiUserWarning'], {}), '("No source names given so I\'ll make my own", HokiUserWarning)\n', (15885, 15947), False, 'import warnings\n'), ((16283, 16295), 'numpy.isnan', 'np.isnan', (['xi'], {}), '(xi)\n', (16291, 16295), True, 'import numpy as np\n'), ((16299, 16311), 'numpy.isnan', 'np.isnan', (['yi'], {}), '(yi)\n', (16307, 16311), True, 'import numpy as np\n'), ((16325, 16419), 'warnings.warn', 'warnings.warn', (["('NaN Value encountered in coordinates for source: ' + name)", 'HokiUserWarning'], {}), "('NaN Value encountered in coordinates for source: ' + name,\n HokiUserWarning)\n", (16338, 16419), False, 'import warnings\n'), ((17312, 17333), 'numpy.array', 'np.array', (['likelihoods'], {}), '(likelihoods)\n', (17320, 17333), True, 'import numpy as np\n'), ((18778, 18817), 'warnings.warn', 'warnings.warn', (['message', 'HokiUserWarning'], {}), '(message, HokiUserWarning)\n', (18791, 18817), False, 
'import warnings\n'), ((2213, 2251), 'hoki.load.model_output', 'load.model_output', (['model'], {'hr_type': '"""TL"""'}), "(model, hr_type='TL')\n", (2230, 2251), True, 'import hoki.load as load\n'), ((20085, 20113), 'numpy.round', 'np.round', (['BPASS_TIME_BINS', '(2)'], {}), '(BPASS_TIME_BINS, 2)\n', (20093, 20113), True, 'import numpy as np\n'), ((20159, 20187), 'numpy.round', 'np.round', (['BPASS_TIME_BINS', '(2)'], {}), '(BPASS_TIME_BINS, 2)\n', (20167, 20187), True, 'import numpy as np\n'), ((2335, 2360), 'hoki.load.unpickle', 'load.unpickle', ([], {'path': 'model'}), '(path=model)\n', (2348, 2360), True, 'import hoki.load as load\n')]
|
from detectron2.engine import default_argument_parser
from liuy.implementation.CoCoSegModel import CoCoSegModel
from liuy.implementation.RandomSampler import CoCoRandomSampler
import numpy as np
import random
from liuy.utils.reg_dataset import register_coco_instances_from_selected_image_files
from liuy.utils.local_config import coco_data, debug_data
import copy
def generate_one_curve(
whole_image_id,
coco_data,
sampler,
ins_seg_model,
seed_batch,
batch_size
):
"""
:return:
"""
    # work out the batch sizes: seed_batch and batch_size may be given as fractions of the whole training set
whole_train_size = len(whole_image_id)
if seed_batch < 1:
seed_batch = int(seed_batch * whole_train_size)
if batch_size < 1:
batch_size = int(batch_size * whole_train_size)
    # initially, seed_batch images are selected at random
selected_image_id = random.sample(whole_image_id, seed_batch)
# register data set and build data loader
register_coco_instances_from_selected_image_files(
name='coco_from_selected_image',
json_file=coco_data[0]['json_file'],
image_root=coco_data[0]['image_root'],
selected_image_files=selected_image_id
)
data_loader_from_selected_image_files, _ = ins_seg_model.trainer.re_build_train_loader(
'coco_from_selected_image')
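    # +1 because iteration 0 trains on the seed batch before any new images are selected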
n_batches = int(np.ceil(((whole_train_size - seed_batch) * 1 / batch_size))) + 1
for n in range(n_batches):
# check the size in this iter
n_train_size = seed_batch + min((whole_train_size - seed_batch), n * batch_size)
assert n_train_size == len(selected_image_id)
        print('{} data points for training in iter{}'.format(n_train_size, n))
# start training and test
ins_seg_model.save_selected_image_id(selected_image_id)
ins_seg_model.fit_on_subset(data_loader_from_selected_image_files)
# select new batch
n_sample = min(batch_size, whole_train_size - len(selected_image_id))
new_batch = sampler.select_batch(n_sample=n_sample, already_selected=copy.deepcopy(selected_image_id))
selected_image_id.extend(new_batch)
assert len(new_batch) == n_sample
print('Requested: %d, Selected: %d' % (n_sample, len(new_batch)))
# register dataset and build data loader
register_coco_instances_from_selected_image_files(
name='coco_from_selected_image',
json_file=coco_data[0]['json_file'],
image_root=coco_data[0]['image_root'],
selected_image_files=selected_image_id
)
data_loader_from_selected_image_files, _ = ins_seg_model.trainer.re_build_train_loader(
'coco_from_selected_image')
# reset model
print("--reset model")
ins_seg_model.reset_model()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
project_id = "random"
seg_model = CoCoSegModel(args, project_id=project_id, coco_data=debug_data, resume_or_load=True)
data_loader = seg_model.trainer.data_loader
whole_image_id = []
index_list = data_loader.dataset._dataset._lst
for item in index_list:
whole_image_id.append(item['image_id'])
randomsampler = CoCoRandomSampler("random_sampler", whole_image_id=whole_image_id)
generate_one_curve(
coco_data=copy.deepcopy(debug_data),
whole_image_id=copy.deepcopy(whole_image_id),
sampler=randomsampler,
ins_seg_model=seg_model,
batch_size=100,
seed_batch=100,
)
|
[
"liuy.utils.reg_dataset.register_coco_instances_from_selected_image_files",
"copy.deepcopy",
"numpy.ceil",
"random.sample",
"detectron2.engine.default_argument_parser",
"liuy.implementation.CoCoSegModel.CoCoSegModel",
"liuy.implementation.RandomSampler.CoCoRandomSampler"
] |
[((878, 919), 'random.sample', 'random.sample', (['whole_image_id', 'seed_batch'], {}), '(whole_image_id, seed_batch)\n', (891, 919), False, 'import random\n'), ((970, 1182), 'liuy.utils.reg_dataset.register_coco_instances_from_selected_image_files', 'register_coco_instances_from_selected_image_files', ([], {'name': '"""coco_from_selected_image"""', 'json_file': "coco_data[0]['json_file']", 'image_root': "coco_data[0]['image_root']", 'selected_image_files': 'selected_image_id'}), "(name=\n 'coco_from_selected_image', json_file=coco_data[0]['json_file'],\n image_root=coco_data[0]['image_root'], selected_image_files=\n selected_image_id)\n", (1019, 1182), False, 'from liuy.utils.reg_dataset import register_coco_instances_from_selected_image_files\n'), ((2928, 3016), 'liuy.implementation.CoCoSegModel.CoCoSegModel', 'CoCoSegModel', (['args'], {'project_id': 'project_id', 'coco_data': 'debug_data', 'resume_or_load': '(True)'}), '(args, project_id=project_id, coco_data=debug_data,\n resume_or_load=True)\n', (2940, 3016), False, 'from liuy.implementation.CoCoSegModel import CoCoSegModel\n'), ((3233, 3299), 'liuy.implementation.RandomSampler.CoCoRandomSampler', 'CoCoRandomSampler', (['"""random_sampler"""'], {'whole_image_id': 'whole_image_id'}), "('random_sampler', whole_image_id=whole_image_id)\n", (3250, 3299), False, 'from liuy.implementation.RandomSampler import CoCoRandomSampler\n'), ((2323, 2535), 'liuy.utils.reg_dataset.register_coco_instances_from_selected_image_files', 'register_coco_instances_from_selected_image_files', ([], {'name': '"""coco_from_selected_image"""', 'json_file': "coco_data[0]['json_file']", 'image_root': "coco_data[0]['image_root']", 'selected_image_files': 'selected_image_id'}), "(name=\n 'coco_from_selected_image', json_file=coco_data[0]['json_file'],\n image_root=coco_data[0]['image_root'], selected_image_files=\n selected_image_id)\n", (2372, 2535), False, 'from liuy.utils.reg_dataset import register_coco_instances_from_selected_image_files\n'), ((1356, 1413), 'numpy.ceil', 'np.ceil', (['((whole_train_size - seed_batch) * 1 / batch_size)'], {}), '((whole_train_size - seed_batch) * 1 / batch_size)\n', (1363, 1413), True, 'import numpy as np\n'), ((2847, 2872), 'detectron2.engine.default_argument_parser', 'default_argument_parser', ([], {}), '()\n', (2870, 2872), False, 'from detectron2.engine import default_argument_parser\n'), ((3342, 3367), 'copy.deepcopy', 'copy.deepcopy', (['debug_data'], {}), '(debug_data)\n', (3355, 3367), False, 'import copy\n'), ((3392, 3421), 'copy.deepcopy', 'copy.deepcopy', (['whole_image_id'], {}), '(whole_image_id)\n', (3405, 3421), False, 'import copy\n'), ((2070, 2102), 'copy.deepcopy', 'copy.deepcopy', (['selected_image_id'], {}), '(selected_image_id)\n', (2083, 2102), False, 'import copy\n')]
|
import pytest
import numpy as np
import torch
from pycox.models.utils import pad_col, make_subgrid, cumsum_reverse
@pytest.mark.parametrize('val', [0, 1, 5])
def test_pad_col_start(val):
x = torch.ones((2, 3))
x_pad = pad_col(x, val, where='start')
pad = torch.ones(2, 1) * val
assert (x_pad == torch.cat([pad, x], dim=1)).all()
@pytest.mark.parametrize('val', [0, 1, 5])
def test_pad_col_end(val):
x = torch.ones((2, 3))
x_pad = pad_col(x, val)
pad = torch.ones(2, 1) * val
assert (x_pad == torch.cat([x, pad], dim=1)).all()
@pytest.mark.parametrize('n', [2, 13, 40])
def test_make_subgrid_1(n):
grid = np.random.uniform(0, 100, n)
grid = np.sort(grid)
new_grid = make_subgrid(grid, 1)
assert len(new_grid) == len(grid)
assert (new_grid == grid).all()
@pytest.mark.parametrize('sub', [2, 10, 20])
@pytest.mark.parametrize('start', [0, 2])
@pytest.mark.parametrize('stop', [4, 100])
@pytest.mark.parametrize('n', [5, 10])
def test_make_subgrid(sub, start, stop, n):
grid = np.linspace(start, stop, n)
new_grid = make_subgrid(grid, sub)
true_new = np.linspace(start, stop, n*sub - (sub-1))
assert len(new_grid) == len(true_new)
assert np.abs(true_new - new_grid).max() < 1e-13
def test_cumsum_reverse_error_dim():
x = torch.randn((5, 3))
with pytest.raises(NotImplementedError):
cumsum_reverse(x, dim=0)
with pytest.raises(NotImplementedError):
cumsum_reverse(x, dim=2)
def test_cumsum_reverse_dim_1():
torch.manual_seed(1234)
x = torch.randn(5, 16)
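    # NumPy reference: reverse along dim 1, cumulative sum, then reverse back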
res_np = x.numpy()[:, ::-1].cumsum(1)[:, ::-1]
res = cumsum_reverse(x, dim=1)
assert np.abs(res.numpy() - res_np).max() < 1e-6
|
[
"numpy.random.uniform",
"torch.ones",
"pycox.models.utils.make_subgrid",
"numpy.abs",
"torch.manual_seed",
"torch.randn",
"torch.cat",
"numpy.sort",
"pytest.raises",
"numpy.linspace",
"pytest.mark.parametrize",
"pycox.models.utils.pad_col",
"pycox.models.utils.cumsum_reverse"
] |
[((117, 158), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""val"""', '[0, 1, 5]'], {}), "('val', [0, 1, 5])\n", (140, 158), False, 'import pytest\n'), ((348, 389), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""val"""', '[0, 1, 5]'], {}), "('val', [0, 1, 5])\n", (371, 389), False, 'import pytest\n'), ((562, 603), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n"""', '[2, 13, 40]'], {}), "('n', [2, 13, 40])\n", (585, 603), False, 'import pytest\n'), ((810, 853), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sub"""', '[2, 10, 20]'], {}), "('sub', [2, 10, 20])\n", (833, 853), False, 'import pytest\n'), ((855, 895), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""start"""', '[0, 2]'], {}), "('start', [0, 2])\n", (878, 895), False, 'import pytest\n'), ((897, 938), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""stop"""', '[4, 100]'], {}), "('stop', [4, 100])\n", (920, 938), False, 'import pytest\n'), ((940, 977), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n"""', '[5, 10]'], {}), "('n', [5, 10])\n", (963, 977), False, 'import pytest\n'), ((196, 214), 'torch.ones', 'torch.ones', (['(2, 3)'], {}), '((2, 3))\n', (206, 214), False, 'import torch\n'), ((227, 257), 'pycox.models.utils.pad_col', 'pad_col', (['x', 'val'], {'where': '"""start"""'}), "(x, val, where='start')\n", (234, 257), False, 'from pycox.models.utils import pad_col, make_subgrid, cumsum_reverse\n'), ((425, 443), 'torch.ones', 'torch.ones', (['(2, 3)'], {}), '((2, 3))\n', (435, 443), False, 'import torch\n'), ((456, 471), 'pycox.models.utils.pad_col', 'pad_col', (['x', 'val'], {}), '(x, val)\n', (463, 471), False, 'from pycox.models.utils import pad_col, make_subgrid, cumsum_reverse\n'), ((643, 671), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(100)', 'n'], {}), '(0, 100, n)\n', (660, 671), True, 'import numpy as np\n'), ((683, 696), 'numpy.sort', 'np.sort', (['grid'], {}), '(grid)\n', (690, 696), True, 'import numpy as np\n'), ((712, 733), 'pycox.models.utils.make_subgrid', 'make_subgrid', (['grid', '(1)'], {}), '(grid, 1)\n', (724, 733), False, 'from pycox.models.utils import pad_col, make_subgrid, cumsum_reverse\n'), ((1033, 1060), 'numpy.linspace', 'np.linspace', (['start', 'stop', 'n'], {}), '(start, stop, n)\n', (1044, 1060), True, 'import numpy as np\n'), ((1076, 1099), 'pycox.models.utils.make_subgrid', 'make_subgrid', (['grid', 'sub'], {}), '(grid, sub)\n', (1088, 1099), False, 'from pycox.models.utils import pad_col, make_subgrid, cumsum_reverse\n'), ((1115, 1160), 'numpy.linspace', 'np.linspace', (['start', 'stop', '(n * sub - (sub - 1))'], {}), '(start, stop, n * sub - (sub - 1))\n', (1126, 1160), True, 'import numpy as np\n'), ((1298, 1317), 'torch.randn', 'torch.randn', (['(5, 3)'], {}), '((5, 3))\n', (1309, 1317), False, 'import torch\n'), ((1512, 1535), 'torch.manual_seed', 'torch.manual_seed', (['(1234)'], {}), '(1234)\n', (1529, 1535), False, 'import torch\n'), ((1544, 1562), 'torch.randn', 'torch.randn', (['(5)', '(16)'], {}), '(5, 16)\n', (1555, 1562), False, 'import torch\n'), ((1624, 1648), 'pycox.models.utils.cumsum_reverse', 'cumsum_reverse', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (1638, 1648), False, 'from pycox.models.utils import pad_col, make_subgrid, cumsum_reverse\n'), ((268, 284), 'torch.ones', 'torch.ones', (['(2)', '(1)'], {}), '(2, 1)\n', (278, 284), False, 'import torch\n'), ((482, 498), 'torch.ones', 'torch.ones', (['(2)', '(1)'], {}), '(2, 1)\n', (492, 498), False, 'import torch\n'), ((1327, 
1361), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (1340, 1361), False, 'import pytest\n'), ((1371, 1395), 'pycox.models.utils.cumsum_reverse', 'cumsum_reverse', (['x'], {'dim': '(0)'}), '(x, dim=0)\n', (1385, 1395), False, 'from pycox.models.utils import pad_col, make_subgrid, cumsum_reverse\n'), ((1405, 1439), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (1418, 1439), False, 'import pytest\n'), ((1449, 1473), 'pycox.models.utils.cumsum_reverse', 'cumsum_reverse', (['x'], {'dim': '(2)'}), '(x, dim=2)\n', (1463, 1473), False, 'from pycox.models.utils import pad_col, make_subgrid, cumsum_reverse\n'), ((312, 338), 'torch.cat', 'torch.cat', (['[pad, x]'], {'dim': '(1)'}), '([pad, x], dim=1)\n', (321, 338), False, 'import torch\n'), ((526, 552), 'torch.cat', 'torch.cat', (['[x, pad]'], {'dim': '(1)'}), '([x, pad], dim=1)\n', (535, 552), False, 'import torch\n'), ((1210, 1237), 'numpy.abs', 'np.abs', (['(true_new - new_grid)'], {}), '(true_new - new_grid)\n', (1216, 1237), True, 'import numpy as np\n')]
|
#
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import numpy as np
from numba import cuda
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
train_clustering_model,
run_query,
convert_datestring_to_days,
)
from bdb_tools.readers import build_reader
from dask import delayed
# q25 parameters
Q25_DATE = "2002-01-02"
N_CLUSTERS = 8
CLUSTER_ITERATIONS = 20
N_ITER = 5
def read_tables(config):
table_reader = build_reader(
data_format=config["file_format"],
basepath=config["data_dir"],
split_row_groups=config["split_row_groups"],
)
ss_cols = ["ss_customer_sk", "ss_sold_date_sk", "ss_ticket_number", "ss_net_paid"]
ws_cols = [
"ws_bill_customer_sk",
"ws_sold_date_sk",
"ws_order_number",
"ws_net_paid",
]
datedim_cols = ["d_date_sk", "d_date"]
ss_ddf = table_reader.read("store_sales", relevant_cols=ss_cols, index=False)
ws_ddf = table_reader.read("web_sales", relevant_cols=ws_cols, index=False)
datedim_ddf = table_reader.read("date_dim", relevant_cols=datedim_cols, index=False)
return (ss_ddf, ws_ddf, datedim_ddf)
def agg_count_distinct(df, group_key, counted_key, client):
"""Returns a Series that is the result of counting distinct instances of 'counted_key' within each 'group_key'.
The series' index will have one entry per unique 'group_key' value.
Workaround for lack of nunique aggregate function on Dask df.
"""
### going via repartition for split_out drop duplicates
unique_df = df[[group_key, counted_key]].map_partitions(
lambda df: df.drop_duplicates()
)
unique_df = unique_df.shuffle(on=[group_key])
unique_df = unique_df.map_partitions(lambda df: df.drop_duplicates())
return unique_df.groupby(group_key)[counted_key].count(split_every=2)
def get_clusters(client, ml_input_df):
import dask_cudf
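    # build one delayed clustering task per partition of the input and run them on the cluster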
ml_tasks = [
delayed(train_clustering_model)(df, N_CLUSTERS, CLUSTER_ITERATIONS, N_ITER)
for df in ml_input_df.to_delayed()
]
results_dict = client.compute(*ml_tasks, sync=True)
output = ml_input_df.index.to_frame().reset_index(drop=True)
labels_final = dask_cudf.from_cudf(
results_dict["cid_labels"], npartitions=output.npartitions
)
output["label"] = labels_final.reset_index()[0]
# Sort based on CDH6.1 q25-result formatting
output = output.sort_values(["cid"])
results_dict["cid_labels"] = output
return results_dict
def main(client, config):
import dask_cudf
ss_ddf, ws_ddf, datedim_ddf = benchmark(
read_tables,
config=config,
compute_result=config["get_read_time"],
dask_profile=config["dask_profile"],
)
datedim_ddf = datedim_ddf.map_partitions(convert_datestring_to_days)
min_date = np.datetime64(Q25_DATE, "D").astype(int)
# Filter by date
valid_dates_ddf = datedim_ddf[datedim_ddf["d_date"] > min_date].reset_index(
drop=True
)
f_ss_ddf = ss_ddf[ss_ddf["ss_customer_sk"].notnull()].reset_index(drop=True)
f_ws_ddf = ws_ddf[ws_ddf["ws_bill_customer_sk"].notnull()].reset_index(drop=True)
# Merge
ss_merged_df = f_ss_ddf.merge(
valid_dates_ddf, left_on="ss_sold_date_sk", right_on="d_date_sk", how="inner"
)
ws_merged_df = f_ws_ddf.merge(
valid_dates_ddf, left_on="ws_sold_date_sk", right_on="d_date_sk", how="inner"
)
# Roll up store sales
agg_store_sales_ddf = ss_merged_df.groupby("ss_customer_sk").agg(
{"ss_sold_date_sk": "max", "ss_net_paid": "sum"}
)
agg_store_sales_ddf["frequency"] = agg_count_distinct(
ss_merged_df, "ss_customer_sk", "ss_ticket_number", client=client
) # Simulate count distinct
# Same rollup, just different columns for web sales
agg_web_sales_ddf = ws_merged_df.groupby("ws_bill_customer_sk").agg(
{"ws_sold_date_sk": "max", "ws_net_paid": "sum"}
)
agg_web_sales_ddf["frequency"] = agg_count_distinct(
ws_merged_df, "ws_bill_customer_sk", "ws_order_number", client=client
) # Simulate count distinct
agg_store_sales_ddf = agg_store_sales_ddf.reset_index()
agg_web_sales_ddf = agg_web_sales_ddf.reset_index()
shared_columns = ["cid", "most_recent_date", "amount", "frequency"]
agg_store_sales_ddf.columns = shared_columns
agg_web_sales_ddf.columns = shared_columns
agg_sales_ddf = dask_cudf.concat([agg_store_sales_ddf, agg_web_sales_ddf])
cluster_input_ddf = agg_sales_ddf.groupby("cid").agg(
{"most_recent_date": "max", "frequency": "sum", "amount": "sum"}
)
cluster_input_ddf["recency"] = (37621 - cluster_input_ddf["most_recent_date"]) < 60
    # Reorder to match reference examples
cluster_input_ddf = cluster_input_ddf[["recency", "frequency", "amount"]]
# Prepare df for KMeans clustering
cluster_input_ddf["recency"] = cluster_input_ddf["recency"].astype("int64")
cluster_input_ddf["amount"] = cluster_input_ddf["amount"].astype("float64")
cluster_input_ddf = cluster_input_ddf.persist()
results_dict = get_clusters(client=client, ml_input_df=cluster_input_ddf)
return results_dict
if __name__ == "__main__":
from bdb_tools.cluster_startup import attach_to_cluster
import cudf
import dask_cudf
config = gpubdb_argparser()
client, bc = attach_to_cluster(config)
run_query(config=config, client=client, query_func=main)
|
[
"dask.delayed",
"dask_cudf.from_cudf",
"numpy.datetime64",
"bdb_tools.utils.run_query",
"bdb_tools.utils.gpubdb_argparser",
"bdb_tools.cluster_startup.attach_to_cluster",
"bdb_tools.utils.benchmark",
"bdb_tools.readers.build_reader",
"dask_cudf.concat"
] |
[((1004, 1129), 'bdb_tools.readers.build_reader', 'build_reader', ([], {'data_format': "config['file_format']", 'basepath': "config['data_dir']", 'split_row_groups': "config['split_row_groups']"}), "(data_format=config['file_format'], basepath=config['data_dir'],\n split_row_groups=config['split_row_groups'])\n", (1016, 1129), False, 'from bdb_tools.readers import build_reader\n'), ((2758, 2837), 'dask_cudf.from_cudf', 'dask_cudf.from_cudf', (["results_dict['cid_labels']"], {'npartitions': 'output.npartitions'}), "(results_dict['cid_labels'], npartitions=output.npartitions)\n", (2777, 2837), False, 'import dask_cudf\n'), ((3144, 3263), 'bdb_tools.utils.benchmark', 'benchmark', (['read_tables'], {'config': 'config', 'compute_result': "config['get_read_time']", 'dask_profile': "config['dask_profile']"}), "(read_tables, config=config, compute_result=config['get_read_time'\n ], dask_profile=config['dask_profile'])\n", (3153, 3263), False, 'from bdb_tools.utils import benchmark, gpubdb_argparser, train_clustering_model, run_query, convert_datestring_to_days\n'), ((4983, 5041), 'dask_cudf.concat', 'dask_cudf.concat', (['[agg_store_sales_ddf, agg_web_sales_ddf]'], {}), '([agg_store_sales_ddf, agg_web_sales_ddf])\n', (4999, 5041), False, 'import dask_cudf\n'), ((5888, 5906), 'bdb_tools.utils.gpubdb_argparser', 'gpubdb_argparser', ([], {}), '()\n', (5904, 5906), False, 'from bdb_tools.utils import benchmark, gpubdb_argparser, train_clustering_model, run_query, convert_datestring_to_days\n'), ((5924, 5949), 'bdb_tools.cluster_startup.attach_to_cluster', 'attach_to_cluster', (['config'], {}), '(config)\n', (5941, 5949), False, 'from bdb_tools.cluster_startup import attach_to_cluster\n'), ((5954, 6010), 'bdb_tools.utils.run_query', 'run_query', ([], {'config': 'config', 'client': 'client', 'query_func': 'main'}), '(config=config, client=client, query_func=main)\n', (5963, 6010), False, 'from bdb_tools.utils import benchmark, gpubdb_argparser, train_clustering_model, run_query, convert_datestring_to_days\n'), ((2491, 2522), 'dask.delayed', 'delayed', (['train_clustering_model'], {}), '(train_clustering_model)\n', (2498, 2522), False, 'from dask import delayed\n'), ((3386, 3414), 'numpy.datetime64', 'np.datetime64', (['Q25_DATE', '"""D"""'], {}), "(Q25_DATE, 'D')\n", (3399, 3414), True, 'import numpy as np\n')]
|
# Copyright (C) 2016 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# ==============================================================================
# Based on original Work Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common image reprocess functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
try:
import cv2
has_cv2 = True
except ImportError:
has_cv2 = False
try:
from skimage import transform
has_skimage = True
except ImportError:
has_skimage = False
# FIXME these decentralized flags get REALLY annoying with module import conflicts, in common spot for now
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('image_size', 299,
"""Provide square images of this size.""")
tf.app.flags.DEFINE_float('image_aspect', 0.0,
"""Aspect ratio based sizing, square image_size*image_size if 0""")
tf.app.flags.DEFINE_string('image_norm', 'default',
"""Either 'caffe' BGR [0,255], 'caffe_rgb' [0, 255],
'frame' per-frame standardize, 'global' standardize, 'default' [-1, 1].""")
tf.app.flags.DEFINE_string('image_fmt', 'jpg',
"""Either 'jpg', 'png', or 'gif'""")
IMAGENET_MEAN_CAFFE = [103.939, 116.779, 123.68]
IMAGENET_MEAN_STD = [
[0.485, 0.456, 0.406], # mean
[0.229, 0.224, 0.225], # std
]
def image_normalize(
image,
method='global',
global_mean_std=IMAGENET_MEAN_STD,
caffe_mean=IMAGENET_MEAN_CAFFE):
"""
Args:
image:
method:
global_mean_std:
caffe_mean:
Returns:
"""
if method == 'caffe' or method == 'caffe_bgr':
print('Caffe BGR normalize', image.get_shape())
# Rescale to [0, 255]
image = tf.mul(image, 255.0)
# Convert RGB to BGR
red, green, blue = tf.split(2, 3, image)
image = tf.concat(2, [blue, green, red])
        image = tf.sub(image, caffe_mean)
elif method == 'caffe_rgb':
print('Caffe RGB normalize', image.get_shape())
# Rescale to [0, 255]
image = tf.mul(image, 255.0)
caffe_mean_rgb = tf.gather(caffe_mean, [2, 1, 0])
image = tf.sub(image, caffe_mean_rgb)
elif method == 'frame':
print("Per-frame standardize", image.get_shape())
mean, var = tf.nn.moments(image, axes=[0, 1], shift=0.3)
std = tf.sqrt(tf.add(var, .001))
image = tf.sub(image, mean)
image = tf.div(image, std)
elif method == 'global':
print('Global standardize', image.get_shape())
image = tf.sub(image, global_mean_std[0])
image = tf.div(image, global_mean_std[1])
else:
assert method == 'default'
print('Default normalize [-1, 1]', image.get_shape())
# Rescale to [-1,1] instead of [0, 1)
image = tf.sub(image, 0.5)
image = tf.mul(image, 2.0)
return image
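# Added note (not part of the original module): every branch above assumes the incoming
# tensor is float32 in [0, 1), as produced by decode_compressed_image below. For example,
# with method='default' a pixel value of 0.25 maps to (0.25 - 0.5) * 2.0 = -0.5, while the
# 'caffe' modes first rescale to [0, 255] before subtracting the dataset mean.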
def decode_compressed_image(image_buffer, image_fmt='jpg', depth=3, scope=None):
"""Decode a JPEG string into one 3-D float image Tensor.
Args:
image_buffer: scalar string Tensor.
scope: Optional scope for op_scope.
Returns:
3-D float Tensor with values ranging from [0, 1).
"""
with tf.name_scope(scope, 'decode_image', [image_buffer]):
# Decode the string as an RGB JPEG.
# Note that the resulting image contains an unknown height and width
# that is set dynamically by decode_jpeg. In other words, the height
# and width of image is unknown at compile-time.
image_fmt = str.lower(image_fmt)
if image_fmt == 'png':
image = tf.image.decode_png(image_buffer, channels=depth)
elif image_fmt == 'gif':
assert depth == 3
image = tf.image.decode_gif(image_buffer)
else:
assert image_fmt == 'jpg' or image_fmt == 'jpeg'
image = tf.image.decode_jpeg(image_buffer, channels=depth)
# After this point, all image pixels reside in [0,1)
# until the very end, when they're rescaled to (-1, 1). The various
# adjust_* ops all require this range for dtype float.
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
return image
def distort_color(image, hue_delta=0.2, thread_id=0, scope=None):
"""Distort the color of the image.
Each color distortion is non-commutative and thus ordering of the color ops
matters. Ideally we would randomly permute the ordering of the color ops.
    Rather than adding that level of complication, we select a distinct ordering
of color ops for each preprocessing thread.
Args:
image: Tensor containing single image.
hue_delta: range for random hue shift
thread_id: preprocessing thread ID.
scope: Optional scope for op_scope.
Returns:
color-distorted image
"""
with tf.name_scope(scope, 'distort_color', [image]):
color_ordering = thread_id % 2
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=hue_delta)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif color_ordering == 1:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=hue_delta)
# The random_* ops do not necessarily clamp.
image = tf.clip_by_value(image, 0.0, 1.0)
return image
def distort_affine_cv2(image, alpha_affine=10, random_state=None):
if random_state is None:
random_state = np.random.RandomState(None)
shape = image.shape
shape_size = shape[:2]
center_square = np.float32(shape_size) // 2
square_size = min(shape_size) // 3
pts1 = np.float32([
center_square + square_size,
[center_square[0] + square_size, center_square[1] - square_size],
center_square - square_size])
pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
M = cv2.getAffineTransform(pts1, pts2)
distorted_image = cv2.warpAffine(
image, M, shape_size[::-1], borderMode=cv2.BORDER_REPLICATE) #cv2.BORDER_REFLECT_101)
return distorted_image
def distort_affine_skimage(image, rotation=10.0, shear=5.0, random_state=None):
if random_state is None:
random_state = np.random.RandomState(None)
rot = np.deg2rad(np.random.uniform(-rotation, rotation))
sheer = np.deg2rad(np.random.uniform(-shear, shear))
shape = image.shape
shape_size = shape[:2]
center = np.float32(shape_size) / 2. - 0.5
pre = transform.SimilarityTransform(translation=-center)
affine = transform.AffineTransform(rotation=rot, shear=sheer, translation=center)
tform = pre + affine
distorted_image = transform.warp(image, tform.params, mode='reflect')
return distorted_image.astype(np.float32)
def distort_elastic_cv2(image, alpha=80, sigma=20, random_state=None):
"""Elastic deformation of images as per [Simard2003].
"""
if random_state is None:
random_state = np.random.RandomState(None)
shape_size = image.shape[:2]
# Downscaling the random grid and then upsizing post filter
# improves performance. Approx 3x for scale of 4, diminishing returns after.
grid_scale = 4
alpha //= grid_scale # Does scaling these make sense? seems to provide
sigma //= grid_scale # more similar end result when scaling grid used.
grid_shape = (shape_size[0]//grid_scale, shape_size[1]//grid_scale)
blur_size = int(4 * sigma) | 1
rand_x = cv2.GaussianBlur(
(random_state.rand(*grid_shape) * 2 - 1).astype(np.float32),
ksize=(blur_size, blur_size), sigmaX=sigma) * alpha
rand_y = cv2.GaussianBlur(
(random_state.rand(*grid_shape) * 2 - 1).astype(np.float32),
ksize=(blur_size, blur_size), sigmaX=sigma) * alpha
if grid_scale > 1:
rand_x = cv2.resize(rand_x, shape_size[::-1])
rand_y = cv2.resize(rand_y, shape_size[::-1])
grid_x, grid_y = np.meshgrid(np.arange(shape_size[1]), np.arange(shape_size[0]))
grid_x = (grid_x + rand_x).astype(np.float32)
grid_y = (grid_y + rand_y).astype(np.float32)
distorted_img = cv2.remap(image, grid_x, grid_y,
borderMode=cv2.BORDER_REFLECT_101, interpolation=cv2.INTER_LINEAR)
return distorted_img
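# Added note: the elastic warp above builds its random displacement field on a grid that is
# grid_scale times smaller than the image and resizes it back up afterwards. For a 256x256
# input with the defaults (alpha=80, sigma=20, grid_scale=4) the Gaussian blur runs on a
# 64x64 field with sigma 20 // 4 = 5 and kernel size int(4 * 5) | 1 = 21.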
distort_params_default = {
'h_flip': True,
'v_flip': False,
'elastic_distortion': False,
'affine_distortion': True,
'aspect_ratio_range': [0.67, 1.33],
'area_range': [0.1, 1.0],
'min_object_covered': 0.1,
'hue_delta': 0.2,
'rotation_range': 10.0,
'shear_range': 5.0,
}
def process_for_train(
image,
height,
width,
bbox=None,
params=distort_params_default,
thread_id=0,
summary_suffix='',
scope=None):
"""Distort one image for training a network.
Distorting images provides a useful technique for augmenting the data
set during training in order to make the network invariant to aspects
    of the image that do not affect the label.
Args:
image: 3-D float Tensor of image
height: integer
width: integer
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax].
thread_id: integer indicating the preprocessing thread.
      params: distortion parameters dictionary for configuring distortions
scope: Optional scope for op_scope.
Returns:
3-D float Tensor of distorted image used for training.
"""
with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]):
# Each bounding box has shape [1, num_boxes, box coords] and
# the coordinates are ordered [ymin, xmin, ymax, xmax].
# Display the bounding box in the first thread only.
if not thread_id:
image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), bbox)
tf.summary.image('image_with_bounding_boxes%s' % summary_suffix, image_with_box)
# A large fraction of image datasets contain a human-annotated bounding
# box delineating the region of the image containing the object of interest.
# We choose to create a new bounding box for the object which is a randomly
# distorted version of the human-annotated bounding box that obeys an allowed
# range of aspect ratios, sizes and overlap with the human-annotated
# bounding box. If no box is supplied, then we assume the bounding box is
# the entire image.
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
tf.shape(image),
bounding_boxes=bbox,
min_object_covered=params['min_object_covered'],
aspect_ratio_range=params['aspect_ratio_range'],
area_range=params['area_range'],
max_attempts=100,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box
if not thread_id:
image_with_distorted_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), distort_bbox)
tf.summary.image('images_with_distorted_bounding_box%s' % summary_suffix, image_with_distorted_box)
if params['affine_distortion']:
rotation_range = params['rotation_range']
shear_range = params['shear_range']
if has_skimage:
image = tf.py_func(distort_affine_skimage, [image, rotation_range, shear_range], [tf.float32])[0]
#elif has_cv2:
# image = tf.py_func(distort_affine_cv2, [image, angle_range], [tf.float32])[0]
else:
print('Affine image distortion disabled, no cv2 or skimage module present.')
image.set_shape([height, width, 3])
# Crop the image to the specified bounding box.
distorted_image = tf.slice(image, bbox_begin, bbox_size)
# This resizing operation may distort the images because the aspect
# ratio is not respected.
resize_method = tf.image.ResizeMethod.BILINEAR
distorted_image = tf.image.resize_images(distorted_image, [height, width], resize_method)
# Restore the shape since the dynamic slice based upon the bbox_size loses
# the third dimension.
distorted_image.set_shape([height, width, 3])
if not thread_id:
tf.summary.image('cropped_resized_image%s' % summary_suffix, tf.expand_dims(distorted_image, 0))
if params['elastic_distortion']:
if has_cv2:
distorted_image = tf.py_func(distort_elastic_cv2, [distorted_image], [tf.float32])[0]
else:
print('Elastic image distortion disabled, no cv2 module present.')
distorted_image.set_shape([height, width, 3])
# Randomly flip the image horizontally.
if params['h_flip']:
distorted_image = tf.image.random_flip_left_right(distorted_image)
if params['v_flip']:
distorted_image = tf.image.random_flip_up_down(distorted_image)
# Randomly distort the colors.
distorted_image = distort_color(distorted_image, hue_delta=params['hue_delta'], thread_id=thread_id)
if not thread_id:
tf.summary.image('final_distorted_image%s' % summary_suffix, tf.expand_dims(distorted_image, 0))
return distorted_image
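# Added note: the training-time pipeline above applies, in order: an optional skimage
# affine warp (rotation/shear), a random distorted-bounding-box crop, bilinear resize to
# (height, width), an optional OpenCV elastic warp, random horizontal/vertical flips, and
# thread-dependent color jitter that is clipped back to [0, 1] inside distort_color.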
eval_params_default = {
'central_crop_fraction': 0.95,
}
def process_for_eval(image, height, width, params=eval_params_default, scope=None):
"""Prepare one image for evaluation.
Args:
image: 3-D float Tensor
height: integer
width: integer
scope: Optional scope for op_scope.
Returns:
3-D float Tensor of prepared image.
"""
with tf.name_scope(scope, 'eval_image', [image, height, width]):
# Crop the central region of the image
if params['central_crop_fraction'] != 1.0:
image = tf.image.central_crop(image, central_fraction=params['central_crop_fraction'])
# Resize the image to the network height and width.
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [height, width], align_corners=False)
image = tf.squeeze(image, [0])
return image
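# Added usage sketch (not part of the original module): a minimal smoke test of the
# evaluation path. It assumes a TF runtime old enough to still provide tf.mul/tf.sub and
# tf.read_file, and a local 'example.jpg'; the file name and sizes are illustrative only.
if __name__ == '__main__':
    with tf.Session() as sess:
        raw = tf.read_file('example.jpg')  # hypothetical sample image
        img = decode_compressed_image(raw, image_fmt='jpg')
        img = process_for_eval(img, 299, 299)
        img = image_normalize(img, method='default')
        print(sess.run(img).shape)  # expected: (299, 299, 3)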
|
[
"tensorflow.app.flags.DEFINE_float",
"tensorflow.clip_by_value",
"skimage.transform.SimilarityTransform",
"cv2.remap",
"cv2.warpAffine",
"tensorflow.image.resize_bilinear",
"tensorflow.image.decode_png",
"numpy.arange",
"tensorflow.split",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.nn.moments",
"tensorflow.gather",
"tensorflow.image.random_contrast",
"tensorflow.concat",
"numpy.random.RandomState",
"tensorflow.div",
"tensorflow.image.central_crop",
"tensorflow.squeeze",
"skimage.transform.AffineTransform",
"tensorflow.name_scope",
"tensorflow.image.decode_gif",
"cv2.resize",
"tensorflow.mul",
"tensorflow.image.resize_images",
"tensorflow.summary.image",
"tensorflow.add",
"tensorflow.image.random_hue",
"tensorflow.image.random_flip_left_right",
"skimage.transform.warp",
"tensorflow.image.random_saturation",
"tensorflow.image.random_brightness",
"tensorflow.expand_dims",
"numpy.random.uniform",
"tensorflow.py_func",
"tensorflow.image.random_flip_up_down",
"numpy.float32",
"tensorflow.shape",
"cv2.getAffineTransform",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.slice",
"tensorflow.sub",
"tensorflow.image.decode_jpeg",
"tensorflow.image.convert_image_dtype"
] |
[((1574, 1663), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""image_size"""', '(299)', '"""Provide square images of this size."""'], {}), "('image_size', 299,\n 'Provide square images of this size.')\n", (1601, 1663), True, 'import tensorflow as tf\n'), ((1692, 1806), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""image_aspect"""', '(0.0)', '"""Aspect ratio based sizing, square image_size*image_size if 0"""'], {}), "('image_aspect', 0.0,\n 'Aspect ratio based sizing, square image_size*image_size if 0')\n", (1717, 1806), True, 'import tensorflow as tf\n'), ((1833, 2049), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""image_norm"""', '"""default"""', '"""Either \'caffe\' BGR [0,255], \'caffe_rgb\' [0, 255],\n \'frame\' per-frame standardize, \'global\' standardize, \'default\' [-1, 1]."""'], {}), '(\'image_norm\', \'default\',\n """Either \'caffe\' BGR [0,255], \'caffe_rgb\' [0, 255],\n \'frame\' per-frame standardize, \'global\' standardize, \'default\' [-1, 1]."""\n )\n', (1859, 2049), True, 'import tensorflow as tf\n'), ((2068, 2147), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""image_fmt"""', '"""jpg"""', '"""Either \'jpg\', \'png\', or \'gif\'"""'], {}), '(\'image_fmt\', \'jpg\', "Either \'jpg\', \'png\', or \'gif\'")\n', (2094, 2147), True, 'import tensorflow as tf\n'), ((7005, 7146), 'numpy.float32', 'np.float32', (['[center_square + square_size, [center_square[0] + square_size, \n center_square[1] - square_size], center_square - square_size]'], {}), '([center_square + square_size, [center_square[0] + square_size, \n center_square[1] - square_size], center_square - square_size])\n', (7015, 7146), True, 'import numpy as np\n'), ((7280, 7314), 'cv2.getAffineTransform', 'cv2.getAffineTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (7302, 7314), False, 'import cv2\n'), ((7337, 7412), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'M', 'shape_size[::-1]'], {'borderMode': 'cv2.BORDER_REPLICATE'}), '(image, M, shape_size[::-1], borderMode=cv2.BORDER_REPLICATE)\n', (7351, 7412), False, 'import cv2\n'), ((7866, 7916), 'skimage.transform.SimilarityTransform', 'transform.SimilarityTransform', ([], {'translation': '(-center)'}), '(translation=-center)\n', (7895, 7916), False, 'from skimage import transform\n'), ((7930, 8002), 'skimage.transform.AffineTransform', 'transform.AffineTransform', ([], {'rotation': 'rot', 'shear': 'sheer', 'translation': 'center'}), '(rotation=rot, shear=sheer, translation=center)\n', (7955, 8002), False, 'from skimage import transform\n'), ((8051, 8102), 'skimage.transform.warp', 'transform.warp', (['image', 'tform.params'], {'mode': '"""reflect"""'}), "(image, tform.params, mode='reflect')\n", (8065, 8102), False, 'from skimage import transform\n'), ((9486, 9589), 'cv2.remap', 'cv2.remap', (['image', 'grid_x', 'grid_y'], {'borderMode': 'cv2.BORDER_REFLECT_101', 'interpolation': 'cv2.INTER_LINEAR'}), '(image, grid_x, grid_y, borderMode=cv2.BORDER_REFLECT_101,\n interpolation=cv2.INTER_LINEAR)\n', (9495, 9589), False, 'import cv2\n'), ((2741, 2761), 'tensorflow.mul', 'tf.mul', (['image', '(255.0)'], {}), '(image, 255.0)\n', (2747, 2761), True, 'import tensorflow as tf\n'), ((2818, 2839), 'tensorflow.split', 'tf.split', (['(2)', '(3)', 'image'], {}), '(2, 3, image)\n', (2826, 2839), True, 'import tensorflow as tf\n'), ((2856, 2888), 'tensorflow.concat', 'tf.concat', (['(2)', '[blue, green, red]'], {}), '(2, [blue, green, red])\n', (2865, 2888), True, 
'import tensorflow as tf\n'), ((2897, 2922), 'tensorflow.sub', 'tf.sub', (['image', 'caffe_mean'], {}), '(image, caffe_mean)\n', (2903, 2922), True, 'import tensorflow as tf\n'), ((4194, 4246), 'tensorflow.name_scope', 'tf.name_scope', (['scope', '"""decode_image"""', '[image_buffer]'], {}), "(scope, 'decode_image', [image_buffer])\n", (4207, 4246), True, 'import tensorflow as tf\n'), ((5126, 5179), 'tensorflow.image.convert_image_dtype', 'tf.image.convert_image_dtype', (['image'], {'dtype': 'tf.float32'}), '(image, dtype=tf.float32)\n', (5154, 5179), True, 'import tensorflow as tf\n'), ((5838, 5884), 'tensorflow.name_scope', 'tf.name_scope', (['scope', '"""distort_color"""', '[image]'], {}), "(scope, 'distort_color', [image])\n", (5851, 5884), True, 'import tensorflow as tf\n'), ((6650, 6683), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['image', '(0.0)', '(1.0)'], {}), '(image, 0.0, 1.0)\n', (6666, 6683), True, 'import tensorflow as tf\n'), ((6826, 6853), 'numpy.random.RandomState', 'np.random.RandomState', (['None'], {}), '(None)\n', (6847, 6853), True, 'import numpy as np\n'), ((6927, 6949), 'numpy.float32', 'np.float32', (['shape_size'], {}), '(shape_size)\n', (6937, 6949), True, 'import numpy as np\n'), ((7609, 7636), 'numpy.random.RandomState', 'np.random.RandomState', (['None'], {}), '(None)\n', (7630, 7636), True, 'import numpy as np\n'), ((7659, 7697), 'numpy.random.uniform', 'np.random.uniform', (['(-rotation)', 'rotation'], {}), '(-rotation, rotation)\n', (7676, 7697), True, 'import numpy as np\n'), ((7722, 7754), 'numpy.random.uniform', 'np.random.uniform', (['(-shear)', 'shear'], {}), '(-shear, shear)\n', (7739, 7754), True, 'import numpy as np\n'), ((8341, 8368), 'numpy.random.RandomState', 'np.random.RandomState', (['None'], {}), '(None)\n', (8362, 8368), True, 'import numpy as np\n'), ((9188, 9224), 'cv2.resize', 'cv2.resize', (['rand_x', 'shape_size[::-1]'], {}), '(rand_x, shape_size[::-1])\n', (9198, 9224), False, 'import cv2\n'), ((9242, 9278), 'cv2.resize', 'cv2.resize', (['rand_y', 'shape_size[::-1]'], {}), '(rand_y, shape_size[::-1])\n', (9252, 9278), False, 'import cv2\n'), ((9313, 9337), 'numpy.arange', 'np.arange', (['shape_size[1]'], {}), '(shape_size[1])\n', (9322, 9337), True, 'import numpy as np\n'), ((9339, 9363), 'numpy.arange', 'np.arange', (['shape_size[0]'], {}), '(shape_size[0])\n', (9348, 9363), True, 'import numpy as np\n'), ((10928, 10995), 'tensorflow.name_scope', 'tf.name_scope', (['scope', '"""distort_image"""', '[image, height, width, bbox]'], {}), "(scope, 'distort_image', [image, height, width, bbox])\n", (10941, 10995), True, 'import tensorflow as tf\n'), ((13284, 13322), 'tensorflow.slice', 'tf.slice', (['image', 'bbox_begin', 'bbox_size'], {}), '(image, bbox_begin, bbox_size)\n', (13292, 13322), True, 'import tensorflow as tf\n'), ((13515, 13586), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['distorted_image', '[height, width]', 'resize_method'], {}), '(distorted_image, [height, width], resize_method)\n', (13537, 13586), True, 'import tensorflow as tf\n'), ((15185, 15243), 'tensorflow.name_scope', 'tf.name_scope', (['scope', '"""eval_image"""', '[image, height, width]'], {}), "(scope, 'eval_image', [image, height, width])\n", (15198, 15243), True, 'import tensorflow as tf\n'), ((15519, 15543), 'tensorflow.expand_dims', 'tf.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (15533, 15543), True, 'import tensorflow as tf\n'), ((15560, 15629), 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['image', 
'[height, width]'], {'align_corners': '(False)'}), '(image, [height, width], align_corners=False)\n', (15584, 15629), True, 'import tensorflow as tf\n'), ((15646, 15668), 'tensorflow.squeeze', 'tf.squeeze', (['image', '[0]'], {}), '(image, [0])\n', (15656, 15668), True, 'import tensorflow as tf\n'), ((3057, 3077), 'tensorflow.mul', 'tf.mul', (['image', '(255.0)'], {}), '(image, 255.0)\n', (3063, 3077), True, 'import tensorflow as tf\n'), ((3103, 3135), 'tensorflow.gather', 'tf.gather', (['caffe_mean', '[2, 1, 0]'], {}), '(caffe_mean, [2, 1, 0])\n', (3112, 3135), True, 'import tensorflow as tf\n'), ((3152, 3181), 'tensorflow.sub', 'tf.sub', (['image', 'caffe_mean_rgb'], {}), '(image, caffe_mean_rgb)\n', (3158, 3181), True, 'import tensorflow as tf\n'), ((4595, 4644), 'tensorflow.image.decode_png', 'tf.image.decode_png', (['image_buffer'], {'channels': 'depth'}), '(image_buffer, channels=depth)\n', (4614, 4644), True, 'import tensorflow as tf\n'), ((5978, 6035), 'tensorflow.image.random_brightness', 'tf.image.random_brightness', (['image'], {'max_delta': '(32.0 / 255.0)'}), '(image, max_delta=32.0 / 255.0)\n', (6004, 6035), True, 'import tensorflow as tf\n'), ((6054, 6109), 'tensorflow.image.random_saturation', 'tf.image.random_saturation', (['image'], {'lower': '(0.5)', 'upper': '(1.5)'}), '(image, lower=0.5, upper=1.5)\n', (6080, 6109), True, 'import tensorflow as tf\n'), ((6130, 6177), 'tensorflow.image.random_hue', 'tf.image.random_hue', (['image'], {'max_delta': 'hue_delta'}), '(image, max_delta=hue_delta)\n', (6149, 6177), True, 'import tensorflow as tf\n'), ((6198, 6251), 'tensorflow.image.random_contrast', 'tf.image.random_contrast', (['image'], {'lower': '(0.5)', 'upper': '(1.5)'}), '(image, lower=0.5, upper=1.5)\n', (6222, 6251), True, 'import tensorflow as tf\n'), ((7821, 7843), 'numpy.float32', 'np.float32', (['shape_size'], {}), '(shape_size)\n', (7831, 7843), True, 'import numpy as np\n'), ((11320, 11405), 'tensorflow.summary.image', 'tf.summary.image', (["('image_with_bounding_boxes%s' % summary_suffix)", 'image_with_box'], {}), "('image_with_bounding_boxes%s' % summary_suffix, image_with_box\n )\n", (11336, 11405), True, 'import tensorflow as tf\n'), ((12016, 12031), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (12024, 12031), True, 'import tensorflow as tf\n'), ((12535, 12638), 'tensorflow.summary.image', 'tf.summary.image', (["('images_with_distorted_bounding_box%s' % summary_suffix)", 'image_with_distorted_box'], {}), "('images_with_distorted_bounding_box%s' % summary_suffix,\n image_with_distorted_box)\n", (12551, 12638), True, 'import tensorflow as tf\n'), ((14326, 14374), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['distorted_image'], {}), '(distorted_image)\n', (14357, 14374), True, 'import tensorflow as tf\n'), ((14435, 14480), 'tensorflow.image.random_flip_up_down', 'tf.image.random_flip_up_down', (['distorted_image'], {}), '(distorted_image)\n', (14463, 14480), True, 'import tensorflow as tf\n'), ((15363, 15441), 'tensorflow.image.central_crop', 'tf.image.central_crop', (['image'], {'central_fraction': "params['central_crop_fraction']"}), "(image, central_fraction=params['central_crop_fraction'])\n", (15384, 15441), True, 'import tensorflow as tf\n'), ((3288, 3332), 'tensorflow.nn.moments', 'tf.nn.moments', (['image'], {'axes': '[0, 1]', 'shift': '(0.3)'}), '(image, axes=[0, 1], shift=0.3)\n', (3301, 3332), True, 'import tensorflow as tf\n'), ((3390, 3409), 'tensorflow.sub', 'tf.sub', (['image', 'mean'], {}), 
'(image, mean)\n', (3396, 3409), True, 'import tensorflow as tf\n'), ((3426, 3444), 'tensorflow.div', 'tf.div', (['image', 'std'], {}), '(image, std)\n', (3432, 3444), True, 'import tensorflow as tf\n'), ((4728, 4761), 'tensorflow.image.decode_gif', 'tf.image.decode_gif', (['image_buffer'], {}), '(image_buffer)\n', (4747, 4761), True, 'import tensorflow as tf\n'), ((4857, 4907), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['image_buffer'], {'channels': 'depth'}), '(image_buffer, channels=depth)\n', (4877, 4907), True, 'import tensorflow as tf\n'), ((6306, 6363), 'tensorflow.image.random_brightness', 'tf.image.random_brightness', (['image'], {'max_delta': '(32.0 / 255.0)'}), '(image, max_delta=32.0 / 255.0)\n', (6332, 6363), True, 'import tensorflow as tf\n'), ((6382, 6435), 'tensorflow.image.random_contrast', 'tf.image.random_contrast', (['image'], {'lower': '(0.5)', 'upper': '(1.5)'}), '(image, lower=0.5, upper=1.5)\n', (6406, 6435), True, 'import tensorflow as tf\n'), ((6456, 6511), 'tensorflow.image.random_saturation', 'tf.image.random_saturation', (['image'], {'lower': '(0.5)', 'upper': '(1.5)'}), '(image, lower=0.5, upper=1.5)\n', (6482, 6511), True, 'import tensorflow as tf\n'), ((6532, 6579), 'tensorflow.image.random_hue', 'tf.image.random_hue', (['image'], {'max_delta': 'hue_delta'}), '(image, max_delta=hue_delta)\n', (6551, 6579), True, 'import tensorflow as tf\n'), ((11276, 11300), 'tensorflow.expand_dims', 'tf.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (11290, 11300), True, 'import tensorflow as tf\n'), ((12483, 12507), 'tensorflow.expand_dims', 'tf.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (12497, 12507), True, 'import tensorflow as tf\n'), ((13855, 13889), 'tensorflow.expand_dims', 'tf.expand_dims', (['distorted_image', '(0)'], {}), '(distorted_image, 0)\n', (13869, 13889), True, 'import tensorflow as tf\n'), ((14730, 14764), 'tensorflow.expand_dims', 'tf.expand_dims', (['distorted_image', '(0)'], {}), '(distorted_image, 0)\n', (14744, 14764), True, 'import tensorflow as tf\n'), ((3355, 3373), 'tensorflow.add', 'tf.add', (['var', '(0.001)'], {}), '(var, 0.001)\n', (3361, 3373), True, 'import tensorflow as tf\n'), ((3545, 3578), 'tensorflow.sub', 'tf.sub', (['image', 'global_mean_std[0]'], {}), '(image, global_mean_std[0])\n', (3551, 3578), True, 'import tensorflow as tf\n'), ((3595, 3628), 'tensorflow.div', 'tf.div', (['image', 'global_mean_std[1]'], {}), '(image, global_mean_std[1])\n', (3601, 3628), True, 'import tensorflow as tf\n'), ((3798, 3816), 'tensorflow.sub', 'tf.sub', (['image', '(0.5)'], {}), '(image, 0.5)\n', (3804, 3816), True, 'import tensorflow as tf\n'), ((3833, 3851), 'tensorflow.mul', 'tf.mul', (['image', '(2.0)'], {}), '(image, 2.0)\n', (3839, 3851), True, 'import tensorflow as tf\n'), ((12830, 12921), 'tensorflow.py_func', 'tf.py_func', (['distort_affine_skimage', '[image, rotation_range, shear_range]', '[tf.float32]'], {}), '(distort_affine_skimage, [image, rotation_range, shear_range], [\n tf.float32])\n', (12840, 12921), True, 'import tensorflow as tf\n'), ((13991, 14055), 'tensorflow.py_func', 'tf.py_func', (['distort_elastic_cv2', '[distorted_image]', '[tf.float32]'], {}), '(distort_elastic_cv2, [distorted_image], [tf.float32])\n', (14001, 14055), True, 'import tensorflow as tf\n')]
|
# Copyright 2022 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import unittest
from keras import Input, Model
from keras.layers import Conv2D, Conv2DTranspose
import model_compression_toolkit as mct
from model_compression_toolkit import QuantizationConfig, QuantizationErrorMethod
from model_compression_toolkit import CoreConfig
from model_compression_toolkit.core.common.bias_correction.compute_bias_correction_of_graph import \
compute_bias_correction_of_graph
from model_compression_toolkit.core.common.constants import RANGE_MIN, RANGE_MAX
from model_compression_toolkit.core.common.mixed_precision.bit_width_setter import set_bit_widths
from model_compression_toolkit.core.common.post_training_quantization import _quantize_fixed_bit_widths_graph
from model_compression_toolkit.core.common.quantization.quantization_analyzer import analyzer_graph
from model_compression_toolkit.core.common.quantization.quantization_params_generation.qparams_computation import \
calculate_quantization_params
from model_compression_toolkit.core.common.quantization.set_node_quantization_config import \
set_quantization_configuration_to_graph
from model_compression_toolkit.core.common.model_collector import ModelCollector
from model_compression_toolkit.core.tpc_models.keras_tp_models.keras_default import generate_keras_default_tpc
from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
def get_random_weights(kernel, in_channels, out_channels):
return np.random.normal(size=[kernel, kernel, in_channels, out_channels])
def create_network():
num_conv_channels = 4
kernel = 3
conv_w1 = get_random_weights(kernel, num_conv_channels, num_conv_channels)
conv_w2 = get_random_weights(kernel, num_conv_channels, num_conv_channels)
inputs = Input(shape=(16, 16, num_conv_channels))
x = Conv2D(num_conv_channels, kernel, use_bias=False)(inputs)
outputs = Conv2DTranspose(num_conv_channels, kernel, use_bias=False)(x)
model = Model(inputs=inputs, outputs=outputs)
model.layers[1].set_weights([conv_w1])
model.layers[2].set_weights([conv_w2])
return model
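# Added note (not part of the original test): a uniform quantizer over a selected range
# [range_min, range_max] with n bits has a step size of (range_max - range_min) / (2**n - 1).
# The assertions in the test class below only verify that every per-channel range contains
# 0, which is a necessary condition for zero-valued weights to remain representable.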
class TestUniformRangeSelectionWeights(unittest.TestCase):
def test_per_channel_weights_uniform_range_selection_no_clipping(self):
self.run_test_for_threshold_method(QuantizationErrorMethod.NOCLIPPING)
def test_weights_uniform_range_selection_no_clipping(self):
self.run_test_for_threshold_method(QuantizationErrorMethod.NOCLIPPING, per_channel=False)
def test_per_channel_weights_uniform_range_selection_mse(self):
self.run_test_for_threshold_method(QuantizationErrorMethod.MSE)
def test_weights_uniform_range_selection_mse(self):
self.run_test_for_threshold_method(QuantizationErrorMethod.MSE, per_channel=False)
def test_per_channel_weights_uniform_range_selection_mae(self):
self.run_test_for_threshold_method(QuantizationErrorMethod.MAE)
def test_weights_uniform_range_selection_mae(self):
self.run_test_for_threshold_method(QuantizationErrorMethod.MAE, per_channel=False)
def test_per_channel_weights_uniform_range_selection_lp(self):
self.run_test_for_threshold_method(QuantizationErrorMethod.LP)
def test_weights_uniform_range_selection_lp(self):
self.run_test_for_threshold_method(QuantizationErrorMethod.LP, per_channel=False)
def test_per_channel_weights_uniform_range_selection_kl(self):
self.run_test_for_threshold_method(QuantizationErrorMethod.KL)
def test_weights_uniform_range_selection_kl(self):
self.run_test_for_threshold_method(QuantizationErrorMethod.KL, per_channel=False)
def run_test_for_threshold_method(self, threshold_method, per_channel=True):
qc = QuantizationConfig(weights_error_method=threshold_method,
weights_per_channel_threshold=per_channel)
core_config = CoreConfig(n_iter=1, quantization_config=qc)
tp = generate_test_tp_model({
'weights_quantization_method': mct.target_platform.QuantizationMethod.UNIFORM})
tpc = generate_keras_default_tpc(name="uniform_range_selection_test", tp_model=tp)
fw_info = DEFAULT_KERAS_INFO
in_model = create_network()
keras_impl = KerasImplementation()
graph = keras_impl.model_reader(in_model, None) # model reading
graph.set_tpc(tpc)
graph.set_fw_info(fw_info)
graph = set_quantization_configuration_to_graph(graph=graph,
quant_config=core_config.quantization_config,
mixed_precision_enable=core_config.mixed_precision_enable)
for node in graph.nodes:
node.prior_info = keras_impl.get_node_prior_info(node=node,
fw_info=fw_info,
graph=graph)
analyzer_graph(keras_impl.attach_sc_to_node,
graph,
fw_info)
mi = ModelCollector(graph,
fw_info=DEFAULT_KERAS_INFO,
fw_impl=keras_impl)
for i in range(10):
mi.infer([np.random.randn(1, 16, 16, 4)])
calculate_quantization_params(graph,
fw_info,
fw_impl=keras_impl)
tg = compute_bias_correction_of_graph(graph,
fw_info,
keras_impl)
tg = set_bit_widths(core_config.mixed_precision_enable,
tg,
None)
quantized_model, user_info = _quantize_fixed_bit_widths_graph(False,
fw_info,
None,
lambda: [np.random.randn(1, 16, 16, 4)],
None,
tg,
keras_impl)
nodes_list = list(graph.nodes)
conv1_min = nodes_list[0].candidates_quantization_cfg[0].weights_quantization_cfg.weights_quantization_params[RANGE_MIN].flatten()
conv2_min = nodes_list[1].candidates_quantization_cfg[0].weights_quantization_cfg.weights_quantization_params[RANGE_MIN].flatten()
conv1_max = nodes_list[0].candidates_quantization_cfg[0].weights_quantization_cfg.weights_quantization_params[RANGE_MAX].flatten()
conv2_max = nodes_list[1].candidates_quantization_cfg[0].weights_quantization_cfg.weights_quantization_params[RANGE_MAX].flatten()
for range_min, range_max in list(zip(conv1_min, conv1_max)):
self.assertTrue(range_min <= 0 <= range_max,
msg=f"First conv layer quantization range ({range_min}, {range_max}) does not include 0")
for range_min, range_max in list(zip(conv2_min, conv2_max)):
self.assertTrue(range_min <= 0 <= range_max,
msg=f"First conv layer quantization range ({range_min}, {range_max}) does not include 0")
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"keras.Input",
"model_compression_toolkit.core.tpc_models.keras_tp_models.keras_default.generate_keras_default_tpc",
"keras.Model",
"model_compression_toolkit.core.common.quantization.quantization_analyzer.analyzer_graph",
"model_compression_toolkit.core.common.mixed_precision.bit_width_setter.set_bit_widths",
"numpy.random.randn",
"model_compression_toolkit.core.keras.keras_implementation.KerasImplementation",
"model_compression_toolkit.core.common.quantization.quantization_params_generation.qparams_computation.calculate_quantization_params",
"model_compression_toolkit.QuantizationConfig",
"keras.layers.Conv2DTranspose",
"model_compression_toolkit.CoreConfig",
"model_compression_toolkit.core.common.quantization.set_node_quantization_config.set_quantization_configuration_to_graph",
"model_compression_toolkit.core.common.model_collector.ModelCollector",
"model_compression_toolkit.core.common.bias_correction.compute_bias_correction_of_graph.compute_bias_correction_of_graph",
"keras.layers.Conv2D",
"numpy.random.normal",
"tests.common_tests.helpers.generate_test_tp_model.generate_test_tp_model"
] |
[((2331, 2397), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[kernel, kernel, in_channels, out_channels]'}), '(size=[kernel, kernel, in_channels, out_channels])\n', (2347, 2397), True, 'import numpy as np\n'), ((2635, 2675), 'keras.Input', 'Input', ([], {'shape': '(16, 16, num_conv_channels)'}), '(shape=(16, 16, num_conv_channels))\n', (2640, 2675), False, 'from keras import Input, Model\n'), ((2830, 2867), 'keras.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'outputs'}), '(inputs=inputs, outputs=outputs)\n', (2835, 2867), False, 'from keras import Input, Model\n'), ((8292, 8307), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8305, 8307), False, 'import unittest\n'), ((2684, 2733), 'keras.layers.Conv2D', 'Conv2D', (['num_conv_channels', 'kernel'], {'use_bias': '(False)'}), '(num_conv_channels, kernel, use_bias=False)\n', (2690, 2733), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((2756, 2814), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['num_conv_channels', 'kernel'], {'use_bias': '(False)'}), '(num_conv_channels, kernel, use_bias=False)\n', (2771, 2814), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((4595, 4699), 'model_compression_toolkit.QuantizationConfig', 'QuantizationConfig', ([], {'weights_error_method': 'threshold_method', 'weights_per_channel_threshold': 'per_channel'}), '(weights_error_method=threshold_method,\n weights_per_channel_threshold=per_channel)\n', (4613, 4699), False, 'from model_compression_toolkit import QuantizationConfig, QuantizationErrorMethod\n'), ((4750, 4794), 'model_compression_toolkit.CoreConfig', 'CoreConfig', ([], {'n_iter': '(1)', 'quantization_config': 'qc'}), '(n_iter=1, quantization_config=qc)\n', (4760, 4794), False, 'from model_compression_toolkit import CoreConfig\n'), ((4809, 4917), 'tests.common_tests.helpers.generate_test_tp_model.generate_test_tp_model', 'generate_test_tp_model', (["{'weights_quantization_method': mct.target_platform.QuantizationMethod.UNIFORM}"], {}), "({'weights_quantization_method': mct.target_platform.\n QuantizationMethod.UNIFORM})\n", (4831, 4917), False, 'from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model\n'), ((4940, 5016), 'model_compression_toolkit.core.tpc_models.keras_tp_models.keras_default.generate_keras_default_tpc', 'generate_keras_default_tpc', ([], {'name': '"""uniform_range_selection_test"""', 'tp_model': 'tp'}), "(name='uniform_range_selection_test', tp_model=tp)\n", (4966, 5016), False, 'from model_compression_toolkit.core.tpc_models.keras_tp_models.keras_default import generate_keras_default_tpc\n'), ((5112, 5133), 'model_compression_toolkit.core.keras.keras_implementation.KerasImplementation', 'KerasImplementation', ([], {}), '()\n', (5131, 5133), False, 'from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation\n'), ((5285, 5452), 'model_compression_toolkit.core.common.quantization.set_node_quantization_config.set_quantization_configuration_to_graph', 'set_quantization_configuration_to_graph', ([], {'graph': 'graph', 'quant_config': 'core_config.quantization_config', 'mixed_precision_enable': 'core_config.mixed_precision_enable'}), '(graph=graph, quant_config=\n core_config.quantization_config, mixed_precision_enable=core_config.\n mixed_precision_enable)\n', (5324, 5452), False, 'from model_compression_toolkit.core.common.quantization.set_node_quantization_config import set_quantization_configuration_to_graph\n'), ((5820, 5880), 
'model_compression_toolkit.core.common.quantization.quantization_analyzer.analyzer_graph', 'analyzer_graph', (['keras_impl.attach_sc_to_node', 'graph', 'fw_info'], {}), '(keras_impl.attach_sc_to_node, graph, fw_info)\n', (5834, 5880), False, 'from model_compression_toolkit.core.common.quantization.quantization_analyzer import analyzer_graph\n'), ((5941, 6010), 'model_compression_toolkit.core.common.model_collector.ModelCollector', 'ModelCollector', (['graph'], {'fw_info': 'DEFAULT_KERAS_INFO', 'fw_impl': 'keras_impl'}), '(graph, fw_info=DEFAULT_KERAS_INFO, fw_impl=keras_impl)\n', (5955, 6010), False, 'from model_compression_toolkit.core.common.model_collector import ModelCollector\n'), ((6159, 6224), 'model_compression_toolkit.core.common.quantization.quantization_params_generation.qparams_computation.calculate_quantization_params', 'calculate_quantization_params', (['graph', 'fw_info'], {'fw_impl': 'keras_impl'}), '(graph, fw_info, fw_impl=keras_impl)\n', (6188, 6224), False, 'from model_compression_toolkit.core.common.quantization.quantization_params_generation.qparams_computation import calculate_quantization_params\n'), ((6315, 6375), 'model_compression_toolkit.core.common.bias_correction.compute_bias_correction_of_graph.compute_bias_correction_of_graph', 'compute_bias_correction_of_graph', (['graph', 'fw_info', 'keras_impl'], {}), '(graph, fw_info, keras_impl)\n', (6347, 6375), False, 'from model_compression_toolkit.core.common.bias_correction.compute_bias_correction_of_graph import compute_bias_correction_of_graph\n'), ((6481, 6541), 'model_compression_toolkit.core.common.mixed_precision.bit_width_setter.set_bit_widths', 'set_bit_widths', (['core_config.mixed_precision_enable', 'tg', 'None'], {}), '(core_config.mixed_precision_enable, tg, None)\n', (6495, 6541), False, 'from model_compression_toolkit.core.common.mixed_precision.bit_width_setter import set_bit_widths\n'), ((6118, 6147), 'numpy.random.randn', 'np.random.randn', (['(1)', '(16)', '(16)', '(4)'], {}), '(1, 16, 16, 4)\n', (6133, 6147), True, 'import numpy as np\n'), ((6910, 6939), 'numpy.random.randn', 'np.random.randn', (['(1)', '(16)', '(16)', '(4)'], {}), '(1, 16, 16, 4)\n', (6925, 6939), True, 'import numpy as np\n')]
|
import numpy as np
import copy
from pommerman import constants
from pommerman import utility
STEP_COUNT_POS = 0
DONE_POS = 1
AMMO_POS = 0
BLAST_STRENGTH_POS = 1
CAN_KICK_POS = 2
ALIVE_POS = 3
ROW_POS = 4
COL_POS = 5
class EnvSimulator:
@staticmethod
def get_initial_game_data(obs, my_id, max_steps=1000):
board_size = len(obs['board'])
game_data = EnvSimulator.get_board(board_size, obs['board'])
        # agents appear on the observed board as items 10 and 11
        agent_0_pos = EnvSimulator.get_position(game_data, 10, True)
        agent_1_pos = EnvSimulator.get_position(game_data, 11, True)
game_info = np.zeros((1, board_size)).astype(np.uint16)
game_info[0, STEP_COUNT_POS] = int(obs['step_count'])
game_info[0, DONE_POS] = 0
player1row = np.zeros((1, board_size)).astype(np.uint16)
player1row[0, AMMO_POS] = int(obs['ammo'])
player1row[0, BLAST_STRENGTH_POS] = int(obs['blast_strength'])
player1row[0, CAN_KICK_POS] = int(obs['can_kick'])
player1row[0, ALIVE_POS] = 1
player1row[0, ROW_POS] = agent_0_pos[0]
player1row[0, COL_POS] = agent_0_pos[1]
player2row = np.zeros((1, board_size)).astype(np.uint16)
player2row[0, AMMO_POS] = 1
player2row[0, BLAST_STRENGTH_POS] = constants.DEFAULT_BLAST_STRENGTH
player2row[0, CAN_KICK_POS] = False
player2row[0, ALIVE_POS] = 1
player2row[0, ROW_POS] = agent_1_pos[0]
player2row[0, COL_POS] = agent_1_pos[1]
bomb = np.zeros((1, board_size)).astype(np.uint16)
game_data = np.vstack([game_data, game_info, player1row, player2row])
return game_data
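    # Added commentary (not part of the original code): get_initial_game_data stacks the
    # observed board with three extra rows: a game-info row (step count, done flag) and one
    # row per agent (ammo, blast strength, can_kick, alive, row, col). Bomb cells are later
    # encoded as 10000 + owner*1000 + direction*100 + blast_strength*10 + life, which is the
    # layout act() below decodes.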
@staticmethod
def update(game_data, obs, my_id):
enemy_id = 0
if my_id == 0:
enemy_id = 1
step_count = EnvSimulator._get_game_value(game_data, STEP_COUNT_POS)
if game_data.shape[1] != len(obs['board']):
raise ValueError('Invalid update: boardsize different!')
if step_count + 1 != int(obs['step_count']) and (step_count != 0 or int(obs['step_count']) != 0):
raise ValueError('Invalid update: missed step count!')
EnvSimulator._set_game_value(game_data, STEP_COUNT_POS, obs['step_count'])
new_board = EnvSimulator._get_game_data_from_obs(obs)
new_board = EnvSimulator.get_board(game_data.shape[1], obs['board'])
new_bomb_life = EnvSimulator.get_board(game_data.shape[1], obs['bomb_life'], 0)
# get actions
actions = {}
        for agent_id in [0, 1]:
            # board items for the two agents are 10 and 11; actions are keyed by 0/1 as act() expects
            old_pos = EnvSimulator.get_position(game_data, agent_id + 10, True)
            new_pos = EnvSimulator.get_position(new_board, agent_id + 10, True)
if old_pos != new_pos:
actions[agent_id] = EnvSimulator.get_direction(old_pos, new_pos).value
elif new_bomb_life[new_pos] == constants.DEFAULT_BOMB_LIFE:
actions[agent_id] = constants.Action.Bomb.value
else:
actions[agent_id] = constants.Action.Stop.value
EnvSimulator.act(game_data, actions)
reset = False
# compare boards
if not EnvSimulator.boards_equal(EnvSimulator.get_game_data_board(game_data), new_board, True):
a1bomb, a2bomb, kick, flame = EnvSimulator.get_boards_differences(
EnvSimulator.get_game_data_board(game_data), new_board)
#print(a1bomb, a2bomb, kick, flame)
if a1bomb and my_id != 0:
ammo = EnvSimulator._get_agent_value(game_data, 0, AMMO_POS)
EnvSimulator._set_agent_value(game_data, 0, AMMO_POS, ammo+1)
elif a2bomb and my_id != 1:
ammo = EnvSimulator._get_agent_value(game_data, 1, AMMO_POS)
EnvSimulator._set_agent_value(game_data, 1, AMMO_POS, ammo + 1)
elif kick and EnvSimulator._get_agent_value(game_data, my_id, CAN_KICK_POS) == int(obs['can_kick']):
EnvSimulator._set_agent_value(game_data, enemy_id, CAN_KICK_POS, 1)
elif flame and EnvSimulator._get_agent_value(game_data, my_id, BLAST_STRENGTH_POS) == int(obs['blast_strength']):
blast = EnvSimulator._get_agent_value(game_data, enemy_id, BLAST_STRENGTH_POS)
EnvSimulator._set_agent_value(game_data, enemy_id, BLAST_STRENGTH_POS, blast+1)
reset = True
EnvSimulator._set_agent_value(game_data, enemy_id, AMMO_POS, int(obs['ammo']))
EnvSimulator._set_agent_value(game_data, enemy_id, BLAST_STRENGTH_POS, int(obs['blast_strength']))
EnvSimulator._set_agent_value(game_data, enemy_id, CAN_KICK_POS, int(obs['can_kick']))
# update board because of items
game_data[0:game_data.shape[1], 0:game_data.shape[1]] = new_board
return game_data, actions, reset
@staticmethod
def _get_game_data_from_obs(obs):
board_size = len(obs['board'])
board = EnvSimulator.get_board(board_size, obs['board'])
blast_strength = obs['bomb_blast_strength']
bomb_life = obs['bomb_life']
for row in range(len(board)):
for col in range(len(board[0])):
if (board[row, col] == 10 or board[row, col] == 11) and blast_strength[row, col] > 0.0:
# agent over bomb
value = 10000 + (board[row, col]-7)*1000 + int(blast_strength[row, col])*10 + int(bomb_life[row, col])
board[row, col] = value
                if board[row, col] == 3:  # bare bomb; its owner is not recoverable from obs
                    # Assumption: attribute unowned bombs to agent 0 (player code 1) so the
                    # 10000 + player*1000 + strength*10 + life encoding stays consistent.
                    value = 10000 + 1 * 1000 + int(blast_strength[row, col]) * 10 + int(bomb_life[row, col])
                    board[row, col] = value
        return board
@staticmethod
def get_game_data_board(game_data):
return game_data[0:game_data.shape[1], 0:game_data.shape[1]]
@staticmethod
def act(game_data, actions):
MIN_FIRE = 20
AGENT_0 = 10
AGENT_1 = 11
if EnvSimulator.get_done(game_data):
return
#print(game_data, actions)
# move objects
pos_agent0_prev = None
pos_agent0 = None
pos_agent1_prev = None
pos_agent1 = None
pos_bomb_prev = []
for row in range(game_data.shape[1]):
for col in range(game_data.shape[1]):
if EnvSimulator._is_fire(game_data, (row, col)):
game_data[row, col] -= 1
if game_data[row, col] == MIN_FIRE:
game_data[row, col] = 0
elif game_data[row, col] == AGENT_1 or game_data[row, col] >= 14000:
pos_agent1_prev = (row, col)
pos_agent1 = EnvSimulator.handle_agent_move(game_data, 1, row, col, actions[1])
elif game_data[row, col] == AGENT_0 or game_data[row, col] >= 13000:
pos_agent0_prev = (row, col)
pos_agent0 = EnvSimulator.handle_agent_move(game_data, 0, row, col, actions[0])
if game_data[row, col] >= 10000:
pos_bomb_prev.append((row, col))
if pos_agent0 == pos_agent1:
pos_agent0 = pos_agent0_prev
pos_agent1 = pos_agent1_prev
# move bombs
pos_bomb = []
change = False
        invalid_values = [constants.Item.Rigid.value, constants.Item.Wood.value, constants.Item.Kick.value,
                          constants.Item.IncrRange.value, constants.Item.ExtraBomb.value]
for bomb_pos in pos_bomb_prev:
bomb = game_data[bomb_pos]
direction = int((bomb % 1000) / 100)
if direction == 0 and bomb_pos == pos_agent0:
if pos_agent0 != pos_agent0_prev: # kick bomb
direction = EnvSimulator.get_direction(pos_agent0_prev, pos_agent0).value
elif int((bomb % 10000) / 1000) != 1 and int((bomb % 10000) / 1000) != 3:
raise ValueError("Fatal Error")
elif direction == 0 and bomb_pos == pos_agent1:
if pos_agent1 != pos_agent1_prev: # kick bomb
direction = EnvSimulator.get_direction(pos_agent1_prev, pos_agent1).value
elif int((bomb % 10000) / 1000) != 2 and int((bomb % 10000) / 1000) != 4:
raise ValueError("Fatal Error")
new_bomb_pos = bomb_pos
if direction > 0:
change = True
row, col = bomb_pos
if EnvSimulator._is_valid_direction(game_data, row, col, direction, invalid_values):
new_bomb_pos = utility.get_next_position(bomb_pos, constants.Action(direction))
if (row, col) == pos_agent0 or (row, col) == pos_agent1:
new_bomb_pos = bomb_pos
pos_bomb.append(new_bomb_pos)
while change:
change = False
# bomb <-> bomb
for i in range(len(pos_bomb)):
pos = pos_bomb[i]
for j in range(len(pos_bomb)):
if i != j and pos == pos_bomb[j]:
pos_bomb[i] = pos_bomb_prev[i]
pos_bomb[j] = pos_bomb_prev[j]
change = True
if pos_bomb[i] == pos_agent0 and (pos_bomb[i] != pos_bomb_prev[i] or pos_agent0 != pos_agent0_prev):
pos_agent0 = pos_agent0_prev
pos_bomb[i] = pos_bomb_prev[i]
change = True
elif pos_bomb[i] == pos_agent1 and (pos_bomb[i] != pos_bomb_prev[i] or pos_agent1 != pos_agent1_prev):
pos_agent1 = pos_agent1_prev
pos_bomb[i] = pos_bomb_prev[i]
change = True
for i in range(len(pos_bomb)):
cur_value = game_data[pos_bomb_prev[i]]
life = int(cur_value % 10) - 1
if 20 < game_data[pos_bomb[i]] < 30:
life = 0
strength = int((cur_value % 100) / 10)
direction = EnvSimulator.get_direction(pos_bomb[i], pos_bomb_prev[i]).value
player = int((cur_value % 10000) / 1000)
if player > 2:
player -= 2
if pos_agent0 == pos_bomb[i] or pos_agent1 == pos_bomb[i]:
player += 2
game_data[pos_bomb_prev[i]] = 0
game_data[pos_bomb[i]] = 10000 + player * 1000 + direction * 100 + strength * 10 + life
# set agent
#print(pos_agent0, pos_agent1)
EnvSimulator._agent_collect(game_data, 0, pos_agent0)
EnvSimulator._agent_collect(game_data, 1, pos_agent1)
if pos_agent0_prev != pos_agent0:
if game_data[pos_agent0_prev] < 10000:
game_data[pos_agent0_prev] = 0
if EnvSimulator._is_fire(game_data, pos_agent0):
EnvSimulator._agent_died(game_data, 0)
else:
game_data[pos_agent0] = AGENT_0
if pos_agent1_prev != pos_agent1:
if game_data[pos_agent1_prev] < 10000:
game_data[pos_agent1_prev] = 0
if EnvSimulator._is_fire(game_data, pos_agent1):
EnvSimulator._agent_died(game_data, 1)
else:
game_data[pos_agent1] = AGENT_1
# fire bombs
fire = True
while fire:
fire = False
for bomb in pos_bomb:
bomb_value = game_data[bomb]
if int(bomb_value % 10) == 0:
strength = int((bomb_value % 100) / 10)
EnvSimulator._set_fire(game_data, bomb[0], bomb[1], True)
EnvSimulator._fire_bomb(game_data, bomb[0], bomb[1], 0, 1, strength - 1) # right
EnvSimulator._fire_bomb(game_data, bomb[0], bomb[1], 0, -1, strength - 1) # left
EnvSimulator._fire_bomb(game_data, bomb[0], bomb[1], 1, 0, strength - 1) # down
EnvSimulator._fire_bomb(game_data, bomb[0], bomb[1], -1, 0, strength - 1) # up
fire = True
#print('result: ', game_data)
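    # Added commentary: act() works in phases: burn down existing flames, move both agents
    # (reverting on a head-on collision), move/kick bombs and resolve bomb-bomb and
    # bomb-agent conflicts, re-encode every bomb with its new owner/direction/life, apply
    # item pickups, place the agents, and finally detonate bombs whose life reached 0,
    # spreading fire recursively via _fire_bomb.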
@staticmethod
def handle_agent_move(game_data, agent_id, row, col, action):
if action == constants.Action.Stop.value:
return row, col
elif action == constants.Action.Bomb.value:
ammo = EnvSimulator._get_agent_value(game_data, agent_id, AMMO_POS)
if game_data[row, col] < 10000 and ammo > 0:
game_data[row, col] = 10009 + (agent_id + 3) * 1000 + EnvSimulator._get_agent_value(game_data, agent_id, BLAST_STRENGTH_POS) * 10
EnvSimulator._set_agent_value(game_data, agent_id, AMMO_POS, ammo-1)
return row, col
else:
invalid_values = [constants.Item.Rigid.value, constants.Item.Wood.value]
if EnvSimulator._is_valid_direction(game_data, row, col, action, invalid_values):
return utility.get_next_position((row, col), constants.Action(action))
else:
return row, col
@staticmethod
def _agent_collect(game_data, agent_id, pos):
item = game_data[pos]
if item == constants.Item.Kick.value:
EnvSimulator._set_agent_value(game_data, agent_id, CAN_KICK_POS, 1)
elif item == constants.Item.ExtraBomb.value:
cur_ammo = EnvSimulator._get_agent_value(game_data, agent_id, AMMO_POS)
EnvSimulator._set_agent_value(game_data, agent_id, AMMO_POS, cur_ammo + 1)
elif item == constants.Item.IncrRange.value:
cur_range = EnvSimulator._get_agent_value(game_data, agent_id, BLAST_STRENGTH_POS)
EnvSimulator._set_agent_value(game_data, agent_id, BLAST_STRENGTH_POS, cur_range + 1)
@staticmethod
def _position_on_board(game_data, row, col):
return all([game_data.shape[1] > row, game_data.shape[1] > col, row >= 0, col >= 0])
@staticmethod
def _is_fire(game_data, pos):
return 20 < game_data[pos] < 30
@staticmethod
def _fire_bomb(game_data, row, col, row_off, col_off, strength):
if strength <= 0:
return
next_row = row + row_off
next_col = col + col_off
if not EnvSimulator._position_on_board(game_data, next_row, next_col):
return
if utility.position_in_items(game_data, (next_row, next_col), [constants.Item.Rigid, constants.Item.Wood]):
return
EnvSimulator._set_fire(game_data, next_row, next_col, False)
EnvSimulator._fire_bomb(game_data, next_row, next_col, row_off, col_off, strength - 1)
@staticmethod
def _set_fire(game_data, row, col, first):
prev_value = game_data[row, col]
if prev_value > 14000 or prev_value == 11:
EnvSimulator._agent_died(game_data, 1)
if prev_value > 13000 or prev_value == 10:
EnvSimulator._agent_died(game_data, 0)
if not first and prev_value > 10000:
prev_value -= int(prev_value % 10)
else:
if first and prev_value > 10000:
# increase ammo
player = int((prev_value % 10000) / 1000)
if player == 1 or player == 3:
player = 0
else:
player = 1
ammo = EnvSimulator._get_agent_value(game_data, player, AMMO_POS)
EnvSimulator._set_agent_value(game_data, player, AMMO_POS, ammo+1)
game_data[row, col] = 22
@staticmethod
def _agent_died(game_data, agent_id):
EnvSimulator._set_agent_value(game_data, agent_id, ALIVE_POS, 0)
EnvSimulator._set_game_value(game_data, DONE_POS, 1)
@staticmethod
def _is_valid_direction(board, row, col, direction, invalid_values=None):
if invalid_values is None:
invalid_values = [item.value for item in [constants.Item.Rigid, constants.Item.Wood]]
if constants.Action(direction) == constants.Action.Stop:
return True
if constants.Action(direction) == constants.Action.Up:
return row - 1 >= 0 and board[row - 1][col] not in invalid_values
if constants.Action(direction) == constants.Action.Down:
return row + 1 < len(board) and board[row + 1][col] not in invalid_values
if constants.Action(direction) == constants.Action.Left:
return col - 1 >= 0 and board[row][col - 1] not in invalid_values
if constants.Action(direction) == constants.Action.Right:
return col + 1 < len(board[0]) and board[row][col + 1] not in invalid_values
raise constants.InvalidAction("We did not receive a valid direction: ", direction)
@staticmethod
def get_direction(position, next_position):
if position == next_position:
return constants.Action.Stop
x, y = position
next_x, next_y = next_position
if x == next_x:
if y < next_y:
return constants.Action.Right
else:
return constants.Action.Left
elif y == next_y:
if x < next_x:
return constants.Action.Down
else:
return constants.Action.Up
raise constants.InvalidAction(
"We did not receive a valid position transition.")
@staticmethod
def _get_agent_value(game_data, agent_id, value):
return game_data[game_data.shape[0] - 2 + agent_id, value]
@staticmethod
def _set_agent_value(game_data, agent_id, value, val):
game_data[game_data.shape[0] - 2 + agent_id, value] = val
@staticmethod
def _get_game_value(game_data, value):
return game_data[game_data.shape[0] - 3, value]
@staticmethod
def _set_game_value(game_data, value, val):
game_data[game_data.shape[0] - 3, value] = val
@staticmethod
def get_done(game_data):
return bool(EnvSimulator._get_game_value(game_data, DONE_POS))
@staticmethod
def get_alive(game_data):
alive = {0: bool(game_data[game_data.shape[0] - 2, ALIVE_POS]),
1: bool(game_data[game_data.shape[0] - 1, ALIVE_POS])}
return alive
@staticmethod
def get_board(board_size, board_array, init_value=constants.Item.Passage.value):
board = np.ones((board_size, board_size)).astype(np.uint16)
board *= init_value
for x in range(board_size):
for y in range(board_size):
board[x, y] = board_array[x][y]
return board
@staticmethod
def get_position(board, item, is_single_pos):
pos = np.where(board == item)
pos = list(zip(pos[0], pos[1]))
if is_single_pos:
if len(pos) != 1:
raise ValueError("Invalid pos count!")
return pos[0]
else:
return pos
@staticmethod
def get_valid_actions(board, flames, bombs, agent, actions):
return actions
@staticmethod
def boards_equal(board1, board2, ignore_items):
if ignore_items:
board1 = copy.deepcopy(board1)
board2 = copy.deepcopy(board2)
board1[board1 == constants.Item.ExtraBomb.value] = constants.Item.Passage.value
board1[board1 == constants.Item.IncrRange.value] = constants.Item.Passage.value
board1[board1 == constants.Item.Kick.value] = constants.Item.Passage.value
board2[board2 == constants.Item.ExtraBomb.value] = constants.Item.Passage.value
board2[board2 == constants.Item.IncrRange.value] = constants.Item.Passage.value
board2[board2 == constants.Item.Kick.value] = constants.Item.Passage.value
comparison = (board1 == board2)
return comparison.all()
@staticmethod
def get_boards_differences(board1, board2):
board1 = copy.deepcopy(board1)
board2 = copy.deepcopy(board2)
board1[board1 == constants.Item.ExtraBomb.value] = constants.Item.Passage.value
board1[board1 == constants.Item.IncrRange.value] = constants.Item.Passage.value
board1[board1 == constants.Item.Kick.value] = constants.Item.Passage.value
board2[board2 == constants.Item.ExtraBomb.value] = constants.Item.Passage.value
board2[board2 == constants.Item.IncrRange.value] = constants.Item.Passage.value
board2[board2 == constants.Item.Kick.value] = constants.Item.Passage.value
a1bomb = a2bomb = kick = flame = False
comparison = (board1 == board2)
        diffs = np.where(comparison == False)  # element-wise test; "is False" never matches a numpy array
if len(diffs) >= 2:
diffs = list(zip(diffs[0], diffs[1]))
for diff in diffs:
prev_item = board1[diff]
new_item = board2[diff]
                if prev_item == constants.Item.Agent1.value and new_item == constants.Item.Bomb.value:
                    a1bomb = True
                elif prev_item == constants.Item.Agent2.value and new_item == constants.Item.Bomb.value:
                    a2bomb = True
                elif prev_item == constants.Item.Passage.value and new_item == constants.Item.Bomb.value:
                    kick = True
                elif new_item == constants.Item.Flames.value:
                    flame = True
flame = True
else:
raise ValueError('Invalid difference between maps.')
else:
print(comparison, "diffs: ", diffs)
return a1bomb, a2bomb, kick, flame
@staticmethod
def get_game_state(game_data):
return game_data, EnvSimulator.get_done(game_data)
@staticmethod
def get_game_data(game_state):
return copy.deepcopy(game_state)
|
[
"copy.deepcopy",
"numpy.zeros",
"numpy.ones",
"numpy.where",
"pommerman.constants.InvalidAction",
"pommerman.constants.Action",
"pommerman.utility.position_in_items",
"numpy.vstack"
] |
[((1538, 1595), 'numpy.vstack', 'np.vstack', (['[game_data, game_info, player1row, player2row]'], {}), '([game_data, game_info, player1row, player2row])\n', (1547, 1595), True, 'import numpy as np\n'), ((14201, 14309), 'pommerman.utility.position_in_items', 'utility.position_in_items', (['game_data', '(next_row, next_col)', '[constants.Item.Rigid, constants.Item.Wood]'], {}), '(game_data, (next_row, next_col), [constants.Item.\n Rigid, constants.Item.Wood])\n', (14226, 14309), False, 'from pommerman import utility\n'), ((16500, 16576), 'pommerman.constants.InvalidAction', 'constants.InvalidAction', (['"""We did not receive a valid direction: """', 'direction'], {}), "('We did not receive a valid direction: ', direction)\n", (16523, 16576), False, 'from pommerman import constants\n'), ((17120, 17194), 'pommerman.constants.InvalidAction', 'constants.InvalidAction', (['"""We did not receive a valid position transition."""'], {}), "('We did not receive a valid position transition.')\n", (17143, 17194), False, 'from pommerman import constants\n'), ((18493, 18516), 'numpy.where', 'np.where', (['(board == item)'], {}), '(board == item)\n', (18501, 18516), True, 'import numpy as np\n'), ((19719, 19740), 'copy.deepcopy', 'copy.deepcopy', (['board1'], {}), '(board1)\n', (19732, 19740), False, 'import copy\n'), ((19758, 19779), 'copy.deepcopy', 'copy.deepcopy', (['board2'], {}), '(board2)\n', (19771, 19779), False, 'import copy\n'), ((20402, 20431), 'numpy.where', 'np.where', (['(comparison is False)'], {}), '(comparison is False)\n', (20410, 20431), True, 'import numpy as np\n'), ((21472, 21497), 'copy.deepcopy', 'copy.deepcopy', (['game_state'], {}), '(game_state)\n', (21485, 21497), False, 'import copy\n'), ((15813, 15840), 'pommerman.constants.Action', 'constants.Action', (['direction'], {}), '(direction)\n', (15829, 15840), False, 'from pommerman import constants\n'), ((15903, 15930), 'pommerman.constants.Action', 'constants.Action', (['direction'], {}), '(direction)\n', (15919, 15930), False, 'from pommerman import constants\n'), ((16045, 16072), 'pommerman.constants.Action', 'constants.Action', (['direction'], {}), '(direction)\n', (16061, 16072), False, 'from pommerman import constants\n'), ((16197, 16224), 'pommerman.constants.Action', 'constants.Action', (['direction'], {}), '(direction)\n', (16213, 16224), False, 'from pommerman import constants\n'), ((16341, 16368), 'pommerman.constants.Action', 'constants.Action', (['direction'], {}), '(direction)\n', (16357, 16368), False, 'from pommerman import constants\n'), ((18955, 18976), 'copy.deepcopy', 'copy.deepcopy', (['board1'], {}), '(board1)\n', (18968, 18976), False, 'import copy\n'), ((18998, 19019), 'copy.deepcopy', 'copy.deepcopy', (['board2'], {}), '(board2)\n', (19011, 19019), False, 'import copy\n'), ((584, 609), 'numpy.zeros', 'np.zeros', (['(1, board_size)'], {}), '((1, board_size))\n', (592, 609), True, 'import numpy as np\n'), ((746, 771), 'numpy.zeros', 'np.zeros', (['(1, board_size)'], {}), '((1, board_size))\n', (754, 771), True, 'import numpy as np\n'), ((1125, 1150), 'numpy.zeros', 'np.zeros', (['(1, board_size)'], {}), '((1, board_size))\n', (1133, 1150), True, 'import numpy as np\n'), ((1474, 1499), 'numpy.zeros', 'np.zeros', (['(1, board_size)'], {}), '((1, board_size))\n', (1482, 1499), True, 'import numpy as np\n'), ((18185, 18218), 'numpy.ones', 'np.ones', (['(board_size, board_size)'], {}), '((board_size, board_size))\n', (18192, 18218), True, 'import numpy as np\n'), ((8550, 8577), 'pommerman.constants.Action', 
'constants.Action', (['direction'], {}), '(direction)\n', (8566, 8577), False, 'from pommerman import constants\n'), ((12868, 12892), 'pommerman.constants.Action', 'constants.Action', (['action'], {}), '(action)\n', (12884, 12892), False, 'from pommerman import constants\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 30 16:33:05 2018
@author: lhe39759
"""
import keras
import os
import PIL
import numpy as np
import tensorflow as tf
import sys
sys.path.append(r'C:\Users\lhe39759\Documents\GitHub/')
from SliceOPy import NetSlice, DataSlice
from model.losses import bce_dice_loss, bce_dice_loss_jake, dice_loss, weighted_bce_dice_loss, weighted_dice_loss, dice_coeff
def loadImg():
path = "Patterns/"
patOptions = ["annealing_twins","Brass bronze","Ductile_Cast_Iron","Grey_Cast_Iron","hypoeutectoid_steel","malleable_cast_iron","superalloy"]
image_array = []
for folder in patOptions:
folder_array = []
for filename in os.listdir(path+folder+"/"):
if filename.endswith(".png"):
insertImage1 = np.asarray(PIL.Image.open(path+folder+"/"+filename).convert('L'))
insertImage1.setflags(write=1)
insertImage1 = np.pad(insertImage1, (300,300), 'symmetric')
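                # Symmetric padding by 300 px on every side guarantees the image is large
                # enough before cropping a fixed 256x256 patch on the next line.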
folder_array.append(np.array(insertImage1[:256,:256]))
image_array.append(np.array(folder_array))
return (np.array(image_array))
def generateData():
images = loadImg()
features = []
labels = []
for folder in range(0,images.shape[0]):
for image in images[folder]:
features.append(image)
labels.append(folder)
return np.array(features),np.array(labels).reshape(len(labels),1)
features, labels = generateData()
#%%
#model = keras.Sequential()
#model.add(keras.layers.Conv2D(64, (3, 3), input_shape=(256,256,1),padding="same",data_format= keras.backend.image_data_format()))
#model.add(keras.layers.Activation('relu'))
#model.add(keras.layers.MaxPooling2D(pool_size=(2, 2),data_format= keras.backend.image_data_format()))
##
#model.add(keras.layers.Conv2D(32, (3, 3),padding="same",data_format= keras.backend.image_data_format()))
#
###
## model.add(keras.layers.Conv2D(64, (2, 2),data_format= K.image_data_format()))
## model.add(keras.layers.Activation('relu'))
## model.add(keras.layers.MaxPooling2D(pool_size=(2, 2),data_format= K.image_data_format()))
#model.add(keras.layers.Flatten()) # this converts our 3D feature maps to 1D feature vectors
#model.add(keras.layers.Dense(200))
#model.add(keras.layers.Activation('relu'))
## model.add(keras.layers.Dense(64))
## model.add(keras.layers.Activation('relu'))
## model.add(keras.layers.Dropout(0.25))
#model.add(keras.layers.Dense(7))
#model.add(keras.layers.Activation('sigmoid'))
#model.add(keras.layers.Softmax())
model = keras.Sequential([
keras.layers.Conv2D(32,(3, 3),input_shape=(256,256,1), activation='relu'),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Conv2D(64, (2,2), activation='relu'),
# keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Flatten(),
keras.layers.Dense(32, activation=tf.nn.sigmoid),
keras.layers.Dense(16, activation=tf.nn.sigmoid),
keras.layers.Dense(7, activation=tf.nn.softmax)
])
print(features.shape,labels.shape)
data = DataSlice(Features = features, Labels = labels,Shuffle=True,Split_Ratio = 0.7,Channel_Features= (256,256))
data.oneHot(7)
print(data)
model = NetSlice(model,'keras', Data_Slice=data)
#model.loadModel('pattern_model',customObject={'dice_coeff':dice_coeff})
model.compileModel(keras.optimizers.Adam(lr=0.001), 'categorical_crossentropy', ['accuracy'])
model.trainModel(Epochs = 100,Batch_size=100, Verbose = 2)
model.saveModel("pattern_model")
|
[
"sys.path.append",
"numpy.pad",
"keras.layers.MaxPooling2D",
"keras.optimizers.Adam",
"keras.layers.Flatten",
"PIL.Image.open",
"keras.layers.Dense",
"numpy.array",
"keras.layers.Conv2D",
"SliceOPy.NetSlice",
"SliceOPy.DataSlice",
"os.listdir"
] |
[((176, 234), 'sys.path.append', 'sys.path.append', (['"""C:\\\\Users\\\\lhe39759\\\\Documents\\\\GitHub/"""'], {}), "('C:\\\\Users\\\\lhe39759\\\\Documents\\\\GitHub/')\n", (191, 234), False, 'import sys\n'), ((3161, 3268), 'SliceOPy.DataSlice', 'DataSlice', ([], {'Features': 'features', 'Labels': 'labels', 'Shuffle': '(True)', 'Split_Ratio': '(0.7)', 'Channel_Features': '(256, 256)'}), '(Features=features, Labels=labels, Shuffle=True, Split_Ratio=0.7,\n Channel_Features=(256, 256))\n', (3170, 3268), False, 'from SliceOPy import NetSlice, DataSlice\n'), ((3303, 3344), 'SliceOPy.NetSlice', 'NetSlice', (['model', '"""keras"""'], {'Data_Slice': 'data'}), "(model, 'keras', Data_Slice=data)\n", (3311, 3344), False, 'from SliceOPy import NetSlice, DataSlice\n'), ((1126, 1147), 'numpy.array', 'np.array', (['image_array'], {}), '(image_array)\n', (1134, 1147), True, 'import numpy as np\n'), ((3436, 3467), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (3457, 3467), False, 'import keras\n'), ((700, 731), 'os.listdir', 'os.listdir', (["(path + folder + '/')"], {}), "(path + folder + '/')\n", (710, 731), False, 'import os\n'), ((1407, 1425), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (1415, 1425), True, 'import numpy as np\n'), ((2649, 2726), 'keras.layers.Conv2D', 'keras.layers.Conv2D', (['(32)', '(3, 3)'], {'input_shape': '(256, 256, 1)', 'activation': '"""relu"""'}), "(32, (3, 3), input_shape=(256, 256, 1), activation='relu')\n", (2668, 2726), False, 'import keras\n'), ((2737, 2780), 'keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (2762, 2780), False, 'import keras\n'), ((2795, 2845), 'keras.layers.Conv2D', 'keras.layers.Conv2D', (['(64)', '(2, 2)'], {'activation': '"""relu"""'}), "(64, (2, 2), activation='relu')\n", (2814, 2845), False, 'import keras\n'), ((2918, 2940), 'keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (2938, 2940), False, 'import keras\n'), ((2950, 2998), 'keras.layers.Dense', 'keras.layers.Dense', (['(32)'], {'activation': 'tf.nn.sigmoid'}), '(32, activation=tf.nn.sigmoid)\n', (2968, 2998), False, 'import keras\n'), ((3008, 3056), 'keras.layers.Dense', 'keras.layers.Dense', (['(16)'], {'activation': 'tf.nn.sigmoid'}), '(16, activation=tf.nn.sigmoid)\n', (3026, 3056), False, 'import keras\n'), ((3067, 3114), 'keras.layers.Dense', 'keras.layers.Dense', (['(7)'], {'activation': 'tf.nn.softmax'}), '(7, activation=tf.nn.softmax)\n', (3085, 3114), False, 'import keras\n'), ((1089, 1111), 'numpy.array', 'np.array', (['folder_array'], {}), '(folder_array)\n', (1097, 1111), True, 'import numpy as np\n'), ((946, 991), 'numpy.pad', 'np.pad', (['insertImage1', '(300, 300)', '"""symmetric"""'], {}), "(insertImage1, (300, 300), 'symmetric')\n", (952, 991), True, 'import numpy as np\n'), ((1426, 1442), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (1434, 1442), True, 'import numpy as np\n'), ((1027, 1061), 'numpy.array', 'np.array', (['insertImage1[:256, :256]'], {}), '(insertImage1[:256, :256])\n', (1035, 1061), True, 'import numpy as np\n'), ((813, 859), 'PIL.Image.open', 'PIL.Image.open', (["(path + folder + '/' + filename)"], {}), "(path + folder + '/' + filename)\n", (827, 859), False, 'import PIL\n')]
|
import sklearn, re, nltk, base64, json, urllib2, os
import numpy as np
import cPickle as pickle
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
import os
MIN_RESULTS = 30 # Minimum number of results needed for valid user input
BASE_SEARCH_URL = 'https://api.twitter.com/1.1/search/tweets.json?'
class TweetMining(object):
def __init__(self, method = 'tf_idf_old'):
nltk.data.path.append('nltk_data/')
self.method = method
self.setup()
# Sets up Twitter API connection
def setup(self):
if os.path.isfile("config.py"):
config = {}
execfile("config.py", config)
consumer_key = config["consumer_key"]
consumer_secret = config["consumer_secret"]
elif os.path.isfile("project_template/config.py"):
config = {}
execfile("project_template/config.py", config)
consumer_key = config["consumer_key"]
consumer_secret = config["consumer_secret"]
else:
consumer_key = os.getenv('CONSUMER_KEY')
consumer_secret = os.getenv('CONSUMER_SECRET')
bearer_token = '%s:%s' % (consumer_key, consumer_secret)
bearer_token_64 = base64.b64encode(bearer_token)
token_request = urllib2.Request('https://api.twitter.com/oauth2/token')
token_request.add_header('Content-Type', 'application/x-www-form-urlencoded;charset=UTF-8')
token_request.add_header('Authorization', 'Basic %s' % bearer_token_64)
token_request.data = 'grant_type=client_credentials'
token_response = urllib2.urlopen(token_request)
token_contents = token_response.read()
token_data = json.loads(token_contents)
self.access_token = token_data['access_token']
if os.path.isfile("smaller_pho_dict.p"):
with open('smaller_pho_dict.p', 'rb') as handle:
self.dict = pickle.load(handle)
else:
with open('project_template/smaller_pho_dict.p', 'rb') as handle:
self.dict = pickle.load(handle)
if self.method == 'tf_idf_new':
if os.path.isfile("idf.pickle"):
with open('idf.pickle', 'rb') as handle:
self.idf = pickle.load(handle)
else:
with open('project_template/idf.pickle', 'rb') as handle:
self.idf = pickle.load(handle)
# Returns list of at most num_words topical words for the given hashtag_set
def get_topical_words(self, hashtag_set, num_words = 30):
hashtag_set = self.cleanup_tags(hashtag_set)
if self.method == 'tf_idf_old':
statuses = [t['text'] for t in self.get_tweets(hashtag_set, 100)]
if len(statuses) < MIN_RESULTS:
return []
self.process_tweets(statuses)
vect = TfidfVectorizer(min_df = 2, stop_words = 'english', strip_accents = 'ascii')
matrix = vect.fit_transform(statuses)
top_indices = np.argsort(vect.idf_)[::-1]
features = vect.get_feature_names()
return [features[i] for i in top_indices[:num_words]]
elif self.method == 'tf_idf_new':
statuses = [t['text'] for t in self.get_tweets(hashtag_set, 200 * len(hashtag_set))]
if len(statuses) < MIN_RESULTS:
return [], []
self.process_tweets(statuses, nouns_only = False)
getIDF = lambda word : self.idf[word] if word in self.idf else 0
vect = CountVectorizer(stop_words = 'english', strip_accents = 'ascii')
tf = vect.fit_transform([' '.join(statuses)]).toarray()
features = vect.get_feature_names()
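            # Smoothed IDF: the constant 1,600,000 is presumably the size of the background
            # corpus that the pickled document-frequency table was built from.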
idf_vals = np.array([np.log(1600000.0 / (1 + getIDF(word))) for word in features])
tfidf = np.multiply(tf, idf_vals)
top_indices = np.argsort(tfidf[0])[::-1]
max_tfidf = tfidf[0][top_indices[0]]
frequencies = [(features[i], 80 * (tfidf[0][i] / max_tfidf)) for i in top_indices[:40]]
top_words = [(word, max_tfidf * 1.01) for word in hashtag_set if word.upper() in self.dict and word not in features]
for i in top_indices:
word = features[i]
if not any(word in pair for pair in top_words) and word.upper() in self.dict:
top_words.append((word, tfidf[0][i]))
if len(top_words) == num_words:
break
return top_words, frequencies
else:
raise Exception('Error: Invalid method specified')
# Helper function for get_topical_words
# Cleans up hashtag list input by stripping hashtags if they exist
def cleanup_tags(self, hashtags):
return [h.strip(',').strip('#').strip() for h in hashtags]
# Helper function for get_topical_words
# Returns list of dicts; access "text" key to get status text
# hashtag_set is a list of hashtags to search for (don't include #)
def get_tweets(self, hashtag_set, num_tweets = 500):
num_queries = num_tweets / 100
extra_tweets = num_tweets % 100
base_query = BASE_SEARCH_URL + 'q='
for i in range(len(hashtag_set)):
base_query += '%23' + hashtag_set[i]
if i < len(hashtag_set) - 1:
base_query += '%20OR%20'
base_query += '&lang=en&result_type=recent&count=100'
def callAPI(query_url):
request = urllib2.Request(query_url)
request.add_header('Authorization', 'Bearer %s' % self.access_token)
response = urllib2.urlopen(request)
contents = response.read()
return json.loads(contents)
        result = []
        out_of_tweets = False  # guards the extra fetch below (the loop above returns early when results run out)
query = base_query
for q in range(num_queries):
statuses = callAPI(query)['statuses']
if statuses == []:
return []
result.extend(statuses)
minID = min([status['id'] for status in statuses])
query = base_query + '&max_id=' + str(minID)
if extra_tweets > 0 and not out_of_tweets:
query = re.sub(r'&count=\d+', '', query) + '&count=' + str(extra_tweets)
result.extend(callAPI(query)['statuses'])
return result
# Helper method for get_topical_words
# Processes statuses in-place by removing irrelevant components
def process_tweets(self, statuses, nouns_only = True):
for i in range(len(statuses)):
statuses[i] = re.sub(r'\S*/\S*', '', statuses[i]) # Links
statuses[i] = re.sub(r'htt\S*', '', statuses[i]) # Hanging https
statuses[i] = re.sub(r'#\S*', '', statuses[i]) # Hashtag symbols
statuses[i] = re.sub(r'(RT)*( )?@\S*', '', statuses[i]) # RT, @user
statuses[i] = re.sub(r'(RT |rt[^a-z])', '', statuses[i]) # RT/rt
statuses[i] = re.sub(r'\S*\d+\S*', '', statuses[i]) # Numerical
statuses[i] = re.sub(r"\w+'[^s ]+", '', statuses[i]) # Contractions
statuses[i] = re.sub(r'&\S+;', '', statuses[i]) # HTML entities
if nouns_only:
pos_info = nltk.pos_tag(nltk.word_tokenize(statuses[i]))
statuses[i] = ' '.join([word[0] for word in pos_info if 'NN' in word[1]])
|
[
"os.getenv",
"sklearn.feature_extraction.text.CountVectorizer",
"numpy.multiply",
"json.loads",
"sklearn.feature_extraction.text.TfidfVectorizer",
"urllib2.Request",
"nltk.data.path.append",
"cPickle.load",
"nltk.word_tokenize",
"numpy.argsort",
"os.path.isfile",
"base64.b64encode",
"re.sub",
"urllib2.urlopen"
] |
[((451, 486), 'nltk.data.path.append', 'nltk.data.path.append', (['"""nltk_data/"""'], {}), "('nltk_data/')\n", (472, 486), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n'), ((607, 634), 'os.path.isfile', 'os.path.isfile', (['"""config.py"""'], {}), "('config.py')\n", (621, 634), False, 'import os\n'), ((1274, 1304), 'base64.b64encode', 'base64.b64encode', (['bearer_token'], {}), '(bearer_token)\n', (1290, 1304), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n'), ((1330, 1385), 'urllib2.Request', 'urllib2.Request', (['"""https://api.twitter.com/oauth2/token"""'], {}), "('https://api.twitter.com/oauth2/token')\n", (1345, 1385), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n'), ((1653, 1683), 'urllib2.urlopen', 'urllib2.urlopen', (['token_request'], {}), '(token_request)\n', (1668, 1683), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n'), ((1752, 1778), 'json.loads', 'json.loads', (['token_contents'], {}), '(token_contents)\n', (1762, 1778), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n'), ((1846, 1882), 'os.path.isfile', 'os.path.isfile', (['"""smaller_pho_dict.p"""'], {}), "('smaller_pho_dict.p')\n", (1860, 1882), False, 'import os\n'), ((821, 865), 'os.path.isfile', 'os.path.isfile', (['"""project_template/config.py"""'], {}), "('project_template/config.py')\n", (835, 865), False, 'import os\n'), ((2190, 2218), 'os.path.isfile', 'os.path.isfile', (['"""idf.pickle"""'], {}), "('idf.pickle')\n", (2204, 2218), False, 'import os\n'), ((2917, 2987), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'min_df': '(2)', 'stop_words': '"""english"""', 'strip_accents': '"""ascii"""'}), "(min_df=2, stop_words='english', strip_accents='ascii')\n", (2932, 2987), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((5533, 5559), 'urllib2.Request', 'urllib2.Request', (['query_url'], {}), '(query_url)\n', (5548, 5559), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n'), ((5664, 5688), 'urllib2.urlopen', 'urllib2.urlopen', (['request'], {}), '(request)\n', (5679, 5688), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n'), ((5747, 5767), 'json.loads', 'json.loads', (['contents'], {}), '(contents)\n', (5757, 5767), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n'), ((6565, 6601), 're.sub', 're.sub', (['"""\\\\S*/\\\\S*"""', '""""""', 'statuses[i]'], {}), "('\\\\S*/\\\\S*', '', statuses[i])\n", (6571, 6601), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n'), ((6635, 6669), 're.sub', 're.sub', (['"""htt\\\\S*"""', '""""""', 'statuses[i]'], {}), "('htt\\\\S*', '', statuses[i])\n", (6641, 6669), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n'), ((6712, 6744), 're.sub', 're.sub', (['"""#\\\\S*"""', '""""""', 'statuses[i]'], {}), "('#\\\\S*', '', statuses[i])\n", (6718, 6744), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n'), ((6789, 6830), 're.sub', 're.sub', (['"""(RT)*( )?@\\\\S*"""', '""""""', 'statuses[i]'], {}), "('(RT)*( )?@\\\\S*', '', statuses[i])\n", (6795, 6830), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n'), ((6869, 6910), 're.sub', 're.sub', (['"""(RT |rt[^a-z])"""', '""""""', 'statuses[i]'], {}), "('(RT |rt[^a-z])', '', statuses[i])\n", (6875, 6910), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n'), ((6946, 6985), 're.sub', 're.sub', (['"""\\\\S*\\\\d+\\\\S*"""', '""""""', 'statuses[i]'], {}), "('\\\\S*\\\\d+\\\\S*', '', statuses[i])\n", (6952, 6985), False, 'import 
sklearn, re, nltk, base64, json, urllib2, os\n'), ((7022, 7060), 're.sub', 're.sub', (['"""\\\\w+\'[^s ]+"""', '""""""', 'statuses[i]'], {}), '("\\\\w+\'[^s ]+", \'\', statuses[i])\n', (7028, 7060), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n'), ((7102, 7135), 're.sub', 're.sub', (['"""&\\\\S+;"""', '""""""', 'statuses[i]'], {}), "('&\\\\S+;', '', statuses[i])\n", (7108, 7135), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n'), ((1097, 1122), 'os.getenv', 'os.getenv', (['"""CONSUMER_KEY"""'], {}), "('CONSUMER_KEY')\n", (1106, 1122), False, 'import os\n'), ((1153, 1181), 'os.getenv', 'os.getenv', (['"""CONSUMER_SECRET"""'], {}), "('CONSUMER_SECRET')\n", (1162, 1181), False, 'import os\n'), ((1973, 1992), 'cPickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (1984, 1992), True, 'import cPickle as pickle\n'), ((2113, 2132), 'cPickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (2124, 2132), True, 'import cPickle as pickle\n'), ((3070, 3091), 'numpy.argsort', 'np.argsort', (['vect.idf_'], {}), '(vect.idf_)\n', (3080, 3091), True, 'import numpy as np\n'), ((3586, 3646), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'stop_words': '"""english"""', 'strip_accents': '"""ascii"""'}), "(stop_words='english', strip_accents='ascii')\n", (3601, 3646), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((3883, 3908), 'numpy.multiply', 'np.multiply', (['tf', 'idf_vals'], {}), '(tf, idf_vals)\n', (3894, 3908), True, 'import numpy as np\n'), ((2308, 2327), 'cPickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (2319, 2327), True, 'import cPickle as pickle\n'), ((2451, 2470), 'cPickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (2462, 2470), True, 'import cPickle as pickle\n'), ((3936, 3956), 'numpy.argsort', 'np.argsort', (['tfidf[0]'], {}), '(tfidf[0])\n', (3946, 3956), True, 'import numpy as np\n'), ((6188, 6220), 're.sub', 're.sub', (['"""&count=\\\\d+"""', '""""""', 'query'], {}), "('&count=\\\\d+', '', query)\n", (6194, 6220), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n'), ((7220, 7251), 'nltk.word_tokenize', 'nltk.word_tokenize', (['statuses[i]'], {}), '(statuses[i])\n', (7238, 7251), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n')]
|
# -*- coding:utf-8 -*-
"""
Stock technical indicator interface
Created on 2018/05/26
@author: <NAME>
@group : **
@contact: <EMAIL>
"""
def ma(data, n=10, val_name="close"):
import numpy as np
    '''
        Moving Average (MA)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
      n:int
                  moving-average window length; the time unit follows data
      val_name:string
                  name of the column to compute on, defaults to "close"
    return
    -------
      list
          moving average values
    '''
values = []
MA = []
for index, row in data.iterrows():
values.append(row[val_name])
if len(values) == n:
del values[0]
MA.append(np.average(values))
return np.asarray(MA)
def md(data, n=10, val_name="close"):
import numpy as np
    '''
        Moving standard deviation (MD)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
      n:int
                  window length; the time unit follows data
      val_name:string
                  name of the column to compute on, defaults to "close"
    return
    -------
      list
          moving standard deviation values
    '''
values = []
MD = []
for index, row in data.iterrows():
values.append(row[val_name])
if len(values) == n:
del values[0]
MD.append(np.std(values))
return np.asarray(MD)
def _get_day_ema(prices, n):
a = 1 - 2 / (n + 1)
day_ema = 0
for index, price in enumerate(reversed(prices)):
day_ema += a ** index * price
return day_ema
def ema(data, n=12, val_name="close"):
import numpy as np
    '''
        Exponential Moving Average (EMA)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
      n:int
                  smoothing span; the time unit follows data
      val_name:string
                  name of the column to compute on, defaults to "close"
    return
    -------
      EMA:numpy.ndarray<numpy.float64>
          exponential moving average values
    '''
prices = []
EMA = []
for index, row in data.iterrows():
if index == 0:
past_ema = row[val_name]
EMA.append(row[val_name])
else:
# Y=[2*X+(N-1)*Y’]/(N+1)
today_ema = (2 * row[val_name] + (n - 1) * past_ema) / (n + 1)
past_ema = today_ema
EMA.append(today_ema)
return np.asarray(EMA)
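# Illustrative sketch (not part of the original module): ema() implements the recursion
# Y = (2 * X + (N - 1) * Y') / (N + 1) noted above. The tiny DataFrame is made-up demo data.
def _ema_example():
    import pandas as pd
    demo = pd.DataFrame({"close": [10.0, 11.0, 12.0, 11.5, 12.5]})
    return ema(demo, n=3)  # ndarray of smoothed closes, same length as the input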
def macd(data, quick_n=12, slow_n=26, dem_n=9, val_name="close"):
import numpy as np
    '''
        Moving Average Convergence Divergence (MACD)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
      quick_n:int
                  fast EMA span used for the DIFF line
      slow_n:int
                  slow EMA span used for the DIFF line
      dem_n:int
                  span of the DEM (signal) line
      val_name:string
                  name of the column to compute on, defaults to "close"
    return
    -------
      OSC:numpy.ndarray<numpy.float64>
          MACD bar / OSC histogram, DIFF - DEM
      DIFF:numpy.ndarray<numpy.float64>
          difference between the fast and slow EMA
      DEM:numpy.ndarray<numpy.float64>
          signal line
    '''
ema_quick = np.asarray(ema(data, quick_n, val_name))
ema_slow = np.asarray(ema(data, slow_n, val_name))
DIFF = ema_quick - ema_slow
data["diff"] = DIFF
DEM = ema(data, dem_n, "diff")
OSC = DIFF - DEM
return OSC, DIFF, DEM
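# Hedged usage sketch (illustrative only): macd() returns the histogram (OSC), the fast/slow
# EMA difference (DIFF) and its signal line (DEM). Note that it writes a temporary "diff"
# column into the DataFrame passed in.
def _macd_example(df):
    osc, diff, dem = macd(df, quick_n=12, slow_n=26, dem_n=9)
    return osc[-1] > 0  # True when the histogram is above zero on the last bar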
def kdj(data):
import numpy as np
    '''
        Stochastic oscillator (KDJ)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
    return
    -------
      K:numpy.ndarray<numpy.float64>
          K line
      D:numpy.ndarray<numpy.float64>
          D line
      J:numpy.ndarray<numpy.float64>
          J line
    '''
K, D, J = [], [], []
last_k, last_d = None, None
for index, row in data.iterrows():
if last_k is None or last_d is None:
last_k = 50
last_d = 50
c, l, h = row["close"], row["low"], row["high"]
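        # RSV of the current bar: (C - L) / (H - L) * 100; this raises ZeroDivisionError
        # on bars where high == low.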
rsv = (c - l) / (h - l) * 100
k = (2 / 3) * last_k + (1 / 3) * rsv
d = (2 / 3) * last_d + (1 / 3) * k
j = 3 * k - 2 * d
K.append(k)
D.append(d)
J.append(j)
last_k, last_d = k, d
return np.asarray(K), np.asarray(D), np.asarray(J)
def rsi(data, n=6, val_name="close"):
import numpy as np
    '''
        Relative Strength Index (RSI)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
      n:int
                  statistics window length; the time unit follows data
    return
    -------
      RSI:numpy.ndarray<numpy.float64>
          RSI line
    '''
RSI = []
UP = []
DOWN = []
for index, row in data.iterrows():
if index == 0:
past_value = row[val_name]
RSI.append(0)
else:
diff = row[val_name] - past_value
if diff > 0:
UP.append(diff)
DOWN.append(0)
else:
UP.append(0)
DOWN.append(diff)
if len(UP) == n:
del UP[0]
if len(DOWN) == n:
del DOWN[0]
past_value = row[val_name]
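            # UP holds positive price changes and DOWN negative ones, so -sum(DOWN) is the
            # total loss; RSI = gains / (gains + losses) * 100 over the last n bars.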
rsi = np.sum(UP) / (-np.sum(DOWN) + np.sum(UP)) * 100
RSI.append(rsi)
return np.asarray(RSI)
def boll(data, n=10, val_name="close", k=2):
    '''
        Bollinger Bands (BOLL)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
      n:int
                  statistics window length; the time unit follows data
    return
    -------
      BOLL:numpy.ndarray<numpy.float64>
          middle band (n-period moving average)
      UPPER:numpy.ndarray<numpy.float64>
          upper band, BOLL + k * MD
      LOWER:numpy.ndarray<numpy.float64>
          lower band, BOLL - k * MD
    '''
BOLL = ma(data, n, val_name)
MD = md(data, n, val_name)
UPPER = BOLL + k * MD
LOWER = BOLL - k * MD
return BOLL, UPPER, LOWER
def wnr(data, n=14):
    '''
        Williams %R (W&R)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
      n:int
                  statistics window length; the time unit follows data
    return
    -------
      WNR:list
          Williams %R values
    '''
high_prices = []
low_prices = []
WNR = []
for index, row in data.iterrows():
high_prices.append(row["high"])
if len(high_prices) == n:
del high_prices[0]
low_prices.append(row["low"])
if len(low_prices) == n:
del low_prices[0]
highest = max(high_prices)
lowest = min(low_prices)
wnr = (highest - row["close"]) / (highest - lowest) * 100
WNR.append(wnr)
return WNR
def _get_any_ma(arr, n):
import numpy as np
MA = []
values = []
for val in arr:
values.append(val)
if len(values) == n:
del values[0]
MA.append(np.average(values))
return np.asarray(MA)
def dmi(data, n=14, m=14, k=6):
import numpy as np
    '''
        Directional Movement Index (DMI)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
      n:int
                  +-DI(n): window length for the DI lines, default 14
      m:int
                  ADX(m): window length for ADX, default 14
      k:int
                  ADXR(k): ADXR looks back k periods, default 6
    return
    -------
      P_DI:numpy.ndarray<numpy.float64>
          +DI indicator
      M_DI:numpy.ndarray<numpy.float64>
          -DI indicator
      ADX:numpy.ndarray<numpy.float64>
          ADX indicator
      ADXR:numpy.ndarray<numpy.float64>
          ADXR indicator
    ref.
    -------
        https://www.mk-mode.com/octopress/2012/03/03/03002038/
    '''
    # upward movement (+DM)
P_DM = [0.]
    # downward movement (-DM)
M_DM = [0.]
    # true range (TR)
TR = [0.]
    # directional movement (DX)
DX = [0.]
P_DI = [0.]
M_DI = [0.]
for index, row in data.iterrows():
if index == 0:
past_row = row
else:
p_dm = row["high"] - past_row["high"]
m_dm = past_row["low"] - row["low"]
if (p_dm < 0 and m_dm < 0) or (np.isclose(p_dm, m_dm)):
p_dm = 0
m_dm = 0
if p_dm > m_dm:
m_dm = 0
if m_dm > p_dm:
p_dm = 0
P_DM.append(p_dm)
M_DM.append(m_dm)
tr = max(row["high"] - past_row["low"], row["high"] - past_row["close"], past_row["close"] - row["low"])
TR.append(tr)
if len(P_DM) == n:
del P_DM[0]
if len(M_DM) == n:
del M_DM[0]
if len(TR) == n:
del TR[0]
            # plus directional indicator (+DI)
p_di = (np.average(P_DM) / np.average(TR)) * 100
P_DI.append(p_di)
            # minus directional indicator (-DI)
m_di = (np.average(M_DM) / np.average(TR)) * 100
M_DI.append(m_di)
            # same-day +DI and -DI:
            # p_day_di = (p_dm / tr) * 100
            # m_day_di = (m_dm / tr) * 100
            # directional movement DX:
            # dx = (di_dif / di_sum) * 100
            # di_dif is the absolute difference between +DI and -DI
            # di_sum is the sum of +DI and -DI
            # ADX is the n-period moving average of DX.
if (p_di + m_di) == 0:
dx = 0
else:
dx = (abs(p_di - m_di) / (p_di + m_di)) * 100
DX.append(dx)
past_row = row
ADX = _get_any_ma(DX, m)
    # ADXR: smoothed estimate based on ADX
ADXR = []
for index, adx in enumerate(ADX):
if index >= k:
adxr = (adx + ADX[index - k]) / 2
ADXR.append(adxr)
else:
ADXR.append(0)
return P_DI, M_DI, ADX, ADXR
def bias(data, n=5):
import numpy as np
    '''
        Bias (deviation rate)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
      n:int
                  statistics window length, default 5
    return
    -------
      BIAS:numpy.ndarray<numpy.float64>
          bias (deviation from the n-period moving average)
    '''
MA = ma(data, n)
CLOSES = data["close"]
BIAS = (np.true_divide((CLOSES - MA), MA)) * (100 / 100)
return BIAS
def asi(data, n=5):
import numpy as np
    '''
        Accumulation Swing Index (ASI)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
      n:int
                  statistics window length, default 5
    return
    -------
      ASI:numpy.ndarray<numpy.float64>
          accumulation swing index values
    '''
SI = []
for index, row in data.iterrows():
if index == 0:
last_row = row
SI.append(0.)
else:
a = abs(row["close"] - last_row["close"])
b = abs(row["low"] - last_row["close"])
c = abs(row["high"] - last_row["close"])
d = abs(last_row["close"] - last_row["open"])
if b > a and b > c:
r = b + (1 / 2) * a + (1 / 4) * d
elif c > a and c > b:
r = c + (1 / 4) * d
else:
r = 0
e = row["close"] - last_row["close"]
f = row["close"] - last_row["open"]
g = last_row["close"] - last_row["open"]
x = e + (1 / 2) * f + g
k = max(a, b)
l = 3
if np.isclose(r, 0) or np.isclose(l, 0):
si = 0
else:
si = 50 * (x / r) * (k / l)
SI.append(si)
ASI = _get_any_ma(SI, n)
return ASI
def vr(data, n=26):
import numpy as np
    '''
        Volume Ratio (VR)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
      n:int
                  statistics window length, default 26
    return
    -------
      VR:numpy.ndarray<numpy.float64>
          volume ratio values
    '''
VR = []
AV_volumes, BV_volumes, CV_volumes = [], [], []
for index, row in data.iterrows():
if row["close"] > row["open"]:
AV_volumes.append(row["volume"])
elif row["close"] < row["open"]:
BV_volumes.append(row["volume"])
else:
CV_volumes.append(row["volume"])
if len(AV_volumes) == n:
del AV_volumes[0]
if len(BV_volumes) == n:
del BV_volumes[0]
if len(CV_volumes) == n:
del CV_volumes[0]
avs = sum(AV_volumes)
bvs = sum(BV_volumes)
cvs = sum(CV_volumes)
if (bvs + (1 / 2) * cvs) != 0:
vr = (avs + (1 / 2) * cvs) / (bvs + (1 / 2) * cvs)
else:
vr = 0
VR.append(vr)
return np.asarray(VR)
def arbr(data, n=26):
import numpy as np
    '''
        AR and BR indicators
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
      n:int
                  statistics window length, default 26
    return
    -------
      AR:numpy.ndarray<numpy.float64>
          AR indicator
      BR:numpy.ndarray<numpy.float64>
          BR indicator
    '''
H, L, O, PC = np.array([0]), np.array([0]), np.array([0]), np.array([0])
AR, BR = np.array([0]), np.array([0])
for index, row in data.iterrows():
if index == 0:
last_row = row
else:
h = row["high"]
H = np.append(H, [h])
if len(H) == n:
H = np.delete(H, 0)
l = row["low"]
L = np.append(L, [l])
if len(L) == n:
L = np.delete(L, 0)
o = row["open"]
O = np.append(O, [o])
if len(O) == n:
O = np.delete(O, 0)
pc = last_row["close"]
PC = np.append(PC, [pc])
if len(PC) == n:
PC = np.delete(PC, 0)
ar = (np.sum(np.asarray(H) - np.asarray(O)) / sum(np.asarray(O) - np.asarray(L))) * 100
AR = np.append(AR, [ar])
br = (np.sum(np.asarray(H) - np.asarray(PC)) / sum(np.asarray(PC) - np.asarray(L))) * 100
BR = np.append(BR, [br])
last_row = row
return np.asarray(AR), np.asarray(BR)
def dpo(data, n=20, m=6):
    '''
        Detrended Price Oscillator (DPO)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
      n:int
                  statistics window length, default 20
      m:int
                  window M used for MADPO, default 6
    return
    -------
      DPO:numpy.ndarray<numpy.float64>
          DPO indicator
      MADPO:numpy.ndarray<numpy.float64>
          MADPO indicator (moving average of DPO)
    '''
CLOSES = data["close"]
DPO = CLOSES - ma(data, int(n / 2 + 1))
MADPO = _get_any_ma(DPO, m)
return DPO, MADPO
def trix(data, n=12, m=20):
import numpy as np
    '''
        Triple Exponentially Smoothed Moving Average (TRIX)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
      n:int
                  statistics window length, default 12
      m:int
                  window M used for TRMA, default 20
    return
    -------
      TRIX:numpy.ndarray<numpy.float64>
          TRIX indicator
      TRMA:numpy.ndarray<numpy.float64>
          TRMA indicator (moving average of TRIX)
    '''
CLOSES = []
TRIX = []
for index, row in data.iterrows():
CLOSES.append(row["close"])
if len(CLOSES) == n:
del CLOSES[0]
tr = np.average(CLOSES)
if index == 0:
past_tr = tr
TRIX.append(0)
else:
trix = (tr - past_tr) / past_tr * 100
TRIX.append(trix)
TRMA = _get_any_ma(TRIX, m)
return TRIX, TRMA
def bbi(data):
import numpy as np
    '''
        Bull and Bear Index (BBI)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
    return
    -------
      BBI:numpy.ndarray<numpy.float64>
          BBI indicator (average of the 3/6/12/24-period moving averages)
    '''
CS = []
BBI = []
for index, row in data.iterrows():
CS.append(row["close"])
if len(CS) < 24:
BBI.append(row["close"])
else:
bbi = np.average([np.average(CS[-3:]), np.average(CS[-6:]), np.average(CS[-12:]), np.average(CS[-24:])])
BBI.append(bbi)
return np.asarray(BBI)
def mtm(data, n=6):
import numpy as np
    '''
        Momentum Index (MTM)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
      n:int
                  statistics window length, default 6
    return
    -------
      MTM:numpy.ndarray<numpy.float64>
          momentum indicator
    '''
MTM = []
CN = []
for index, row in data.iterrows():
if index < n - 1:
MTM.append(0.)
else:
mtm = row["close"] - CN[index - n]
MTM.append(mtm)
CN.append(row["close"])
return np.asarray(MTM)
def obv(data):
import numpy as np
    '''
        On Balance Volume (OBV)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
    return
    -------
      OBV:pandas.Series
          on balance volume indicator
    '''
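    # tmp is the close location value ((C - L) - (H - C)) / (H - L); weighting it by volume
    # yields a money-flow style OBV variant rather than the classic cumulative OBV.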
tmp = np.true_divide(((data["close"] - data["low"]) - (data["high"] - data["close"])), (data["high"] - data["low"]))
OBV = tmp * data["volume"]
return OBV
def sar(data, n=4):
raise Exception("Not implemented yet")
def plot_all(data, is_show=True, output=None):
import matplotlib.pyplot as plt
from pylab import rcParams
import numpy as np
rcParams['figure.figsize'] = 18, 50
plt.figure()
    # closing price
plt.subplot(20, 1, 1)
plt.plot(data["date"], data["close"], label="close")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # moving average (MA)
plt.subplot(20, 1, 2)
MA = ma(data, n=10)
plt.plot(data["date"], MA, label="MA(n=10)")
plt.plot(data["date"], data["close"], label="CLOSE PRICE")
plt.title("MA")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # moving standard deviation (MD)
n = 10
plt.subplot(20, 1, 3)
MD = md(data, n)
plt.plot(data["date"], MD, label="MD(n=10)")
plt.title("MD")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # exponential moving average (EMA)
    plt.subplot(20, 1, 4)
    EMA = ema(data, n)
    plt.plot(data["date"], EMA, label="EMA(n=10)")
plt.title("EMA")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # moving average convergence divergence (MACD)
plt.subplot(20, 1, 5)
OSC, DIFF, DEM = macd(data, n)
plt.plot(data["date"], OSC, label="OSC")
plt.plot(data["date"], DIFF, label="DIFF")
plt.plot(data["date"], DEM, label="DEM")
plt.title("MACD")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # stochastic oscillator (KDJ)
plt.subplot(20, 1, 6)
K, D, J = kdj(data)
plt.plot(data["date"], K, label="K")
plt.plot(data["date"], D, label="D")
plt.plot(data["date"], J, label="J")
plt.title("KDJ")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # relative strength index (RSI)
plt.subplot(20, 1, 7)
RSI6 = rsi(data, 6)
RSI12 = rsi(data, 12)
RSI24 = rsi(data, 24)
plt.plot(data["date"], RSI6, label="RSI(n=6)")
plt.plot(data["date"], RSI12, label="RSI(n=12)")
plt.plot(data["date"], RSI24, label="RSI(n=24)")
plt.title("RSI")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # Bollinger bands (BOLL)
plt.subplot(20, 1, 8)
BOLL, UPPER, LOWER = boll(data)
plt.plot(data["date"], BOLL, label="BOLL(n=10)")
plt.plot(data["date"], UPPER, label="UPPER(n=10)")
plt.plot(data["date"], LOWER, label="LOWER(n=10)")
plt.plot(data["date"], data["close"], label="CLOSE PRICE")
plt.title("BOLL")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # Williams %R (W&R)
plt.subplot(20, 1, 9)
WNR = wnr(data, n=14)
plt.plot(data["date"], WNR, label="WNR(n=14)")
plt.title("WNR")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # directional movement index (DMI)
plt.subplot(20, 1, 10)
P_DI, M_DI, ADX, ADXR = dmi(data)
plt.plot(data["date"], P_DI, label="+DI(n=14)")
plt.plot(data["date"], M_DI, label="-DI(n=14)")
plt.plot(data["date"], ADX, label="ADX(m=14)")
plt.plot(data["date"], ADXR, label="ADXR(k=6)")
plt.title("DMI")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # bias (deviation rate)
plt.subplot(20, 1, 11)
BIAS = bias(data, n=5)
plt.plot(data["date"], BIAS, label="BIAS(n=5)")
plt.title("BIAS")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # accumulation swing index (ASI)
plt.subplot(20, 1, 12)
ASI = asi(data, n=5)
plt.plot(data["date"], ASI, label="ASI(n=5)")
plt.title("ASI")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # volume ratio (VR)
plt.subplot(20, 1, 13)
VR = vr(data, n=26)
plt.plot(data["date"], VR, label="VR(n=26)")
plt.title("VR")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # AR / BR indicators
plt.subplot(20, 1, 14)
AR, BR = arbr(data, n=26)
plt.plot(data["date"], AR, label="AR(n=26)")
plt.plot(data["date"], BR, label="BR(n=26)")
plt.title("ARBR")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # detrended price oscillator (DPO)
plt.subplot(20, 1, 15)
DPO, MADPO = dpo(data, n=20, m=6)
plt.plot(data["date"], DPO, label="DPO(n=20)")
plt.plot(data["date"], MADPO, label="MADPO(m=6)")
plt.title("DPO")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # triple exponentially smoothed moving average (TRIX)
    plt.subplot(20, 1, 16)
    TRIX, TRMA = trix(data, n=12, m=20)
    plt.plot(data["date"], TRIX, label="TRIX(n=12)")
    plt.plot(data["date"], TRMA, label="TRMA(m=20)")
plt.title("TRIX")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # bull and bear index (BBI)
plt.subplot(20, 1, 17)
BBI = bbi(data)
plt.plot(data["date"], BBI, label="BBI(3,6,12,24)")
plt.title("BBI")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # momentum (MTM)
plt.subplot(20, 1, 18)
MTM = mtm(data, n=6)
plt.plot(data["date"], MTM, label="MTM(n=6)")
plt.title("MTM")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # on balance volume (OBV)
plt.subplot(20, 1, 19)
OBV = obv(data)
plt.plot(data["date"], OBV, label="OBV")
plt.title("OBV")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
plt.tight_layout()
if is_show:
plt.show()
if output is not None:
plt.savefig(output)
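# Hedged usage example (illustrative; assumes a pandas DataFrame with the columns used
# above -- date, open, high, low, close, volume -- e.g. as returned by tushare):
#
#     import tushare as ts
#     df = ts.get_k_data("600848")
#     plot_all(df, is_show=False, output="indicators.png")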
|
[
"matplotlib.pyplot.title",
"numpy.sum",
"matplotlib.pyplot.figure",
"numpy.isclose",
"matplotlib.pyplot.tight_layout",
"numpy.true_divide",
"numpy.std",
"numpy.append",
"matplotlib.pyplot.xticks",
"numpy.average",
"matplotlib.pyplot.show",
"numpy.asarray",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"numpy.delete",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((685, 699), 'numpy.asarray', 'np.asarray', (['MA'], {}), '(MA)\n', (695, 699), True, 'import numpy as np\n'), ((1257, 1271), 'numpy.asarray', 'np.asarray', (['MD'], {}), '(MD)\n', (1267, 1271), True, 'import numpy as np\n'), ((2290, 2305), 'numpy.asarray', 'np.asarray', (['EMA'], {}), '(EMA)\n', (2300, 2305), True, 'import numpy as np\n'), ((5271, 5286), 'numpy.asarray', 'np.asarray', (['RSI'], {}), '(RSI)\n', (5281, 5286), True, 'import numpy as np\n'), ((6907, 6921), 'numpy.asarray', 'np.asarray', (['MA'], {}), '(MA)\n', (6917, 6921), True, 'import numpy as np\n'), ((12531, 12545), 'numpy.asarray', 'np.asarray', (['VR'], {}), '(VR)\n', (12541, 12545), True, 'import numpy as np\n'), ((16063, 16078), 'numpy.asarray', 'np.asarray', (['BBI'], {}), '(BBI)\n', (16073, 16078), True, 'import numpy as np\n'), ((16664, 16679), 'numpy.asarray', 'np.asarray', (['MTM'], {}), '(MTM)\n', (16674, 16679), True, 'import numpy as np\n'), ((16988, 17096), 'numpy.true_divide', 'np.true_divide', (["(data['close'] - data['low'] - (data['high'] - data['close']))", "(data['high'] - data['low'])"], {}), "(data['close'] - data['low'] - (data['high'] - data['close']),\n data['high'] - data['low'])\n", (17002, 17096), True, 'import numpy as np\n'), ((17394, 17406), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (17404, 17406), True, 'import matplotlib.pyplot as plt\n'), ((17421, 17442), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(1)'], {}), '(20, 1, 1)\n', (17432, 17442), True, 'import matplotlib.pyplot as plt\n'), ((17447, 17499), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", "data['close']"], {'label': '"""close"""'}), "(data['date'], data['close'], label='close')\n", (17455, 17499), True, 'import matplotlib.pyplot as plt\n'), ((17504, 17522), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (17514, 17522), True, 'import matplotlib.pyplot as plt\n'), ((17527, 17546), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (17537, 17546), True, 'import matplotlib.pyplot as plt\n'), ((17551, 17563), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (17561, 17563), True, 'import matplotlib.pyplot as plt\n'), ((17568, 17591), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (17578, 17591), True, 'import matplotlib.pyplot as plt\n'), ((17609, 17630), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(2)'], {}), '(20, 1, 2)\n', (17620, 17630), True, 'import matplotlib.pyplot as plt\n'), ((17659, 17703), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'MA'], {'label': '"""MA(n=10)"""'}), "(data['date'], MA, label='MA(n=10)')\n", (17667, 17703), True, 'import matplotlib.pyplot as plt\n'), ((17708, 17766), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", "data['close']"], {'label': '"""CLOSE PRICE"""'}), "(data['date'], data['close'], label='CLOSE PRICE')\n", (17716, 17766), True, 'import matplotlib.pyplot as plt\n'), ((17771, 17786), 'matplotlib.pyplot.title', 'plt.title', (['"""MA"""'], {}), "('MA')\n", (17780, 17786), True, 'import matplotlib.pyplot as plt\n'), ((17791, 17809), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (17801, 17809), True, 'import matplotlib.pyplot as plt\n'), ((17814, 17833), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (17824, 17833), True, 'import matplotlib.pyplot as plt\n'), ((17838, 17850), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', 
(17848, 17850), True, 'import matplotlib.pyplot as plt\n'), ((17855, 17878), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (17865, 17878), True, 'import matplotlib.pyplot as plt\n'), ((17907, 17928), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(3)'], {}), '(20, 1, 3)\n', (17918, 17928), True, 'import matplotlib.pyplot as plt\n'), ((17954, 17998), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'MD'], {'label': '"""MD(n=10)"""'}), "(data['date'], MD, label='MD(n=10)')\n", (17962, 17998), True, 'import matplotlib.pyplot as plt\n'), ((18003, 18018), 'matplotlib.pyplot.title', 'plt.title', (['"""MD"""'], {}), "('MD')\n", (18012, 18018), True, 'import matplotlib.pyplot as plt\n'), ((18023, 18041), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (18033, 18041), True, 'import matplotlib.pyplot as plt\n'), ((18046, 18065), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (18056, 18065), True, 'import matplotlib.pyplot as plt\n'), ((18070, 18082), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (18080, 18082), True, 'import matplotlib.pyplot as plt\n'), ((18087, 18110), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (18097, 18110), True, 'import matplotlib.pyplot as plt\n'), ((18130, 18151), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(4)'], {}), '(20, 1, 4)\n', (18141, 18151), True, 'import matplotlib.pyplot as plt\n'), ((18179, 18225), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'EMA'], {'label': '"""EMA(n=12)"""'}), "(data['date'], EMA, label='EMA(n=12)')\n", (18187, 18225), True, 'import matplotlib.pyplot as plt\n'), ((18230, 18246), 'matplotlib.pyplot.title', 'plt.title', (['"""EMA"""'], {}), "('EMA')\n", (18239, 18246), True, 'import matplotlib.pyplot as plt\n'), ((18251, 18269), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (18261, 18269), True, 'import matplotlib.pyplot as plt\n'), ((18274, 18293), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (18284, 18293), True, 'import matplotlib.pyplot as plt\n'), ((18298, 18310), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (18308, 18310), True, 'import matplotlib.pyplot as plt\n'), ((18315, 18338), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (18325, 18338), True, 'import matplotlib.pyplot as plt\n'), ((18405, 18426), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(5)'], {}), '(20, 1, 5)\n', (18416, 18426), True, 'import matplotlib.pyplot as plt\n'), ((18466, 18506), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'OSC'], {'label': '"""OSC"""'}), "(data['date'], OSC, label='OSC')\n", (18474, 18506), True, 'import matplotlib.pyplot as plt\n'), ((18511, 18553), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'DIFF'], {'label': '"""DIFF"""'}), "(data['date'], DIFF, label='DIFF')\n", (18519, 18553), True, 'import matplotlib.pyplot as plt\n'), ((18558, 18598), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'DEM'], {'label': '"""DEM"""'}), "(data['date'], DEM, label='DEM')\n", (18566, 18598), True, 'import matplotlib.pyplot as plt\n'), ((18603, 18620), 'matplotlib.pyplot.title', 'plt.title', (['"""MACD"""'], {}), "('MACD')\n", (18612, 18620), True, 'import matplotlib.pyplot as plt\n'), ((18625, 18643), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", 
(18635, 18643), True, 'import matplotlib.pyplot as plt\n'), ((18648, 18667), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (18658, 18667), True, 'import matplotlib.pyplot as plt\n'), ((18672, 18684), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (18682, 18684), True, 'import matplotlib.pyplot as plt\n'), ((18689, 18712), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (18699, 18712), True, 'import matplotlib.pyplot as plt\n'), ((18729, 18750), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(6)'], {}), '(20, 1, 6)\n', (18740, 18750), True, 'import matplotlib.pyplot as plt\n'), ((18779, 18815), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'K'], {'label': '"""K"""'}), "(data['date'], K, label='K')\n", (18787, 18815), True, 'import matplotlib.pyplot as plt\n'), ((18820, 18856), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'D'], {'label': '"""D"""'}), "(data['date'], D, label='D')\n", (18828, 18856), True, 'import matplotlib.pyplot as plt\n'), ((18861, 18897), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'J'], {'label': '"""J"""'}), "(data['date'], J, label='J')\n", (18869, 18897), True, 'import matplotlib.pyplot as plt\n'), ((18902, 18918), 'matplotlib.pyplot.title', 'plt.title', (['"""KDJ"""'], {}), "('KDJ')\n", (18911, 18918), True, 'import matplotlib.pyplot as plt\n'), ((18923, 18941), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (18933, 18941), True, 'import matplotlib.pyplot as plt\n'), ((18946, 18965), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (18956, 18965), True, 'import matplotlib.pyplot as plt\n'), ((18970, 18982), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (18980, 18982), True, 'import matplotlib.pyplot as plt\n'), ((18987, 19010), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (18997, 19010), True, 'import matplotlib.pyplot as plt\n'), ((19029, 19050), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(7)'], {}), '(20, 1, 7)\n', (19040, 19050), True, 'import matplotlib.pyplot as plt\n'), ((19131, 19177), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'RSI6'], {'label': '"""RSI(n=6)"""'}), "(data['date'], RSI6, label='RSI(n=6)')\n", (19139, 19177), True, 'import matplotlib.pyplot as plt\n'), ((19182, 19230), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'RSI12'], {'label': '"""RSI(n=12)"""'}), "(data['date'], RSI12, label='RSI(n=12)')\n", (19190, 19230), True, 'import matplotlib.pyplot as plt\n'), ((19235, 19283), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'RSI24'], {'label': '"""RSI(n=24)"""'}), "(data['date'], RSI24, label='RSI(n=24)')\n", (19243, 19283), True, 'import matplotlib.pyplot as plt\n'), ((19288, 19304), 'matplotlib.pyplot.title', 'plt.title', (['"""RSI"""'], {}), "('RSI')\n", (19297, 19304), True, 'import matplotlib.pyplot as plt\n'), ((19309, 19327), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (19319, 19327), True, 'import matplotlib.pyplot as plt\n'), ((19332, 19351), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (19342, 19351), True, 'import matplotlib.pyplot as plt\n'), ((19356, 19368), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (19366, 19368), True, 'import matplotlib.pyplot as plt\n'), ((19373, 19396), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), 
'(rotation=90)\n', (19383, 19396), True, 'import matplotlib.pyplot as plt\n'), ((19418, 19439), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(8)'], {}), '(20, 1, 8)\n', (19429, 19439), True, 'import matplotlib.pyplot as plt\n'), ((19480, 19528), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'BOLL'], {'label': '"""BOLL(n=10)"""'}), "(data['date'], BOLL, label='BOLL(n=10)')\n", (19488, 19528), True, 'import matplotlib.pyplot as plt\n'), ((19533, 19583), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'UPPER'], {'label': '"""UPPER(n=10)"""'}), "(data['date'], UPPER, label='UPPER(n=10)')\n", (19541, 19583), True, 'import matplotlib.pyplot as plt\n'), ((19588, 19638), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'LOWER'], {'label': '"""LOWER(n=10)"""'}), "(data['date'], LOWER, label='LOWER(n=10)')\n", (19596, 19638), True, 'import matplotlib.pyplot as plt\n'), ((19643, 19701), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", "data['close']"], {'label': '"""CLOSE PRICE"""'}), "(data['date'], data['close'], label='CLOSE PRICE')\n", (19651, 19701), True, 'import matplotlib.pyplot as plt\n'), ((19706, 19723), 'matplotlib.pyplot.title', 'plt.title', (['"""BOLL"""'], {}), "('BOLL')\n", (19715, 19723), True, 'import matplotlib.pyplot as plt\n'), ((19728, 19746), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (19738, 19746), True, 'import matplotlib.pyplot as plt\n'), ((19751, 19770), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (19761, 19770), True, 'import matplotlib.pyplot as plt\n'), ((19775, 19787), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (19785, 19787), True, 'import matplotlib.pyplot as plt\n'), ((19792, 19815), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (19802, 19815), True, 'import matplotlib.pyplot as plt\n'), ((19836, 19857), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(9)'], {}), '(20, 1, 9)\n', (19847, 19857), True, 'import matplotlib.pyplot as plt\n'), ((19888, 19934), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'WNR'], {'label': '"""WNR(n=14)"""'}), "(data['date'], WNR, label='WNR(n=14)')\n", (19896, 19934), True, 'import matplotlib.pyplot as plt\n'), ((19939, 19955), 'matplotlib.pyplot.title', 'plt.title', (['"""WNR"""'], {}), "('WNR')\n", (19948, 19955), True, 'import matplotlib.pyplot as plt\n'), ((19960, 19978), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (19970, 19978), True, 'import matplotlib.pyplot as plt\n'), ((19983, 20002), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (19993, 20002), True, 'import matplotlib.pyplot as plt\n'), ((20007, 20019), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (20017, 20019), True, 'import matplotlib.pyplot as plt\n'), ((20024, 20047), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (20034, 20047), True, 'import matplotlib.pyplot as plt\n'), ((20067, 20089), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(10)'], {}), '(20, 1, 10)\n', (20078, 20089), True, 'import matplotlib.pyplot as plt\n'), ((20132, 20179), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'P_DI'], {'label': '"""+DI(n=14)"""'}), "(data['date'], P_DI, label='+DI(n=14)')\n", (20140, 20179), True, 'import matplotlib.pyplot as plt\n'), ((20184, 20231), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'M_DI'], {'label': 
'"""-DI(n=14)"""'}), "(data['date'], M_DI, label='-DI(n=14)')\n", (20192, 20231), True, 'import matplotlib.pyplot as plt\n'), ((20236, 20282), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'ADX'], {'label': '"""ADX(m=14)"""'}), "(data['date'], ADX, label='ADX(m=14)')\n", (20244, 20282), True, 'import matplotlib.pyplot as plt\n'), ((20287, 20334), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'ADXR'], {'label': '"""ADXR(k=6)"""'}), "(data['date'], ADXR, label='ADXR(k=6)')\n", (20295, 20334), True, 'import matplotlib.pyplot as plt\n'), ((20339, 20355), 'matplotlib.pyplot.title', 'plt.title', (['"""DMI"""'], {}), "('DMI')\n", (20348, 20355), True, 'import matplotlib.pyplot as plt\n'), ((20360, 20378), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (20370, 20378), True, 'import matplotlib.pyplot as plt\n'), ((20383, 20402), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (20393, 20402), True, 'import matplotlib.pyplot as plt\n'), ((20407, 20419), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (20417, 20419), True, 'import matplotlib.pyplot as plt\n'), ((20424, 20447), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (20434, 20447), True, 'import matplotlib.pyplot as plt\n'), ((20463, 20485), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(11)'], {}), '(20, 1, 11)\n', (20474, 20485), True, 'import matplotlib.pyplot as plt\n'), ((20517, 20564), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'BIAS'], {'label': '"""BIAS(n=5)"""'}), "(data['date'], BIAS, label='BIAS(n=5)')\n", (20525, 20564), True, 'import matplotlib.pyplot as plt\n'), ((20569, 20586), 'matplotlib.pyplot.title', 'plt.title', (['"""BIAS"""'], {}), "('BIAS')\n", (20578, 20586), True, 'import matplotlib.pyplot as plt\n'), ((20591, 20609), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (20601, 20609), True, 'import matplotlib.pyplot as plt\n'), ((20614, 20633), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (20624, 20633), True, 'import matplotlib.pyplot as plt\n'), ((20638, 20650), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (20648, 20650), True, 'import matplotlib.pyplot as plt\n'), ((20655, 20678), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (20665, 20678), True, 'import matplotlib.pyplot as plt\n'), ((20697, 20719), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(12)'], {}), '(20, 1, 12)\n', (20708, 20719), True, 'import matplotlib.pyplot as plt\n'), ((20749, 20794), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'ASI'], {'label': '"""ASI(n=5)"""'}), "(data['date'], ASI, label='ASI(n=5)')\n", (20757, 20794), True, 'import matplotlib.pyplot as plt\n'), ((20799, 20815), 'matplotlib.pyplot.title', 'plt.title', (['"""ASI"""'], {}), "('ASI')\n", (20808, 20815), True, 'import matplotlib.pyplot as plt\n'), ((20820, 20838), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (20830, 20838), True, 'import matplotlib.pyplot as plt\n'), ((20843, 20862), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (20853, 20862), True, 'import matplotlib.pyplot as plt\n'), ((20867, 20879), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (20877, 20879), True, 'import matplotlib.pyplot as plt\n'), ((20884, 20907), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), 
'(rotation=90)\n', (20894, 20907), True, 'import matplotlib.pyplot as plt\n'), ((20926, 20948), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(13)'], {}), '(20, 1, 13)\n', (20937, 20948), True, 'import matplotlib.pyplot as plt\n'), ((20977, 21021), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'VR'], {'label': '"""VR(n=26)"""'}), "(data['date'], VR, label='VR(n=26)')\n", (20985, 21021), True, 'import matplotlib.pyplot as plt\n'), ((21026, 21041), 'matplotlib.pyplot.title', 'plt.title', (['"""VR"""'], {}), "('VR')\n", (21035, 21041), True, 'import matplotlib.pyplot as plt\n'), ((21046, 21064), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (21056, 21064), True, 'import matplotlib.pyplot as plt\n'), ((21069, 21088), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (21079, 21088), True, 'import matplotlib.pyplot as plt\n'), ((21093, 21105), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (21103, 21105), True, 'import matplotlib.pyplot as plt\n'), ((21110, 21133), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (21120, 21133), True, 'import matplotlib.pyplot as plt\n'), ((21152, 21174), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(14)'], {}), '(20, 1, 14)\n', (21163, 21174), True, 'import matplotlib.pyplot as plt\n'), ((21209, 21253), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'AR'], {'label': '"""AR(n=26)"""'}), "(data['date'], AR, label='AR(n=26)')\n", (21217, 21253), True, 'import matplotlib.pyplot as plt\n'), ((21258, 21302), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'BR'], {'label': '"""BR(n=26)"""'}), "(data['date'], BR, label='BR(n=26)')\n", (21266, 21302), True, 'import matplotlib.pyplot as plt\n'), ((21307, 21324), 'matplotlib.pyplot.title', 'plt.title', (['"""ARBR"""'], {}), "('ARBR')\n", (21316, 21324), True, 'import matplotlib.pyplot as plt\n'), ((21329, 21347), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (21339, 21347), True, 'import matplotlib.pyplot as plt\n'), ((21352, 21371), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (21362, 21371), True, 'import matplotlib.pyplot as plt\n'), ((21376, 21388), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (21386, 21388), True, 'import matplotlib.pyplot as plt\n'), ((21393, 21416), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (21403, 21416), True, 'import matplotlib.pyplot as plt\n'), ((21434, 21456), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(15)'], {}), '(20, 1, 15)\n', (21445, 21456), True, 'import matplotlib.pyplot as plt\n'), ((21499, 21545), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'DPO'], {'label': '"""DPO(n=20)"""'}), "(data['date'], DPO, label='DPO(n=20)')\n", (21507, 21545), True, 'import matplotlib.pyplot as plt\n'), ((21550, 21599), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'MADPO'], {'label': '"""MADPO(m=6)"""'}), "(data['date'], MADPO, label='MADPO(m=6)')\n", (21558, 21599), True, 'import matplotlib.pyplot as plt\n'), ((21604, 21620), 'matplotlib.pyplot.title', 'plt.title', (['"""DPO"""'], {}), "('DPO')\n", (21613, 21620), True, 'import matplotlib.pyplot as plt\n'), ((21625, 21643), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (21635, 21643), True, 'import matplotlib.pyplot as plt\n'), ((21648, 21667), 'matplotlib.pyplot.ylabel', 
'plt.ylabel', (['"""value"""'], {}), "('value')\n", (21658, 21667), True, 'import matplotlib.pyplot as plt\n'), ((21672, 21684), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (21682, 21684), True, 'import matplotlib.pyplot as plt\n'), ((21689, 21712), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (21699, 21712), True, 'import matplotlib.pyplot as plt\n'), ((21734, 21756), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(16)'], {}), '(20, 1, 16)\n', (21745, 21756), True, 'import matplotlib.pyplot as plt\n'), ((21801, 21848), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'TRIX'], {'label': '"""DPO(n=12)"""'}), "(data['date'], TRIX, label='DPO(n=12)')\n", (21809, 21848), True, 'import matplotlib.pyplot as plt\n'), ((21853, 21902), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'TRMA'], {'label': '"""MADPO(m=20)"""'}), "(data['date'], TRMA, label='MADPO(m=20)')\n", (21861, 21902), True, 'import matplotlib.pyplot as plt\n'), ((21907, 21924), 'matplotlib.pyplot.title', 'plt.title', (['"""TRIX"""'], {}), "('TRIX')\n", (21916, 21924), True, 'import matplotlib.pyplot as plt\n'), ((21929, 21947), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (21939, 21947), True, 'import matplotlib.pyplot as plt\n'), ((21952, 21971), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (21962, 21971), True, 'import matplotlib.pyplot as plt\n'), ((21976, 21988), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (21986, 21988), True, 'import matplotlib.pyplot as plt\n'), ((21993, 22016), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (22003, 22016), True, 'import matplotlib.pyplot as plt\n'), ((22033, 22055), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(17)'], {}), '(20, 1, 17)\n', (22044, 22055), True, 'import matplotlib.pyplot as plt\n'), ((22080, 22131), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'BBI'], {'label': '"""BBI(3,6,12,24)"""'}), "(data['date'], BBI, label='BBI(3,6,12,24)')\n", (22088, 22131), True, 'import matplotlib.pyplot as plt\n'), ((22136, 22152), 'matplotlib.pyplot.title', 'plt.title', (['"""BBI"""'], {}), "('BBI')\n", (22145, 22152), True, 'import matplotlib.pyplot as plt\n'), ((22157, 22175), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (22167, 22175), True, 'import matplotlib.pyplot as plt\n'), ((22180, 22199), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (22190, 22199), True, 'import matplotlib.pyplot as plt\n'), ((22204, 22216), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (22214, 22216), True, 'import matplotlib.pyplot as plt\n'), ((22221, 22244), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (22231, 22244), True, 'import matplotlib.pyplot as plt\n'), ((22261, 22283), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(18)'], {}), '(20, 1, 18)\n', (22272, 22283), True, 'import matplotlib.pyplot as plt\n'), ((22313, 22358), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'MTM'], {'label': '"""MTM(n=6)"""'}), "(data['date'], MTM, label='MTM(n=6)')\n", (22321, 22358), True, 'import matplotlib.pyplot as plt\n'), ((22363, 22379), 'matplotlib.pyplot.title', 'plt.title', (['"""MTM"""'], {}), "('MTM')\n", (22372, 22379), True, 'import matplotlib.pyplot as plt\n'), ((22384, 22402), 'matplotlib.pyplot.xlabel', 'plt.xlabel', 
(['"""date"""'], {}), "('date')\n", (22394, 22402), True, 'import matplotlib.pyplot as plt\n'), ((22407, 22426), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (22417, 22426), True, 'import matplotlib.pyplot as plt\n'), ((22431, 22443), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (22441, 22443), True, 'import matplotlib.pyplot as plt\n'), ((22448, 22471), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (22458, 22471), True, 'import matplotlib.pyplot as plt\n'), ((22488, 22510), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(19)'], {}), '(20, 1, 19)\n', (22499, 22510), True, 'import matplotlib.pyplot as plt\n'), ((22535, 22575), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'OBV'], {'label': '"""OBV"""'}), "(data['date'], OBV, label='OBV')\n", (22543, 22575), True, 'import matplotlib.pyplot as plt\n'), ((22580, 22596), 'matplotlib.pyplot.title', 'plt.title', (['"""OBV"""'], {}), "('OBV')\n", (22589, 22596), True, 'import matplotlib.pyplot as plt\n'), ((22601, 22619), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (22611, 22619), True, 'import matplotlib.pyplot as plt\n'), ((22624, 22643), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (22634, 22643), True, 'import matplotlib.pyplot as plt\n'), ((22648, 22660), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (22658, 22660), True, 'import matplotlib.pyplot as plt\n'), ((22665, 22688), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (22675, 22688), True, 'import matplotlib.pyplot as plt\n'), ((22694, 22712), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (22710, 22712), True, 'import matplotlib.pyplot as plt\n'), ((4209, 4222), 'numpy.asarray', 'np.asarray', (['K'], {}), '(K)\n', (4219, 4222), True, 'import numpy as np\n'), ((4224, 4237), 'numpy.asarray', 'np.asarray', (['D'], {}), '(D)\n', (4234, 4237), True, 'import numpy as np\n'), ((4239, 4252), 'numpy.asarray', 'np.asarray', (['J'], {}), '(J)\n', (4249, 4252), True, 'import numpy as np\n'), ((10014, 10045), 'numpy.true_divide', 'np.true_divide', (['(CLOSES - MA)', 'MA'], {}), '(CLOSES - MA, MA)\n', (10028, 10045), True, 'import numpy as np\n'), ((12952, 12965), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (12960, 12965), True, 'import numpy as np\n'), ((12967, 12980), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (12975, 12980), True, 'import numpy as np\n'), ((12982, 12995), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (12990, 12995), True, 'import numpy as np\n'), ((12997, 13010), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (13005, 13010), True, 'import numpy as np\n'), ((13025, 13038), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (13033, 13038), True, 'import numpy as np\n'), ((13040, 13053), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (13048, 13053), True, 'import numpy as np\n'), ((13993, 14007), 'numpy.asarray', 'np.asarray', (['AR'], {}), '(AR)\n', (14003, 14007), True, 'import numpy as np\n'), ((14009, 14023), 'numpy.asarray', 'np.asarray', (['BR'], {}), '(BR)\n', (14019, 14023), True, 'import numpy as np\n'), ((15192, 15210), 'numpy.average', 'np.average', (['CLOSES'], {}), '(CLOSES)\n', (15202, 15210), True, 'import numpy as np\n'), ((22738, 22748), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22746, 22748), True, 'import matplotlib.pyplot as plt\n'), ((22785, 
22804), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output'], {}), '(output)\n', (22796, 22804), True, 'import matplotlib.pyplot as plt\n'), ((653, 671), 'numpy.average', 'np.average', (['values'], {}), '(values)\n', (663, 671), True, 'import numpy as np\n'), ((1229, 1243), 'numpy.std', 'np.std', (['values'], {}), '(values)\n', (1235, 1243), True, 'import numpy as np\n'), ((6876, 6894), 'numpy.average', 'np.average', (['values'], {}), '(values)\n', (6886, 6894), True, 'import numpy as np\n'), ((13204, 13221), 'numpy.append', 'np.append', (['H', '[h]'], {}), '(H, [h])\n', (13213, 13221), True, 'import numpy as np\n'), ((13329, 13346), 'numpy.append', 'np.append', (['L', '[l]'], {}), '(L, [l])\n', (13338, 13346), True, 'import numpy as np\n'), ((13455, 13472), 'numpy.append', 'np.append', (['O', '[o]'], {}), '(O, [o])\n', (13464, 13472), True, 'import numpy as np\n'), ((13589, 13608), 'numpy.append', 'np.append', (['PC', '[pc]'], {}), '(PC, [pc])\n', (13598, 13608), True, 'import numpy as np\n'), ((13794, 13813), 'numpy.append', 'np.append', (['AR', '[ar]'], {}), '(AR, [ar])\n', (13803, 13813), True, 'import numpy as np\n'), ((13933, 13952), 'numpy.append', 'np.append', (['BR', '[br]'], {}), '(BR, [br])\n', (13942, 13952), True, 'import numpy as np\n'), ((8078, 8100), 'numpy.isclose', 'np.isclose', (['p_dm', 'm_dm'], {}), '(p_dm, m_dm)\n', (8088, 8100), True, 'import numpy as np\n'), ((11205, 11221), 'numpy.isclose', 'np.isclose', (['r', '(0)'], {}), '(r, 0)\n', (11215, 11221), True, 'import numpy as np\n'), ((11225, 11241), 'numpy.isclose', 'np.isclose', (['l', '(0)'], {}), '(l, 0)\n', (11235, 11241), True, 'import numpy as np\n'), ((13270, 13285), 'numpy.delete', 'np.delete', (['H', '(0)'], {}), '(H, 0)\n', (13279, 13285), True, 'import numpy as np\n'), ((13395, 13410), 'numpy.delete', 'np.delete', (['L', '(0)'], {}), '(L, 0)\n', (13404, 13410), True, 'import numpy as np\n'), ((13521, 13536), 'numpy.delete', 'np.delete', (['O', '(0)'], {}), '(O, 0)\n', (13530, 13536), True, 'import numpy as np\n'), ((13659, 13675), 'numpy.delete', 'np.delete', (['PC', '(0)'], {}), '(PC, 0)\n', (13668, 13675), True, 'import numpy as np\n'), ((5183, 5193), 'numpy.sum', 'np.sum', (['UP'], {}), '(UP)\n', (5189, 5193), True, 'import numpy as np\n'), ((8684, 8700), 'numpy.average', 'np.average', (['P_DM'], {}), '(P_DM)\n', (8694, 8700), True, 'import numpy as np\n'), ((8703, 8717), 'numpy.average', 'np.average', (['TR'], {}), '(TR)\n', (8713, 8717), True, 'import numpy as np\n'), ((8801, 8817), 'numpy.average', 'np.average', (['M_DM'], {}), '(M_DM)\n', (8811, 8817), True, 'import numpy as np\n'), ((8820, 8834), 'numpy.average', 'np.average', (['TR'], {}), '(TR)\n', (8830, 8834), True, 'import numpy as np\n'), ((15936, 15955), 'numpy.average', 'np.average', (['CS[-3:]'], {}), '(CS[-3:])\n', (15946, 15955), True, 'import numpy as np\n'), ((15957, 15976), 'numpy.average', 'np.average', (['CS[-6:]'], {}), '(CS[-6:])\n', (15967, 15976), True, 'import numpy as np\n'), ((15978, 15998), 'numpy.average', 'np.average', (['CS[-12:]'], {}), '(CS[-12:])\n', (15988, 15998), True, 'import numpy as np\n'), ((16000, 16020), 'numpy.average', 'np.average', (['CS[-24:]'], {}), '(CS[-24:])\n', (16010, 16020), True, 'import numpy as np\n'), ((5213, 5223), 'numpy.sum', 'np.sum', (['UP'], {}), '(UP)\n', (5219, 5223), True, 'import numpy as np\n'), ((5198, 5210), 'numpy.sum', 'np.sum', (['DOWN'], {}), '(DOWN)\n', (5204, 5210), True, 'import numpy as np\n'), ((13702, 13715), 'numpy.asarray', 'np.asarray', (['H'], {}), '(H)\n', (13712, 
13715), True, 'import numpy as np\n'), ((13718, 13731), 'numpy.asarray', 'np.asarray', (['O'], {}), '(O)\n', (13728, 13731), True, 'import numpy as np\n'), ((13739, 13752), 'numpy.asarray', 'np.asarray', (['O'], {}), '(O)\n', (13749, 13752), True, 'import numpy as np\n'), ((13755, 13768), 'numpy.asarray', 'np.asarray', (['L'], {}), '(L)\n', (13765, 13768), True, 'import numpy as np\n'), ((13839, 13852), 'numpy.asarray', 'np.asarray', (['H'], {}), '(H)\n', (13849, 13852), True, 'import numpy as np\n'), ((13855, 13869), 'numpy.asarray', 'np.asarray', (['PC'], {}), '(PC)\n', (13865, 13869), True, 'import numpy as np\n'), ((13877, 13891), 'numpy.asarray', 'np.asarray', (['PC'], {}), '(PC)\n', (13887, 13891), True, 'import numpy as np\n'), ((13894, 13907), 'numpy.asarray', 'np.asarray', (['L'], {}), '(L)\n', (13904, 13907), True, 'import numpy as np\n')]
|
import mxnet as mx
import mxnet.ndarray as nd
import mxnet.gluon as gluon
import gluonnlp
from mxnet.io import NDArrayIter
from tqdm import tqdm
import json
import argparse
import pandas as pd
import os
import sys
import numpy as np
from gensim.corpora import Dictionary
from sklearn.metrics import recall_score, confusion_matrix
def load_data(trainFile, dct, ctx = mx.cpu(0)):
labels = []
num_lines = sum(1 for line in open(trainFile))
array = nd.ones((num_lines, SEQ_LENGTH), dtype='float32', ctx = ctx)
print("Loading data: ")
pbar = tqdm(total = num_lines)
with open(trainFile) as f:
for i, line in enumerate(f):
l = json.loads(line)
text = l['tokenized_text']
label = l['type']
labels.append(label)
array[i] = tokens_to_idx(text, dct)
pbar.update(1)
pbar.close()
return array, label_binarize(labels, ctx)
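# Map tokens to dictionary indices, truncating or padding to SEQ_LENGTH; -1 marks padding/unknown tokens.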
def tokens_to_idx(tokens, dct, ctx = mx.cpu(0)):
array = [dct.token2id[token] if token in dct.token2id else -1 for token in tokens]
if len(array) > SEQ_LENGTH:
array = array[0:SEQ_LENGTH]
else:
array.extend([-1 for i in range(0, SEQ_LENGTH - len(array))])
return nd.array(array, ctx = ctx)
def label_binarize(labels, ctx = mx.cpu(0)):
lab = nd.zeros(len(labels), ctx = ctx)
for i, label in enumerate(labels):
if label == 'fake':
lab[i] = 1
return lab
def recall(y, y_hat):
y = y.asnumpy()
y_hat = y_hat.asnumpy()
return recall_score(y, y_hat), confusion_matrix(y, y_hat).ravel()
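# Bidirectional LSTM binary classifier: embedding -> LSTM -> dropout -> dense(1, sigmoid).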
class LSTM(gluon.Block):
def __init__(self, vocab_size, num_embed, num_hidden, num_layers, dropout, **kwargs):
super(LSTM, self).__init__(**kwargs)
with self.name_scope():
self.encoder = gluon.nn.Embedding(vocab_size, num_embed)
self.LSTM1 = gluon.rnn.LSTM(num_embed, num_layers, layout = 'NTC', bidirectional = True)
self.dropout = gluon.nn.Dropout(dropout)
self.fc1 = gluon.nn.Dense(1, activation='sigmoid')
def forward(self, inputs, hidden):
emb = self.encoder(inputs)
output, hidden = self.LSTM1(emb, hidden)
output = self.dropout(output)
output = self.fc1(output)
return output, hidden
def begin_state(self, *args, **kwargs):
return self.LSTM1.begin_state(*args, **kwargs)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Arguments for LSTM model')
parser.add_argument('--test', nargs='+', type=str, help = "Validation set file", required=True)
parser.add_argument('--input', type=str, help = "Input directory for the model files")
parser.add_argument('--dictFile', type=str, help = "Path to the dictionary file")
    parser.add_argument('--SEQ_LENGTH', type = int, help = "Fixed length to expand or shrink text to")
    parser.add_argument('--EMBEDDING_DIM', type = int, help = "Size of the embedding dimension")
parser.add_argument('--HIDDEN', type = int, help = "Size of the hidden layer")
parser.add_argument('--LAYERS', type = int, help = "Number of hidden layers")
    parser.add_argument('--DROPOUT', type = float, help = "Dropout probability")
parser.add_argument('--BATCH_SIZE', type = int, help = "Batch size")
parser.add_argument('--utils', type=str, help = "Helper directory")
parser.add_argument('--db', type=str, help = "DB name", required=True)
parser.add_argument('--collection', type=str, help = "DB collection")
parser.add_argument('--host', type=str, help = "DB host")
parser.add_argument('--port', type=int, help = "Port number of db")
args = parser.parse_args()
sys.path.append(args.utils)
from register_experiment import Register
testFiles = args.test
SEQ_LENGTH = args.SEQ_LENGTH
EMBEDDING_DIM = args.EMBEDDING_DIM
HIDDEN = args.HIDDEN
LAYERS = args.LAYERS
DROPOUT = args.DROPOUT
BATCH_SIZE = args.BATCH_SIZE
ctx = mx.gpu(1)
files = []
# r=root, d=directories, f = files
for r, d, f in os.walk(args.input):
for file in f:
files.append(os.path.join(r, file))
files.sort()
r = Register(args.host, args.port, args.db, args.collection)
print(r.getLastExperiment())
    pbar = tqdm(total=len(testFiles))
for i, test_file in enumerate(testFiles):
dct = Dictionary.load(args.dictFile)
array, labels = load_data(test_file, dct)
acc = mx.metric.Accuracy()
accuracy = []
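        # Evaluate every saved checkpoint on this validation file and log accuracy/recall per checkpoint.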
for j, model in enumerate(files):
recall_list = []
cfMatrix = []
net = LSTM(len(dct), EMBEDDING_DIM, HIDDEN, LAYERS, DROPOUT)
net.load_parameters(model, ctx=ctx)
hidden = net.begin_state(func=mx.nd.zeros, batch_size=BATCH_SIZE, ctx = ctx)
nd_iter = NDArrayIter(data={'data':array},
label={'softmax_label':labels},
batch_size=BATCH_SIZE)
for batch in nd_iter:
output, _ = net(batch.data[0].copyto(ctx), hidden)
pred = output > 0.5
y = batch.label[0]
acc.update(y, pred)
rec, mat = recall(y, pred)
recall_list.append(rec)
cfMatrix.append(mat)
accuracy.append(acc.get()[1])
r.addEpochs(j, {'accuracy' : acc.get()[1], 'recall' : np.mean(recall_list), 'Confusion Matrix' : list(map(int, sum(cfMatrix)))}, r.getLastExperiment() + 1, 'valid')
pbar.update(1)
pbar.close()
r.closeExperiment(r.getLastExperiment() + 1)
|
[
"mxnet.ndarray.array",
"argparse.ArgumentParser",
"os.walk",
"mxnet.gluon.nn.Dropout",
"numpy.mean",
"os.path.join",
"sys.path.append",
"json.loads",
"gensim.corpora.Dictionary.load",
"mxnet.io.NDArrayIter",
"mxnet.gpu",
"tqdm.tqdm",
"mxnet.gluon.rnn.LSTM",
"register_experiment.Register",
"mxnet.metric.Accuracy",
"sklearn.metrics.recall_score",
"mxnet.cpu",
"mxnet.ndarray.ones",
"mxnet.gluon.nn.Dense",
"sklearn.metrics.confusion_matrix",
"mxnet.gluon.nn.Embedding"
] |
[((368, 377), 'mxnet.cpu', 'mx.cpu', (['(0)'], {}), '(0)\n', (374, 377), True, 'import mxnet as mx\n'), ((450, 508), 'mxnet.ndarray.ones', 'nd.ones', (['(num_lines, SEQ_LENGTH)'], {'dtype': '"""float32"""', 'ctx': 'ctx'}), "((num_lines, SEQ_LENGTH), dtype='float32', ctx=ctx)\n", (457, 508), True, 'import mxnet.ndarray as nd\n'), ((544, 565), 'tqdm.tqdm', 'tqdm', ([], {'total': 'num_lines'}), '(total=num_lines)\n', (548, 565), False, 'from tqdm import tqdm\n'), ((878, 887), 'mxnet.cpu', 'mx.cpu', (['(0)'], {}), '(0)\n', (884, 887), True, 'import mxnet as mx\n'), ((1136, 1160), 'mxnet.ndarray.array', 'nd.array', (['array'], {'ctx': 'ctx'}), '(array, ctx=ctx)\n', (1144, 1160), True, 'import mxnet.ndarray as nd\n'), ((1197, 1206), 'mxnet.cpu', 'mx.cpu', (['(0)'], {}), '(0)\n', (1203, 1206), True, 'import mxnet as mx\n'), ((2220, 2283), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Arguments for LSTM model"""'}), "(description='Arguments for LSTM model')\n", (2243, 2283), False, 'import argparse\n'), ((3429, 3456), 'sys.path.append', 'sys.path.append', (['args.utils'], {}), '(args.utils)\n', (3444, 3456), False, 'import sys\n'), ((3696, 3705), 'mxnet.gpu', 'mx.gpu', (['(1)'], {}), '(1)\n', (3702, 3705), True, 'import mxnet as mx\n'), ((3771, 3790), 'os.walk', 'os.walk', (['args.input'], {}), '(args.input)\n', (3778, 3790), False, 'import os\n'), ((3868, 3924), 'register_experiment.Register', 'Register', (['args.host', 'args.port', 'args.db', 'args.collection'], {}), '(args.host, args.port, args.db, args.collection)\n', (3876, 3924), False, 'from register_experiment import Register\n'), ((1406, 1428), 'sklearn.metrics.recall_score', 'recall_score', (['y', 'y_hat'], {}), '(y, y_hat)\n', (1418, 1428), False, 'from sklearn.metrics import recall_score, confusion_matrix\n'), ((4036, 4066), 'gensim.corpora.Dictionary.load', 'Dictionary.load', (['args.dictFile'], {}), '(args.dictFile)\n', (4051, 4066), False, 'from gensim.corpora import Dictionary\n'), ((4119, 4139), 'mxnet.metric.Accuracy', 'mx.metric.Accuracy', ([], {}), '()\n', (4137, 4139), True, 'import mxnet as mx\n'), ((634, 650), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (644, 650), False, 'import json\n'), ((1662, 1703), 'mxnet.gluon.nn.Embedding', 'gluon.nn.Embedding', (['vocab_size', 'num_embed'], {}), '(vocab_size, num_embed)\n', (1680, 1703), True, 'import mxnet.gluon as gluon\n'), ((1720, 1791), 'mxnet.gluon.rnn.LSTM', 'gluon.rnn.LSTM', (['num_embed', 'num_layers'], {'layout': '"""NTC"""', 'bidirectional': '(True)'}), "(num_embed, num_layers, layout='NTC', bidirectional=True)\n", (1734, 1791), True, 'import mxnet.gluon as gluon\n'), ((1814, 1839), 'mxnet.gluon.nn.Dropout', 'gluon.nn.Dropout', (['dropout'], {}), '(dropout)\n', (1830, 1839), True, 'import mxnet.gluon as gluon\n'), ((1854, 1893), 'mxnet.gluon.nn.Dense', 'gluon.nn.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (1868, 1893), True, 'import mxnet.gluon as gluon\n'), ((4425, 4518), 'mxnet.io.NDArrayIter', 'NDArrayIter', ([], {'data': "{'data': array}", 'label': "{'softmax_label': labels}", 'batch_size': 'BATCH_SIZE'}), "(data={'data': array}, label={'softmax_label': labels},\n batch_size=BATCH_SIZE)\n", (4436, 4518), False, 'from mxnet.io import NDArrayIter\n'), ((1430, 1456), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y', 'y_hat'], {}), '(y, y_hat)\n', (1446, 1456), False, 'from sklearn.metrics import recall_score, confusion_matrix\n'), ((3825, 3846), 'os.path.join', 
'os.path.join', (['r', 'file'], {}), '(r, file)\n', (3837, 3846), False, 'import os\n'), ((4856, 4876), 'numpy.mean', 'np.mean', (['recall_list'], {}), '(recall_list)\n', (4863, 4876), True, 'import numpy as np\n')]
|
# coding: utf-8
# In[1]:
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')
import tensorflow as tf
import numpy as np
import pandas as pd
import time
from numpy import genfromtxt
from scipy import stats
# In[2]:
start_time = time.time()
# In[3]:
def read_data(file_name):
df = pd.read_csv(file_name, sep='\t', header=None)
return df
# In[4]:
df = read_data('lezhin_public_dataset_training.tsv')
# In[5]:
# df.iloc[:, :20]
del df[7], df[8], df[16], df[18]
# In[6]:
df.describe()
# In[7]:
features = df.iloc[:, 1:].values
labels = df.iloc[:, :1].values
print(stats.describe(features).variance)
print(features.shape, labels.shape)
# In[8]:
rnd_indices = np.random.rand(len(features)) < 0.70
train_x = features[rnd_indices]
train_y = labels[rnd_indices]
test_x = features[~rnd_indices]
test_y = labels[~rnd_indices]
print("train row count : %d, test row count : %d" % (train_x.shape[0], test_x.shape[0]))
feature_count = train_x.shape[1]
label_count = train_y.shape[1]
print(feature_count, label_count)
# In[9]:
training_epochs = 90
learning_rate = 0.01
cost_history = np.empty(shape=[1],dtype=float)
nb_classes = 2
X = tf.placeholder(tf.float32,[None,feature_count])
Y = tf.placeholder(tf.int32,[None,label_count])
Y_one_hot = tf.one_hot(Y, nb_classes) # one hot
print("one_hot", Y_one_hot)
Y_one_hot = tf.reshape(Y_one_hot, [-1, nb_classes])
print("reshape", Y_one_hot)
# In[10]:
def init_weights(shape):
return tf.Variable(tf.random_normal(shape, stddev=0.1)), tf.Variable(tf.random_normal([shape[1]]))
def make_hidden_layer(previous_h, weight, bias, p_keep_hidden, is_dropout=True):
h = tf.nn.relu(tf.matmul(previous_h, weight) + bias)
if is_dropout:
h = tf.nn.dropout(h, p_keep_hidden)
return h
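# Three hidden ReLU layers (dropout on the first two, none on the third) followed by a linear output over nb_classes.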
def model(X, p_keep_hidden):
s_1 = feature_count + 2
s_2 = feature_count + 2
s_3 = feature_count
w_h, b = init_weights([feature_count, s_1])
w_h2, b2 = init_weights([s_1, s_2])
w_h3, b3 = init_weights([s_2, s_3])
w_o, b_o = init_weights([s_3, nb_classes])
h = make_hidden_layer(X, w_h, b, p_keep_hidden)
h2 = make_hidden_layer(h, w_h2, b2, p_keep_hidden)
h3 = make_hidden_layer(h2, w_h3, b3, p_keep_hidden, False)
return tf.matmul(h3, w_o) + b_o
# In[11]:
p_keep_hidden = tf.placeholder("float")
h0 = model(X, p_keep_hidden)
# In[12]:
# Cross entropy cost/loss
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=h0, labels=Y_one_hot))
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
# In[13]:
prediction = tf.argmax(h0, 1)
correct_prediction = tf.equal(prediction, tf.argmax(Y_one_hot, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# In[14]:
print(train_x.shape, train_y.shape)
print(test_x.shape, test_y.shape)
print(X.shape, Y.shape)
training_dropout_h = 0.95
batch_size = 2000
batch_length = int(train_x.shape[0] / batch_size)
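# Mini-batch training loop; after each epoch the loss/accuracy is evaluated on the full training set.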
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for step in range(training_epochs + 1):
for batch_num in range(batch_length):
start_idx = batch_num * batch_size
end_idx = (train_x.shape[0] - 1) if batch_num == batch_length - 1 else (batch_num + 1) * batch_size
if batch_num % 200 == 0 or batch_num == batch_length - 1:
print("batch num : %d / %d, index: %d ~ %d" % (batch_num, batch_length - 1, start_idx, end_idx))
sess.run(optimizer, feed_dict={X: train_x[start_idx:end_idx], Y: train_y[start_idx:end_idx], p_keep_hidden: training_dropout_h})
loss, acc = sess.run([cost, accuracy], feed_dict={
X: train_x, Y: train_y, p_keep_hidden: training_dropout_h})
cost_history = np.append(cost_history, acc)
if step % 4 == 0:
print("Step: {:5}\tLoss: {:.3f}\tAcc: {:.2%}".format(
step, loss, acc))
# Test model and check accuracy
pre = tf.argmax(h0, 1)
test_yy = np.transpose(test_y.ravel())
print(test_yy.shape)
correct_prediction = tf.equal(pre, test_yy)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Test Accuracy:', sess.run(accuracy, feed_dict={X: test_x, p_keep_hidden: 1.0}))
# In[15]:
print(cost_history.shape)
plt.plot(range(len(cost_history)),cost_history)
plt.axis([0,training_epochs,0,1])
plt.show()
# In[16]:
sess.close()
end_time = time.time()
print("processing time : %d seconds" % (end_time - start_time,))
|
[
"pandas.read_csv",
"numpy.empty",
"tensorflow.reshape",
"tensorflow.matmul",
"scipy.stats.describe",
"tensorflow.one_hot",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.placeholder",
"tensorflow.cast",
"numpy.append",
"tensorflow.equal",
"matplotlib.pyplot.show",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.random_normal",
"tensorflow.argmax",
"matplotlib.pyplot.axis",
"time.time",
"tensorflow.train.AdamOptimizer",
"tensorflow.nn.dropout"
] |
[((255, 266), 'time.time', 'time.time', ([], {}), '()\n', (264, 266), False, 'import time\n'), ((1126, 1158), 'numpy.empty', 'np.empty', ([], {'shape': '[1]', 'dtype': 'float'}), '(shape=[1], dtype=float)\n', (1134, 1158), True, 'import numpy as np\n'), ((1178, 1227), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, feature_count]'], {}), '(tf.float32, [None, feature_count])\n', (1192, 1227), True, 'import tensorflow as tf\n'), ((1230, 1275), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, label_count]'], {}), '(tf.int32, [None, label_count])\n', (1244, 1275), True, 'import tensorflow as tf\n'), ((1286, 1311), 'tensorflow.one_hot', 'tf.one_hot', (['Y', 'nb_classes'], {}), '(Y, nb_classes)\n', (1296, 1311), True, 'import tensorflow as tf\n'), ((1363, 1402), 'tensorflow.reshape', 'tf.reshape', (['Y_one_hot', '[-1, nb_classes]'], {}), '(Y_one_hot, [-1, nb_classes])\n', (1373, 1402), True, 'import tensorflow as tf\n'), ((2322, 2345), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""'], {}), "('float')\n", (2336, 2345), True, 'import tensorflow as tf\n'), ((2598, 2614), 'tensorflow.argmax', 'tf.argmax', (['h0', '(1)'], {}), '(h0, 1)\n', (2607, 2614), True, 'import tensorflow as tf\n'), ((4403, 4439), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, training_epochs, 0, 1]'], {}), '([0, training_epochs, 0, 1])\n', (4411, 4439), True, 'import matplotlib.pyplot as plt\n'), ((4437, 4447), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4445, 4447), True, 'import matplotlib.pyplot as plt\n'), ((4485, 4496), 'time.time', 'time.time', ([], {}), '()\n', (4494, 4496), False, 'import time\n'), ((314, 359), 'pandas.read_csv', 'pd.read_csv', (['file_name'], {'sep': '"""\t"""', 'header': 'None'}), "(file_name, sep='\\t', header=None)\n", (325, 359), True, 'import pandas as pd\n'), ((2437, 2505), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'h0', 'labels': 'Y_one_hot'}), '(logits=h0, labels=Y_one_hot)\n', (2476, 2505), True, 'import tensorflow as tf\n'), ((2657, 2680), 'tensorflow.argmax', 'tf.argmax', (['Y_one_hot', '(1)'], {}), '(Y_one_hot, 1)\n', (2666, 2680), True, 'import tensorflow as tf\n'), ((2708, 2747), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (2715, 2747), True, 'import tensorflow as tf\n'), ((2957, 2969), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2967, 2969), True, 'import tensorflow as tf\n'), ((4021, 4037), 'tensorflow.argmax', 'tf.argmax', (['h0', '(1)'], {}), '(h0, 1)\n', (4030, 4037), True, 'import tensorflow as tf\n'), ((4131, 4153), 'tensorflow.equal', 'tf.equal', (['pre', 'test_yy'], {}), '(pre, test_yy)\n', (4139, 4153), True, 'import tensorflow as tf\n'), ((610, 634), 'scipy.stats.describe', 'stats.describe', (['features'], {}), '(features)\n', (624, 634), False, 'from scipy import stats\n'), ((1742, 1773), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['h', 'p_keep_hidden'], {}), '(h, p_keep_hidden)\n', (1755, 1773), True, 'import tensorflow as tf\n'), ((2268, 2286), 'tensorflow.matmul', 'tf.matmul', (['h3', 'w_o'], {}), '(h3, w_o)\n', (2277, 2286), True, 'import tensorflow as tf\n'), ((2519, 2556), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (2541, 2556), True, 'import tensorflow as tf\n'), ((2992, 3025), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3023, 3025), True, 'import 
tensorflow as tf\n'), ((3807, 3835), 'numpy.append', 'np.append', (['cost_history', 'acc'], {}), '(cost_history, acc)\n', (3816, 3835), True, 'import numpy as np\n'), ((4184, 4223), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (4191, 4223), True, 'import tensorflow as tf\n'), ((1492, 1527), 'tensorflow.random_normal', 'tf.random_normal', (['shape'], {'stddev': '(0.1)'}), '(shape, stddev=0.1)\n', (1508, 1527), True, 'import tensorflow as tf\n'), ((1542, 1570), 'tensorflow.random_normal', 'tf.random_normal', (['[shape[1]]'], {}), '([shape[1]])\n', (1558, 1570), True, 'import tensorflow as tf\n'), ((1673, 1702), 'tensorflow.matmul', 'tf.matmul', (['previous_h', 'weight'], {}), '(previous_h, weight)\n', (1682, 1702), True, 'import tensorflow as tf\n')]
|
import os
import sys
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), '../..'))
from utils.general_class import ModelPlugin
from utils.ortools_op import SolveMaxMatching
from utils.visual_op import matrix_image2big_image
from utils.writer_op import write_pkl, write_gif
from utils.tqdm_op import tqdm_range
from utils.eval_op import DisentanglemetricFactorMask, DisentanglemetricFactorJointMask
from utils.np_op import np_softmax
from tfops.transform_op import apply_tf_op, apply_tf_op_multi_output, apply_tf_op_multi_input
from tfops.train_op import get_train_op_v2
from tfops.lr_op import DECAY_DICT, DECAY_PARAMS_DICT
from tfops.nets import encoder1_64, decoder1_64
from tfops.loss import sigmoid_cross_entropy_without_mean, vae_kl_cost_weight
import tensorflow as tf
import numpy as np
class Model(ModelPlugin):
def __init__(self, dataset, logfilepath, args):
super().__init__(dataset, logfilepath, args)
self.build()
def build(self):
self.logger.info("Model building starts")
tf.reset_default_graph()
tf.set_random_seed(self.args.rseed)
self.input1 = tf.placeholder(tf.float32, shape = [self.args.nbatch, self.height, self.width, self.nchannel])
self.istrain = tf.placeholder(tf.bool, shape= [])
self.generate_sess()
# Encoding
self.encoder_net = encoder1_64
self.decoder_net = decoder1_64
# Encoder
self.mean_total, self.stddev_total = tf.split(self.encoder_net(self.input1, output_dim=2*self.args.nconti, scope='encoder', reuse=False)['output'], num_or_size_splits=2, axis=1)
self.stddev_total = tf.nn.softplus(self.stddev_total)
self.z_sample = tf.add(self.mean_total, tf.multiply(self.stddev_total, tf.random_normal([self.args.nbatch, self.args.nconti])))
self.dec_output = self.decoder_net(z=self.z_sample, output_channel=self.nchannel, scope="decoder", reuse=False)['output']
# Unary vector
self.rec_cost_vector = sigmoid_cross_entropy_without_mean(labels=self.input1, logits=self.dec_output)
self.rec_cost = tf.reduce_mean(self.rec_cost_vector)
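        # One loss per latent index: reconstruction + KL, where the first idx+1 dims are weighted by beta_min and the rest by beta_max.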
self.loss_list = list()
for idx in range(self.args.nconti):
weight = tf.constant(np.array((idx+1)*[self.args.beta_min] + (self.args.nconti-idx-1)*[self.args.beta_max]), dtype=tf.float32)
kl_cost = vae_kl_cost_weight(mean=self.mean_total, stddev=self.stddev_total, weight=weight)
self.loss_list.append(self.rec_cost+kl_cost+tf.losses.get_regularization_loss())
# Decode
self.latent_ph = tf.placeholder(tf.float32, shape = [self.args.nbatch, self.args.nconti])
self.dec_output_ph = tf.nn.sigmoid(self.decoder_net(z=self.latent_ph, output_channel=self.nchannel, scope="decoder", reuse=True)['output'])
self.logger.info("Model building ends")
def decode(self, latent_input):
return apply_tf_op(inputs=latent_input, session=self.sess, input_gate=self.latent_ph, output_gate=self.dec_output_ph, batch_size=self.args.nbatch)
def set_up_train(self):
self.logger.info("Model setting up train starts")
if not hasattr(self, 'start_iter'): self.start_iter = 0
self.logger.info("Start iter: {}".format(self.start_iter))
decay_func = DECAY_DICT[self.args.dtype]
decay_params = DECAY_PARAMS_DICT[self.args.dtype][self.args.nbatch][self.args.dptype].copy()
decay_params['initial_step'] = self.start_iter
self.lr, update_step_op = decay_func(**decay_params)
self.update_step_op = [update_step_op]
var_list = [v for v in tf.trainable_variables() if 'encoder' in v.name] + [v for v in tf.trainable_variables() if 'decoder' in v.name]
with tf.control_dependencies(tf.get_collection("update_ops")):
self.train_op_list = [get_train_op_v2(tf.train.AdamOptimizer(learning_rate=self.lr, beta1=0.9, beta2=0.999), loss=self.loss_list[v], var_list=var_list) for v in range(self.args.nconti)]
self.logger.info("Model setting up train ends")
def run_batch(self, train_idx):
feed_dict = dict()
feed_dict[self.input1] = self.dataset.next_batch(batch_size=self.args.nbatch)[0]
feed_dict[self.istrain] = True
idx = min(train_idx, self.args.nconti-1)
self.sess.run([self.train_op_list[idx]], feed_dict=feed_dict)
def train(self, niter, piter, siter, save_dir=None, asset_dir=None):
self.logger.info("Model training starts")
final_iter = self.start_iter+niter
max_accuracy = -1
for iter_ in tqdm_range(self.start_iter, final_iter):
train_idx = (iter_ - self.start_iter)//piter
self.run_batch(train_idx)
if (iter_+1)%siter==0 or iter_+1==final_iter:
accuracy = self.evaluate()
self.latent_traversal_gif(path=asset_dir+'{}.gif'.format(iter_+1))
if max_accuracy==-1 or max_accuracy<accuracy:
self.save(iter_, save_dir)
self.logger.info("Save process")
max_accuracy = accuracy
self.logger.info("Model training ends")
def evaluate(self, print_option=False, eps=1e-8, nsample=1024):
total_mean, total_std = self.get_mean_std()
return DisentanglemetricFactorMask(mean=total_mean, std=total_std, nclasses=self.dataset.latents_sizes, sampler=self.dataset.next_batch_latent_fix_idx, print_option=print_option)
def get_mean_std(self):
total_mean, total_std = apply_tf_op_multi_output(inputs=self.image, session=self.sess, input_gate=self.input1, output_gate_list=[self.mean_total, self.stddev_total], batch_size=self.args.nbatch, train_gate=self.istrain)
return total_mean, total_std
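    # Sweep each continuous latent from nmin to nmax; each GIF frame decodes value * I (one latent active per row).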
def latent_traversal_gif(self, path, nimage=50, nmin=-1.0, nmax=1.0):
gif = list()
for i in range(nimage):
value = nmin + (nmax - nmin)*i/nimage
latent_conti = value*np.eye(self.args.nconti)
gif.append(matrix_image2big_image(np.expand_dims(self.decode(latent_input=latent_conti), axis=0)))
write_gif(content=gif, path=path)
|
[
"tensorflow.trainable_variables",
"tensorflow.get_collection",
"tensorflow.reset_default_graph",
"utils.eval_op.DisentanglemetricFactorMask",
"os.path.dirname",
"tensorflow.set_random_seed",
"tfops.transform_op.apply_tf_op_multi_output",
"tensorflow.placeholder",
"tensorflow.losses.get_regularization_loss",
"tfops.transform_op.apply_tf_op",
"tensorflow.reduce_mean",
"utils.tqdm_op.tqdm_range",
"tensorflow.random_normal",
"utils.writer_op.write_gif",
"tfops.loss.vae_kl_cost_weight",
"tfops.loss.sigmoid_cross_entropy_without_mean",
"numpy.array",
"tensorflow.nn.softplus",
"numpy.eye",
"tensorflow.train.AdamOptimizer"
] |
[((1050, 1074), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (1072, 1074), True, 'import tensorflow as tf\n'), ((1083, 1118), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['self.args.rseed'], {}), '(self.args.rseed)\n', (1101, 1118), True, 'import tensorflow as tf\n'), ((1142, 1238), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[self.args.nbatch, self.height, self.width, self.nchannel]'}), '(tf.float32, shape=[self.args.nbatch, self.height, self.width,\n self.nchannel])\n', (1156, 1238), True, 'import tensorflow as tf\n'), ((1260, 1293), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'shape': '[]'}), '(tf.bool, shape=[])\n', (1274, 1293), True, 'import tensorflow as tf\n'), ((1655, 1688), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['self.stddev_total'], {}), '(self.stddev_total)\n', (1669, 1688), True, 'import tensorflow as tf\n'), ((2011, 2089), 'tfops.loss.sigmoid_cross_entropy_without_mean', 'sigmoid_cross_entropy_without_mean', ([], {'labels': 'self.input1', 'logits': 'self.dec_output'}), '(labels=self.input1, logits=self.dec_output)\n', (2045, 2089), False, 'from tfops.loss import sigmoid_cross_entropy_without_mean, vae_kl_cost_weight\n'), ((2114, 2150), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.rec_cost_vector'], {}), '(self.rec_cost_vector)\n', (2128, 2150), True, 'import tensorflow as tf\n'), ((2607, 2677), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[self.args.nbatch, self.args.nconti]'}), '(tf.float32, shape=[self.args.nbatch, self.args.nconti])\n', (2621, 2677), True, 'import tensorflow as tf\n'), ((2929, 3073), 'tfops.transform_op.apply_tf_op', 'apply_tf_op', ([], {'inputs': 'latent_input', 'session': 'self.sess', 'input_gate': 'self.latent_ph', 'output_gate': 'self.dec_output_ph', 'batch_size': 'self.args.nbatch'}), '(inputs=latent_input, session=self.sess, input_gate=self.\n latent_ph, output_gate=self.dec_output_ph, batch_size=self.args.nbatch)\n', (2940, 3073), False, 'from tfops.transform_op import apply_tf_op, apply_tf_op_multi_output, apply_tf_op_multi_input\n'), ((4602, 4641), 'utils.tqdm_op.tqdm_range', 'tqdm_range', (['self.start_iter', 'final_iter'], {}), '(self.start_iter, final_iter)\n', (4612, 4641), False, 'from utils.tqdm_op import tqdm_range\n'), ((5314, 5494), 'utils.eval_op.DisentanglemetricFactorMask', 'DisentanglemetricFactorMask', ([], {'mean': 'total_mean', 'std': 'total_std', 'nclasses': 'self.dataset.latents_sizes', 'sampler': 'self.dataset.next_batch_latent_fix_idx', 'print_option': 'print_option'}), '(mean=total_mean, std=total_std, nclasses=self.\n dataset.latents_sizes, sampler=self.dataset.next_batch_latent_fix_idx,\n print_option=print_option)\n', (5341, 5494), False, 'from utils.eval_op import DisentanglemetricFactorMask, DisentanglemetricFactorJointMask\n'), ((5547, 5751), 'tfops.transform_op.apply_tf_op_multi_output', 'apply_tf_op_multi_output', ([], {'inputs': 'self.image', 'session': 'self.sess', 'input_gate': 'self.input1', 'output_gate_list': '[self.mean_total, self.stddev_total]', 'batch_size': 'self.args.nbatch', 'train_gate': 'self.istrain'}), '(inputs=self.image, session=self.sess, input_gate=\n self.input1, output_gate_list=[self.mean_total, self.stddev_total],\n batch_size=self.args.nbatch, train_gate=self.istrain)\n', (5571, 5751), False, 'from tfops.transform_op import apply_tf_op, apply_tf_op_multi_output, apply_tf_op_multi_input\n'), ((6135, 6168), 'utils.writer_op.write_gif', 'write_gif', ([], {'content': 'gif', 
'path': 'path'}), '(content=gif, path=path)\n', (6144, 6168), False, 'from utils.writer_op import write_pkl, write_gif\n'), ((66, 91), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (81, 91), False, 'import os\n'), ((2389, 2475), 'tfops.loss.vae_kl_cost_weight', 'vae_kl_cost_weight', ([], {'mean': 'self.mean_total', 'stddev': 'self.stddev_total', 'weight': 'weight'}), '(mean=self.mean_total, stddev=self.stddev_total, weight=\n weight)\n', (2407, 2475), False, 'from tfops.loss import sigmoid_cross_entropy_without_mean, vae_kl_cost_weight\n'), ((1769, 1823), 'tensorflow.random_normal', 'tf.random_normal', (['[self.args.nbatch, self.args.nconti]'], {}), '([self.args.nbatch, self.args.nconti])\n', (1785, 1823), True, 'import tensorflow as tf\n'), ((2261, 2361), 'numpy.array', 'np.array', (['((idx + 1) * [self.args.beta_min] + (self.args.nconti - idx - 1) * [self.\n args.beta_max])'], {}), '((idx + 1) * [self.args.beta_min] + (self.args.nconti - idx - 1) *\n [self.args.beta_max])\n', (2269, 2361), True, 'import numpy as np\n'), ((3787, 3818), 'tensorflow.get_collection', 'tf.get_collection', (['"""update_ops"""'], {}), "('update_ops')\n", (3804, 3818), True, 'import tensorflow as tf\n'), ((5991, 6015), 'numpy.eye', 'np.eye', (['self.args.nconti'], {}), '(self.args.nconti)\n', (5997, 6015), True, 'import numpy as np\n'), ((2527, 2562), 'tensorflow.losses.get_regularization_loss', 'tf.losses.get_regularization_loss', ([], {}), '()\n', (2560, 2562), True, 'import tensorflow as tf\n'), ((3636, 3660), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (3658, 3660), True, 'import tensorflow as tf\n'), ((3699, 3723), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (3721, 3723), True, 'import tensorflow as tf\n'), ((3871, 3940), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.lr', 'beta1': '(0.9)', 'beta2': '(0.999)'}), '(learning_rate=self.lr, beta1=0.9, beta2=0.999)\n', (3893, 3940), True, 'import tensorflow as tf\n')]
|
from skfda.representation.basis import (Basis, FDataBasis, Constant, Monomial,
BSpline, Fourier)
from skfda.representation.grid import FDataGrid
from skfda import concatenate
import unittest
import numpy as np
class TestBasis(unittest.TestCase):
# def setUp(self): could be defined for set up before any test
def test_from_data_cholesky(self):
t = np.linspace(0, 1, 5)
x = np.sin(2 * np.pi * t) + np.cos(2 * np.pi * t)
basis = BSpline((0, 1), n_basis=5)
np.testing.assert_array_almost_equal(
FDataBasis.from_data(x, t, basis, method='cholesky'
).coefficients.round(2),
np.array([[1., 2.78, -3., -0.78, 1.]])
)
def test_from_data_qr(self):
t = np.linspace(0, 1, 5)
x = np.sin(2 * np.pi * t) + np.cos(2 * np.pi * t)
basis = BSpline((0, 1), n_basis=5)
np.testing.assert_array_almost_equal(
FDataBasis.from_data(x, t, basis, method='qr'
).coefficients.round(2),
np.array([[1., 2.78, -3., -0.78, 1.]])
)
def test_basis_product_generic(self):
monomial = Monomial(n_basis=5)
fourier = Fourier(n_basis=3)
prod = BSpline(n_basis=9, order=8)
self.assertEqual(Basis.default_basis_of_product(
monomial, fourier), prod)
def test_basis_constant_product(self):
constant = Constant()
monomial = Monomial()
fourier = Fourier()
bspline = BSpline(n_basis=5, order=3)
self.assertEqual(constant.basis_of_product(monomial), monomial)
self.assertEqual(constant.basis_of_product(fourier), fourier)
self.assertEqual(constant.basis_of_product(bspline), bspline)
self.assertEqual(monomial.basis_of_product(constant), monomial)
self.assertEqual(fourier.basis_of_product(constant), fourier)
self.assertEqual(bspline.basis_of_product(constant), bspline)
def test_basis_fourier_product(self):
# Test when periods are the same
fourier = Fourier(n_basis=5)
fourier2 = Fourier(n_basis=3)
prod = Fourier(n_basis=7)
self.assertEqual(fourier.basis_of_product(fourier2), prod)
# Test when periods are different
fourier2 = Fourier(n_basis=3, period=2)
prod = BSpline(n_basis=9, order=8)
self.assertEqual(fourier.basis_of_product(fourier2), prod)
def test_basis_monomial_product(self):
monomial = Monomial(n_basis=5)
monomial2 = Monomial(n_basis=3)
prod = Monomial(n_basis=8)
self.assertEqual(monomial.basis_of_product(monomial2), prod)
def test_basis_bspline_product(self):
bspline = BSpline(n_basis=6, order=4)
bspline2 = BSpline(domain_range=(0, 1), n_basis=6,
order=4, knots=[0, 0.3, 1 / 3, 1])
prod = BSpline(domain_range=(0, 1), n_basis=10, order=7,
knots=[0, 0.3, 1 / 3, 2 / 3, 1])
self.assertEqual(bspline.basis_of_product(bspline2), prod)
def test_basis_inner_matrix(self):
np.testing.assert_array_almost_equal(Monomial(n_basis=3)._inner_matrix(),
[[1, 1 / 2, 1 / 3], [1 / 2, 1 / 3, 1 / 4], [1 / 3, 1 / 4, 1 / 5]])
np.testing.assert_array_almost_equal(Monomial(n_basis=3)._inner_matrix(Monomial(n_basis=3)),
[[1, 1 / 2, 1 / 3], [1 / 2, 1 / 3, 1 / 4], [1 / 3, 1 / 4, 1 / 5]])
np.testing.assert_array_almost_equal(Monomial(n_basis=3)._inner_matrix(Monomial(n_basis=4)),
[[1, 1 / 2, 1 / 3, 1 / 4], [1 / 2, 1 / 3, 1 / 4, 1 / 5], [1 / 3, 1 / 4, 1 / 5, 1 / 6]])
# TODO testing with other basis
def test_basis_gram_matrix(self):
np.testing.assert_allclose(Monomial(n_basis=3).gram_matrix(),
[[1, 1 / 2, 1 / 3], [1 / 2, 1 / 3, 1 / 4], [1 / 3, 1 / 4, 1 / 5]])
np.testing.assert_allclose(Fourier(n_basis=3).gram_matrix(),
np.identity(3))
np.testing.assert_allclose(BSpline(n_basis=6).gram_matrix().round(4),
np.array([[4.760e-02, 2.920e-02, 6.200e-03,
4.000e-04, 0.000e+00, 0.000e+00],
[2.920e-02, 7.380e-02, 5.210e-02,
1.150e-02, 1.000e-04, 0.000e+00],
[6.200e-03, 5.210e-02, 1.089e-01,
7.100e-02, 1.150e-02, 4.000e-04],
[4.000e-04, 1.150e-02, 7.100e-02,
1.089e-01, 5.210e-02, 6.200e-03],
[0.000e+00, 1.000e-04, 1.150e-02,
5.210e-02, 7.380e-02, 2.920e-02],
[0.000e+00, 0.000e+00, 4.000e-04,
6.200e-03, 2.920e-02, 4.760e-02]]))
def test_basis_basis_inprod(self):
monomial = Monomial(n_basis=4)
bspline = BSpline(n_basis=5, order=4)
np.testing.assert_array_almost_equal(
monomial.inner_product(bspline).round(3),
np.array(
[[0.12499983, 0.25000035, 0.24999965, 0.25000035, 0.12499983],
[0.01249991, 0.07500017, 0.12499983, 0.17500017, 0.11249991],
[0.00208338, 0.02916658, 0.07083342, 0.12916658, 0.10208338],
[0.00044654, 0.01339264, 0.04375022, 0.09910693, 0.09330368]])
.round(3)
)
np.testing.assert_array_almost_equal(
monomial.inner_product(bspline),
bspline.inner_product(monomial).T
)
def test_basis_fdatabasis_inprod(self):
monomial = Monomial(n_basis=4)
bspline = BSpline(n_basis=5, order=3)
bsplinefd = FDataBasis(bspline, np.arange(0, 15).reshape(3, 5))
np.testing.assert_array_almost_equal(
monomial.inner_product(bsplinefd).round(3),
np.array([[2., 7., 12.],
[1.29626206, 3.79626206, 6.29626206],
[0.96292873, 2.62959539, 4.29626206],
[0.7682873, 2.0182873, 3.2682873]]).round(3)
)
def test_fdatabasis_fdatabasis_inprod(self):
monomial = Monomial(n_basis=4)
monomialfd = FDataBasis(monomial, [[5, 4, 1, 0],
[4, 2, 1, 0],
[4, 1, 6, 4],
[4, 5, 0, 1],
[5, 6, 2, 0]])
bspline = BSpline(n_basis=5, order=3)
bsplinefd = FDataBasis(bspline, np.arange(0, 15).reshape(3, 5))
np.testing.assert_array_almost_equal(
monomialfd.inner_product(bsplinefd).round(3),
np.array([[16.14797697, 52.81464364, 89.4813103],
[11.55565285, 38.22211951, 64.88878618],
[18.14698361, 55.64698361, 93.14698361],
[15.2495976, 48.9995976, 82.7495976],
[19.70392982, 63.03676315, 106.37009648]]).round(3)
)
np.testing.assert_array_almost_equal(
monomialfd._inner_product_integrate(
bsplinefd, None, None).round(3),
np.array([[16.14797697, 52.81464364, 89.4813103],
[11.55565285, 38.22211951, 64.88878618],
[18.14698361, 55.64698361, 93.14698361],
[15.2495976, 48.9995976, 82.7495976],
[19.70392982, 63.03676315, 106.37009648]]).round(3)
)
def test_comutativity_inprod(self):
monomial = Monomial(n_basis=4)
bspline = BSpline(n_basis=5, order=3)
bsplinefd = FDataBasis(bspline, np.arange(0, 15).reshape(3, 5))
np.testing.assert_array_almost_equal(
bsplinefd.inner_product(monomial).round(3),
np.transpose(monomial.inner_product(bsplinefd).round(3))
)
def test_fdatabasis_times_fdatabasis_fdatabasis(self):
monomial = FDataBasis(Monomial(n_basis=3), [1, 2, 3])
bspline = FDataBasis(BSpline(n_basis=6, order=4), [1, 2, 4, 1, 0, 1])
times_fdar = monomial.times(bspline)
prod_basis = BSpline(n_basis=9, order=6, knots=[0, 0.25, 0.5, 0.75, 1])
prod_coefs = np.array([[0.9788352, 1.6289955, 2.7004969, 6.2678739,
8.7636441, 4.0069960, 0.7126961, 2.8826708,
6.0052311]])
self.assertEqual(prod_basis, times_fdar.basis)
np.testing.assert_array_almost_equal(
prod_coefs, times_fdar.coefficients)
def test_fdatabasis_times_fdatabasis_list(self):
monomial = FDataBasis(Monomial(n_basis=3),
[[1, 2, 3], [4, 5, 6], [7, 8, 9]])
result = monomial.times([3, 2, 1])
expec_basis = Monomial(n_basis=3)
expec_coefs = np.array([[3, 6, 9], [8, 10, 12], [7, 8, 9]])
self.assertEqual(expec_basis, result.basis)
np.testing.assert_array_almost_equal(expec_coefs, result.coefficients)
def test_fdatabasis_times_fdatabasis_int(self):
monomial = FDataBasis(Monomial(n_basis=3),
[[1, 2, 3], [4, 5, 6], [7, 8, 9]])
result = monomial.times(3)
expec_basis = Monomial(n_basis=3)
expec_coefs = np.array([[3, 6, 9], [12, 15, 18], [21, 24, 27]])
self.assertEqual(expec_basis, result.basis)
np.testing.assert_array_almost_equal(expec_coefs, result.coefficients)
def test_fdatabasis__add__(self):
monomial1 = FDataBasis(Monomial(n_basis=3), [1, 2, 3])
monomial2 = FDataBasis(Monomial(n_basis=3), [[1, 2, 3], [3, 4, 5]])
np.testing.assert_equal(monomial1 + monomial2,
FDataBasis(Monomial(n_basis=3),
[[2, 4, 6], [4, 6, 8]]))
np.testing.assert_equal(monomial2 + 1,
FDataBasis(Monomial(n_basis=3),
[[2, 2, 3], [4, 4, 5]]))
np.testing.assert_equal(1 + monomial2,
FDataBasis(Monomial(n_basis=3),
[[2, 2, 3], [4, 4, 5]]))
np.testing.assert_equal(monomial2 + [1, 2],
FDataBasis(Monomial(n_basis=3),
[[2, 2, 3], [5, 4, 5]]))
np.testing.assert_equal([1, 2] + monomial2,
FDataBasis(Monomial(n_basis=3),
[[2, 2, 3], [5, 4, 5]]))
np.testing.assert_raises(NotImplementedError, monomial2.__add__,
FDataBasis(Fourier(n_basis=3),
[[2, 2, 3], [5, 4, 5]]))
def test_fdatabasis__sub__(self):
monomial1 = FDataBasis(Monomial(n_basis=3), [1, 2, 3])
monomial2 = FDataBasis(Monomial(n_basis=3), [[1, 2, 3], [3, 4, 5]])
np.testing.assert_equal(monomial1 - monomial2,
FDataBasis(Monomial(n_basis=3),
[[0, 0, 0], [-2, -2, -2]]))
np.testing.assert_equal(monomial2 - 1,
FDataBasis(Monomial(n_basis=3),
[[0, 2, 3], [2, 4, 5]]))
np.testing.assert_equal(1 - monomial2,
FDataBasis(Monomial(n_basis=3),
[[0, -2, -3], [-2, -4, -5]]))
np.testing.assert_equal(monomial2 - [1, 2],
FDataBasis(Monomial(n_basis=3),
[[0, 2, 3], [1, 4, 5]]))
np.testing.assert_equal([1, 2] - monomial2,
FDataBasis(Monomial(n_basis=3),
[[0, -2, -3], [-1, -4, -5]]))
np.testing.assert_raises(NotImplementedError, monomial2.__sub__,
FDataBasis(Fourier(n_basis=3),
[[2, 2, 3], [5, 4, 5]]))
def test_fdatabasis__mul__(self):
monomial1 = FDataBasis(Monomial(n_basis=3), [1, 2, 3])
monomial2 = FDataBasis(Monomial(n_basis=3), [[1, 2, 3], [3, 4, 5]])
np.testing.assert_equal(monomial1 * 2,
FDataBasis(Monomial(n_basis=3),
[[2, 4, 6]]))
np.testing.assert_equal(3 * monomial2,
FDataBasis(Monomial(n_basis=3),
[[3, 6, 9], [9, 12, 15]]))
np.testing.assert_equal(3 * monomial2,
monomial2 * 3)
np.testing.assert_equal(monomial2 * [1, 2],
FDataBasis(Monomial(n_basis=3),
[[1, 2, 3], [6, 8, 10]]))
np.testing.assert_equal([1, 2] * monomial2,
FDataBasis(Monomial(n_basis=3),
[[1, 2, 3], [6, 8, 10]]))
np.testing.assert_raises(NotImplementedError, monomial2.__mul__,
FDataBasis(Fourier(n_basis=3),
[[2, 2, 3], [5, 4, 5]]))
np.testing.assert_raises(NotImplementedError, monomial2.__mul__,
monomial2)
def test_fdatabasis__mul__2(self):
monomial1 = FDataBasis(Monomial(n_basis=3), [1, 2, 3])
monomial2 = FDataBasis(Monomial(n_basis=3), [[1, 2, 3], [3, 4, 5]])
np.testing.assert_equal(monomial1 / 2,
FDataBasis(Monomial(n_basis=3),
[[1 / 2, 1, 3 / 2]]))
np.testing.assert_equal(monomial2 / 2,
FDataBasis(Monomial(n_basis=3),
[[1 / 2, 1, 3 / 2], [3 / 2, 2, 5 / 2]]))
np.testing.assert_equal(monomial2 / [1, 2],
FDataBasis(Monomial(n_basis=3),
[[1, 2, 3], [3 / 2, 2, 5 / 2]]))
def test_fdatabasis_derivative_constant(self):
monomial = FDataBasis(Monomial(n_basis=8),
[1, 5, 8, 9, 7, 8, 4, 5])
monomial2 = FDataBasis(Monomial(n_basis=5),
[[4, 9, 7, 4, 3],
[1, 7, 9, 8, 5],
[4, 6, 6, 6, 8]])
np.testing.assert_equal(monomial.derivative(),
FDataBasis(Monomial(n_basis=7),
[5, 16, 27, 28, 40, 24, 35]))
np.testing.assert_equal(monomial.derivative(order=0), monomial)
np.testing.assert_equal(monomial.derivative(order=6),
FDataBasis(Monomial(n_basis=2),
[2880, 25200]))
np.testing.assert_equal(monomial2.derivative(),
FDataBasis(Monomial(n_basis=4),
[[9, 14, 12, 12],
[7, 18, 24, 20],
[6, 12, 18, 32]]))
np.testing.assert_equal(monomial2.derivative(order=0), monomial2)
np.testing.assert_equal(monomial2.derivative(order=3),
FDataBasis(Monomial(n_basis=2),
[[24, 72],
[48, 120],
[36, 192]]))
def test_fdatabasis_derivative_monomial(self):
monomial = FDataBasis(Monomial(n_basis=8),
[1, 5, 8, 9, 7, 8, 4, 5])
monomial2 = FDataBasis(Monomial(n_basis=5),
[[4, 9, 7, 4, 3],
[1, 7, 9, 8, 5],
[4, 6, 6, 6, 8]])
np.testing.assert_equal(monomial.derivative(),
FDataBasis(Monomial(n_basis=7),
[5, 16, 27, 28, 40, 24, 35]))
np.testing.assert_equal(monomial.derivative(order=0), monomial)
np.testing.assert_equal(monomial.derivative(order=6),
FDataBasis(Monomial(n_basis=2),
[2880, 25200]))
np.testing.assert_equal(monomial2.derivative(),
FDataBasis(Monomial(n_basis=4),
[[9, 14, 12, 12],
[7, 18, 24, 20],
[6, 12, 18, 32]]))
np.testing.assert_equal(monomial2.derivative(order=0), monomial2)
np.testing.assert_equal(monomial2.derivative(order=3),
FDataBasis(Monomial(n_basis=2),
[[24, 72],
[48, 120],
[36, 192]]))
def test_fdatabasis_derivative_fourier(self):
fourier = FDataBasis(Fourier(n_basis=7),
[1, 5, 8, 9, 8, 4, 5])
fourier2 = FDataBasis(Fourier(n_basis=5),
[[4, 9, 7, 4, 3],
[1, 7, 9, 8, 5],
[4, 6, 6, 6, 8]])
fou0 = fourier.derivative(order=0)
fou1 = fourier.derivative()
fou2 = fourier.derivative(order=2)
np.testing.assert_equal(fou1.basis, fourier.basis)
np.testing.assert_almost_equal(fou1.coefficients.round(5),
np.atleast_2d([0, -50.26548, 31.41593,
-100.53096, 113.09734,
-94.24778, 75.39822]))
np.testing.assert_equal(fou0, fourier)
np.testing.assert_equal(fou2.basis, fourier.basis)
np.testing.assert_almost_equal(fou2.coefficients.round(5),
np.atleast_2d([0, -197.39209, -315.82734,
-1421.22303, -1263.30936,
-1421.22303, -1776.52879]))
fou0 = fourier2.derivative(order=0)
fou1 = fourier2.derivative()
fou2 = fourier2.derivative(order=2)
np.testing.assert_equal(fou1.basis, fourier2.basis)
np.testing.assert_almost_equal(fou1.coefficients.round(5),
[[0, -43.98230, 56.54867, -37.69911, 50.26548],
[0, -56.54867, 43.98230, -
62.83185, 100.53096],
[0, -37.69911, 37.69911, -100.53096, 75.39822]])
np.testing.assert_equal(fou0, fourier2)
np.testing.assert_equal(fou2.basis, fourier2.basis)
np.testing.assert_almost_equal(fou2.coefficients.round(5),
[[0, -355.30576, -276.34892, -631.65468, -473.74101],
[0, -276.34892, -355.30576, -
1263.30936, -789.56835],
[0, -236.87051, -236.87051, -947.48202, -1263.30936]])
def test_fdatabasis_derivative_bspline(self):
bspline = FDataBasis(BSpline(n_basis=8),
[1, 5, 8, 9, 7, 8, 4, 5])
bspline2 = FDataBasis(BSpline(n_basis=5),
[[4, 9, 7, 4, 3],
[1, 7, 9, 8, 5],
[4, 6, 6, 6, 8]])
bs0 = bspline.derivative(order=0)
bs1 = bspline.derivative()
bs2 = bspline.derivative(order=2)
np.testing.assert_equal(bs1.basis, BSpline(n_basis=7, order=3))
np.testing.assert_almost_equal(bs1.coefficients,
np.atleast_2d([60, 22.5, 5,
-10, 5, -30, 15]))
np.testing.assert_equal(bs0, bspline)
np.testing.assert_equal(bs2.basis, BSpline(n_basis=6, order=2))
np.testing.assert_almost_equal(bs2.coefficients,
np.atleast_2d([-375, -87.5, -75,
75, -175, 450]))
bs0 = bspline2.derivative(order=0)
bs1 = bspline2.derivative()
bs2 = bspline2.derivative(order=2)
np.testing.assert_equal(bs1.basis, BSpline(n_basis=4, order=3))
np.testing.assert_almost_equal(bs1.coefficients,
[[30, -6, -9, -6],
[36, 6, -3, -18],
[12, 0, 0, 12]])
np.testing.assert_equal(bs0, bspline2)
np.testing.assert_equal(bs2.basis, BSpline(n_basis=3, order=2))
np.testing.assert_almost_equal(bs2.coefficients,
[[-144, -6, 12],
[-120, -18, -60],
[-48, 0, 48]])
def test_concatenate(self):
sample1 = np.arange(0, 10)
sample2 = np.arange(10, 20)
fd1 = FDataGrid([sample1]).to_basis(Fourier(n_basis=5))
fd2 = FDataGrid([sample2]).to_basis(Fourier(n_basis=5))
fd = concatenate([fd1, fd2])
np.testing.assert_equal(fd.n_samples, 2)
np.testing.assert_equal(fd.dim_codomain, 1)
np.testing.assert_equal(fd.dim_domain, 1)
np.testing.assert_array_equal(fd.coefficients, np.concatenate(
[fd1.coefficients, fd2.coefficients]))
if __name__ == '__main__':
print()
unittest.main()
|
[
"numpy.sin",
"numpy.arange",
"numpy.testing.assert_array_almost_equal",
"numpy.atleast_2d",
"unittest.main",
"skfda.representation.basis.BSpline",
"numpy.testing.assert_almost_equal",
"numpy.identity",
"numpy.linspace",
"numpy.testing.assert_equal",
"numpy.testing.assert_raises",
"skfda.representation.basis.Fourier",
"skfda.representation.basis.Constant",
"skfda.concatenate",
"numpy.cos",
"skfda.representation.basis.Basis.default_basis_of_product",
"numpy.concatenate",
"skfda.representation.basis.FDataBasis.from_data",
"skfda.representation.basis.Monomial",
"numpy.array",
"skfda.representation.grid.FDataGrid",
"skfda.representation.basis.FDataBasis"
] |
[((22203, 22218), 'unittest.main', 'unittest.main', ([], {}), '()\n', (22216, 22218), False, 'import unittest\n'), ((409, 429), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(5)'], {}), '(0, 1, 5)\n', (420, 429), True, 'import numpy as np\n'), ((504, 530), 'skfda.representation.basis.BSpline', 'BSpline', (['(0, 1)'], {'n_basis': '(5)'}), '((0, 1), n_basis=5)\n', (511, 530), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((806, 826), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(5)'], {}), '(0, 1, 5)\n', (817, 826), True, 'import numpy as np\n'), ((901, 927), 'skfda.representation.basis.BSpline', 'BSpline', (['(0, 1)'], {'n_basis': '(5)'}), '((0, 1), n_basis=5)\n', (908, 927), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((1213, 1232), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(5)'}), '(n_basis=5)\n', (1221, 1232), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((1251, 1269), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (1258, 1269), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((1285, 1312), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(9)', 'order': '(8)'}), '(n_basis=9, order=8)\n', (1292, 1312), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((1471, 1481), 'skfda.representation.basis.Constant', 'Constant', ([], {}), '()\n', (1479, 1481), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((1501, 1511), 'skfda.representation.basis.Monomial', 'Monomial', ([], {}), '()\n', (1509, 1511), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((1530, 1539), 'skfda.representation.basis.Fourier', 'Fourier', ([], {}), '()\n', (1537, 1539), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((1558, 1585), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(5)', 'order': '(3)'}), '(n_basis=5, order=3)\n', (1565, 1585), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((2112, 2130), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'n_basis': '(5)'}), '(n_basis=5)\n', (2119, 2130), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((2150, 2168), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (2157, 2168), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((2184, 2202), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'n_basis': '(7)'}), '(n_basis=7)\n', (2191, 2202), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((2332, 2360), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'n_basis': '(3)', 'period': '(2)'}), '(n_basis=3, period=2)\n', (2339, 2360), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((2376, 2403), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(9)', 'order': '(8)'}), '(n_basis=9, order=8)\n', (2383, 2403), False, 'from 
skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((2534, 2553), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(5)'}), '(n_basis=5)\n', (2542, 2553), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((2574, 2593), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (2582, 2593), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((2609, 2628), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(8)'}), '(n_basis=8)\n', (2617, 2628), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((2759, 2786), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(6)', 'order': '(4)'}), '(n_basis=6, order=4)\n', (2766, 2786), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((2806, 2880), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'domain_range': '(0, 1)', 'n_basis': '(6)', 'order': '(4)', 'knots': '[0, 0.3, 1 / 3, 1]'}), '(domain_range=(0, 1), n_basis=6, order=4, knots=[0, 0.3, 1 / 3, 1])\n', (2813, 2880), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((2923, 3009), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'domain_range': '(0, 1)', 'n_basis': '(10)', 'order': '(7)', 'knots': '[0, 0.3, 1 / 3, 2 / 3, 1]'}), '(domain_range=(0, 1), n_basis=10, order=7, knots=[0, 0.3, 1 / 3, 2 /\n 3, 1])\n', (2930, 3009), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((5244, 5263), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(4)'}), '(n_basis=4)\n', (5252, 5263), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((5282, 5309), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(5)', 'order': '(4)'}), '(n_basis=5, order=4)\n', (5289, 5309), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((5992, 6011), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(4)'}), '(n_basis=4)\n', (6000, 6011), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((6030, 6057), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(5)', 'order': '(3)'}), '(n_basis=5, order=3)\n', (6037, 6057), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((6536, 6555), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(4)'}), '(n_basis=4)\n', (6544, 6555), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((6577, 6674), 'skfda.representation.basis.FDataBasis', 'FDataBasis', (['monomial', '[[5, 4, 1, 0], [4, 2, 1, 0], [4, 1, 6, 4], [4, 5, 0, 1], [5, 6, 2, 0]]'], {}), '(monomial, [[5, 4, 1, 0], [4, 2, 1, 0], [4, 1, 6, 4], [4, 5, 0, 1\n ], [5, 6, 2, 0]])\n', (6587, 6674), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((6860, 6887), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(5)', 'order': '(3)'}), '(n_basis=5, order=3)\n', (6867, 6887), False, 'from skfda.representation.basis import Basis, 
FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((7934, 7953), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(4)'}), '(n_basis=4)\n', (7942, 7953), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((7972, 7999), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(5)', 'order': '(3)'}), '(n_basis=5, order=3)\n', (7979, 7999), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((8521, 8579), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(9)', 'order': '(6)', 'knots': '[0, 0.25, 0.5, 0.75, 1]'}), '(n_basis=9, order=6, knots=[0, 0.25, 0.5, 0.75, 1])\n', (8528, 8579), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((8601, 8715), 'numpy.array', 'np.array', (['[[0.9788352, 1.6289955, 2.7004969, 6.2678739, 8.7636441, 4.006996, \n 0.7126961, 2.8826708, 6.0052311]]'], {}), '([[0.9788352, 1.6289955, 2.7004969, 6.2678739, 8.7636441, 4.006996,\n 0.7126961, 2.8826708, 6.0052311]])\n', (8609, 8715), True, 'import numpy as np\n'), ((8847, 8920), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['prod_coefs', 'times_fdar.coefficients'], {}), '(prod_coefs, times_fdar.coefficients)\n', (8883, 8920), True, 'import numpy as np\n'), ((9170, 9189), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (9178, 9189), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((9212, 9257), 'numpy.array', 'np.array', (['[[3, 6, 9], [8, 10, 12], [7, 8, 9]]'], {}), '([[3, 6, 9], [8, 10, 12], [7, 8, 9]])\n', (9220, 9257), True, 'import numpy as np\n'), ((9319, 9389), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['expec_coefs', 'result.coefficients'], {}), '(expec_coefs, result.coefficients)\n', (9355, 9389), True, 'import numpy as np\n'), ((9617, 9636), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (9625, 9636), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((9659, 9708), 'numpy.array', 'np.array', (['[[3, 6, 9], [12, 15, 18], [21, 24, 27]]'], {}), '([[3, 6, 9], [12, 15, 18], [21, 24, 27]])\n', (9667, 9708), True, 'import numpy as np\n'), ((9770, 9840), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['expec_coefs', 'result.coefficients'], {}), '(expec_coefs, result.coefficients)\n', (9806, 9840), True, 'import numpy as np\n'), ((12988, 13041), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['(3 * monomial2)', '(monomial2 * 3)'], {}), '(3 * monomial2, monomial2 * 3)\n', (13011, 13041), True, 'import numpy as np\n'), ((13660, 13735), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['NotImplementedError', 'monomial2.__mul__', 'monomial2'], {}), '(NotImplementedError, monomial2.__mul__, monomial2)\n', (13684, 13735), True, 'import numpy as np\n'), ((17956, 18006), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['fou1.basis', 'fourier.basis'], {}), '(fou1.basis, fourier.basis)\n', (17979, 18006), True, 'import numpy as np\n'), ((18314, 18352), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['fou0', 'fourier'], {}), '(fou0, fourier)\n', (18337, 18352), True, 'import numpy as np\n'), ((18361, 18411), 'numpy.testing.assert_equal', 
'np.testing.assert_equal', (['fou2.basis', 'fourier.basis'], {}), '(fou2.basis, fourier.basis)\n', (18384, 18411), True, 'import numpy as np\n'), ((18857, 18908), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['fou1.basis', 'fourier2.basis'], {}), '(fou1.basis, fourier2.basis)\n', (18880, 18908), True, 'import numpy as np\n'), ((19293, 19332), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['fou0', 'fourier2'], {}), '(fou0, fourier2)\n', (19316, 19332), True, 'import numpy as np\n'), ((19341, 19392), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['fou2.basis', 'fourier2.basis'], {}), '(fou2.basis, fourier2.basis)\n', (19364, 19392), True, 'import numpy as np\n'), ((20534, 20571), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['bs0', 'bspline'], {}), '(bs0, bspline)\n', (20557, 20571), True, 'import numpy as np\n'), ((21048, 21154), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['bs1.coefficients', '[[30, -6, -9, -6], [36, 6, -3, -18], [12, 0, 0, 12]]'], {}), '(bs1.coefficients, [[30, -6, -9, -6], [36, 6,\n -3, -18], [12, 0, 0, 12]])\n', (21078, 21154), True, 'import numpy as np\n'), ((21278, 21316), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['bs0', 'bspline2'], {}), '(bs0, bspline2)\n', (21301, 21316), True, 'import numpy as np\n'), ((21397, 21500), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['bs2.coefficients', '[[-144, -6, 12], [-120, -18, -60], [-48, 0, 48]]'], {}), '(bs2.coefficients, [[-144, -6, 12], [-120, -\n 18, -60], [-48, 0, 48]])\n', (21427, 21500), True, 'import numpy as np\n'), ((21666, 21682), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (21675, 21682), True, 'import numpy as np\n'), ((21701, 21718), 'numpy.arange', 'np.arange', (['(10)', '(20)'], {}), '(10, 20)\n', (21710, 21718), True, 'import numpy as np\n'), ((21861, 21884), 'skfda.concatenate', 'concatenate', (['[fd1, fd2]'], {}), '([fd1, fd2])\n', (21872, 21884), False, 'from skfda import concatenate\n'), ((21894, 21934), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['fd.n_samples', '(2)'], {}), '(fd.n_samples, 2)\n', (21917, 21934), True, 'import numpy as np\n'), ((21943, 21986), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['fd.dim_codomain', '(1)'], {}), '(fd.dim_codomain, 1)\n', (21966, 21986), True, 'import numpy as np\n'), ((21995, 22036), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['fd.dim_domain', '(1)'], {}), '(fd.dim_domain, 1)\n', (22018, 22036), True, 'import numpy as np\n'), ((442, 463), 'numpy.sin', 'np.sin', (['(2 * np.pi * t)'], {}), '(2 * np.pi * t)\n', (448, 463), True, 'import numpy as np\n'), ((466, 487), 'numpy.cos', 'np.cos', (['(2 * np.pi * t)'], {}), '(2 * np.pi * t)\n', (472, 487), True, 'import numpy as np\n'), ((711, 752), 'numpy.array', 'np.array', (['[[1.0, 2.78, -3.0, -0.78, 1.0]]'], {}), '([[1.0, 2.78, -3.0, -0.78, 1.0]])\n', (719, 752), True, 'import numpy as np\n'), ((839, 860), 'numpy.sin', 'np.sin', (['(2 * np.pi * t)'], {}), '(2 * np.pi * t)\n', (845, 860), True, 'import numpy as np\n'), ((863, 884), 'numpy.cos', 'np.cos', (['(2 * np.pi * t)'], {}), '(2 * np.pi * t)\n', (869, 884), True, 'import numpy as np\n'), ((1102, 1143), 'numpy.array', 'np.array', (['[[1.0, 2.78, -3.0, -0.78, 1.0]]'], {}), '([[1.0, 2.78, -3.0, -0.78, 1.0]])\n', (1110, 1143), True, 'import numpy as np\n'), ((1338, 1387), 'skfda.representation.basis.Basis.default_basis_of_product', 'Basis.default_basis_of_product', (['monomial', 
'fourier'], {}), '(monomial, fourier)\n', (1368, 1387), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((4135, 4149), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (4146, 4149), True, 'import numpy as np\n'), ((4264, 4573), 'numpy.array', 'np.array', (['[[0.0476, 0.0292, 0.0062, 0.0004, 0.0, 0.0], [0.0292, 0.0738, 0.0521, \n 0.0115, 0.0001, 0.0], [0.0062, 0.0521, 0.1089, 0.071, 0.0115, 0.0004],\n [0.0004, 0.0115, 0.071, 0.1089, 0.0521, 0.0062], [0.0, 0.0001, 0.0115, \n 0.0521, 0.0738, 0.0292], [0.0, 0.0, 0.0004, 0.0062, 0.0292, 0.0476]]'], {}), '([[0.0476, 0.0292, 0.0062, 0.0004, 0.0, 0.0], [0.0292, 0.0738, \n 0.0521, 0.0115, 0.0001, 0.0], [0.0062, 0.0521, 0.1089, 0.071, 0.0115, \n 0.0004], [0.0004, 0.0115, 0.071, 0.1089, 0.0521, 0.0062], [0.0, 0.0001,\n 0.0115, 0.0521, 0.0738, 0.0292], [0.0, 0.0, 0.0004, 0.0062, 0.0292, \n 0.0476]])\n', (4272, 4573), True, 'import numpy as np\n'), ((8344, 8363), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (8352, 8363), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((8405, 8432), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(6)', 'order': '(4)'}), '(n_basis=6, order=4)\n', (8412, 8432), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((9018, 9037), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (9026, 9037), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((9473, 9492), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (9481, 9492), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((9911, 9930), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (9919, 9930), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((9974, 9993), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (9982, 9993), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((11210, 11229), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (11218, 11229), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((11273, 11292), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (11281, 11292), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((12522, 12541), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (12530, 12541), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((12585, 12604), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (12593, 12604), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((13840, 13859), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (13848, 13859), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), 
((13903, 13922), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (13911, 13922), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((14595, 14614), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(8)'}), '(n_basis=8)\n', (14603, 14614), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((14703, 14722), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(5)'}), '(n_basis=5)\n', (14711, 14722), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((16077, 16096), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(8)'}), '(n_basis=8)\n', (16085, 16096), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((16185, 16204), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(5)'}), '(n_basis=5)\n', (16193, 16204), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((17557, 17575), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'n_basis': '(7)'}), '(n_basis=7)\n', (17564, 17575), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((17659, 17677), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'n_basis': '(5)'}), '(n_basis=5)\n', (17666, 17677), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((18113, 18201), 'numpy.atleast_2d', 'np.atleast_2d', (['[0, -50.26548, 31.41593, -100.53096, 113.09734, -94.24778, 75.39822]'], {}), '([0, -50.26548, 31.41593, -100.53096, 113.09734, -94.24778, \n 75.39822])\n', (18126, 18201), True, 'import numpy as np\n'), ((18518, 18617), 'numpy.atleast_2d', 'np.atleast_2d', (['[0, -197.39209, -315.82734, -1421.22303, -1263.30936, -1421.22303, -1776.52879]'], {}), '([0, -197.39209, -315.82734, -1421.22303, -1263.30936, -\n 1421.22303, -1776.52879])\n', (18531, 18617), True, 'import numpy as np\n'), ((19867, 19885), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(8)'}), '(n_basis=8)\n', (19874, 19885), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((19972, 19990), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(5)'}), '(n_basis=5)\n', (19979, 19990), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((20300, 20327), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(7)', 'order': '(3)'}), '(n_basis=7, order=3)\n', (20307, 20327), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((20425, 20470), 'numpy.atleast_2d', 'np.atleast_2d', (['[60, 22.5, 5, -10, 5, -30, 15]'], {}), '([60, 22.5, 5, -10, 5, -30, 15])\n', (20438, 20470), True, 'import numpy as np\n'), ((20615, 20642), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(6)', 'order': '(2)'}), '(n_basis=6, order=2)\n', (20622, 20642), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((20740, 20788), 'numpy.atleast_2d', 'np.atleast_2d', (['[-375, -87.5, -75, 75, -175, 450]'], {}), '([-375, -87.5, -75, 75, -175, 450])\n', (20753, 20788), True, 'import numpy as np\n'), ((21011, 21038), 
'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(4)', 'order': '(3)'}), '(n_basis=4, order=3)\n', (21018, 21038), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((21360, 21387), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(3)', 'order': '(2)'}), '(n_basis=3, order=2)\n', (21367, 21387), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((21763, 21781), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'n_basis': '(5)'}), '(n_basis=5)\n', (21770, 21781), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((21827, 21845), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'n_basis': '(5)'}), '(n_basis=5)\n', (21834, 21845), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((22092, 22144), 'numpy.concatenate', 'np.concatenate', (['[fd1.coefficients, fd2.coefficients]'], {}), '([fd1.coefficients, fd2.coefficients])\n', (22106, 22144), True, 'import numpy as np\n'), ((3410, 3429), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (3418, 3429), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((3624, 3643), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(4)'}), '(n_basis=4)\n', (3632, 3643), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((10118, 10137), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (10126, 10137), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((10297, 10316), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (10305, 10316), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((10476, 10495), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (10484, 10495), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((10660, 10679), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (10668, 10679), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((10844, 10863), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (10852, 10863), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((11051, 11069), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (11058, 11069), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((11417, 11436), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (11425, 11436), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((11599, 11618), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (11607, 11618), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((11778, 11797), 
'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (11786, 11797), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((11967, 11986), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (11975, 11986), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((12151, 12170), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (12159, 12170), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((12363, 12381), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (12370, 12381), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((12721, 12740), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (12729, 12740), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((12889, 12908), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (12897, 12908), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((13170, 13189), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (13178, 13189), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((13355, 13374), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (13363, 13374), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((13563, 13581), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (13570, 13581), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((14039, 14058), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (14047, 14058), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((14215, 14234), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (14223, 14234), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((14416, 14435), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (14424, 14435), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((14971, 14990), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(7)'}), '(n_basis=7)\n', (14979, 14990), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((15242, 15261), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(2)'}), '(n_basis=2)\n', (15250, 15261), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((15422, 15441), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(4)'}), '(n_basis=4)\n', (15430, 15441), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((15808, 15827), 
'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(2)'}), '(n_basis=2)\n', (15816, 15827), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((16453, 16472), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(7)'}), '(n_basis=7)\n', (16461, 16472), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((16724, 16743), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(2)'}), '(n_basis=2)\n', (16732, 16743), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((16904, 16923), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(4)'}), '(n_basis=4)\n', (16912, 16923), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((17290, 17309), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(2)'}), '(n_basis=2)\n', (17298, 17309), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((21733, 21753), 'skfda.representation.grid.FDataGrid', 'FDataGrid', (['[sample1]'], {}), '([sample1])\n', (21742, 21753), False, 'from skfda.representation.grid import FDataGrid\n'), ((21797, 21817), 'skfda.representation.grid.FDataGrid', 'FDataGrid', (['[sample2]'], {}), '([sample2])\n', (21806, 21817), False, 'from skfda.representation.grid import FDataGrid\n'), ((3181, 3200), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (3189, 3200), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((3376, 3395), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (3384, 3395), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((3590, 3609), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (3598, 3609), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((3894, 3913), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (3902, 3913), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((4066, 4084), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (4073, 4084), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((5422, 5695), 'numpy.array', 'np.array', (['[[0.12499983, 0.25000035, 0.24999965, 0.25000035, 0.12499983], [0.01249991,\n 0.07500017, 0.12499983, 0.17500017, 0.11249991], [0.00208338, \n 0.02916658, 0.07083342, 0.12916658, 0.10208338], [0.00044654, \n 0.01339264, 0.04375022, 0.09910693, 0.09330368]]'], {}), '([[0.12499983, 0.25000035, 0.24999965, 0.25000035, 0.12499983], [\n 0.01249991, 0.07500017, 0.12499983, 0.17500017, 0.11249991], [\n 0.00208338, 0.02916658, 0.07083342, 0.12916658, 0.10208338], [\n 0.00044654, 0.01339264, 0.04375022, 0.09910693, 0.09330368]])\n', (5430, 5695), True, 'import numpy as np\n'), ((6098, 6114), 'numpy.arange', 'np.arange', (['(0)', '(15)'], {}), '(0, 15)\n', (6107, 6114), True, 'import numpy as np\n'), ((6245, 6389), 'numpy.array', 'np.array', (['[[2.0, 7.0, 12.0], [1.29626206, 3.79626206, 6.29626206], [0.96292873, \n 2.62959539, 
4.29626206], [0.7682873, 2.0182873, 3.2682873]]'], {}), '([[2.0, 7.0, 12.0], [1.29626206, 3.79626206, 6.29626206], [\n 0.96292873, 2.62959539, 4.29626206], [0.7682873, 2.0182873, 3.2682873]])\n', (6253, 6389), True, 'import numpy as np\n'), ((6928, 6944), 'numpy.arange', 'np.arange', (['(0)', '(15)'], {}), '(0, 15)\n', (6937, 6944), True, 'import numpy as np\n'), ((7077, 7298), 'numpy.array', 'np.array', (['[[16.14797697, 52.81464364, 89.4813103], [11.55565285, 38.22211951, \n 64.88878618], [18.14698361, 55.64698361, 93.14698361], [15.2495976, \n 48.9995976, 82.7495976], [19.70392982, 63.03676315, 106.37009648]]'], {}), '([[16.14797697, 52.81464364, 89.4813103], [11.55565285, 38.22211951,\n 64.88878618], [18.14698361, 55.64698361, 93.14698361], [15.2495976, \n 48.9995976, 82.7495976], [19.70392982, 63.03676315, 106.37009648]])\n', (7085, 7298), True, 'import numpy as np\n'), ((7554, 7775), 'numpy.array', 'np.array', (['[[16.14797697, 52.81464364, 89.4813103], [11.55565285, 38.22211951, \n 64.88878618], [18.14698361, 55.64698361, 93.14698361], [15.2495976, \n 48.9995976, 82.7495976], [19.70392982, 63.03676315, 106.37009648]]'], {}), '([[16.14797697, 52.81464364, 89.4813103], [11.55565285, 38.22211951,\n 64.88878618], [18.14698361, 55.64698361, 93.14698361], [15.2495976, \n 48.9995976, 82.7495976], [19.70392982, 63.03676315, 106.37009648]])\n', (7562, 7775), True, 'import numpy as np\n'), ((8040, 8056), 'numpy.arange', 'np.arange', (['(0)', '(15)'], {}), '(0, 15)\n', (8049, 8056), True, 'import numpy as np\n'), ((589, 641), 'skfda.representation.basis.FDataBasis.from_data', 'FDataBasis.from_data', (['x', 't', 'basis'], {'method': '"""cholesky"""'}), "(x, t, basis, method='cholesky')\n", (609, 641), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((986, 1032), 'skfda.representation.basis.FDataBasis.from_data', 'FDataBasis.from_data', (['x', 't', 'basis'], {'method': '"""qr"""'}), "(x, t, basis, method='qr')\n", (1006, 1032), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((4186, 4204), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(6)'}), '(n_basis=6)\n', (4193, 4204), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n')]
|
import numpy as np
import sys as sys
import csv
sys.path.insert(1, './../Tools')
from DispersionRelationDeterminantFullConductivityZeff import VectorFinder_auto_Extensive
#************Start of user block******************
#para= [nu, zeff,eta, shat, beta, ky, mu]
para_min=[0.1, 1., 0.5, 0.001,0.0005, 0.01, 0.]
para_max=[10., 5., 5., 0.05, 0.02, 0.2, 10.]
path='.'
Output_csv=path+'/0MTM_scan_PC.csv'
xstar=10.
ModIndex=1
#************End of user block******************
para_min=np.array(para_min)
para_max=np.array(para_max)
width=(para_max-para_min)
with open(Output_csv, 'w', newline='') as csvfile: #clear all and then write a row
csv_data = csv.writer(csvfile, delimiter=',')
csv_data.writerow(['omega_omega_n','gamma_omega_n',\
'nu','zeff','eta','shat','beta','ky',\
'ModIndex','mu','xstar'])
csvfile.close()
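# Open-ended random scan: each pass of the loop below draws one parameter set
# uniformly from [para_min, para_max], solves the dispersion relation for the
# complex mode frequency w0 = omega + 1j*gamma, and appends the result to Output_csv.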
while 1==1:
param=para_min+width*np.random.rand(7)
[nu,zeff,eta,shat,beta,ky,mu]=param
w0=VectorFinder_auto_Extensive(nu,zeff,eta,shat,beta,ky,1,mu,xstar)
#w0=1+1j
omega=np.real(w0)
gamma=np.imag(w0)
print(str(omega)+','+str(gamma)+','+str(nu)+','+str(zeff)+','\
+str(eta)+','+str(shat)+','+str(beta)+','+str(ky)+','\
+str(ModIndex)+','+str(mu)+','+str(xstar))
with open(Output_csv, 'a+', newline='') as csvfile: #adding a row
csv_data = csv.writer(csvfile, delimiter=',')
csv_data.writerow([ omega,gamma,nu,zeff,eta,shat,beta,ky,\
ModIndex,mu,xstar ])
csvfile.close()
print('******w*****')
print('w='+str(w0))
|
[
"csv.writer",
"sys.path.insert",
"numpy.imag",
"numpy.array",
"DispersionRelationDeterminantFullConductivityZeff.VectorFinder_auto_Extensive",
"numpy.real",
"numpy.random.rand"
] |
[((48, 80), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""./../Tools"""'], {}), "(1, './../Tools')\n", (63, 80), True, 'import sys as sys\n'), ((505, 523), 'numpy.array', 'np.array', (['para_min'], {}), '(para_min)\n', (513, 523), True, 'import numpy as np\n'), ((533, 551), 'numpy.array', 'np.array', (['para_max'], {}), '(para_max)\n', (541, 551), True, 'import numpy as np\n'), ((681, 715), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (691, 715), False, 'import csv\n'), ((982, 1054), 'DispersionRelationDeterminantFullConductivityZeff.VectorFinder_auto_Extensive', 'VectorFinder_auto_Extensive', (['nu', 'zeff', 'eta', 'shat', 'beta', 'ky', '(1)', 'mu', 'xstar'], {}), '(nu, zeff, eta, shat, beta, ky, 1, mu, xstar)\n', (1009, 1054), False, 'from DispersionRelationDeterminantFullConductivityZeff import VectorFinder_auto_Extensive\n'), ((1071, 1082), 'numpy.real', 'np.real', (['w0'], {}), '(w0)\n', (1078, 1082), True, 'import numpy as np\n'), ((1093, 1104), 'numpy.imag', 'np.imag', (['w0'], {}), '(w0)\n', (1100, 1104), True, 'import numpy as np\n'), ((1392, 1426), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (1402, 1426), False, 'import csv\n'), ((916, 933), 'numpy.random.rand', 'np.random.rand', (['(7)'], {}), '(7)\n', (930, 933), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
"""
Lattice dynamics model
"""
import h5py
import os
from itertools import product
import numpy as np
from numpy.linalg import norm
import scipy.linalg
import scipy.io
import scipy.sparse
from scipy.sparse import lil_matrix as spmat
from .basic_lattice_model import BasicLatticeModel
from .interface_vasp import Poscar
from .cluster import Cluster
from .structure import SupercellStructure
from .util.mathtool import MixedIndexForImproper, IntegerDigits, tensor_constraint, RMS, mychop
from .util.tool import pad_right, matrix2text
from _c_util import fct_trans_c, ld_get_correlation, get_nullspace, init_ldff_basis, ldff_get_corr
from .coord_utils import ReadPBC2Cart
from .util.string_utils import str2arr, str2bool
import logging
logger = logging.getLogger(__name__)
debug_level = 10
class LDModel(BasicLatticeModel):
"""
Lattice dynamics
atomic variables: vector displacements on each atom
    properties: total potential energy (electronic + nuclear potential, but NOT the nuclear kinetic energy)
      and the forces
"""
def __init__(self, prim, raw_clusters, **kwargs):
"""
:param prim:
:param raw_clusters:
:return:
"""
BasicLatticeModel.__init__(self, prim, raw_clusters, **kwargs)
self.all_range={o: max(self.proper_range[o],self.imp_range[o]) for o in self.proper_range}
self.dipole_force = False if (not kwargs.get('dipole_force', True)) or \
('born_charge' not in self.prim[0].properties.keys()) or \
(self.prim.intensive_properties['epsilon_inf'] is None) else True
self.symm_residual_force = kwargs.get('symm_residual_force', True)
self.Cmat = None
self.ldff = None
self._dpcor = None
def translational_invariance(self):
"""
Translational invariance Matrix
:return: C matrix
"""
print("Applying translational invariance.")
crange = self.all_range
prim = self.prim
orbits = self.orbits
rot_mats = [s.rot for s in prim.spacegroup]
maxnpt = max([orb.cluster.order for orb in orbits])
Cmat = self.Cmat1
BmatCollect = []
#self.prim._init_all_nb_list([crange[ord] for ord in range(2, 1+max(crange.keys()))])
for iorb in range(len(orbits)):
orb = orbits[iorb]
clus = orb.cluster
if clus.order >= maxnpt:
continue
npt_ex = clus.order + 1
cut = crange[npt_ex]
if debug_level > 5555:
print("debug> order %d cutoff %f" %(npt_ex, cut))
if clus.diameter > cut:
if debug_level > 9990:
print(" %d diameter %.4f out of range %.4f" % (iorb, clus.diameter, cut))
continue
# find all points within range of clus
if clus.order <=0:
# empty cluster
sumPts = [[0,0,0, l] for l in range(prim.num_sites)]
else:
sumPts = prim.find_nb_cluster(np.array(clus.ijkls), crange[npt_ex])
if debug_level > 55:
print("debug> translation of", repr(clus))
dimTensor = 3** npt_ex
#Bmat = spmat((dimTensor, self.nfct_tot))
# annoying bug in scipy.sparse matrices !! Bmat not computed properly. Using normal/dense matrices for now.
Bmat = spmat((dimTensor, self.nfct_tot)).todense()
foundClus = False
for sumpt in sumPts:
clusSum = clus.append_site(sumpt)
if debug_level > 5555:
print(" ", iorb, " searching ", sumpt)
#idSum= Cluster2MI[clusSum]//Sort;
# find the orbit that each summed cluster corresponds to *)
[found, ioF, icF, igF, pi] = self.identify_cluster(clusSum)
# print("identified",[found, ioF, igF, pi])
if found:
foundClus = True
# annoying bug in scipy.sparse matrices !! Bmat not computed properly
Bmat[:, self.orb_idx_full[ioF]:self.orb_idx_full[ioF+1]]+=\
fct_trans_c(npt_ex, 3, rot_mats[igF], pi).todense()
# fct_trans_c(npt_ex, 3, rot_mats[igF], pi)
if not foundClus:
print(' ',iorb, " nothing found for ", clus)
continue
# annoying bug in scipy.sparse matrices !! Bmat not computed properly
Bmat = spmat(Bmat)
BmatCollect.append(Bmat)
Bmat = Bmat.dot(Cmat.T)
if npt_ex > 999999:
print("bypassing ", clus)
# BmatCollect.extend( Select[RowReduce[Chop[Bmat,10.^-10]],(Norm[#]> 10.^-6)&]])
else:
if debug_level > 99999:
print(" calc null bmat")
print(Bmat)
# if not (scipy.sparse.isspmatrix(Bmat) and Bmat.getnnz()<=0):
# Cmat = nullspace_rref(Bmat.toarray()).dot(Cmat)
Cmat = mychop(get_nullspace(Bmat).dot(Cmat), 1e-12)
print(" %4d + sum(a) %d remaining" % (iorb, Cmat.shape[0]))
self.Cmat = Cmat
return BmatCollect
def symmetrize(self):
self.isotropy_derivative_constraint()
self.translational_invariance()
#self.CheckNumericTranslationalInvariance()
# self.process_fct_order()
def prepare_index_full(self):
for orb in self.orbits:
orb.ncorr_full = 3**orb.cluster.order
super().prepare_index_full()
self.nfct_tot = self.ncorr_full
def prepare_index(self):
# self.process_all_fct()
self.nfct = self.Cmat.shape[0]
        allfct_ord = np.hstack([np.full(3**o.cluster.order, o.cluster.order, dtype=int) for o in self.orbits])
self.fct_ord = [allfct_ord[row[0]] for row in self.Cmat.tolil().rows]
self.ord_range = {o: len(self.fct_ord) - self.fct_ord[::-1].index(o) - 1 if o in self.fct_ord else 0
for o in range(self.maxorder+1)}
self.ord_range[-1] = -1
self.fct_ord = np.array(self.fct_ord)
self.allfct_orbidx = np.hstack([np.full(3**o.cluster.order, i,dtype=int) for i, o in enumerate(self.orbits)])
self.fct_orbidx = [self.allfct_orbidx[row[0]] for row in self.Cmat.tolil().rows]
np.savetxt('num_fct_ord.txt', [self.ord_range[o]-self.ord_range[o-1] for o in range(self.maxorder+1)], fmt='%d')
def isotropy_derivative_constraint(self):
"""
Apply isotropy and derivative commutativity constraints for all clusters in this model
:return:
"""
clusters, iso_list, pi_list = zip(*[[orb.cluster, [x[0] for x in orb.isotropy[1:]],
[x[1] for x in orb.isotropy[1:]]] for orb in self.orbits])
Cmats = self.calc_isotropy_derivative_constraint(self.prim.spacegroup, clusters, iso_list, pi_list, self.nfct_tot, self.symm_residual_force)
self.Cmat1 = scipy.sparse.vstack(Cmats)
print("Isotropy/deriv. constraints done. After dim/before=", self.Cmat1.shape)
return Cmats
@staticmethod
def calc_isotropy_derivative_constraint(ops, clusters, iso_list, pi_list, nfct_tot, symm_residual_force=True):
"""
Apply isotropy and derivative commutativity constraints for all clusters in this model
        symm_residual_force: whether to symmetrize the point cluster, i.e. the residual forces. Defaults to True.
          Set it to False when fitting phonons from small displacements in supercells that do not preserve the full
          symmetry (e.g. a non-cubic, low-symmetry supercell of a cubic system).
:return:
"""
print("Applying point group symmetry")
Cmats = []
ltothis=0
for iorb in range(len(clusters)):
clus = clusters[iorb]
npt = clus.order
dimThis = 3**npt
ltothis+=dimThis
if npt <= 0:
null = scipy.sparse.identity(dimThis)
else:
idx_constr = MixedIndexForImproper(clus.vertices, 3)[0]
igs = iso_list[iorb]
pis = pi_list[iorb]
if (not symm_residual_force) and (npt==1):
print("WARNING: symmetrization of point cluster (i.e. residual force) turned OFF")
igs = igs[:0]
pis = pis[:0]
# print('debug isotropy2 igs, pis ', igs, list(igs), pis, 'zipped', *zip(*orb.isotropy[1:]))
null = tensor_constraint(3, npt, [ops[ig].rot for ig in igs], pis, other_constraits=idx_constr)
nfree = null.shape[0]
print(" %4d null= %d/%d" %(iorb, nfree, dimThis), repr(clus), end='')
if nfree>0:
if npt <=2 and debug_level > 99999:
print([clus, null])
if ltothis-dimThis>0:
null = scipy.sparse.bmat([[spmat((nfree, ltothis-dimThis)), null]])
if nfct_tot-ltothis>0:
null = scipy.sparse.bmat([[null, spmat((nfree, nfct_tot-ltothis))]])
Cmats.append(null)
print()
else:
print(' vanishing cluster!')
return Cmats
@staticmethod
def write_mat(Cmat, outf):
"""
Export Cmat
:param outf:
:return:
"""
if Cmat is None:
raise ValueError("Cmat not set for this model")
print("writing matrix", Cmat.shape, "to", outf)
scipy.io.mmwrite(outf, Cmat)
@property
def ncorr(self): return self.nfct + (0 if self.ldff is None else self.ldff.ncorr)
def get_params(self):
all_ord=list(set([o.cluster.order for o in self.orbits]))
ldff = self.ldff
if ldff is None:
ld_diag = np.arange(0)
ld_scalelist = [1]
maxfitord = self.maxorder
else:
ld_diag = np.ones(ldff.ncorr)
            ld_scalelist = [1]  # note: unused in get_params; ldffscale_list is only a parameter of get_submodels
maxfitord = self.maxorder+1
all_ord+=[maxfitord]
self.ord_range[maxfitord] = self.ord_range[self.maxorder] + ldff.ncorr
param = dict()
for o in range(maxfitord+1):
param[o]=self.ord_range[o]-self.ord_range[o-1]
return param
def get_submodels(self, name_ord, u_list, lr_pair_penalty=0, ldffscale_list=[1], knownsol=None):
"""
:param name_ord: list of [name, fct order]
:param u_list: list of uscale
:param lr_pair_penalty: penalty exp(-penalty*radius) for pair clusters
:return: list of [name, matrix] defining the different fittings
"""
all_ord=list(set([o.cluster.order for o in self.orbits]))
ldff = self.ldff
if ldff is None:
ld_diag = np.arange(0)
ld_scalelist = [1]
maxfitord = self.maxorder
else:
ld_diag = np.ones(ldff.ncorr)
ld_scalelist = ldffscale_list
maxfitord = self.maxorder+1
all_ord+=[maxfitord]
self.ord_range[maxfitord] = self.ord_range[self.maxorder] + ldff.ncorr
sol0 = np.zeros(self.ncorr)
if knownsol:
print(" Reading previous solution from %s"%(knownsol))
input_sol = self.load_solution(knownsol).reshape(-1)
sol0[:min(sol0.size, input_sol.size)] = input_sol[:min(sol0.size, input_sol.size)]
param = dict()
for o in range(maxfitord+1):
param[o]=self.ord_range[o]-self.ord_range[o-1]
name_ord = self.process_name_ord(name_ord, all_ord)
pair_r0 = np.min([orb.cluster.diameter for orb in self.orbits if orb.cluster.order==2 and orb.cluster.order_uniq==2])
pair_diameter=np.array([self.orbits[idx].cluster.diameter-pair_r0 if self.orbits[idx].cluster.order==2 and self.orbits[idx].cluster.order_uniq==2 else 0 for idx in self.fct_orbidx])
return [[nm+ ' uscale= %g'%(uscale) + str('' if ldff is None else " ldffscale=%g"%(ldffscale)),
scipy.sparse.diags(np.hstack(((1/uscale)**(self.fct_ord-1)* np.exp(-pair_diameter*lr_pair_penalty), ldffscale*ld_diag)), 0).tocsr()[:,
sum([list(range(self.ord_range[i-1]+1, self.ord_range[i]+1)) for i in o if i<=maxfitord], []) if o[0]>=0 else list(range(-o[0],-o[1]))], sol0]
for (nm, o) in name_ord for uscale in u_list for ldffscale in ld_scalelist]
def CheckNumericTranslationalInvariance(self, trans=np.array([1.0, 2.0, 3.0])):
"""
Apply uniform translation, calculate the force
:return:
"""
print(" To be implemented: checking translation", trans)
def get_full_fct(self, sol_sym):
"""
Return all FCT elements from symmetry reduced parameters
:param sol_sym: symmetrized solution vector
:return: expanded, full FCT's without symmetry
"""
return self.Cmat.T.dot(sol_sym[:self.nfct])
def get_correlation(self, sclist, wtFunc= lambda x:1, corrtype='f', delForce=1, shift=True, **kwargs):
"""
:param sclist: [ [sc0, [sub0, sub1, ...]], [sc1, [sub11, ...]]]
:param wtFunc:
        :param corrtype: 'e' for energy, 'f' for force
:param delForce: which components to delete
:param shift: whether to subtract the shift (average force)
:param residual_force: Whether to subtract residual forces of equilibrium structure, if found
:return: correlation matrix A
"""
import os.path
ldff = self.ldff
theC = self.Cmat.T
ncorr = theC.shape[1]
residual_force= str2bool(kwargs['setting'].get('residual_force', 'F'))
if ldff is not None:
ncorr += ldff.ncorr
if corrtype == 'e':
totNF = sum([len(sc[1]) for sc in sclist])
elif corrtype == 'f':
# ignore the last atom because of translational invariance (subtract delForce=1)
totNF = sum([3*(Poscar.from_file(rd+"/POSCAR").structure.num_sites - delForce) for sc in sclist for rd in sc[1]])
else:
raise ValueError("ERROR: expecting to fit f(orce) or e(energy) but found %s"%(corrtype))
print(" Total number of linear equations", totNF)
assert totNF>0, ValueError("ERROR got no input data")
Alist = np.zeros((totNF, ncorr))
Flist = np.zeros((totNF, 3))
Astart=0
for sc in sclist:
print(" reading supercell", sc[0])
SCinfo= SupercellStructure.from_file(self.prim, sc[0])
SCinfo.to_unperturbed()
x0frac = SCinfo.frac_coords
#SCinfo = SupercellStructure(self.prim, SCmat, None, x0frac)
ncell = SCinfo.n_cell
clusALL = self.translate_to_supercell(SCinfo)
if corrtype=='f' and residual_force and os.path.exists(os.path.dirname(sc[0])+"/residual_force.txt"):
print(" found 'residual_force.txt'")
f0 = np.loadtxt(os.path.dirname(sc[0])+"/residual_force.txt")
if shift:
f0-= np.mean(f0, axis=0)
else:
f0 = 0
if self.dipole_force:
fcmfile= os.path.dirname(os.path.abspath(sc[0]))+"/fcm_dp"
if False and os.path.isfile(fcmfile):
print(" reading dipole FC for supercell "+fcmfile)
fcm_dp = np.loadtxt(fcmfile)
else:
print(" computing long-range forces")
fcm_dp = self.get_hessian_dipole_corrected(SCinfo)
np.savetxt(fcmfile, fcm_dp)
else:
fcm_dp = None
if ldff is not None:
radialC= self.translate_to_supercell(SCinfo, ldff.orb_idx)
if debug_level > 10:
print("supercell clusters generated")
for rd in sc[1]:
rundir = rd
dx= ReadPBC2Cart(rundir + "/POSCAR", x0frac)
# weight= wtFunc(dx, uScale)
# weight = np.ones(dx.shape[0])
weight = 1
if debug_level > 2:
print(" config",rundir, " weight=", weight, " max |dx|=", np.amax(norm(dx,axis=1)))
dx_sort = dx
print('dx_sort : ',type(dx_sort),dx_sort.shape)
print('clusALL : ',type(clusALL),len(clusALL))
print('theC : ',type(theC),theC.shape)
Amat= self.calc_correlation(dx_sort, clusALL).dot(theC)
if ldff is not None:
Amat = scipy.sparse.hstack((Amat, ldff.calc_correlation(dx_sort, radialC, ncell))).tocsr()
if corrtype == "e":
thisNF = 1
if os.path.isfile(rundir+"/energy.txt"):
values = np.loadtxt(rundir+"/energy.txt", ndmin=1)/ncell
else:
print("WARNING: no energy.txt file found. Proceeding with 0...")
values = np.zeros(1)
valuesFit = values.copy()
if fcm_dp is not None:
en_dp = 0.5*np.dot(dx_sort.reshape(-1),fcm_dp.dot(dx_sort.reshape(-1)))
np.savetxt(rundir+"/energy.txt_dp", en_dp)
valuesFit -= en_dp/ncell
Amat = Amat[-1:]/ncell
elif corrtype == 'f':
thisNF = 3*(len(dx)- delForce)
if os.path.exists(rundir+"/force.txt"):
values = np.loadtxt(rundir+"/force.txt")
shift_size= np.linalg.norm(np.sum(values, axis=0))
if shift_size >1e-3:
                            print("WARNING: large shift in force %.4f in %s"%(shift_size, rundir+"/force.txt"))
if shift:
values-= np.mean(values, axis=0)
valuesFit = values.copy() - f0
else:
print("WARNING: force.txt not found in %s ... setting to zero ... OK for renormalization"%(rundir))
values = np.zeros((len(dx), 3))
valuesFit = values.copy()
assert values.shape == dx.shape, 'force [%d %d] coords [%d %d]' % (values.shape[0], values.shape[1], dx.shape[0], dx.shape[1])
if fcm_dp is not None:
f_dp = -fcm_dp.dot(dx_sort.reshape(-1)).reshape((-1,3))
#np.savetxt(rundir+"/force.txt_dp", f_dp)
valuesFit -= f_dp
values = values.flatten()[:thisNF]
valuesFit = valuesFit.flatten()[:thisNF]
if debug_level >30:
print("forces read in")
if thisNF != len(values):
raise ValueError("ERROR: expecting ", thisNF, " but found ", len(values), " force components")
Amat = Amat[:thisNF]
if debug_level >9999:
print(" A size", Amat.shape, Amat.__class__)
Alist[Astart:Astart+thisNF, :] = (Amat * weight).todense()
Flist[Astart:Astart+thisNF, 0] = valuesFit * weight
Flist[Astart:Astart+thisNF, 1] = np.full((thisNF), weight, dtype=np.double)
Flist[Astart:Astart+thisNF, 2] = values-valuesFit
Astart += thisNF
return [spmat(Alist), Flist]
def calc_correlation(self, dx, clusALL):
"""
:param dx:
:param clusALL: all clusters in the supercell
:return:
"""
maxnpt = self.maxorder
return spmat(ld_get_correlation(dx.shape[0], len(self.orbits), maxnpt, self.nfct_tot,
np.array(dx),
np.array([pad_right(np.array(clus[0]), maxnpt) for clus in clusALL], dtype=np.int32),
np.array([clus[1] for clus in clusALL], dtype=np.int32),
np.array([clus[2] for clus in clusALL], dtype=np.int32),
np.array([orb.cluster.order for orb in self.orbits], dtype=np.int32),
np.array([orb.cluster.factorial for orb in self.orbits]),
np.array([op.rot_inv for op in self.prim.spacegroup])))
def save_fct(self, sol, outf, scmat, combine_improper=True):
"""
:param sol: solution vector
:param outf: output filename. Two files .lat and .pot, will be generated
:param scmat: supercell integer 3x3 matrix
:return:
"""
print(" saving lattice and potential to", outf)
scinfo = SupercellStructure.from_scmat(self.prim, scmat)
self.save_fct_lat(outf+'.lat', scinfo)
assert sol.shape[0] >= self.nfct
self.save_fct_pot(outf+'.pot', self.get_full_fct(sol), sol[self.nfct:], scinfo,
combine_improper=combine_improper)
def save_fct_lat(self, outf, scinfo):
"""
:param outf:
:param scinfo:
:return:
"""
# write lattice points
fp = open(outf, 'w')
natom = self.prim.num_sites
ncell = scinfo.n_cell
SCposFrac= scinfo.frac_coords
outs =[matrix2text(self.prim.lattice._matrix), matrix2text(scinfo.sc_mat), str(natom)]
outs += ["%f %f %f %d" % tuple(self.prim.frac_coords[i].tolist()+[self.prim.atomic_numbers[i]]) for i in range(natom)]
outs += [str(SCposFrac.shape[0]), '']
fp.write('\n'.join(outs))
for iA in range(natom):
for jLat in range(scinfo.n_cell):
fp.write("%d %s %d %s\n" %(iA*ncell+ jLat+1, matrix2text([scinfo.sc_ref[jLat]]),
iA+1, matrix2text([SCposFrac[iA*ncell+ jLat]])))
fp.close()
def save_fct_pot(self, outf, sol_fct, sol_ff, scinfo, tol=1.E-12, combine_improper=False, output_ijkl=True):
"""
write potential file.
:param outf:
:param scinfo:
:return:
"""
ldff = self.ldff
fp = open(outf, 'w')
natom = self.prim.num_sites
dim = 3
ops = self.prim.spacegroup
fc_norm = []
if self.dipole_force:
fcm_dp = self.get_hessian_dipole_corrected(scinfo)
else:
fcm_dp = np.zeros((scinfo.num_sites*3, scinfo.num_sites*3))
flag_dp = np.zeros((scinfo.num_sites, scinfo.num_sites),dtype=np.int)
for iO, orb in enumerate(self.orbits):
clus0 = orb.cluster
npt = clus0.order
fac = clus0.factorial
val = sol_fct[self.orb_idx_full[iO]:self.orb_idx_full[iO+1]]
fc_norm.append([clus0.diameter, np.linalg.norm(val),clus0.order, clus0.order_uniq])
if ldff is not None:
ppout = ldff.tostr(sol_ff, iO)
else:
ppout = "0\n0"
if fc_norm[-1][1]>tol or (ppout != "0\n0"):
for ic, clus in enumerate(orb.clusters):
trans_cluster = np.array(BasicLatticeModel.translate_cluster_to_supercell(scinfo, clus))
valTrans = fct_trans_c(npt, 3, ops[orb.clusters_ig[ic]].rot, np.arange(npt, dtype=int)).dot(val)
if npt==2:
valTrans+= fcm_dp[trans_cluster[0,0]*3:trans_cluster[0,0]*3+3,trans_cluster[0,1]*3:trans_cluster[0,1]*3+3].flatten()
flag_dp[trans_cluster.T.tolist()]=1
if combine_improper:
valTrans= clus.reduce_improper_fct_output(valTrans) # fewer terms to save!
# fctTrans = valTrans.reshape([dim for _ in range(npt)])
# If[npt==2,AppendTo[pairc, orbitUniq[[iC,icOrb,1]]]; AppendTo[pairFCM, fctTrans]];
# if ic <= 3:
# print(iO, ic, clus)
# print(valTrans)
# print(trans_cluster)
clus_out = matrix2text(clus._ijkls if output_ijkl else clus.frac_coords)
outs = [str(npt), clus_out, str(len(trans_cluster)), matrix2text(trans_cluster+1),
LDModel.fct2str(npt, valTrans/fac, tol), ppout]
fp.write("\n".join(outs) + "\n\n")
np.savetxt("fct_norm_vs_diameter.txt", fc_norm, header='col1=diameter col2=norm col3=npt col4=npt_uniq')
return
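        # Note: the unconditional return above skips the dipole-only pair
        # output below, which would cover site pairs not already written as
        # part of a cluster (flag_dp == 0).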
for i1 in range(scinfo.num_sites):
for i2 in range(i1, scinfo.num_sites):
if flag_dp[i1,i2]:
continue
npt = 2
# WARNING: TODO: convert pair of coords to minimal distance within the supercell periodic boundary condition
clus = Cluster.from_coords(scinfo.cart_coords[[i1,i2]], self.prim, frac_coords=False)
fac = clus.factorial
clus_out = matrix2text(clus._ijkls if output_ijkl else clus.frac_coords)
ppout = "0\n0"
valTrans = fcm_dp[i1*3:i1*3+3, i2*3:i2*3+3].flatten()
outs = [str(npt), clus_out, str(1), matrix2text(np.array([[i1,i2]])+1),
LDModel.fct2str(npt, valTrans/fac, tol), ppout]
fp.write("\n".join(outs) + "\n\n")
def save_fcshengbte(self, sol, ord, tol=1e-20):
assert ord in (3,4), "Only order 3 or 4 FCTs are accepted by shengbte, got %d"%(ord)
import io, re
sol_fct = self.get_full_fct(sol)
ops = self.prim.spacegroup
print("WRITING FORCE_CONSTANTS_%s"%({3:"3RD",4:"4TH"}[ord]))
fc_name="FORCE_CONSTANTS_%s"%({3:"3RD",4:"4TH"}[ord])
hf_name="fc%s.hdf5"%({3:"3RD",4:"4TH"}[ord])
fp=io.StringIO()
icount=0
for iO, orb in enumerate(self.orbits):
clus0 = orb.cluster
npt = orb.order
if npt != ord:
continue
val = sol_fct[self.orb_idx_full[iO]:self.orb_idx_full[iO+1]]
if np.amax(np.abs(val)) <= tol:
continue
perms = clus0.permutations()
for ic, clus in enumerate(orb.clusters):
ijkls = clus._ijkls_np
valTrans = fct_trans_c(npt, 3, ops[orb.clusters_ig[ic]].rot, np.arange(npt, dtype=int)).dot(val).reshape((3,)*ord)
#print('debug iorb, ic iper', iO, ic, len(perms), clus)
for iper in perms:
icount+=1
#print('debug', icount, clus0.ijkls, iO, ic)
ijk_other= matrix2text(self.prim.lattice.get_cartesian_coords(ijkls[iper[1:],:3]- ijkls[iper[0:1],:3]))
valPerm = np.transpose(valTrans, iper).reshape((-1))
fp.write("\n%d\n%s\n%s\n"%(icount, ijk_other, matrix2text(ijkls[iper,3]+1)))
fp.write(re.sub(r".*\n", r"",LDModel.fct2str(npt, valPerm, -1),count=1)+'\n')
with open(fc_name, 'w') as modified: modified.write("%d\n"%(icount) + fp.getvalue())
fp.close()
# for original (unmodified) version of ShengBTE
def save_fcshengbte_original(self, sol, ord, tol=1e-20, output_ijkl=True):
from .util.tool_for_original_shengbte import LPTClusterEquivalentByTranslation, relativePosition, FCTrans, ListFlat
assert ord in (3,4), "Only order 3 or 4 FCTs are accepted by shengbte, got %d"%(ord)
import io, re
import os.path
from f_util import f_util
# uscale = u_list[0]
np.savetxt('sol',sol)
# print('SOL : ',len(sol),'\n',sol)
        sol_fct = self.get_full_fct(sol) # expand independent FCT over the null space (isotropy group, translational invariance)
# print('SOL_FCT : ',len(sol_fct),'\n',sol_fct)
np.savetxt('sol_fct',sol_fct)
ops = self.prim.spacegroup
# scinfo = SupercellStructure.from_scmat(self.prim, scmat)
# flag_dp = np.zeros((scinfo.num_sites, scinfo.num_sites),dtype=np.int)
fc_name="FORCE_CONSTANTS_%s"%({3:"3RD",4:"4TH"}[ord])
fp=io.StringIO()
fp2=io.StringIO()
icount=0
icount2=0
#######################################################################################
### Apply triplet selection - JP modeled after Yi's codes phononFCT.py and tools.py ###
#######################################################################################
R = self.prim.lattice.matrix
rmat = np.array(R).tolist()
print('rmat : \n',rmat)
apos = self.prim.frac_coords
poscar = np.array(apos).tolist()
print('poscar : \n',poscar)
natom = len(apos)
print('natom : ',natom)
cutoff = 6
nlat = 2
# normally requires 5 parameters, but calling using f2py relieves the need for 5th natom
f_util.select_triplet(poscar, rmat, cutoff, nlat)
#-----------------------------------------------------------
lines=[list(map(int, line.split())) for line in open('triplet-selection','r')]
selclus=[ [ [[0,0,0],line1[3]],[[line1[4],line1[5],line1[6]],line1[7]],[[line1[8],line1[9],line1[10]],line1[11]] ] for line1 in lines ]
counter=0
icount2=0
fctsym=[]
for clus in selclus: # loop over all selected triplet clusters (could be in any cell)
print('CLUS : \n',clus)
counter=counter+1
npt=len(clus) # 2 for pair, 3 for triplets, etc.
foundOrb=False
for iO, orb in enumerate(self.orbits): # loop over all orbits
clus0 = orb.cluster
npt = clus0.order
if npt != ord:
continue
#val = sol_fct[self.orb_idx_full[iO]:self.orb_idx_full[iO+1]]/(pow(uscale, npt-1))
for ic2, clus2 in enumerate(orb.clusters): # loop over all clusters in the orbits
tmp = list(clus2.vertices)
clustry = [[[tmp[0].ijkl[0],tmp[0].ijkl[1],tmp[0].ijkl[2]],tmp[0].ijkl[3]],[[tmp[1].ijkl[0],tmp[1].ijkl[1],tmp[1].ijkl[2]],tmp[1].ijkl[3]],[[tmp[2].ijkl[0],tmp[2].ijkl[1],tmp[2].ijkl[2]],tmp[2].ijkl[3]]]
foundOrb = LPTClusterEquivalentByTranslation(clustry, clus, True) # check if this cluster is the translated version of selclus
if foundOrb != False: # if match found
print('Cluster Matched! \n',clustry)
print('SOL_FCT INDICES : ',self.orb_idx_full[iO],self.orb_idx_full[iO+1])
print('foundOrb : ',foundOrb)
val = sol_fct[self.orb_idx_full[iO]:self.orb_idx_full[iO+1]] # load full FCT for a cluster
print('Distinct FCT : \n',val)
fctsym.append(ListFlat(list(val)))
icount2+=1
valTrans2 = np.array(FCTrans(npt, 3, ops[orb.clusters_ig[ic2]].rot, relativePosition(clustry, foundOrb))).dot(val)
print('Transformed FCT : \n',valTrans2,'\n')
# get lattice coordinates of the 2 other cells by zero-referencing to the 1st cell
#ijk_other= matrix2text(self.prim.lattice.get_cartesian_coords(clus._ijkls_np[1:,:3] - clus._ijkls_np[0:1,:3]))
difference = np.array([np.array(clus[1][0]) - np.array(clus[0][0]),np.array(clus[2][0]) - np.array(clus[0][0])])
#print(difference)
ijk_other= matrix2text(self.prim.lattice.get_cartesian_coords(difference))
#print('ijk_other : \n',ijk_other)
#fp.write("\n%d\n%s\n%s\n"%(icount2, ijk_other, matrix2text(clus._ijkls_np[:,3]+1)))
fp.write("\n%d\n%s\n%s\n"%(icount2, ijk_other, matrix2text(np.array([clus[0][1],clus[1][1],clus[2][1]])+1)))
fp.write(re.sub(r".*\n", r"",LDModel.fct2str(npt, valTrans2, -1),count=1)+'\n')
break
np.savetxt('fctsym',fctsym)
with open(fc_name, 'w') as modified: modified.write("%d\n"%(icount2) + fp.getvalue())
fp.close()
def load_solution(self, sol_f, potential_coords_ijkl=True):
"""
sol_f: file_name_of_solution [order_to_keep]
File format is either solution vector or potential file
order_to_keep is like 0,1,2 (no space)
"""
solinf = sol_f.split()
if solinf[0][-4:].lower()!='.pot':
print(" Loading symmetrized FCT from %s"% (solinf[0]))
sol= np.loadtxt(solinf[0], ndmin=2)
if len(solinf)>1:
print("WARNING!!!! only order %s will be kept"%(solinf[1]))
tmp = np.zeros_like(sol)
for ord in eval("[%s]"%(solinf[1])):
print("ord= %d corresponding idx="%(ord), (self.ord_range[ord-1]+1,self.ord_range[ord]+1))
tmp[:,self.ord_range[ord-1]+1:self.ord_range[ord]+1] = sol[:,self.ord_range[ord-1]+1:self.ord_range[ord]+1]
sol=tmp
return sol
else:
from .util.io_utils import read_nrecord_array
print(" Loading symmetrized FCT from potential %s"% (solinf[0]))
full_fct= np.zeros(self.nfct_tot)
lines = open(solinf[0], 'r').readlines()
line=0
while line<len(lines):
line, xyz=read_nrecord_array(lines, line)
if potential_coords_ijkl:
clus= Cluster.from_ijkl(xyz.astype(int), self.prim)
else:
clus= Cluster.from_coords(xyz, self.prim)
line, clus_instances=read_nrecord_array(lines, line)
line, ijval=read_nrecord_array(lines, line)
line, rad1=read_nrecord_array(lines, line)
line, rad2=read_nrecord_array(lines, line)
line += 1 # empty line
[found, ioF, icF, igF, pi] = self.identify_cluster(clus)
if (not found) or (icF != 0) or (igF != 0):
continue
ord = clus.order
# print("found cluster order=%d id=%d line=%d"%(ord, ioF, line))
fct= np.zeros((3,)*ord)
for x in ijval:
#print(" debug x=", x, tuple(x[:ord].astype(int)-1), x[-1])
fct[tuple(x[:ord].astype(int)-1)] = x[-1]
fct = fct*clus.factorial
fct = fct_trans_c(ord, 3, self.prim.spacegroup[igF].rot, pi).T.dot(fct.reshape((-1)))
full_fct[self.orb_idx_full[ioF]:self.orb_idx_full[ioF+1]] = fct.reshape((-1))
#print("debug full_fct=", full_fct)
sol = scipy.sparse.linalg.lsqr(self.Cmat.T, full_fct,atol=1e-20,btol=1e-20)
#print("debug sol=", sol)
np.savetxt(solinf[0]+'_loaded_sol', sol[0])
if sol[3] > 1E-4:
print("WARNING large error %f loading potential to symmetrized FCT"%(sol[3]))
return np.array([sol[0]])
def get_pair_info(self, sol_fct, ord=2, tol=1.E-20):
"""
extract pair interactions for phonon calculations.
:param ord: usually 2
:param sol_fct: solution vector
:return:
"""
natom = self.prim.num_sites
ops = self.prim.spacegroup
pairijk = []
pairTyp = []
pairFCM = []
dim=3
for iO, orb in enumerate(self.orbits):
npt = orb.cluster.order
if npt != ord:
continue
val = sol_fct[self.orb_idx_full[iO]:self.orb_idx_full[iO+1]]
if (abs(val)<=tol).all():
continue
for ic, clus in enumerate(orb.clusters):
valTrans = fct_trans_c(npt, 3, ops[orb.clusters_ig[ic]].rot, np.arange(npt, dtype=int)).dot(val)
fctTrans = valTrans.reshape([dim]*npt)
pairijk.append(clus._ijkls_np[0,:3] - clus._ijkls_np[1,:3])
pairTyp.append(clus._ijkls_np[:,3])
pairFCM.append(fctTrans)
if len(pairijk)>0:
return (np.array(pairijk), pairTyp, pairFCM)
else:
return (np.zeros((1,3),dtype=int), np.zeros((1,2),dtype=int), np.zeros((1,3,3)))
@staticmethod
def fct2str(npt, fct, tol=1.E-12):
"""
Note fct is a 1-D array, NOT tensor
"""
outs = ["%s %.15f"%(matrix2text([IntegerDigits(i, 3, npt)+1]), fct[i]) for i in range(3**npt) if abs(fct[i]) > tol]
return "\n".join([str(len(outs))] + outs)
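    # Illustrative example (assuming IntegerDigits yields base-3 digits and
    # matrix2text joins them with spaces): for npt=2 with fct[0]=0.5 as the
    # only entry above tol, fct2str returns
    #   "1\n1 1 0.500000000000000"
    # i.e. the count of non-zero entries followed by "i j value" lines with
    # 1-based Cartesian indices.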
@staticmethod
def get_hessian_dipole(s):
"""
:param s: structure with epsilon_inf and born_charge
"""
from f_phonon import f_phonon
if s.intensive_properties['epsilon_inf'] is None:
return np.zeros((3*s.num_sites,3*s.num_sites))
return f_phonon.get_fcm_dipole(s.lattice.matrix.T, s.lattice.inv_matrix, 1E-18, s.cart_coords.T,
np.array(s.site_properties['born_charge']).transpose([1,2,0]), s.intensive_properties['epsilon_inf'].T, [0,0,0]).real
def get_hessian_dipole_corrected(self, s):
"""
s: supercell
"""
from f_phonon import f_phonon
fcm_dp = self.get_hessian_dipole(s)
if self._dpcor is None:
return fcm_dp
f_phonon.init(s.lattice._matrix, s.atomic_masses, s.frac_coords, *self.translate_pairinfo_to_supercell(s, *self._dpcor))
fcm_cor= f_phonon.get_dm([[0.,0.,0.]],3*s.num_sites)[:,:,0].real
#print(fcm_cor.shape, fcm_dp.shape, fcm_cor[0,0], fcm_dp[0,0], s.atomic_masses.__class__, s.atomic_masses)
#for iA in range(s.num_sites):
# for ix in range(3):
# for jA in range(s.num_sites):
# for jx in range(3):
# fcm_cor[iA*3+ix,jA*3+jx]*=np.sqrt(s.atomic_masses[iA]*s.atomic_masses[jA])
#np.savetxt('fcm_dp', fcm_dp)
#np.savetxt('fcm_cor', fcm_cor)
mass= np.sqrt(np.array(s.atomic_masses).repeat(3))
fcm_cor*= np.outer(mass, mass)
#np.savetxt('fcm_corScaled', fcm_cor)
#np.savetxt('fcm_all', fcm_dp+fcm_cor)
return fcm_dp+ fcm_cor
def translate_pairinfo_to_supercell(self, sc, ijk_prim, typ_prim, fcm_prim):
return LDModel.pairinfo_to_supercell(self.prim, sc, ijk_prim, typ_prim, fcm_prim)
@staticmethod
def pairinfo_to_supercell(prim, sc, ijk_prim, typ_prim, fcm_prim):
nfcm_prim = len(ijk_prim)
nfcm = nfcm_prim*sc.n_cell
ijk=np.zeros((nfcm, 3),dtype=int)
typ=np.zeros((nfcm, 2),dtype=int)
fcm=np.zeros((nfcm, 3,3))
for i in range(nfcm_prim):
clus= Cluster.from_ijkl([np.append(ijk_prim[i],typ_prim[i][0]), [0,0,0,typ_prim[i][1]]], prim)
tc= BasicLatticeModel.translate_cluster_to_supercell(sc, clus)
newijk= np.round(np.dot(clus.frac_coords[0]-clus.frac_coords[1], sc.inv_sc_mat)-(sc.frac_coords[tc[0][0]]-sc.frac_coords[tc[0][1]])).astype(int)
# print(i, "cluster", clus, tc, newijk)
for j in range(sc.n_cell):
fcm[i*sc.n_cell+j] = fcm_prim[i]
typ[i*sc.n_cell+j] = tc[j]
ijk[i*sc.n_cell+j] = newijk
return ijk, typ, fcm
def get_dpcor(self, bondlen, errtol=1e-7):
from f_phonon import f_phonon
offd=[[0,0,1],[1,2,2]]
offdflatUp = [1,2,5]
offdflatDn = [3,6,7]
diagflat= [0,4,8]
fcm_dp = self.get_hessian_dipole(self.prim)
np.savetxt("prim_dp", fcm_dp)
non_symm = [fcm_dp[3*i:3*i+3, 3*i:3*i+3]-fcm_dp[3*i:3*i+3, 3*i:3*i+3].T for i in range(self.prim.num_sites)]
pts=self.l_point_cls()
npt=len(pts)
bvec = np.array([non_symm[pt][offd[0],offd[1]] for pt in pts]).reshape((-1))
if np.linalg.norm(bvec)/np.sqrt(len(bvec)) <1E-13:
print("++ no ASR violation in long-range force constant matrix")
return None
print('************** corrections to long-range force constant matrix *****************')
# create an LD model using nearest neighbor only
ldNN = init_ld_model(self.prim, {'model_type':'LD', 'max_order':2, 'cluster_diameter':str(bondlen),
'proper_diameter':str(bondlen),'cluster_filter':'lambda cls: True'}, {}, 2, 2, 0, False)
#print(ldNN)
C1mats = ldNN.isotropy_derivative_constraint()[1+2*npt:]
if not C1mats:
raise ValueError('ERROR: to get corrections properly, please increase [model]dpcor_bond to approximately the cutoff distance of first neighbor shell')
C1 = spmat(scipy.sparse.vstack(C1mats))[:,1+12*npt:]
nvar= C1.shape[0]
Bmats= ldNN.translational_invariance()[1:]
Bmats = [i[:,1+12*npt:] for i in Bmats]
B1 = spmat(scipy.sparse.vstack(Bmats))
Acorrection = spmat(np.zeros((len(bvec), nvar)))
for i, pt in enumerate(pts):
Acorrection[3*i:3*i+3] = (Bmats[i][offdflatUp]-Bmats[i][offdflatDn]).dot(C1.T)
Acorrection=spmat(Acorrection)
# from cssolve.bregman_func import bregman_func
# solution = bregman_func(Acorrection, bvec, method=1, mu=1E-5, lbd=3,maxIter=2000, tol=1E-6)
# print(get_errors(bvec, Acorrection.dot(solution)))
# print(solution)
dpcor_sol_f='dpcor_sol.dat'
if False and os.path.isfile(dpcor_sol_f):
print('++ Loading dpcor from %s'%(dpcor_sol_f))
solution = np.loadtxt(dpcor_sol_f)
else:
solution = scipy.sparse.linalg.lsqr(Acorrection, bvec)[0]
np.savetxt(dpcor_sol_f, solution)
# solution = np.linalg.lstsq(Acorrection[:-3].todense(), bvec[:-3])[0]
rmse = RMS(bvec - Acorrection.dot(solution))
if rmse > errtol:
raise ValueError('dpcor correction FAILED rmse= %5g. Check symmetry or increase dpcor_bond'%(rmse))
# np.savetxt('Adpcor.out', Acorrection.todense())
# np.savetxt('bdpcor.out', bvec)
#print('correction FCM=',solution)
print('************** corrections done (rmse= %5g) *****************'%(rmse))
# send correction SR FCM to f_phonon
full_sol_proper= np.array(C1.T.dot(solution))
#print(C1.T.dot(solution))
#print(np.zeros(1+3*len(pts)), "\nonsite", -B1.dot(full_sol_proper), "\npair", full_sol_proper)
# print('debug onsite dpcor', B1.dot(full_sol_proper).reshape((-1,3,3)))
full_sol = np.hstack((np.zeros(1+3*len(pts)), -B1.dot(full_sol_proper), full_sol_proper))
# print('debug trans inv', ldNN.translational_invariance())
# print('debug checking trans inv', scipy.sparse.vstack(ldNN.translational_invariance()).dot(full_sol))
#print('DEBUG CORR_pair_info',ldNN.get_pair_info(full_sol))
dpcor_pair = ldNN.get_pair_info(full_sol,2,1E-30)
# f_phonon.init_dpcor(*dpcor_pair)
self._dpcor = dpcor_pair
return dpcor_pair
def init_ld_model(prim, setting, setting_ldff, clus_step, symC_step, ldff_step, dpcor=True, pdfout=None):
"""
model initialization
:param prim:
:param setting:
:param ldff_setting:
:param clus_step:
:param symC_step:
:param ldff_step:
:return: LD model and its associated LDFF model
"""
from scipy.io import mmread, mmwrite
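    # Step convention for clus_step and symC_step (inferred from the branches
    # below): a value <= 0 exits, 1 loads a previously saved result, 2
    # recomputes it, and 3 recomputes and writes it back to file.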
if clus_step <= 0:
exit(0)
assert setting['model_type'] == 'LD', ValueError("This script is intended for lattice dynamics only")
maxorder = int(setting['max_order'])
scale = prim.lattice._scale if str2bool(setting.get('fractional_distance','False')) else 1
irange = (np.hstack(([0.1, 0.1], str2arr(setting['cluster_diameter'])[:maxorder-1]))*scale).tolist()
prange_str = setting.get('proper_diameter', '')
# use cluster_diameter if proper_diameter not specified
if not prange_str:
prange_str = setting['cluster_diameter']
prange = (np.hstack(([0.1, 0.1], str2arr(prange_str)[:maxorder-1]))*scale).tolist()
irange = dict(zip(range(maxorder+1), irange))
prange = dict(zip(range(maxorder+1), prange))
clus_sel = eval(setting['cluster_filter'])
dipole_force = str2bool(setting.get('dipole_force', 'True'))
symm_residual_force = str2bool(setting.get('symm_residual_force', 'True'))
spec = {'maxorder':maxorder, 'prange':prange, 'filter':clus_sel, 'dipole_force':dipole_force, 'symm_residual_force':symm_residual_force}
spec.update({'irange':irange})
if clus_step == 1:
model = LDModel.from_file(prim, setting['cluster_in'], **spec)
elif clus_step in [2, 3]:
model = LDModel.generate_clusters(prim, **spec)
# model.cleanup()
if clus_step == 3:
model.save_clusters(setting['cluster_out'])
else:
print("ERROR: Unknown clus_step: ", clus_step)
exit(-1)
print("+ Obtained %d proper clusters" %(len(model.clusters)), model.tally())
model.generate_improper()
# model.cleanup()
model.get_orbit_isotropy()
model.prepare_index_full()
# if we only need a NN model
if not dpcor:
return model
if model.dipole_force and model._dpcor is None:
model.get_dpcor(setting.getfloat('dpcor_bond', 2.8),setting.getfloat('dpcor_errtol', 1e-7))
print(model)
#model.save_clusters('cluster_all')
######## independent parameters
if symC_step <= 0:
exit(0)
elif symC_step == 1:
model.Cmat = mmread(setting['symC_in'])
# ld.process_fct_order(pdfout)
elif symC_step in [2, 3]:
model.symmetrize()
if symC_step == 3:
mmwrite(setting['symC_out'], model.Cmat)
else:
print("ERROR: Unknown symC_step: ", symC_step)
exit(-1)
model.prepare_index()
print("+ LD symmetrization done. After dim/before=", model.Cmat.shape)
######## Force field on lattice
if len(setting_ldff) <= 0:
model.ldff = None
return model
entries = [k for k, v in setting_ldff.items()]
if (ldff_step <= 0) or ('orbit_indices' not in entries):
model.ldff = None
elif ldff_step == 2:
l234 = list(map(int, setting_ldff['num_basis'].split()))
assert len(l234) >= 1
xpts = list(map(float, setting_ldff['interpolation_pts'].split()))
assert len(xpts) >= 3
if 'polaron_force' in entries:
ldfftype= PolaronFF
model.ldff = PolaronFF(model, str2arr(setting_ldff['orbit_indices'], int).tolist(),
xpts=np.arange(*xpts[:3]),
lmax2=l234[0],
bas2 = eval(setting_ldff['basis_2']),
nradial=int(setting_ldff['nradial']),
dimer_indices=str2arr(setting_ldff['dimer_indices'],int).tolist(),
chgFunc=eval(setting_ldff['chgfunc']),
dchgFunc=eval(setting_ldff['dchgfunc']))
else:
ldfftype= LDFFmodel
model.ldff = LDFFmodel(model, str2arr(setting_ldff['orbit_indices'], int).tolist(),
xpts=np.arange(*xpts[:3]),
lmax2=l234[0],
cut12=str2arr(setting_ldff.get('cut12','-0.7 0.7'),float,(-1,2)),
m12=str2arr(setting_ldff.get('m12','12 6'),int,(-1,2)),
bas2 = eval(setting_ldff['basis_2']))
print("+ %s initialized %d parameters" %(ldfftype.__name__, model.ldff.ncorr))
else:
print("ERROR: Unknown ldff_step: ", ldff_step)
exit(-1)
return model
class LDFFmodel():
"""
LD force field (only depends on interatomic distances)
cut12: cutoffs for extrapolation. if r-r0<cut1 or >cut2, extrapolate to c0+c1/r**m
m12: m1, m2 for cut1, cut2 respectively
"""
def __init__(self, ld, orb_idx, lmax2=-1, bas2=[], lmax3=-1, bas3=[], xpts=np.array([]), cut12=np.array([[-0.7,0.7]]), m12=np.array([[12,6]])):
"""
:param ld the LD model
:param orb_idx indices of the selected orbits used in LDFF
:param bas2 Either 1) a list of basis functions, each takes 1 parameter, dr=r-r0
2) a function b[l, dr] where l=0..lmax-1
:param lmax2 is ignored if bas2 is a list
:param xpts a list of sampling points for dr, e.g. -1, -0.9, ..., 1
"""
self.ld = ld
self.orb_idx = orb_idx
self.lmax2 = len(bas2) if isinstance(bas2, list) else lmax2
self.bas2 = bas2
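        # Illustrative only: bas2 may be a list of one-argument callables,
        #   bas2 = [lambda dr: dr, lambda dr: dr**2, lambda dr: dr**3]
        # or a single two-argument function of (l, dr),
        #   bas2 = lambda l, dr: dr**(l + 1)
        # eval_bas() below dispatches between the two forms.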
self.lmax3 = len(bas3) if isinstance(bas3, list) else lmax3
self.bas3 = bas3
self.xpts = xpts
self.cut12=cut12
self.m12=m12
n_xpts = len(xpts)
ncorr_list = []
multi_list = []
ffidx_list = []
npt_list = []
ncorr = 0
for i in orb_idx:
orb = ld.orbits[i]
npt = orb.cluster.order
assert npt == 2, TypeError("Pair interactions for LDFF only")
assert orb.cluster.factorial <= 1, TypeError("LDFF cannot accept improper orbit %d"%(i))
# nvar = npt*(npt-1)/2
ffidx = self.symmetrize_idx(npt)
multi = np.array([x.shape[0] for x in ffidx])
nc = len(ffidx)
ncorr += nc
ncorr_list.append(nc)
multi_list.append(multi)
ffidx_list.append(ffidx)
npt_list.append(npt)
self.ncorr_list = np.array(ncorr_list)
self.ffidx_list = ffidx_list
self.multi_list = np.array(multi_list)
self.npt_list = npt_list
self.ncorr = ncorr
y2 = np.zeros((self.lmax2, n_xpts))
for l in range(self.lmax2):
for ix, x in enumerate(xpts):
y2[l, ix] = self.eval_bas(bas2, l, x)
np.savetxt('ldff_bas.txt', np.vstack((xpts,y2)).T)
self.y2 = y2
init_ldff_basis(2, self.lmax2, xpts, y2)
@staticmethod
def eval_bas(bas, l, x):
return bas[l](x) if isinstance(bas, list) else bas(l, x)
@staticmethod
def eval_val(bas, ppval, x):
return np.dot(ppval, [LDFFmodel.eval_bas(bas, l, x) for l in range(len(ppval))])
def symmetrize_idx(self, npt):
if npt == 2:
return np.arange(self.lmax2)[:,None,None]
elif npt == 3:
print("3-body LDFF symmetrization TO BE IMPLEMENTED")
return [[list(i)] for i in product(range(self.lmax3), range(self.lmax3), range(self.lmax3))]
def tostr(self, sol, io, tol=1.E-12):
"""
:param sol: the whole LDFF coefficients
:param io:
:return:
"""
if io not in self.orb_idx:
return "0\n0"
iff = self.orb_idx.index(io)
npt = self.ld.orbits[io].cluster.order
lm = self.lmax2 if npt == 2 else self.lmax3
ppval= sol[iff*lm:(iff+1)*lm]
ppord= list(range(1,lm+1))
outs = []
# if (abs(ppval)> tol).any():
# ppval=Transpose[{ppord, ppval}];
# ppval=Select[ppval, (Abs[#[[2]]]>0)&];
npp = 0
for i in range(lm):
if abs(ppval[i])>tol:
npp += 1
outs.append("%d %.12f" % (ppord[i], ppval[i]))
outs.append("1\n"+ " ".join(map(str,self.extrapolations(iff, ppval))))
return ("%d\n" % (npp)) + "\n".join(outs)
def extrapolations(self, iff, ppval, dx=1E-4):
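        # Inferred from the arithmetic below: beyond the cutoffs the pair term
        # is extrapolated as y(r) = c0 + c2 / r**m with r = r0 + dr.  Matching
        # the finite-difference slope at the cutoff gives
        #   c2 = -dy/dr * r**(m+1) / m,   c0 = y(cut) - c2 / r**m
        # and the returned list is [cut1, c0_1, c2_1, cut2, c0_2, c2_2, m1, m2].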
exff = list(range(8))
xfrac = self.ld.orbits[self.orb_idx[iff]].cluster.frac_coords
r0 = np.linalg.norm(self.ld.prim.lattice.get_cartesian_coords(xfrac[1]-xfrac[0]))
for i in range(2):
exff[i*3] = self.cut12[0,i] if len(self.cut12)<=1 else self.cut12[iff,i]
xa = exff[i*3]
xb = xa+dx
ya = self.eval_val(self.bas2, ppval, xa)
yb = self.eval_val(self.bas2, ppval, xb)
m= self.m12[0,i] if len(self.m12)<=1 else self.m12[iff,i]
r=r0+xa
exff[i*3+2] = -(yb-ya)/dx*(r**(m+1))/m
exff[i*3+1] = ya - exff[i*3+2]/(r**m)
exff[6+i] = m
return exff
def calc_correlation(self, dx, clusALL, ncell):
"""
:param dx:
:param clusALL: all clusters in the supercell
:return:
"""
len_orb = np.array([len(self.ld.orbits[i].clusters) for i in self.orb_idx])
len_orb_sums = [len_orb[:i].sum() for i in range(len(self.orb_idx))]
clus_id = [len_orb_sums[i] + j for i, ii in enumerate(self.orb_idx)
for j in range(len(self.ld.orbits[ii].clusters)) for _ in range(ncell)]
return spmat(ldff_get_corr(
np.array([self.ld.orbits[i].cluster.order for i in self.orb_idx], dtype=np.int32),
np.array([self.lmax2, self.lmax3, 0], dtype=np.int32),
np.array(self.ncorr_list, dtype=np.int32),
np.array([ii for i in self.multi_list for ii in i], dtype=np.int32),
np.array([iiii for i in self.ffidx_list for ii in i for iii in ii for iiii in iii], dtype=np.int32),
np.array([pad_right(clus.coords,[4,3]) for i in self.orb_idx for clus in self.ld.orbits[i].clusters]),
np.array(dx),
np.array([self.orb_idx.index(clus[1]) for clus in clusALL], dtype=np.int32),
np.array(clus_id, dtype=np.int32),
np.array([pad_right(np.array(clus[0]), 4) for clus in clusALL], dtype=np.int32)))
def plot_pairPES(self, sols):
fname= 'ldff_PES.txt'
header= 'col1=du'
col=2
mat=[self.xpts]
for isol, sol0 in enumerate(sols):
offset=0
sol = sol0[-self.ncorr:]
for i,npt in enumerate(self.npt_list):
if npt==2:
mat.append(np.dot(sol[offset:offset+self.lmax2], self.y2))
header+=" %d=sol_%d_clus_%d"%(col, isol+1, self.orb_idx[i]+1)
col+=1
offset+= self.ncorr_list[i]
np.savetxt(fname, np.array(mat).T, header=header)
print(" LDFF: pair PES exported to %s"%(fname))
|
[
"numpy.abs",
"numpy.sum",
"f_phonon.f_phonon.get_dm",
"numpy.ones",
"scipy.sparse.lil_matrix",
"os.path.isfile",
"numpy.arange",
"numpy.linalg.norm",
"numpy.mean",
"numpy.exp",
"numpy.full",
"os.path.abspath",
"numpy.zeros_like",
"os.path.dirname",
"numpy.savetxt",
"os.path.exists",
"numpy.transpose",
"f_util.f_util.select_triplet",
"_c_util.init_ldff_basis",
"numpy.append",
"numpy.loadtxt",
"io.StringIO",
"numpy.min",
"_c_util.fct_trans_c",
"numpy.dot",
"numpy.vstack",
"numpy.outer",
"scipy.io.mmwrite",
"numpy.zeros",
"scipy.io.mmread",
"numpy.array",
"logging.getLogger",
"_c_util.get_nullspace"
] |
[((778, 805), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (795, 805), False, 'import logging\n'), ((6201, 6223), 'numpy.array', 'np.array', (['self.fct_ord'], {}), '(self.fct_ord)\n', (6209, 6223), True, 'import numpy as np\n'), ((11246, 11266), 'numpy.zeros', 'np.zeros', (['self.ncorr'], {}), '(self.ncorr)\n', (11254, 11266), True, 'import numpy as np\n'), ((11718, 11833), 'numpy.min', 'np.min', (['[orb.cluster.diameter for orb in self.orbits if orb.cluster.order == 2 and \n orb.cluster.order_uniq == 2]'], {}), '([orb.cluster.diameter for orb in self.orbits if orb.cluster.order ==\n 2 and orb.cluster.order_uniq == 2])\n', (11724, 11833), True, 'import numpy as np\n'), ((11848, 12032), 'numpy.array', 'np.array', (['[(self.orbits[idx].cluster.diameter - pair_r0 if self.orbits[idx].cluster.\n order == 2 and self.orbits[idx].cluster.order_uniq == 2 else 0) for idx in\n self.fct_orbidx]'], {}), '([(self.orbits[idx].cluster.diameter - pair_r0 if self.orbits[idx].\n cluster.order == 2 and self.orbits[idx].cluster.order_uniq == 2 else 0) for\n idx in self.fct_orbidx])\n', (11856, 12032), True, 'import numpy as np\n'), ((12582, 12607), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0]'], {}), '([1.0, 2.0, 3.0])\n', (12590, 12607), True, 'import numpy as np\n'), ((14431, 14455), 'numpy.zeros', 'np.zeros', (['(totNF, ncorr)'], {}), '((totNF, ncorr))\n', (14439, 14455), True, 'import numpy as np\n'), ((14472, 14492), 'numpy.zeros', 'np.zeros', (['(totNF, 3)'], {}), '((totNF, 3))\n', (14480, 14492), True, 'import numpy as np\n'), ((22642, 22702), 'numpy.zeros', 'np.zeros', (['(scinfo.num_sites, scinfo.num_sites)'], {'dtype': 'np.int'}), '((scinfo.num_sites, scinfo.num_sites), dtype=np.int)\n', (22650, 22702), True, 'import numpy as np\n'), ((24543, 24652), 'numpy.savetxt', 'np.savetxt', (['"""fct_norm_vs_diameter.txt"""', 'fc_norm'], {'header': '"""col1=diameter col2=norm col3=npt col4=npt_uniq"""'}), "('fct_norm_vs_diameter.txt', fc_norm, header=\n 'col1=diameter col2=norm col3=npt col4=npt_uniq')\n", (24553, 24652), True, 'import numpy as np\n'), ((25950, 25963), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (25961, 25963), False, 'import io, re\n'), ((27717, 27739), 'numpy.savetxt', 'np.savetxt', (['"""sol"""', 'sol'], {}), "('sol', sol)\n", (27727, 27739), True, 'import numpy as np\n'), ((27974, 28004), 'numpy.savetxt', 'np.savetxt', (['"""sol_fct"""', 'sol_fct'], {}), "('sol_fct', sol_fct)\n", (27984, 28004), True, 'import numpy as np\n'), ((28257, 28270), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (28268, 28270), False, 'import io, re\n'), ((28283, 28296), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (28294, 28296), False, 'import io, re\n'), ((29040, 29089), 'f_util.f_util.select_triplet', 'f_util.select_triplet', (['poscar', 'rmat', 'cutoff', 'nlat'], {}), '(poscar, rmat, cutoff, nlat)\n', (29061, 29089), False, 'from f_util import f_util\n'), ((32232, 32260), 'numpy.savetxt', 'np.savetxt', (['"""fctsym"""', 'fctsym'], {}), "('fctsym', fctsym)\n", (32242, 32260), True, 'import numpy as np\n'), ((38346, 38366), 'numpy.outer', 'np.outer', (['mass', 'mass'], {}), '(mass, mass)\n', (38354, 38366), True, 'import numpy as np\n'), ((38835, 38865), 'numpy.zeros', 'np.zeros', (['(nfcm, 3)'], {'dtype': 'int'}), '((nfcm, 3), dtype=int)\n', (38843, 38865), True, 'import numpy as np\n'), ((38877, 38907), 'numpy.zeros', 'np.zeros', (['(nfcm, 2)'], {'dtype': 'int'}), '((nfcm, 2), dtype=int)\n', (38885, 38907), True, 'import numpy as np\n'), ((38919, 38941), 
'numpy.zeros', 'np.zeros', (['(nfcm, 3, 3)'], {}), '((nfcm, 3, 3))\n', (38927, 38941), True, 'import numpy as np\n'), ((39833, 39862), 'numpy.savetxt', 'np.savetxt', (['"""prim_dp"""', 'fcm_dp'], {}), "('prim_dp', fcm_dp)\n", (39843, 39862), True, 'import numpy as np\n'), ((41350, 41368), 'scipy.sparse.lil_matrix', 'spmat', (['Acorrection'], {}), '(Acorrection)\n', (41355, 41368), True, 'from scipy.sparse import lil_matrix as spmat\n'), ((48145, 48157), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (48153, 48157), True, 'import numpy as np\n'), ((48165, 48188), 'numpy.array', 'np.array', (['[[-0.7, 0.7]]'], {}), '([[-0.7, 0.7]])\n', (48173, 48188), True, 'import numpy as np\n'), ((48193, 48212), 'numpy.array', 'np.array', (['[[12, 6]]'], {}), '([[12, 6]])\n', (48201, 48212), True, 'import numpy as np\n'), ((49698, 49718), 'numpy.array', 'np.array', (['ncorr_list'], {}), '(ncorr_list)\n', (49706, 49718), True, 'import numpy as np\n'), ((49782, 49802), 'numpy.array', 'np.array', (['multi_list'], {}), '(multi_list)\n', (49790, 49802), True, 'import numpy as np\n'), ((49876, 49906), 'numpy.zeros', 'np.zeros', (['(self.lmax2, n_xpts)'], {}), '((self.lmax2, n_xpts))\n', (49884, 49906), True, 'import numpy as np\n'), ((50127, 50167), '_c_util.init_ldff_basis', 'init_ldff_basis', (['(2)', 'self.lmax2', 'xpts', 'y2'], {}), '(2, self.lmax2, xpts, y2)\n', (50142, 50167), False, 'from _c_util import fct_trans_c, ld_get_correlation, get_nullspace, init_ldff_basis, ldff_get_corr\n'), ((4555, 4566), 'scipy.sparse.lil_matrix', 'spmat', (['Bmat'], {}), '(Bmat)\n', (4560, 4566), True, 'from scipy.sparse import lil_matrix as spmat\n'), ((9910, 9922), 'numpy.arange', 'np.arange', (['(0)'], {}), '(0)\n', (9919, 9922), True, 'import numpy as np\n'), ((10028, 10047), 'numpy.ones', 'np.ones', (['ldff.ncorr'], {}), '(ldff.ncorr)\n', (10035, 10047), True, 'import numpy as np\n'), ((10894, 10906), 'numpy.arange', 'np.arange', (['(0)'], {}), '(0)\n', (10903, 10906), True, 'import numpy as np\n'), ((11012, 11031), 'numpy.ones', 'np.ones', (['ldff.ncorr'], {}), '(ldff.ncorr)\n', (11019, 11031), True, 'import numpy as np\n'), ((19609, 19621), 'scipy.sparse.lil_matrix', 'spmat', (['Alist'], {}), '(Alist)\n', (19614, 19621), True, 'from scipy.sparse import lil_matrix as spmat\n'), ((22573, 22627), 'numpy.zeros', 'np.zeros', (['(scinfo.num_sites * 3, scinfo.num_sites * 3)'], {}), '((scinfo.num_sites * 3, scinfo.num_sites * 3))\n', (22581, 22627), True, 'import numpy as np\n'), ((32806, 32836), 'numpy.loadtxt', 'np.loadtxt', (['solinf[0]'], {'ndmin': '(2)'}), '(solinf[0], ndmin=2)\n', (32816, 32836), True, 'import numpy as np\n'), ((33497, 33520), 'numpy.zeros', 'np.zeros', (['self.nfct_tot'], {}), '(self.nfct_tot)\n', (33505, 33520), True, 'import numpy as np\n'), ((35084, 35129), 'numpy.savetxt', 'np.savetxt', (["(solinf[0] + '_loaded_sol')", 'sol[0]'], {}), "(solinf[0] + '_loaded_sol', sol[0])\n", (35094, 35129), True, 'import numpy as np\n'), ((35271, 35289), 'numpy.array', 'np.array', (['[sol[0]]'], {}), '([sol[0]])\n', (35279, 35289), True, 'import numpy as np\n'), ((37077, 37121), 'numpy.zeros', 'np.zeros', (['(3 * s.num_sites, 3 * s.num_sites)'], {}), '((3 * s.num_sites, 3 * s.num_sites))\n', (37085, 37121), True, 'import numpy as np\n'), ((41668, 41695), 'os.path.isfile', 'os.path.isfile', (['dpcor_sol_f'], {}), '(dpcor_sol_f)\n', (41682, 41695), False, 'import os\n'), ((41780, 41803), 'numpy.loadtxt', 'np.loadtxt', (['dpcor_sol_f'], {}), '(dpcor_sol_f)\n', (41790, 41803), True, 'import numpy as np\n'), 
((41900, 41933), 'numpy.savetxt', 'np.savetxt', (['dpcor_sol_f', 'solution'], {}), '(dpcor_sol_f, solution)\n', (41910, 41933), True, 'import numpy as np\n'), ((45718, 45744), 'scipy.io.mmread', 'mmread', (["setting['symC_in']"], {}), "(setting['symC_in'])\n", (45724, 45744), False, 'from scipy.io import mmread, mmwrite\n'), ((49441, 49478), 'numpy.array', 'np.array', (['[x.shape[0] for x in ffidx]'], {}), '([x.shape[0] for x in ffidx])\n', (49449, 49478), True, 'import numpy as np\n'), ((5818, 5878), 'numpy.full', 'np.full', (['(3 ** o.cluster.order)', 'o.cluster.order'], {'dtype': 'np.int'}), '(3 ** o.cluster.order, o.cluster.order, dtype=np.int)\n', (5825, 5878), True, 'import numpy as np\n'), ((6264, 6307), 'numpy.full', 'np.full', (['(3 ** o.cluster.order)', 'i'], {'dtype': 'int'}), '(3 ** o.cluster.order, i, dtype=int)\n', (6271, 6307), True, 'import numpy as np\n'), ((19450, 19490), 'numpy.full', 'np.full', (['thisNF', 'weight'], {'dtype': 'np.double'}), '(thisNF, weight, dtype=np.double)\n', (19457, 19490), True, 'import numpy as np\n'), ((19952, 19964), 'numpy.array', 'np.array', (['dx'], {}), '(dx)\n', (19960, 19964), True, 'import numpy as np\n'), ((20120, 20175), 'numpy.array', 'np.array', (['[clus[1] for clus in clusALL]'], {'dtype': 'np.int32'}), '([clus[1] for clus in clusALL], dtype=np.int32)\n', (20128, 20175), True, 'import numpy as np\n'), ((20211, 20266), 'numpy.array', 'np.array', (['[clus[2] for clus in clusALL]'], {'dtype': 'np.int32'}), '([clus[2] for clus in clusALL], dtype=np.int32)\n', (20219, 20266), True, 'import numpy as np\n'), ((20302, 20370), 'numpy.array', 'np.array', (['[orb.cluster.order for orb in self.orbits]'], {'dtype': 'np.int32'}), '([orb.cluster.order for orb in self.orbits], dtype=np.int32)\n', (20310, 20370), True, 'import numpy as np\n'), ((20406, 20462), 'numpy.array', 'np.array', (['[orb.cluster.factorial for orb in self.orbits]'], {}), '([orb.cluster.factorial for orb in self.orbits])\n', (20414, 20462), True, 'import numpy as np\n'), ((20498, 20551), 'numpy.array', 'np.array', (['[op.rot_inv for op in self.prim.spacegroup]'], {}), '([op.rot_inv for op in self.prim.spacegroup])\n', (20506, 20551), True, 'import numpy as np\n'), ((28674, 28685), 'numpy.array', 'np.array', (['R'], {}), '(R)\n', (28682, 28685), True, 'import numpy as np\n'), ((28781, 28795), 'numpy.array', 'np.array', (['apos'], {}), '(apos)\n', (28789, 28795), True, 'import numpy as np\n'), ((32966, 32984), 'numpy.zeros_like', 'np.zeros_like', (['sol'], {}), '(sol)\n', (32979, 32984), True, 'import numpy as np\n'), ((34466, 34486), 'numpy.zeros', 'np.zeros', (['((3,) * ord)'], {}), '((3,) * ord)\n', (34474, 34486), True, 'import numpy as np\n'), ((36380, 36397), 'numpy.array', 'np.array', (['pairijk'], {}), '(pairijk)\n', (36388, 36397), True, 'import numpy as np\n'), ((36451, 36478), 'numpy.zeros', 'np.zeros', (['(1, 3)'], {'dtype': 'int'}), '((1, 3), dtype=int)\n', (36459, 36478), True, 'import numpy as np\n'), ((36478, 36505), 'numpy.zeros', 'np.zeros', (['(1, 2)'], {'dtype': 'int'}), '((1, 2), dtype=int)\n', (36486, 36505), True, 'import numpy as np\n'), ((36505, 36524), 'numpy.zeros', 'np.zeros', (['(1, 3, 3)'], {}), '((1, 3, 3))\n', (36513, 36524), True, 'import numpy as np\n'), ((37759, 37810), 'f_phonon.f_phonon.get_dm', 'f_phonon.get_dm', (['[[0.0, 0.0, 0.0]]', '(3 * s.num_sites)'], {}), '([[0.0, 0.0, 0.0]], 3 * s.num_sites)\n', (37774, 37810), False, 'from f_phonon import f_phonon\n'), ((40047, 40103), 'numpy.array', 'np.array', (['[non_symm[pt][offd[0], offd[1]] for 
pt in pts]'], {}), '([non_symm[pt][offd[0], offd[1]] for pt in pts])\n', (40055, 40103), True, 'import numpy as np\n'), ((40128, 40148), 'numpy.linalg.norm', 'np.linalg.norm', (['bvec'], {}), '(bvec)\n', (40142, 40148), True, 'import numpy as np\n'), ((50074, 50095), 'numpy.vstack', 'np.vstack', (['(xpts, y2)'], {}), '((xpts, y2))\n', (50083, 50095), True, 'import numpy as np\n'), ((50502, 50523), 'numpy.arange', 'np.arange', (['self.lmax2'], {}), '(self.lmax2)\n', (50511, 50523), True, 'import numpy as np\n'), ((52897, 52983), 'numpy.array', 'np.array', (['[self.ld.orbits[i].cluster.order for i in self.orb_idx]'], {'dtype': 'np.int32'}), '([self.ld.orbits[i].cluster.order for i in self.orb_idx], dtype=np.\n int32)\n', (52905, 52983), True, 'import numpy as np\n'), ((52992, 53045), 'numpy.array', 'np.array', (['[self.lmax2, self.lmax3, 0]'], {'dtype': 'np.int32'}), '([self.lmax2, self.lmax3, 0], dtype=np.int32)\n', (53000, 53045), True, 'import numpy as np\n'), ((53059, 53100), 'numpy.array', 'np.array', (['self.ncorr_list'], {'dtype': 'np.int32'}), '(self.ncorr_list, dtype=np.int32)\n', (53067, 53100), True, 'import numpy as np\n'), ((53114, 53181), 'numpy.array', 'np.array', (['[ii for i in self.multi_list for ii in i]'], {'dtype': 'np.int32'}), '([ii for i in self.multi_list for ii in i], dtype=np.int32)\n', (53122, 53181), True, 'import numpy as np\n'), ((53195, 53298), 'numpy.array', 'np.array', (['[iiii for i in self.ffidx_list for ii in i for iii in ii for iiii in iii]'], {'dtype': 'np.int32'}), '([iiii for i in self.ffidx_list for ii in i for iii in ii for iiii in\n iii], dtype=np.int32)\n', (53203, 53298), True, 'import numpy as np\n'), ((53423, 53435), 'numpy.array', 'np.array', (['dx'], {}), '(dx)\n', (53431, 53435), True, 'import numpy as np\n'), ((53538, 53571), 'numpy.array', 'np.array', (['clus_id'], {'dtype': 'np.int32'}), '(clus_id, dtype=np.int32)\n', (53546, 53571), True, 'import numpy as np\n'), ((54247, 54260), 'numpy.array', 'np.array', (['mat'], {}), '(mat)\n', (54255, 54260), True, 'import numpy as np\n'), ((3140, 3160), 'numpy.array', 'np.array', (['clus.ijkls'], {}), '(clus.ijkls)\n', (3148, 3160), True, 'import numpy as np\n'), ((3487, 3520), 'scipy.sparse.lil_matrix', 'spmat', (['(dimTensor, self.nfct_tot)'], {}), '((dimTensor, self.nfct_tot))\n', (3492, 3520), True, 'from scipy.sparse import lil_matrix as spmat\n'), ((15190, 15209), 'numpy.mean', 'np.mean', (['f0'], {'axis': '(0)'}), '(f0, axis=0)\n', (15197, 15209), True, 'import numpy as np\n'), ((15390, 15413), 'os.path.isfile', 'os.path.isfile', (['fcmfile'], {}), '(fcmfile)\n', (15404, 15413), False, 'import os\n'), ((15518, 15537), 'numpy.loadtxt', 'np.loadtxt', (['fcmfile'], {}), '(fcmfile)\n', (15528, 15537), True, 'import numpy as np\n'), ((15711, 15738), 'numpy.savetxt', 'np.savetxt', (['fcmfile', 'fcm_dp'], {}), '(fcmfile, fcm_dp)\n', (15721, 15738), True, 'import numpy as np\n'), ((16888, 16926), 'os.path.isfile', 'os.path.isfile', (["(rundir + '/energy.txt')"], {}), "(rundir + '/energy.txt')\n", (16902, 16926), False, 'import os\n'), ((22963, 22982), 'numpy.linalg.norm', 'np.linalg.norm', (['val'], {}), '(val)\n', (22977, 22982), True, 'import numpy as np\n'), ((26236, 26247), 'numpy.abs', 'np.abs', (['val'], {}), '(val)\n', (26242, 26247), True, 'import numpy as np\n'), ((38291, 38316), 'numpy.array', 'np.array', (['s.atomic_masses'], {}), '(s.atomic_masses)\n', (38299, 38316), True, 'import numpy as np\n'), ((39013, 39051), 'numpy.append', 'np.append', (['ijk_prim[i]', 'typ_prim[i][0]'], {}), 
'(ijk_prim[i], typ_prim[i][0])\n', (39022, 39051), True, 'import numpy as np\n'), ((45879, 45919), 'scipy.io.mmwrite', 'mmwrite', (["setting['symC_out']", 'model.Cmat'], {}), "(setting['symC_out'], model.Cmat)\n", (45886, 45919), False, 'from scipy.io import mmread, mmwrite\n'), ((14960, 14982), 'os.path.dirname', 'os.path.dirname', (['sc[0]'], {}), '(sc[0])\n', (14975, 14982), False, 'import os\n'), ((15093, 15115), 'os.path.dirname', 'os.path.dirname', (['sc[0]'], {}), '(sc[0])\n', (15108, 15115), False, 'import os\n'), ((15327, 15349), 'os.path.abspath', 'os.path.abspath', (['sc[0]'], {}), '(sc[0])\n', (15342, 15349), False, 'import os\n'), ((17155, 17166), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (17163, 17166), True, 'import numpy as np\n'), ((17376, 17420), 'numpy.savetxt', 'np.savetxt', (["(rundir + '/energy.txt_dp')", 'en_dp'], {}), "(rundir + '/energy.txt_dp', en_dp)\n", (17386, 17420), True, 'import numpy as np\n'), ((17623, 17660), 'os.path.exists', 'os.path.exists', (["(rundir + '/force.txt')"], {}), "(rundir + '/force.txt')\n", (17637, 17660), False, 'import os\n'), ((37261, 37303), 'numpy.array', 'np.array', (["s.site_properties['born_charge']"], {}), "(s.site_properties['born_charge'])\n", (37269, 37303), True, 'import numpy as np\n'), ((46774, 46794), 'numpy.arange', 'np.arange', (['*xpts[:3]'], {}), '(*xpts[:3])\n', (46783, 46794), True, 'import numpy as np\n'), ((47356, 47376), 'numpy.arange', 'np.arange', (['*xpts[:3]'], {}), '(*xpts[:3])\n', (47365, 47376), True, 'import numpy as np\n'), ((54016, 54064), 'numpy.dot', 'np.dot', (['sol[offset:offset + self.lmax2]', 'self.y2'], {}), '(sol[offset:offset + self.lmax2], self.y2)\n', (54022, 54064), True, 'import numpy as np\n'), ((4228, 4269), '_c_util.fct_trans_c', 'fct_trans_c', (['npt_ex', '(3)', 'rot_mats[igF]', 'pi'], {}), '(npt_ex, 3, rot_mats[igF], pi)\n', (4239, 4269), False, 'from _c_util import fct_trans_c, ld_get_correlation, get_nullspace, init_ldff_basis, ldff_get_corr\n'), ((5121, 5140), '_c_util.get_nullspace', 'get_nullspace', (['Bmat'], {}), '(Bmat)\n', (5134, 5140), False, 'from _c_util import fct_trans_c, ld_get_correlation, get_nullspace, init_ldff_basis, ldff_get_corr\n'), ((16347, 16363), 'numpy.linalg.norm', 'norm', (['dx'], {'axis': '(1)'}), '(dx, axis=1)\n', (16351, 16363), False, 'from numpy.linalg import norm\n'), ((16959, 17002), 'numpy.loadtxt', 'np.loadtxt', (["(rundir + '/energy.txt')"], {'ndmin': '(1)'}), "(rundir + '/energy.txt', ndmin=1)\n", (16969, 17002), True, 'import numpy as np\n'), ((17693, 17726), 'numpy.loadtxt', 'np.loadtxt', (["(rundir + '/force.txt')"], {}), "(rundir + '/force.txt')\n", (17703, 17726), True, 'import numpy as np\n'), ((20020, 20037), 'numpy.array', 'np.array', (['clus[0]'], {}), '(clus[0])\n', (20028, 20037), True, 'import numpy as np\n'), ((25363, 25383), 'numpy.array', 'np.array', (['[[i1, i2]]'], {}), '([[i1, i2]])\n', (25371, 25383), True, 'import numpy as np\n'), ((26902, 26930), 'numpy.transpose', 'np.transpose', (['valTrans', 'iper'], {}), '(valTrans, iper)\n', (26914, 26930), True, 'import numpy as np\n'), ((34723, 34777), '_c_util.fct_trans_c', 'fct_trans_c', (['ord', '(3)', 'self.prim.spacegroup[igF].rot', 'pi'], {}), '(ord, 3, self.prim.spacegroup[igF].rot, pi)\n', (34734, 34777), False, 'from _c_util import fct_trans_c, ld_get_correlation, get_nullspace, init_ldff_basis, ldff_get_corr\n'), ((36073, 36098), 'numpy.arange', 'np.arange', (['npt'], {'dtype': 'int'}), '(npt, dtype=int)\n', (36082, 36098), True, 'import numpy as np\n'), ((39187, 
39251), 'numpy.dot', 'np.dot', (['(clus.frac_coords[0] - clus.frac_coords[1])', 'sc.inv_sc_mat'], {}), '(clus.frac_coords[0] - clus.frac_coords[1], sc.inv_sc_mat)\n', (39193, 39251), True, 'import numpy as np\n'), ((53605, 53622), 'numpy.array', 'np.array', (['clus[0]'], {}), '(clus[0])\n', (53613, 53622), True, 'import numpy as np\n'), ((9020, 9053), 'scipy.sparse.lil_matrix', 'spmat', (['(nfree, ltothis - dimThis)'], {}), '((nfree, ltothis - dimThis))\n', (9025, 9053), True, 'from scipy.sparse import lil_matrix as spmat\n'), ((9153, 9187), 'scipy.sparse.lil_matrix', 'spmat', (['(nfree, nfct_tot - ltothis)'], {}), '((nfree, nfct_tot - ltothis))\n', (9158, 9187), True, 'from scipy.sparse import lil_matrix as spmat\n'), ((17776, 17798), 'numpy.sum', 'np.sum', (['values'], {'axis': '(0)'}), '(values, axis=0)\n', (17782, 17798), True, 'import numpy as np\n'), ((18026, 18049), 'numpy.mean', 'np.mean', (['values'], {'axis': '(0)'}), '(values, axis=0)\n', (18033, 18049), True, 'import numpy as np\n'), ((23449, 23474), 'numpy.arange', 'np.arange', (['npt'], {'dtype': 'int'}), '(npt, dtype=int)\n', (23458, 23474), True, 'import numpy as np\n'), ((26492, 26517), 'numpy.arange', 'np.arange', (['npt'], {'dtype': 'int'}), '(npt, dtype=int)\n', (26501, 26517), True, 'import numpy as np\n'), ((31548, 31568), 'numpy.array', 'np.array', (['clus[1][0]'], {}), '(clus[1][0])\n', (31556, 31568), True, 'import numpy as np\n'), ((31571, 31591), 'numpy.array', 'np.array', (['clus[0][0]'], {}), '(clus[0][0])\n', (31579, 31591), True, 'import numpy as np\n'), ((31592, 31612), 'numpy.array', 'np.array', (['clus[2][0]'], {}), '(clus[2][0])\n', (31600, 31612), True, 'import numpy as np\n'), ((31615, 31635), 'numpy.array', 'np.array', (['clus[0][0]'], {}), '(clus[0][0])\n', (31623, 31635), True, 'import numpy as np\n'), ((12196, 12236), 'numpy.exp', 'np.exp', (['(-pair_diameter * lr_pair_penalty)'], {}), '(-pair_diameter * lr_pair_penalty)\n', (12202, 12236), True, 'import numpy as np\n'), ((32031, 32077), 'numpy.array', 'np.array', (['[clus[0][1], clus[1][1], clus[2][1]]'], {}), '([clus[0][1], clus[1][1], clus[2][1]])\n', (32039, 32077), True, 'import numpy as np\n')]
|
import glob
import os
import rawpy
import numpy as np
# Returns a tuple of (sorted file paths, integer image ids parsed from the file names)
def getInputImagesList():
# Get all short exposure images
res = glob.glob('./dataset/sony/short/0*.ARW')
res.sort()
return (res, [int(os.path.basename(res)[0:5]) for res in res])
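# Assumption about the naming scheme: the first five characters of each file
# name encode the scene id shared by a short exposure and its long-exposure
# ground truth, which is why the id lists above and below are parsed from
# basename[0:5].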
def getGroundtruthImagesList():
# Get all short exposure images
res = glob.glob('./dataset/sony/long/0*.ARW')
res.sort()
return (res, [int(os.path.basename(res)[0:5]) for res in res])
def getTestInputImagesList():
res = glob.glob('./dataset/sony/short/1*.ARW')
res.sort()
    return (res, [int(os.path.basename(res)[0:5]) for res in res])
def getTestGroundtruthImagesList():
# Get all short exposure images
res = glob.glob('./dataset/sony/long/1*.ARW')
res.sort()
return (res, [int(os.path.basename(res)[0:5]) for res in res])
# Use rawpy to get pictures
def pack_raw(raw):
# Pack Bayer image to 4 channels
im = raw.raw_image_visible.astype(np.float32)
# Subtract the black level
# 16383 == 2^14 (data is 14 bits)
# 512 is hardware specific to the camera
im = np.maximum(im - 512, 0) / (16383 - 512)
im = np.expand_dims(im, axis=2)
img_shape = im.shape
H = img_shape[0]
W = img_shape[1]
out = np.concatenate((im[0:H:2, 0:W:2, :],
im[0:H:2, 1:W:2, :],
im[1:H:2, 1:W:2, :],
im[1:H:2, 0:W:2, :]), axis=2)
return out
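# Minimal usage sketch (hypothetical file name): rawpy.imread() returns the
# raw object whose raw_image_visible field is unpacked above.
#   raw = rawpy.imread('./dataset/sony/short/00001_00_0.1s.ARW')
#   packed = pack_raw(raw)   # (H/2, W/2, 4) float32 array scaled to [0, 1]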
def pack_raw_test(raw):
    # Pack Bayer image to 4 channels
    im = raw.raw_image_visible.astype(np.float32)
    # Subtract the black level
    im = np.maximum(im - 512, 0) / (16383 - 512)
im = np.expand_dims(im,axis=2)
img_shape = im.shape
H = img_shape[0]
W = img_shape[1]
out = np.concatenate((im[0:H:2,0:W:2,:],
im[0:H:2,1:W:2,:],
im[1:H:2,1:W:2,:],
im[1:H:2,0:W:2,:]), axis=2)
return out
|
[
"numpy.maximum",
"os.path.basename",
"numpy.expand_dims",
"glob.glob",
"numpy.concatenate"
] |
[((146, 186), 'glob.glob', 'glob.glob', (['"""./dataset/sony/short/0*.ARW"""'], {}), "('./dataset/sony/short/0*.ARW')\n", (155, 186), False, 'import glob\n'), ((348, 387), 'glob.glob', 'glob.glob', (['"""./dataset/sony/long/0*.ARW"""'], {}), "('./dataset/sony/long/0*.ARW')\n", (357, 387), False, 'import glob\n'), ((511, 551), 'glob.glob', 'glob.glob', (['"""./dataset/sony/short/1*.ARW"""'], {}), "('./dataset/sony/short/1*.ARW')\n", (520, 551), False, 'import glob\n'), ((726, 765), 'glob.glob', 'glob.glob', (['"""./dataset/sony/long/1*.ARW"""'], {}), "('./dataset/sony/long/1*.ARW')\n", (735, 765), False, 'import glob\n'), ((1158, 1184), 'numpy.expand_dims', 'np.expand_dims', (['im'], {'axis': '(2)'}), '(im, axis=2)\n', (1172, 1184), True, 'import numpy as np\n'), ((1263, 1375), 'numpy.concatenate', 'np.concatenate', (['(im[0:H:2, 0:W:2, :], im[0:H:2, 1:W:2, :], im[1:H:2, 1:W:2, :], im[1:H:2, 0\n :W:2, :])'], {'axis': '(2)'}), '((im[0:H:2, 0:W:2, :], im[0:H:2, 1:W:2, :], im[1:H:2, 1:W:2,\n :], im[1:H:2, 0:W:2, :]), axis=2)\n', (1277, 1375), True, 'import numpy as np\n'), ((1609, 1635), 'numpy.expand_dims', 'np.expand_dims', (['im'], {'axis': '(2)'}), '(im, axis=2)\n', (1623, 1635), True, 'import numpy as np\n'), ((1714, 1826), 'numpy.concatenate', 'np.concatenate', (['(im[0:H:2, 0:W:2, :], im[0:H:2, 1:W:2, :], im[1:H:2, 1:W:2, :], im[1:H:2, 0\n :W:2, :])'], {'axis': '(2)'}), '((im[0:H:2, 0:W:2, :], im[0:H:2, 1:W:2, :], im[1:H:2, 1:W:2,\n :], im[1:H:2, 0:W:2, :]), axis=2)\n', (1728, 1826), True, 'import numpy as np\n'), ((1108, 1131), 'numpy.maximum', 'np.maximum', (['(im - 512)', '(0)'], {}), '(im - 512, 0)\n', (1118, 1131), True, 'import numpy as np\n'), ((1535, 1558), 'numpy.maximum', 'np.maximum', (['(im - 512)', '(0)'], {}), '(im - 512, 0)\n', (1545, 1558), True, 'import numpy as np\n'), ((224, 245), 'os.path.basename', 'os.path.basename', (['res'], {}), '(res)\n', (240, 245), False, 'import os\n'), ((425, 446), 'os.path.basename', 'os.path.basename', (['res'], {}), '(res)\n', (441, 446), False, 'import os\n'), ((589, 610), 'os.path.basename', 'os.path.basename', (['res'], {}), '(res)\n', (605, 610), False, 'import os\n'), ((803, 824), 'os.path.basename', 'os.path.basename', (['res'], {}), '(res)\n', (819, 824), False, 'import os\n')]
|
# coding=utf-8
import xml.etree.ElementTree as ET
import sys
import os
import glob
import shutil
import cv2
from multiprocessing import Pool
from multiprocessing import Manager
from multiprocessing import Process
import numpy as np
import pickle
def restore_file(path):
df = open(path, 'rb')
file = pickle.load(df)
df.close()
return file
def save_file(file, path, protocol=None):
df = open(path, 'wb')
if protocol is None:
pickle.dump(file, df)
else:
pickle.dump(file, df, protocol=protocol)
df.close()
print('Successfully save ', path)
def get_direction(xml_path):
tree = ET.parse(xml_path)
rect={}
line=""
root = tree.getroot()
#for name in root.iter('path'):
# rect['path'] = os.path.basename(name.text)
def get_info(ob, name):
for front in ob.iter(name):
return int(front.text)
for ob in root.iter('attributes'):
rect['front'] = get_info(ob, 'front')
rect['back'] = get_info(ob, 'back')
rect['side'] = get_info(ob, 'side')
rect['front_side'] = get_info(ob, 'front_side')
rect['back_side'] = get_info(ob, 'back_side')
rect['noise'] = get_info(ob, 'noise')
try:
sums = sum(rect.values())
except:
sums = 0
return rect, sums
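# Expected annotation layout (inferred from the parsing above and the checks
# in get_copy_list below): each XML file carries one <attributes> node with
# integer children <front>, <back>, <side>, <front_side>, <back_side> and
# <noise>; exactly one of them should be 1, so sums == 0 marks an unlabeled
# image and sums > 1 a conflicting label.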
def mkdirs(root_dir):
if os.path.exists(root_dir) is False:
os.mkdir(root_dir)
direction_list = ['front', 'back', 'side', 'front_side', 'back_side', 'noise', 'null', 'error']
for i in direction_list:
if os.path.exists(root_dir+i) is False:
os.mkdir(root_dir+i)
def get_copy_list():
save_dir = 'cuhk03_train_fixed2/'
mkdirs(save_dir)
xml_list = glob.glob('cuhk03_annotations/*.xml')
copy_list = []
print('len(xml_list):', len(xml_list))
key_list = ['front', 'back', 'side', 'front_side', 'back_side', 'noise']
num_dict = {}
for i in key_list:
num_dict[i] = 0
for index, path in enumerate(xml_list):
if index % 5000 == 0:
print(index, len(xml_list))
rect, sums = get_direction(path)
if sums == 0:
#shutil.copyfile(path, save_dir+'null/'+os.path.basename(path))
copy_list.append([path, save_dir+'null/'+os.path.basename(path)])
path1 = path.replace('.xml', '.jpg')
#shutil.copyfile(path1, save_dir+'null/'+os.path.basename(path1))
copy_list.append([path1, save_dir+'null/'+os.path.basename(path1)])
continue
if sums > 1:
#shutil.copyfile(path, save_dir+'error/'+os.path.basename(path))
copy_list.append([path, save_dir+'error/'+os.path.basename(path)])
path1 = path.replace('.xml', '.jpg')
#shutil.copyfile(path1, save_dir+'error/'+os.path.basename(path1))
copy_list.append([path1, save_dir+'error/'+os.path.basename(path1)])
continue
for key in rect.keys():
if rect[key] == 1:
num_dict[key] += 1
path1 = path.replace('.xml', '.jpg')
#shutil.copyfile(path1, save_dir+key+'/'+os.path.basename(path1))
copy_list.append([path1, save_dir+key+'/'+os.path.basename(path1)])
break
print('-------------')
for i in key_list:
print(i, num_dict[i], round(num_dict[i]/len(xml_list), 3))
print('-------------')
print(round((num_dict['front']+num_dict['front_side'])/len(xml_list), 3))
print(round((num_dict['back']+num_dict['back_side'])/len(xml_list), 3))
print(round((num_dict['side'])/len(xml_list), 3))
return copy_list
def copy_img(path_list):
for path in path_list:
shutil.copyfile(path[0], path[1])
def split_direction():
copy_list = get_copy_list()
print('len(copy_list):', len(copy_list))
#exit(0)
num_jobs = 8
index_list = len(copy_list)*np.arange(0,1,1/num_jobs)
index_list = [int(i) for i in index_list]
index_list.append(len(copy_list))
print(index_list)
processes = list()
for i in range(num_jobs):
p = Process(target=copy_img, args=(copy_list[index_list[i]:index_list[i+1]],))
print('Process %d will start.' % i)
p.start()
processes.append(p)
for p in processes:
p.join()
def get_bbox(xml_path):
tree = ET.parse(xml_path)
rect={}
line=""
root = tree.getroot()
#for name in root.iter('path'):
# rect['path'] = os.path.basename(name.text)
def get_info(ob, name):
for front in ob.iter(name):
return int(front.text)
for ob in root.iter('bndbox'):
#for obb in root.iter('bndbox'):
xmin = get_info(ob, 'xmin')
ymin = get_info(ob, 'ymin')
xmax = get_info(ob, 'xmax')
ymax = get_info(ob, 'ymax')
break
print(xmin, xmax, ymin, ymax)
return xmin, xmax, ymin, ymax
if __name__ == '__main__':
'''
name = 'wait_to_crop_train/0010_c6s4_002427_07.jpg'
xmin, xmax, ymin, ymax = get_bbox('wait_to_crop_train/0010_c6s4_002427_07.xml')
img = cv2.imread(name)
#cv2.rectangle(img, (xmin,ymin),(xmax,ymax), (255,0,0),1)
img2 = img[ymin:ymax, xmin:xmax]
cv2.imshow('image', img2)
cv2.waitKey(0)
exit(0)
'''
image_list = glob.glob('wait_to_crop_test/*.jpg')
for name in image_list:
basename = os.path.basename(name)
img = cv2.imread(name)
if os.path.exists('wait_to_crop_test/'+basename[:-4]+'.xml'):
xmin, xmax, ymin, ymax = get_bbox('wait_to_crop_test/'+basename[:-4]+'.xml')
img = cv2.imread(name)
img2 = img[ymin:ymax, xmin:xmax]
cv2.imwrite('crop_test/'+basename, img2)
exit(0)
#split_direction()
image_map_direction = {}
direction_map_image = {}
img_list = []
save_dir = 'cuhk03_train_fixed2/'
direction_list = ['front', 'back', 'side', 'front_side', 'back_side', 'noise']
map_int = {'front':0,
'front_side': 0,
'side':1,
'noise':1,
'back': 2,
'back_side':2,}
map_int2 = {'front':0,
'front_side': 1,
'side':2,
'noise':5,
'back': 3,
'back_side':4,}
direction_int_list = []
direction_int_list2 = []
for i in direction_list:
image_list = os.listdir(save_dir+i)
direction_map_image[i] = image_list
for name in image_list:
image_map_direction[name] = (map_int[i], i)
direction_int_list.append(map_int[i])
direction_int_list2.append(map_int2[i])
if name[-8:] == '.jpg.jpg':
image_map_direction[name[:-4]] = (map_int[i], i)
print(name, name[:-4])
print(len(direction_int_list),
round(direction_int_list.count(0)/len(direction_int_list), 2),
round(direction_int_list.count(1)/len(direction_int_list), 2),
round(direction_int_list.count(2)/len(direction_int_list), 2))
print(set(direction_int_list))
print(len(direction_int_list2),
round(direction_int_list2.count(0)/len(direction_int_list2), 2),
round(direction_int_list2.count(1)/len(direction_int_list2), 2),
round(direction_int_list2.count(2)/len(direction_int_list2), 2),
round(direction_int_list2.count(3)/len(direction_int_list2), 2),
round(direction_int_list2.count(4)/len(direction_int_list2), 2),
round(direction_int_list2.count(5)/len(direction_int_list2), 2))
print(set(direction_int_list2))
save_file(image_map_direction, 'cuhk03_image_map_direction.pkl')
save_file(direction_map_image, 'cuhk03_direction_map_image.pkl')
save_file(image_map_direction, 'cuhk03_image_map_direction_py2.pkl', 2)
save_file(direction_map_image, 'cuhk03_direction_map_image_py2.pkl', 2)
print(len(image_map_direction))
exit(0)
print(image_map_direction)
exit(0)
image_map_direction = {}
direction_map_image = {}
save_dir = 'market1501_full_fixed2/'
direction_list = ['front', 'back', 'side', 'front_side', 'back_side', 'noise', 'null', 'error']
for i in direction_list:
image_list = os.listdir(save_dir+i)
exit(0)
exit(0)
#save_dir = 'DukeMTMC-reID_detail/'
save_dir = 'DukeMTMC-reID_detail/'
direction_list = ['front', 'back', 'side', 'front_side', 'back_side']
for i in direction_list:
listglob1 = glob.glob(save_dir+i+'/*.jpg')
for path in listglob1:
img = cv2.imread(path)
            img = cv2.resize(img, (50, 120))
cv2.imwrite(path, img)
#line = rect['path'] + "\t"+ rect['xmin']+ "\t"+rect['ymin']+"\t"+rect['xmax']+"\t"+rect['ymax']
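# A minimal usage sketch for the direction labels saved above. It assumes
# 'cuhk03_image_map_direction.pkl' (written by this script) is present; the
# lookup key is the image basename and the stored value is a
# (coarse_label, direction_string) tuple, e.g. (0, 'front').
def example_lookup_direction(image_name, pkl_path='cuhk03_image_map_direction.pkl'):
    # restore_file is defined at the top of this script
    image_map_direction = restore_file(pkl_path)
    return image_map_direction.get(image_name)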
|
[
"xml.etree.ElementTree.parse",
"pickle.dump",
"os.mkdir",
"os.path.basename",
"cv2.imwrite",
"os.path.exists",
"cv2.imread",
"pickle.load",
"numpy.arange",
"glob.glob",
"shutil.copyfile",
"multiprocessing.Process",
"os.listdir",
"cv2.resize"
] |
[((309, 324), 'pickle.load', 'pickle.load', (['df'], {}), '(df)\n', (320, 324), False, 'import pickle\n'), ((634, 652), 'xml.etree.ElementTree.parse', 'ET.parse', (['xml_path'], {}), '(xml_path)\n', (642, 652), True, 'import xml.etree.ElementTree as ET\n'), ((1723, 1760), 'glob.glob', 'glob.glob', (['"""cuhk03_annotations/*.xml"""'], {}), "('cuhk03_annotations/*.xml')\n", (1732, 1760), False, 'import glob\n'), ((4349, 4367), 'xml.etree.ElementTree.parse', 'ET.parse', (['xml_path'], {}), '(xml_path)\n', (4357, 4367), True, 'import xml.etree.ElementTree as ET\n'), ((5306, 5342), 'glob.glob', 'glob.glob', (['"""wait_to_crop_test/*.jpg"""'], {}), "('wait_to_crop_test/*.jpg')\n", (5315, 5342), False, 'import glob\n'), ((458, 479), 'pickle.dump', 'pickle.dump', (['file', 'df'], {}), '(file, df)\n', (469, 479), False, 'import pickle\n'), ((498, 538), 'pickle.dump', 'pickle.dump', (['file', 'df'], {'protocol': 'protocol'}), '(file, df, protocol=protocol)\n', (509, 538), False, 'import pickle\n'), ((1354, 1378), 'os.path.exists', 'os.path.exists', (['root_dir'], {}), '(root_dir)\n', (1368, 1378), False, 'import os\n'), ((1397, 1415), 'os.mkdir', 'os.mkdir', (['root_dir'], {}), '(root_dir)\n', (1405, 1415), False, 'import os\n'), ((3709, 3742), 'shutil.copyfile', 'shutil.copyfile', (['path[0]', 'path[1]'], {}), '(path[0], path[1])\n', (3724, 3742), False, 'import shutil\n'), ((3908, 3937), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(1 / num_jobs)'], {}), '(0, 1, 1 / num_jobs)\n', (3917, 3937), True, 'import numpy as np\n'), ((4106, 4182), 'multiprocessing.Process', 'Process', ([], {'target': 'copy_img', 'args': '(copy_list[index_list[i]:index_list[i + 1]],)'}), '(target=copy_img, args=(copy_list[index_list[i]:index_list[i + 1]],))\n', (4113, 4182), False, 'from multiprocessing import Process\n'), ((5390, 5412), 'os.path.basename', 'os.path.basename', (['name'], {}), '(name)\n', (5406, 5412), False, 'import os\n'), ((5427, 5443), 'cv2.imread', 'cv2.imread', (['name'], {}), '(name)\n', (5437, 5443), False, 'import cv2\n'), ((5455, 5516), 'os.path.exists', 'os.path.exists', (["('wait_to_crop_test/' + basename[:-4] + '.xml')"], {}), "('wait_to_crop_test/' + basename[:-4] + '.xml')\n", (5469, 5516), False, 'import os\n'), ((6422, 6446), 'os.listdir', 'os.listdir', (['(save_dir + i)'], {}), '(save_dir + i)\n', (6432, 6446), False, 'import os\n'), ((8269, 8293), 'os.listdir', 'os.listdir', (['(save_dir + i)'], {}), '(save_dir + i)\n', (8279, 8293), False, 'import os\n'), ((8526, 8560), 'glob.glob', 'glob.glob', (["(save_dir + i + '/*.jpg')"], {}), "(save_dir + i + '/*.jpg')\n", (8535, 8560), False, 'import glob\n'), ((1556, 1584), 'os.path.exists', 'os.path.exists', (['(root_dir + i)'], {}), '(root_dir + i)\n', (1570, 1584), False, 'import os\n'), ((1605, 1627), 'os.mkdir', 'os.mkdir', (['(root_dir + i)'], {}), '(root_dir + i)\n', (1613, 1627), False, 'import os\n'), ((5621, 5637), 'cv2.imread', 'cv2.imread', (['name'], {}), '(name)\n', (5631, 5637), False, 'import cv2\n'), ((5695, 5737), 'cv2.imwrite', 'cv2.imwrite', (["('crop_test/' + basename)", 'img2'], {}), "('crop_test/' + basename, img2)\n", (5706, 5737), False, 'import cv2\n'), ((8606, 8622), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (8616, 8622), False, 'import cv2\n'), ((8641, 8667), 'cv2.resize', 'cv2.resize', (['img', '(50, 120)'], {}), '(img, (50, 120))\n', (8651, 8667), False, 'import cv2\n'), ((8681, 8703), 'cv2.imwrite', 'cv2.imwrite', (['path', 'img'], {}), '(path, img)\n', (8692, 8703), False, 'import cv2\n'), ((2273, 
2295), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (2289, 2295), False, 'import os\n'), ((2479, 2502), 'os.path.basename', 'os.path.basename', (['path1'], {}), '(path1)\n', (2495, 2502), False, 'import os\n'), ((2678, 2700), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (2694, 2700), False, 'import os\n'), ((2886, 2909), 'os.path.basename', 'os.path.basename', (['path1'], {}), '(path1)\n', (2902, 2909), False, 'import os\n'), ((3224, 3247), 'os.path.basename', 'os.path.basename', (['path1'], {}), '(path1)\n', (3240, 3247), False, 'import os\n')]
|
#pylint: disable=E1101, E0401, E1102, W0621, W0221
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import random
import time
from random import SystemRandom
import models
import utils
parser = argparse.ArgumentParser()
parser.add_argument('--niters', type=int, default=2000)
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--latent-dim', type=int, default=32)
parser.add_argument('--rec-hidden', type=int, default=32)
parser.add_argument('--gen-hidden', type=int, default=50)
parser.add_argument('--embed-time', type=int, default=128)
parser.add_argument('--save', type=int, default=1)
parser.add_argument('--enc', type=str, default='mtan_rnn')
parser.add_argument('--dec', type=str, default='mtan_rnn')
parser.add_argument('--fname', type=str, default=None)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--split', type=int, default=0)
parser.add_argument('--n', type=int, default=8000)
parser.add_argument('--batch-size', type=int, default=50)
parser.add_argument('--classif', action='store_true',
help="Include binary classification loss")
parser.add_argument('--freq', type=float, default=10.)
parser.add_argument('--k-iwae', type=int, default=10)
parser.add_argument('--norm', action='store_true')
parser.add_argument('--kl', action='store_true')
parser.add_argument('--learn-emb', action='store_true')
parser.add_argument('--dataset', type=str, default='activity')
parser.add_argument('--alpha', type=int, default=100.)
parser.add_argument('--enc-num-heads', type=int, default=1)
parser.add_argument('--dec-num-heads', type=int, default=1)
parser.add_argument('--num-ref-points', type=int, default=128)
parser.add_argument('--classify-pertp', action='store_true')
args = parser.parse_args()
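# A hypothetical command line for this training script (the script file name is
# assumed and all flag values are illustrative only; the flags themselves are
# the ones defined above):
#   python train_activity.py --niters 300 --lr 0.001 --batch-size 50 \
#       --rec-hidden 64 --gen-hidden 64 --k-iwae 5 --classify-pertp --save 1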
if __name__ == '__main__':
experiment_id = int(SystemRandom().random()*100000)
print(args, experiment_id)
seed = args.seed
torch.manual_seed(seed)
np.random.seed(seed)
torch.cuda.manual_seed(seed)
random.seed(seed)
device = torch.device(
'cuda' if torch.cuda.is_available() else 'cpu')
if args.dataset == 'activity':
data_obj = utils.get_activity_data(args, 'cpu')
train_loader = data_obj["train_dataloader"]
test_loader = data_obj["test_dataloader"]
val_loader = data_obj["val_dataloader"]
dim = data_obj["input_dim"]
if args.enc == 'enc_rnn3':
rec = models.enc_rnn3(
dim, torch.linspace(0, 1., 50), args.latent_dim, args.rec_hidden, 128, learn_emb=args.learn_emb).to(device)
elif args.enc == 'mtan_rnn':
rec = models.enc_mtan_rnn(
dim, torch.linspace(0, 1., args.num_ref_points), args.latent_dim, args.rec_hidden,
embed_time=128, learn_emb=args.learn_emb, num_heads=args.enc_num_heads).to(device)
if args.dec == 'rnn3':
dec = models.dec_rnn3(
dim, torch.linspace(0, 1., 50), args.latent_dim, args.gen_hidden, 128, learn_emb=args.learn_emb).to(device)
elif args.dec == 'mtan_rnn':
dec = models.dec_mtan_rnn(
dim, torch.linspace(0, 1., args.num_ref_points), args.latent_dim, args.gen_hidden,
embed_time=128, learn_emb=args.learn_emb, num_heads=args.dec_num_heads).to(device)
classifier = nn.Sequential(
nn.Linear(args.latent_dim, 300),
nn.ReLU(),
nn.Linear(300, 300),
nn.ReLU(),
nn.Linear(300, 11))
classifier = classifier.to(device)
params = (list(rec.parameters()) + list(dec.parameters()) + list(classifier.parameters()))
print('parameters:', utils.count_parameters(rec), utils.count_parameters(dec), utils.count_parameters(classifier))
optimizer = optim.Adam(params, lr=args.lr)
criterion = nn.CrossEntropyLoss()
best_val_loss = float('inf')
total_time = 0.
for itr in range(1, args.niters + 1):
train_recon_loss, train_ce_loss = 0, 0
mse = 0
train_n = 0
train_acc = 0
#avg_reconst, avg_kl, mse = 0, 0, 0
if args.kl:
wait_until_kl_inc = 10
if itr < wait_until_kl_inc:
kl_coef = 0.
else:
kl_coef = (1-0.99** (itr - wait_until_kl_inc))
else:
kl_coef = 1
start_time = time.time()
for train_batch, label in train_loader:
train_batch, label = train_batch.to(device), label.to(device)
batch_len = train_batch.shape[0]
observed_data, observed_mask, observed_tp \
= train_batch[:, :, :dim], train_batch[:, :, dim:2*dim], train_batch[:, :, -1]
out = rec(torch.cat((observed_data, observed_mask), 2), observed_tp)
qz0_mean, qz0_logvar = out[:, :, :args.latent_dim], out[:, :, args.latent_dim:]
epsilon = torch.randn(args.k_iwae, qz0_mean.shape[0], qz0_mean.shape[1], qz0_mean.shape[2]).to(device)
z0 = epsilon * torch.exp(.5 * qz0_logvar) + qz0_mean
z0 = z0.view(-1, qz0_mean.shape[1], qz0_mean.shape[2])
print(z0.shape)
pred_y = classifier(z0)
print(pred_y.shape)
# compute loss
if args.classify_pertp:
N = label.size(-1)
out = pred_y.view(-1, N)
label = label.view(-1, N)
_, label = label.max(-1)
ce_loss = criterion(out, label.long())
else:
                # use the classifier output and define `ce_loss` for the
                # assignment below (assumes `label` holds class indices here)
                ce_loss = criterion(pred_y, label)
loss = ce_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_ce_loss += ce_loss.item() * batch_len
train_acc += torch.mean((out.argmax(1) == label).float()).item() * batch_len
train_n += batch_len
total_time += time.time() - start_time
val_loss, val_acc, val_auc = utils.evaluate_classifier(
rec, val_loader, dec=dec, args=args, classifier=classifier, reconst=True, num_sample=1, dim=dim)
if val_loss <= best_val_loss:
best_val_loss = min(best_val_loss, val_loss)
rec_state_dict = rec.state_dict()
dec_state_dict = dec.state_dict()
classifier_state_dict = classifier.state_dict()
optimizer_state_dict = optimizer.state_dict()
test_loss, test_acc, test_auc = utils.evaluate_classifier(
rec, test_loader, dec=dec, args=args, classifier=classifier, reconst=True, num_sample=1, dim=dim)
print('Iter: {}, recon_loss: {:.4f}, ce_loss: {:.4f}, acc: {:.4f}, mse: {:.4f}, val_loss: {:.4f}, val_acc: {:.4f}, test_acc: {:.4f}, test_auc: {:.4f}'
.format(itr, train_recon_loss/train_n, train_ce_loss/train_n,
train_acc/train_n, mse/train_n, val_loss, val_acc, test_acc, test_auc))
if itr % 100 == 0 and args.save:
torch.save({
'args': args,
'epoch': itr,
'rec_state_dict': rec_state_dict,
'dec_state_dict': dec_state_dict,
'optimizer_state_dict': optimizer_state_dict,
'classifier_state_dict': classifier_state_dict,
'loss': -loss,
}, args.dataset + '_' +
args.enc + '_' +
args.dec + '_' +
str(experiment_id) +
'.h5')
print(best_val_loss)
print(total_time)
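# A hedged sketch (not invoked anywhere above) of how the checkpoint written by
# torch.save() in the training loop could be restored for evaluation. It assumes
# rec, dec and classifier have been rebuilt with the same constructor arguments
# used during training before their state dicts are loaded.
def load_checkpoint(path, rec, dec, classifier, map_location='cpu'):
    checkpoint = torch.load(path, map_location=map_location)
    rec.load_state_dict(checkpoint['rec_state_dict'])
    dec.load_state_dict(checkpoint['dec_state_dict'])
    classifier.load_state_dict(checkpoint['classifier_state_dict'])
    # the saved epoch number can be used to resume or to label results
    return checkpoint['epoch']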
|
[
"utils.get_activity_data",
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.cat",
"torch.randn",
"torch.exp",
"random.seed",
"torch.nn.Linear",
"torch.manual_seed",
"torch.cuda.manual_seed",
"torch.optim.Adam",
"torch.cuda.is_available",
"utils.count_parameters",
"torch.nn.ReLU",
"random.SystemRandom",
"utils.evaluate_classifier",
"torch.nn.CrossEntropyLoss",
"time.time",
"torch.linspace"
] |
[((245, 270), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (268, 270), False, 'import argparse\n'), ((1963, 1986), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1980, 1986), False, 'import torch\n'), ((1991, 2011), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2005, 2011), True, 'import numpy as np\n'), ((2016, 2044), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (2038, 2044), False, 'import torch\n'), ((2049, 2066), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2060, 2066), False, 'import random\n'), ((3768, 3798), 'torch.optim.Adam', 'optim.Adam', (['params'], {'lr': 'args.lr'}), '(params, lr=args.lr)\n', (3778, 3798), True, 'import torch.optim as optim\n'), ((3815, 3836), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3834, 3836), True, 'import torch.nn as nn\n'), ((2209, 2245), 'utils.get_activity_data', 'utils.get_activity_data', (['args', '"""cpu"""'], {}), "(args, 'cpu')\n", (2232, 2245), False, 'import utils\n'), ((3355, 3386), 'torch.nn.Linear', 'nn.Linear', (['args.latent_dim', '(300)'], {}), '(args.latent_dim, 300)\n', (3364, 3386), True, 'import torch.nn as nn\n'), ((3400, 3409), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3407, 3409), True, 'import torch.nn as nn\n'), ((3423, 3442), 'torch.nn.Linear', 'nn.Linear', (['(300)', '(300)'], {}), '(300, 300)\n', (3432, 3442), True, 'import torch.nn as nn\n'), ((3456, 3465), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3463, 3465), True, 'import torch.nn as nn\n'), ((3479, 3497), 'torch.nn.Linear', 'nn.Linear', (['(300)', '(11)'], {}), '(300, 11)\n', (3488, 3497), True, 'import torch.nn as nn\n'), ((3658, 3685), 'utils.count_parameters', 'utils.count_parameters', (['rec'], {}), '(rec)\n', (3680, 3685), False, 'import utils\n'), ((3687, 3714), 'utils.count_parameters', 'utils.count_parameters', (['dec'], {}), '(dec)\n', (3709, 3714), False, 'import utils\n'), ((3716, 3750), 'utils.count_parameters', 'utils.count_parameters', (['classifier'], {}), '(classifier)\n', (3738, 3750), False, 'import utils\n'), ((4350, 4361), 'time.time', 'time.time', ([], {}), '()\n', (4359, 4361), False, 'import time\n'), ((5949, 6076), 'utils.evaluate_classifier', 'utils.evaluate_classifier', (['rec', 'val_loader'], {'dec': 'dec', 'args': 'args', 'classifier': 'classifier', 'reconst': '(True)', 'num_sample': '(1)', 'dim': 'dim'}), '(rec, val_loader, dec=dec, args=args, classifier=\n classifier, reconst=True, num_sample=1, dim=dim)\n', (5974, 6076), False, 'import utils\n'), ((6430, 6558), 'utils.evaluate_classifier', 'utils.evaluate_classifier', (['rec', 'test_loader'], {'dec': 'dec', 'args': 'args', 'classifier': 'classifier', 'reconst': '(True)', 'num_sample': '(1)', 'dim': 'dim'}), '(rec, test_loader, dec=dec, args=args, classifier=\n classifier, reconst=True, num_sample=1, dim=dim)\n', (6455, 6558), False, 'import utils\n'), ((2112, 2137), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2135, 2137), False, 'import torch\n'), ((5887, 5898), 'time.time', 'time.time', ([], {}), '()\n', (5896, 5898), False, 'import time\n'), ((4703, 4747), 'torch.cat', 'torch.cat', (['(observed_data, observed_mask)', '(2)'], {}), '((observed_data, observed_mask), 2)\n', (4712, 4747), False, 'import torch\n'), ((1875, 1889), 'random.SystemRandom', 'SystemRandom', ([], {}), '()\n', (1887, 1889), False, 'from random import SystemRandom\n'), ((2504, 2530), 'torch.linspace', 'torch.linspace', (['(0)', 
'(1.0)', '(50)'], {}), '(0, 1.0, 50)\n', (2518, 2530), False, 'import torch\n'), ((2949, 2975), 'torch.linspace', 'torch.linspace', (['(0)', '(1.0)', '(50)'], {}), '(0, 1.0, 50)\n', (2963, 2975), False, 'import torch\n'), ((4876, 4962), 'torch.randn', 'torch.randn', (['args.k_iwae', 'qz0_mean.shape[0]', 'qz0_mean.shape[1]', 'qz0_mean.shape[2]'], {}), '(args.k_iwae, qz0_mean.shape[0], qz0_mean.shape[1], qz0_mean.\n shape[2])\n', (4887, 4962), False, 'import torch\n'), ((4996, 5023), 'torch.exp', 'torch.exp', (['(0.5 * qz0_logvar)'], {}), '(0.5 * qz0_logvar)\n', (5005, 5023), False, 'import torch\n'), ((2692, 2735), 'torch.linspace', 'torch.linspace', (['(0)', '(1.0)', 'args.num_ref_points'], {}), '(0, 1.0, args.num_ref_points)\n', (2706, 2735), False, 'import torch\n'), ((3137, 3180), 'torch.linspace', 'torch.linspace', (['(0)', '(1.0)', 'args.num_ref_points'], {}), '(0, 1.0, args.num_ref_points)\n', (3151, 3180), False, 'import torch\n')]
|
"""Gaussian process-based minimization algorithms."""
import numpy as np
from sklearn.utils import check_random_state
from .base import base_minimize
from ..utils import cook_estimator
from ..utils import normalize_dimensions
def gp_minimize(func, dimensions, base_estimator=None,
n_calls=100, n_random_starts=10,
acq_func="gp_hedge", acq_optimizer="auto", x0=None, y0=None,
random_state=None, verbose=False, callback=None,
n_points=10000, n_restarts_optimizer=5, xi=0.01, kappa=1.96,
noise="gaussian", n_jobs=1, model_queue_size=None):
"""Bayesian optimization using Gaussian Processes.
If every function evaluation is expensive, for instance
when the parameters are the hyperparameters of a neural network
and the function evaluation is the mean cross-validation score across
ten folds, optimizing the hyperparameters by standard optimization
routines would take for ever!
    The idea is to approximate the function using a Gaussian process.
    In other words, the function values are assumed to follow a multivariate
    Gaussian distribution. The covariance of the function values is given by a
    GP kernel between the parameters. A smart choice for the next parameter to
    evaluate can then be made with the acquisition function over the Gaussian
    prior, which is much cheaper to evaluate than the objective itself.
The total number of evaluations, `n_calls`, are performed like the
following. If `x0` is provided but not `y0`, then the elements of `x0`
are first evaluated, followed by `n_random_starts` evaluations.
Finally, `n_calls - len(x0) - n_random_starts` evaluations are
made guided by the surrogate model. If `x0` and `y0` are both
provided then `n_random_starts` evaluations are first made then
`n_calls - n_random_starts` subsequent evaluations are made
guided by the surrogate model.
Parameters
----------
func : callable
Function to minimize. Should take a single list of parameters
and return the objective value.
If you have a search-space where all dimensions have names,
then you can use :func:`skopt.utils.use_named_args` as a decorator
on your objective function, in order to call it directly
with the named arguments. See `use_named_args` for an example.
    dimensions : list, shape (n_dims,)
List of search space dimensions.
Each search dimension can be defined either as
- a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer`
dimensions),
- a `(lower_bound, upper_bound, "prior")` tuple (for `Real`
dimensions),
- as a list of categories (for `Categorical` dimensions), or
- an instance of a `Dimension` object (`Real`, `Integer` or
`Categorical`).
.. note:: The upper and lower bounds are inclusive for `Integer`
dimensions.
base_estimator : a Gaussian process estimator
The Gaussian process estimator to use for optimization.
By default, a Matern kernel is used with the following
hyperparameters tuned.
- All the length scales of the Matern kernel.
- The covariance amplitude that each element is multiplied with.
- Noise that is added to the matern kernel. The noise is assumed
to be iid gaussian.
n_calls : int, default=100
Number of calls to `func`.
n_random_starts : int, default=10
Number of evaluations of `func` with random points before
approximating it with `base_estimator`.
acq_func : string, default=`"gp_hedge"`
Function to minimize over the gaussian prior. Can be either
- `"LCB"` for lower confidence bound.
- `"EI"` for negative expected improvement.
- `"PI"` for negative probability of improvement.
- `"gp_hedge"` Probabilistically choose one of the above three
acquisition functions at every iteration. The weightage
given to these gains can be set by :math:`\eta` through
`acq_func_kwargs`.
- The gains `g_i` are initialized to zero.
- At every iteration,
- Each acquisition function is optimised independently to
              propose a candidate point `X_i`.
- Out of all these candidate points, the next point `X_best` is
chosen by :math:`softmax(\eta g_i)`
- After fitting the surrogate model with `(X_best, y_best)`,
the gains are updated such that :math:`g_i -= \mu(X_i)`
- `"EIps"` for negated expected improvement per second to take into
account the function compute time. Then, the objective function is
assumed to return two values, the first being the objective value and
the second being the time taken in seconds.
- `"PIps"` for negated probability of improvement per second. The
return type of the objective function is assumed to be similar to
          that of `"EIps"`.
acq_optimizer : string, `"sampling"` or `"lbfgs"`, default=`"lbfgs"`
        Method to minimize the acquisition function. The fit model
is updated with the optimal value obtained by optimizing `acq_func`
with `acq_optimizer`.
The `acq_func` is computed at `n_points` sampled randomly.
- If set to `"auto"`, then `acq_optimizer` is configured on the
basis of the space searched over.
          If the space is Categorical then this is set to be `"sampling"`.
- If set to `"sampling"`, then the point among these `n_points`
where the `acq_func` is minimum is the next candidate minimum.
- If set to `"lbfgs"`, then
          - The `n_restarts_optimizer` points at which the acquisition
            function value is lowest are taken as start points.
- `"lbfgs"` is run for 20 iterations with these points as initial
points to find local minima.
- The optimal of these local minima is used to update the prior.
x0 : list, list of lists or `None`
Initial input points.
- If it is a list of lists, use it as a list of input points.
- If it is a list, use it as a single initial input point.
- If it is `None`, no initial input points are used.
y0 : list, scalar or `None`
Evaluation of initial input points.
- If it is a list, then it corresponds to evaluations of the function
at each element of `x0` : the i-th element of `y0` corresponds
to the function evaluated at the i-th element of `x0`.
- If it is a scalar, then it corresponds to the evaluation of the
function at `x0`.
- If it is None and `x0` is provided, then the function is evaluated
at each element of `x0`.
random_state : int, RandomState instance, or None (default)
Set random state to something other than None for reproducible
results.
verbose : boolean, default=False
Control the verbosity. It is advised to set the verbosity to True
for long optimization runs.
callback : callable, list of callables, optional
If callable then `callback(res)` is called after each call to `func`.
If list of callables, then each callable in the list is called.
n_points : int, default=10000
Number of points to sample to determine the next "best" point.
Useless if acq_optimizer is set to `"lbfgs"`.
n_restarts_optimizer : int, default=5
The number of restarts of the optimizer when `acq_optimizer`
is `"lbfgs"`.
kappa : float, default=1.96
Controls how much of the variance in the predicted values should be
taken into account. If set to be very high, then we are favouring
exploration over exploitation and vice versa.
Used when the acquisition is `"LCB"`.
xi : float, default=0.01
Controls how much improvement one wants over the previous best
values. Used when the acquisition is either `"EI"` or `"PI"`.
noise : float, default="gaussian"
- Use noise="gaussian" if the objective returns noisy observations.
The noise of each observation is assumed to be iid with
mean zero and a fixed variance.
- If the variance is known before-hand, this can be set directly
to the variance of the noise.
- Set this to a value close to zero (1e-10) if the function is
noise-free. Setting to zero might cause stability issues.
n_jobs : int, default=1
Number of cores to run in parallel while running the lbfgs
optimizations over the acquisition function. Valid only
        when `acq_optimizer` is set to `"lbfgs"`.
Defaults to 1 core. If `n_jobs=-1`, then number of jobs is set
to number of cores.
model_queue_size : int or None, default=None
Keeps list of models only as long as the argument given. In the
case of None, the list has no capped length.
Returns
-------
res : `OptimizeResult`, scipy object
        The optimization result returned as an `OptimizeResult` object.
Important attributes are:
- `x` [list]: location of the minimum.
- `fun` [float]: function value at the minimum.
- `models`: surrogate models used for each iteration.
- `x_iters` [list of lists]: location of function evaluation for each
iteration.
- `func_vals` [array]: function value for each iteration.
- `space` [Space]: the optimization space.
        - `specs` [dict]: the call specifications.
- `rng` [RandomState instance]: State of the random state
at the end of minimization.
For more details related to the OptimizeResult object, refer
http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.html
.. seealso:: functions :class:`skopt.forest_minimize`,
:class:`skopt.dummy_minimize`
"""
# Check params
rng = check_random_state(random_state)
space = normalize_dimensions(dimensions)
if base_estimator is None:
base_estimator = cook_estimator(
"GP", space=space, random_state=rng.randint(0, np.iinfo(np.int32).max),
noise=noise)
return base_minimize(
func, space, base_estimator=base_estimator,
acq_func=acq_func,
xi=xi, kappa=kappa, acq_optimizer=acq_optimizer, n_calls=n_calls,
n_points=n_points, n_random_starts=n_random_starts,
n_restarts_optimizer=n_restarts_optimizer,
x0=x0, y0=y0, random_state=rng, verbose=verbose,
callback=callback, n_jobs=n_jobs, model_queue_size=model_queue_size)
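if __name__ == "__main__":
    # Illustrative usage sketch of the call signature only. This module is
    # normally imported as part of the skopt package (note the relative imports
    # above), so the toy objective and all argument values here are assumptions
    # for demonstration, not part of the library.
    def objective(x):
        # simple 1-D quadratic; `x` is a list with one element per dimension
        return (x[0] - 0.3) ** 2

    result = gp_minimize(objective,
                         dimensions=[(-2.0, 2.0)],  # one Real dimension
                         n_calls=20,
                         n_random_starts=5,
                         random_state=0)
    print("best x:", result.x, "best value:", result.fun)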
|
[
"sklearn.utils.check_random_state",
"numpy.iinfo"
] |
[((10132, 10164), 'sklearn.utils.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (10150, 10164), False, 'from sklearn.utils import check_random_state\n'), ((10342, 10360), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (10350, 10360), True, 'import numpy as np\n')]
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .laikago_v2 import LaikagoBulletV2
from pybullet_utils import bullet_client
import pybullet
import time
import gym, gym.utils.seeding, gym.spaces
import numpy as np
import math
import torch
from gan import utils
from collections import deque
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
# this is called V4 because it shares same obs space with Laikago env V4
# the robot is still Laikago V2 though, same as env V4
class LaikagoConFEnvV4(gym.Env):
metadata = {'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 50}
def __init__(self,
render=True,
init_noise=True,
act_noise=False,
obs_noise=False,
control_skip=10,
max_tar_vel=2.5,
energy_weight=0.1,
jl_weight=0.5,
ab=5.0,
q_pen_weight=0.4,
dq_pen_weight=0.001,
vel_r_weight=4.0,
train_dyn=True, # if false, fix dyn and train motor policy
pretrain_dyn=False, # pre-train with deviation to sim
enlarge_act_range=0.0, # make behavior pi more diverse to match collection, only train_dyn
behavior_dir="trained_models_laika_bullet_61/ppo",
behavior_env_name="LaikagoBulletEnv-v4",
behavior_iter=None,
dyn_dir="",
dyn_env_name="LaikagoConFEnv-v4",
dyn_iter=None,
cuda_env=True,
):
self.render = render
self.init_noise = init_noise
self.obs_noise = obs_noise
self.act_noise = act_noise
self.control_skip = int(control_skip)
self._ts = 1. / 500.
self.max_tar_vel = max_tar_vel
self.energy_weight = energy_weight
self.jl_weight = jl_weight
self.ab = ab
self.q_pen_weight = q_pen_weight
self.dq_pen_weight = dq_pen_weight
self.vel_r_weight = vel_r_weight
self.train_dyn = train_dyn
self.enlarge_act_range = enlarge_act_range
self.pretrain_dyn = pretrain_dyn
self.cuda_env = cuda_env
self.ratio = None
if self.render:
self._p = bullet_client.BulletClient(connection_mode=pybullet.GUI)
else:
self._p = bullet_client.BulletClient()
self.np_random = None
self.robot = LaikagoBulletV2(init_noise=self.init_noise,
time_step=self._ts,
np_random=self.np_random)
self.seed(0) # used once temporarily, will be overwritten outside though superclass api
self.viewer = None
self.timer = 0
self.behavior_past_obs_t_idx = [0, 4, 8]
# self.behavior_past_obs_t_idx = [0] # t-3. t-6. t-9 (B does not take past a)
self.generator_past_obs_t_idx = [0, 2]
self.generator_past_act_t_idx = [0]
self.past_obs_array = deque(maxlen=10)
self.past_bact_array = deque(maxlen=10) # only need to store past behavior action
if self.train_dyn:
if behavior_iter:
behavior_iter = int(behavior_iter)
self.dyn_actor_critic = None
# load fixed behavior policy
self.go_actor_critic, _, \
self.recurrent_hidden_states, \
self.masks = utils.load(
behavior_dir, behavior_env_name, self.cuda_env, behavior_iter
)
else:
if dyn_iter:
dyn_iter = int(dyn_iter)
# train motor policy
self.go_actor_critic = None
# load fixed dynamics model
self.dyn_actor_critic, _, \
self.recurrent_hidden_states, \
self.masks = utils.load(
dyn_dir, dyn_env_name, self.cuda_env, dyn_iter
)
#
# self.discri = utils.load_gail_discriminator(dyn_dir,
# dyn_env_name,
# self.cuda_env,
# dyn_iter)
#
# self.feat_select_func = self.robot.feature_selection_all_laika
self.reset_const = 100
self.reset_counter = self.reset_const # do a hard reset first
# self.action_dim = 12
self.init_state = None
obs = self.reset()
#
# self.d_scores = []
# # set up imaginary session for pre-train
# self.set_up_imaginary_session()
if self.train_dyn:
self.action_dim = 12 # 12D action scales, see beginning of step() for comment
else:
self.action_dim = len(self.robot.ctrl_dofs)
self.act = [0.0] * self.action_dim
self.action_space = gym.spaces.Box(low=np.array([-1.] * self.action_dim),
high=np.array([+1.] * self.action_dim))
obs_dummy = np.array([1.12234567] * len(obs))
self.observation_space = gym.spaces.Box(low=-np.inf * obs_dummy, high=np.inf * obs_dummy)
def reset(self):
if self.reset_counter < self.reset_const:
self.reset_counter += 1
self._p.restoreState(self.init_state)
self.robot.soft_reset(self._p)
else:
self.reset_counter = 0
self._p.resetSimulation()
self._p.setTimeStep(self._ts)
self._p.setGravity(0, 0, -10)
self._p.setPhysicsEngineParameter(numSolverIterations=100)
# self._p.setPhysicsEngineParameter(restitutionVelocityThreshold=0.000001)
self.floor_id = self._p.loadURDF(os.path.join(currentdir, 'assets/plane.urdf'), [0, 0, 0.0], useFixedBase=1)
# conf policy does not use bullet collision
self._p.setCollisionFilterGroupMask(self.floor_id, -1, 0, 0)
self.robot.reset(self._p)
self.init_state = self._p.saveState()
self._p.stepSimulation()
self.timer = 0
self.past_obs_array.clear()
self.past_bact_array.clear()
# self.d_scores = []
obs = self.get_extended_observation()
# self.ratios = np.array([[]]).reshape(0, self.action_dim)
return obs
# def set_up_imaginary_session(self):
# # create another bullet session to run reset & rollout
# self._imaginary_p = bullet_client.BulletClient()
# self._imaginary_robot = LaikagoBulletV2(init_noise=self.init_noise,
# time_step=self._ts,
# np_random=self.np_random)
#
# self._imaginary_p.resetSimulation()
# self._imaginary_p.setTimeStep(self._ts)
# self._imaginary_p.setGravity(0, 0, -10)
# self._imaginary_p.setPhysicsEngineParameter(numSolverIterations=100)
# # there is a floor in this session
# floor_i = self._imaginary_p.loadURDF(os.path.join(currentdir, 'assets/plane.urdf'), [0, 0, 0.0], useFixedBase=1)
# self._imaginary_robot.reset(self._imaginary_p)
#
# self._imaginary_robot.soft_reset(self._imaginary_p)
#
# # TODO: change torque limit for this session
#
# self._imaginary_p.stepSimulation()
# def rollout_one_step_imaginary(self):
# # and get the obs vector [no tar vel] in sim
# assert self.train_dyn
# assert self.pretrain_dyn
#
# # robo_obs = self.obs[:-self.behavior_act_len] # TODO: deprecate behavior_act_len
# robo_action = self.obs[-self.behavior_act_len:]
# # print(robo_obs, "in img obs")
# # print(robo_action, "in img act")
#
# # robo_state_vec = self._imaginary_robot.transform_obs_to_state(robo_obs)
# robo_state_vec = self.robot.get_robot_raw_state_vec()
#
# self._imaginary_robot.soft_reset_to_state(self._imaginary_p, robo_state_vec)
# robo_state_i = self._imaginary_robot.get_robot_raw_state_vec()
#
# robo_action = np.clip(robo_action, -1.0, 1.0) # should also clip
# for _ in range(self.control_skip):
# self._imaginary_robot.apply_action(robo_action)
# self._imaginary_p.stepSimulation()
# # if self.render:
# # time.sleep(self._ts * 0.5)
#
# return self._imaginary_robot.get_robot_observation(), robo_state_i # pre-state_i
# def rollout_one_step_imaginary_same_session(self):
# # and get the obs vector [no tar vel] in sim
# assert self.train_dyn
# assert self.pretrain_dyn
#
# robo_action = self.obs[-self.behavior_act_len:]
#
# robo_action = np.clip(robo_action, -1.0, 1.0) # should also clip
# for _ in range(self.control_skip):
# self.robot.apply_action(robo_action)
# self._p.stepSimulation()
#
# return self.robot.get_robot_observation()
# def calc_obs_dist_pretrain(self, obs1, obs2):
# # TODO quat dist
# # print(np.array(obs1))
# # print("2", np.array(obs2))
# # print(np.linalg.norm(np.array(obs1) - np.array(obs2)))
# # print(1.5-np.linalg.norm(np.array(obs1[36:]) - np.array(obs2[36:])))
# # return -np.mean(np.abs((np.array(obs1[:36]) - np.array(obs2[:36])) / np.array(obs2[:36]))) * 100
# return 0.4-np.sum(np.abs(np.array(obs1[:36]) - np.array(obs2[:36]))) # obs len 48
# # return 6.0 -np.sum(np.abs(np.array(obs1[3:]) - np.array(obs2[3:]))) \
# # -np.sum(np.abs(np.array(obs1[:6]) - np.array(obs2[:6]))) * 20.0 # obs len 48
def step(self, a):
# TODO: currently for laika, env_action is 12D, 4 feet 3D without wrench
if self.train_dyn:
env_action = a
robo_action = self.past_bact_array[0] # after tanh
else:
robo_action = a
robo_action = np.tanh(robo_action)
# update past_bact after tanh
utils.push_recent_value(self.past_bact_array, robo_action)
env_pi_obs = utils.select_and_merge_from_s_a(
s_mt=list(self.past_obs_array),
a_mt=list(self.past_bact_array),
s_idx=self.generator_past_obs_t_idx,
a_idx=self.generator_past_act_t_idx
)
env_pi_obs_nn = utils.wrap(env_pi_obs, is_cuda=self.cuda_env)
with torch.no_grad():
_, env_action_nn, _, self.recurrent_hidden_states = self.dyn_actor_critic.act(
env_pi_obs_nn, self.recurrent_hidden_states, self.masks, deterministic=False
)
env_action = utils.unwrap(env_action_nn, is_cuda=self.cuda_env)
# if self.ratio is None:
# self.ratio = np.array([env_action / robo_action])
# else:
# self.ratio = np.append(self.ratio, [env_action / robo_action], axis=0)
# self.ratios = np.append(self.ratios, [env_action / robo_action], axis=0)
#
# env_pi_obs_feat = self.feat_select_func(self.obs)
# dis_state = np.concatenate((env_pi_obs_feat, robo_action))
# dis_state = utils.wrap(dis_state, is_cuda=self.cuda_env)
root_pos, _ = self.robot.get_link_com_xyz_orn(-1)
x_0 = root_pos[0]
# this is post noise (unseen), different from seen diversify of self.enlarge_act_scale
if self.act_noise:
robo_action = utils.perturb(robo_action, 0.05, self.np_random)
# when call info, should call before sim_step() as in v4 (append s_t+1 later)
# info will be used to construct D input outside.
past_info = self.construct_past_traj_window()
# # TODO
# if self.pretrain_dyn:
# # self.state_id = self._p.saveState()
# self.img_obs, pre_s_i = self.rollout_one_step_imaginary() # takes the old self.obs
# # img_obs = self.rollout_one_step_imaginary_same_session()
# # self._p.restoreState(self.state_id)
# pre_s = self.robot.get_robot_raw_state_vec()
# # print(pre_s_i)
# # print(pre_s)
# assert np.allclose(pre_s, pre_s_i, atol=1e-5)
for _ in range(self.control_skip):
self.robot.apply_action(robo_action)
self.apply_scale_clip_conf_from_pi_new(env_action)
self._p.stepSimulation()
if self.render:
time.sleep(self._ts * 1.0)
self.timer += 1
obs_new = self.get_extended_observation() # and update past_obs_array
past_info += [self.past_obs_array[0]] # s_t+1
root_pos, _ = self.robot.get_link_com_xyz_orn(-1)
x_1 = root_pos[0]
self.velx = (x_1 - x_0) / (self.control_skip * self._ts)
y_1 = root_pos[1]
height = root_pos[2]
q, dq = self.robot.get_q_dq(self.robot.ctrl_dofs)
# print(np.max(np.abs(dq)))
# in_support = self.robot.is_root_com_in_support()
if not self.pretrain_dyn:
reward = self.ab # alive bonus
tar = np.minimum(self.timer / 500, self.max_tar_vel)
reward += np.minimum(self.velx, tar) * self.vel_r_weight
# print("v", self.velx, "tar", tar)
reward += -self.energy_weight * np.square(robo_action).sum()
# print("act norm", -self.energy_weight * np.square(a).sum())
pos_mid = 0.5 * (self.robot.ll + self.robot.ul)
q_scaled = 2 * (q - pos_mid) / (self.robot.ul - self.robot.ll)
joints_at_limit = np.count_nonzero(np.abs(q_scaled) > 0.97)
reward += -self.jl_weight * joints_at_limit
# print("jl", -self.jl_weight * joints_at_limit)
reward += -np.minimum(np.sum(np.square(dq)) * self.dq_pen_weight, 5.0)
weight = np.array([2.0, 1.0, 1.0] * 4)
reward += -np.minimum(np.sum(np.square(q - self.robot.init_q) * weight) * self.q_pen_weight, 5.0)
# print("vel pen", -np.minimum(np.sum(np.abs(dq)) * self.dq_pen_weight, 5.0))
# print("pos pen", -np.minimum(np.sum(np.square(q - self.robot.init_q)) * self.q_pen_weight, 5.0))
y_1 = root_pos[1]
reward += -y_1 * 0.5
# print("dev pen", -y_1*0.5)
else:
# reward = self.calc_obs_dist_pretrain(self.img_obs[:-4], self.obs[:len(self.img_obs[:-4])])
reward = 0 # TODO
# print("______")
# print(in_support)
# print("h", height)
# print("dq.", np.abs(dq))
# print((np.abs(dq) < 50).all())
# print("------")
# conf policy will not have body-in-contact flag
not_done = (np.abs(dq) < 90).all() and (height > 0.2) and (height < 1.0)
# not_done = (abs(y_1) < 5.0) and (height > 0.1) and (height < 1.0) and (rpy[2] > 0.1)
# not_done = True
#
# if not not_done:
# print(self.ratio.shape)
# labels = list("123456789ABC")
# data = self.ratio
# from matplotlib import pyplot as plt
# width = 0.4
# fig, ax = plt.subplots()
# for i, l in enumerate(labels):
# x = np.ones(data.shape[0]) * i + (np.random.rand(data.shape[0]) * width - width / 2.)
# ax.scatter(x, data[:, i], s=25)
# median = np.median(data[:, i])
# ax.plot([i - width / 2., i + width / 2.], [median, median], color="k")
#
# plt.ylim(-5, 5)
# ax.set_xticks(range(len(labels)))
# ax.set_xticklabels(labels)
# plt.show()
# self.ratio = None
# if not self.train_dyn:
# dis_action = self.feat_select_func(self.obs)
# dis_action = utils.wrap(dis_action, is_cuda=self.cuda_env)
# d_score = self.discri.predict_prob_single_step(dis_state, dis_action)
# self.d_scores.append(utils.unwrap(d_score, is_cuda=self.cuda_env))
# # if len(self.d_scores) > 20 and np.mean(self.d_scores[-20:]) < 0.4:
# # not_done = False
# # if not not_done or self.timer==1000:
# # print(np.mean(self.d_scores))
return obs_new, reward, not not_done, {"sas_window": past_info}
# def return_imaginary_obs(self):
# # mods self.obs
# obs_i = np.copy(self.obs)
# # obs_i[:len(self.img_obs[:-4])] = self.img_obs[:-4]
# obs_i[:len(self.img_obs)] = self.img_obs
# return obs_i
def apply_scale_clip_conf_from_pi_new(self, con_f):
approx_mass = 26.0
max_fz = approx_mass * 9.81 * 2 # 2mg # TODO
for foot_ind, link in enumerate(self.robot.feet):
this_con_f = np.tanh(con_f[foot_ind * 3: (foot_ind + 1) * 3]) # [-1 ,1]
pos, _ = self.robot.get_link_com_xyz_orn(link, fk=1)
if pos[2] < 0.01:
# first dim represents fz
# fz = np.abs(this_con_f[0]) * max_fz
fz = (this_con_f[0] + 1) / 2.0 * max_fz
else:
fz = 0.0
fx = this_con_f[1] * 1.5 * fz
fy = this_con_f[2] * 1.5 * fz
utils.apply_external_world_force_on_local_point(self.robot.go_id, link,
[fx, fy, fz],
[0, 0, 0],
self._p)
def construct_past_traj_window(self):
# st, ... st-9, at, ..., at-9
# call this before s_t+1 enters deque
# order does not matter as long as it is the same in policy & expert batch
# print(list(self.past_obs_array) + list(self.past_act_array))
return list(self.past_obs_array) + list(self.past_bact_array)
def get_ave_dx(self):
return self.velx
def get_dist(self):
return self.robot.get_link_com_xyz_orn(-1)[0][0]
def get_extended_observation(self):
# with vel false
cur_state = self.robot.get_robot_observation(with_vel=False)
if self.obs_noise:
cur_state = utils.perturb(cur_state, 0.1, self.np_random)
# then update past obs
utils.push_recent_value(self.past_obs_array, cur_state)
# then construct behavior obs
b_obs_all = utils.select_and_merge_from_s_a(
s_mt=list(self.past_obs_array),
a_mt=list(self.past_bact_array),
s_idx=self.behavior_past_obs_t_idx,
a_idx=np.array([])
)
# if train motor, return behavior obs and we are done
if not self.train_dyn:
return b_obs_all
# else, train dyn
# rollout b_pi
obs_nn = utils.wrap(b_obs_all, is_cuda=self.cuda_env)
with torch.no_grad():
_, action_nn, _, self.recurrent_hidden_states = self.go_actor_critic.act(
obs_nn, self.recurrent_hidden_states, self.masks, deterministic=False
)
b_cur_act = list(utils.unwrap(action_nn, is_cuda=self.cuda_env))
b_cur_act = utils.perturb(b_cur_act, self.enlarge_act_range, self.np_random)
b_cur_act = np.tanh(b_cur_act)
# Store action after tanh (-1,1)
utils.push_recent_value(self.past_bact_array, b_cur_act)
# construct G obs from updated past obs&b_act
g_obs_all = utils.select_and_merge_from_s_a(
s_mt=list(self.past_obs_array),
a_mt=list(self.past_bact_array),
s_idx=self.generator_past_obs_t_idx,
a_idx=self.generator_past_act_t_idx
)
return g_obs_all
def seed(self, seed=None):
self.np_random, seed = gym.utils.seeding.np_random(seed)
self.robot.np_random = self.np_random # use the same np_randomizer for robot as for env
return [seed]
def getSourceCode(self):
s = inspect.getsource(type(self))
s = s + inspect.getsource(type(self.robot))
return s
def cam_track_torso_link(self):
distance = 2
yaw = 0
root_pos, _ = self.robot.get_link_com_xyz_orn(-1)
distance -= root_pos[1]
self._p.resetDebugVisualizerCamera(distance, yaw, -20, [root_pos[0], 0.0, 0.4])
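if __name__ == "__main__":
    # A hedged rollout sketch, assuming the pretrained behavior policy under
    # `behavior_dir` (default constructor arguments) is available on disk;
    # otherwise construction of the env will fail. Random actions are used only
    # to illustrate the reset/step interface defined above.
    env = LaikagoConFEnvV4(render=False, cuda_env=False)
    obs = env.reset()
    for _ in range(100):
        action = env.action_space.sample()
        obs, reward, done, info = env.step(action)
        if done:
            obs = env.reset()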
|
[
"numpy.abs",
"gan.utils.apply_external_world_force_on_local_point",
"torch.no_grad",
"os.path.join",
"collections.deque",
"gym.utils.seeding.np_random",
"gan.utils.push_recent_value",
"gan.utils.perturb",
"gan.utils.load",
"numpy.minimum",
"numpy.tanh",
"pybullet_utils.bullet_client.BulletClient",
"numpy.square",
"time.sleep",
"inspect.currentframe",
"gan.utils.unwrap",
"numpy.array",
"gym.spaces.Box",
"gan.utils.wrap"
] |
[((3675, 3691), 'collections.deque', 'deque', ([], {'maxlen': '(10)'}), '(maxlen=10)\n', (3680, 3691), False, 'from collections import deque\n'), ((3723, 3739), 'collections.deque', 'deque', ([], {'maxlen': '(10)'}), '(maxlen=10)\n', (3728, 3739), False, 'from collections import deque\n'), ((5809, 5873), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(-np.inf * obs_dummy)', 'high': '(np.inf * obs_dummy)'}), '(low=-np.inf * obs_dummy, high=np.inf * obs_dummy)\n', (5823, 5873), False, 'import gym, gym.utils.seeding, gym.spaces\n'), ((19150, 19205), 'gan.utils.push_recent_value', 'utils.push_recent_value', (['self.past_obs_array', 'cur_state'], {}), '(self.past_obs_array, cur_state)\n', (19173, 19205), False, 'from gan import utils\n'), ((19665, 19709), 'gan.utils.wrap', 'utils.wrap', (['b_obs_all'], {'is_cuda': 'self.cuda_env'}), '(b_obs_all, is_cuda=self.cuda_env)\n', (19675, 19709), False, 'from gan import utils\n'), ((20019, 20083), 'gan.utils.perturb', 'utils.perturb', (['b_cur_act', 'self.enlarge_act_range', 'self.np_random'], {}), '(b_cur_act, self.enlarge_act_range, self.np_random)\n', (20032, 20083), False, 'from gan import utils\n'), ((20104, 20122), 'numpy.tanh', 'np.tanh', (['b_cur_act'], {}), '(b_cur_act)\n', (20111, 20122), True, 'import numpy as np\n'), ((20173, 20229), 'gan.utils.push_recent_value', 'utils.push_recent_value', (['self.past_bact_array', 'b_cur_act'], {}), '(self.past_bact_array, b_cur_act)\n', (20196, 20229), False, 'from gan import utils\n'), ((20623, 20656), 'gym.utils.seeding.np_random', 'gym.utils.seeding.np_random', (['seed'], {}), '(seed)\n', (20650, 20656), False, 'import gym, gym.utils.seeding, gym.spaces\n'), ((922, 944), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (942, 944), False, 'import inspect\n'), ((2930, 2986), 'pybullet_utils.bullet_client.BulletClient', 'bullet_client.BulletClient', ([], {'connection_mode': 'pybullet.GUI'}), '(connection_mode=pybullet.GUI)\n', (2956, 2986), False, 'from pybullet_utils import bullet_client\n'), ((3023, 3051), 'pybullet_utils.bullet_client.BulletClient', 'bullet_client.BulletClient', ([], {}), '()\n', (3049, 3051), False, 'from pybullet_utils import bullet_client\n'), ((4093, 4166), 'gan.utils.load', 'utils.load', (['behavior_dir', 'behavior_env_name', 'self.cuda_env', 'behavior_iter'], {}), '(behavior_dir, behavior_env_name, self.cuda_env, behavior_iter)\n', (4103, 4166), False, 'from gan import utils\n'), ((4515, 4573), 'gan.utils.load', 'utils.load', (['dyn_dir', 'dyn_env_name', 'self.cuda_env', 'dyn_iter'], {}), '(dyn_dir, dyn_env_name, self.cuda_env, dyn_iter)\n', (4525, 4573), False, 'from gan import utils\n'), ((10732, 10752), 'numpy.tanh', 'np.tanh', (['robo_action'], {}), '(robo_action)\n', (10739, 10752), True, 'import numpy as np\n'), ((10807, 10865), 'gan.utils.push_recent_value', 'utils.push_recent_value', (['self.past_bact_array', 'robo_action'], {}), '(self.past_bact_array, robo_action)\n', (10830, 10865), False, 'from gan import utils\n'), ((11229, 11274), 'gan.utils.wrap', 'utils.wrap', (['env_pi_obs'], {'is_cuda': 'self.cuda_env'}), '(env_pi_obs, is_cuda=self.cuda_env)\n', (11239, 11274), False, 'from gan import utils\n'), ((11544, 11594), 'gan.utils.unwrap', 'utils.unwrap', (['env_action_nn'], {'is_cuda': 'self.cuda_env'}), '(env_action_nn, is_cuda=self.cuda_env)\n', (11556, 11594), False, 'from gan import utils\n'), ((12337, 12385), 'gan.utils.perturb', 'utils.perturb', (['robo_action', '(0.05)', 'self.np_random'], {}), '(robo_action, 0.05, self.np_random)\n', (12350, 
12385), False, 'from gan import utils\n'), ((13985, 14031), 'numpy.minimum', 'np.minimum', (['(self.timer / 500)', 'self.max_tar_vel'], {}), '(self.timer / 500, self.max_tar_vel)\n', (13995, 14031), True, 'import numpy as np\n'), ((14726, 14755), 'numpy.array', 'np.array', (['([2.0, 1.0, 1.0] * 4)'], {}), '([2.0, 1.0, 1.0] * 4)\n', (14734, 14755), True, 'import numpy as np\n'), ((17655, 17702), 'numpy.tanh', 'np.tanh', (['con_f[foot_ind * 3:(foot_ind + 1) * 3]'], {}), '(con_f[foot_ind * 3:(foot_ind + 1) * 3])\n', (17662, 17702), True, 'import numpy as np\n'), ((18105, 18214), 'gan.utils.apply_external_world_force_on_local_point', 'utils.apply_external_world_force_on_local_point', (['self.robot.go_id', 'link', '[fx, fy, fz]', '[0, 0, 0]', 'self._p'], {}), '(self.robot.go_id, link, [fx,\n fy, fz], [0, 0, 0], self._p)\n', (18152, 18214), False, 'from gan import utils\n'), ((19064, 19109), 'gan.utils.perturb', 'utils.perturb', (['cur_state', '(0.1)', 'self.np_random'], {}), '(cur_state, 0.1, self.np_random)\n', (19077, 19109), False, 'from gan import utils\n'), ((19723, 19738), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (19736, 19738), False, 'import torch\n'), ((19951, 19997), 'gan.utils.unwrap', 'utils.unwrap', (['action_nn'], {'is_cuda': 'self.cuda_env'}), '(action_nn, is_cuda=self.cuda_env)\n', (19963, 19997), False, 'from gan import utils\n'), ((5604, 5638), 'numpy.array', 'np.array', (['([-1.0] * self.action_dim)'], {}), '([-1.0] * self.action_dim)\n', (5612, 5638), True, 'import numpy as np\n'), ((5687, 5721), 'numpy.array', 'np.array', (['([+1.0] * self.action_dim)'], {}), '([+1.0] * self.action_dim)\n', (5695, 5721), True, 'import numpy as np\n'), ((6453, 6498), 'os.path.join', 'os.path.join', (['currentdir', '"""assets/plane.urdf"""'], {}), "(currentdir, 'assets/plane.urdf')\n", (6465, 6498), False, 'import os\n'), ((11292, 11307), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11305, 11307), False, 'import torch\n'), ((13329, 13355), 'time.sleep', 'time.sleep', (['(self._ts * 1.0)'], {}), '(self._ts * 1.0)\n', (13339, 13355), False, 'import time\n'), ((14054, 14080), 'numpy.minimum', 'np.minimum', (['self.velx', 'tar'], {}), '(self.velx, tar)\n', (14064, 14080), True, 'import numpy as np\n'), ((19453, 19465), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (19461, 19465), True, 'import numpy as np\n'), ((14479, 14495), 'numpy.abs', 'np.abs', (['q_scaled'], {}), '(q_scaled)\n', (14485, 14495), True, 'import numpy as np\n'), ((14193, 14215), 'numpy.square', 'np.square', (['robo_action'], {}), '(robo_action)\n', (14202, 14215), True, 'import numpy as np\n'), ((15587, 15597), 'numpy.abs', 'np.abs', (['dq'], {}), '(dq)\n', (15593, 15597), True, 'import numpy as np\n'), ((14663, 14676), 'numpy.square', 'np.square', (['dq'], {}), '(dq)\n', (14672, 14676), True, 'import numpy as np\n'), ((14797, 14829), 'numpy.square', 'np.square', (['(q - self.robot.init_q)'], {}), '(q - self.robot.init_q)\n', (14806, 14829), True, 'import numpy as np\n')]
|
"""
This script acts as a placeholder to set a probability threshold `b` for
candidates and compare the candidates above that threshold with our gold data.
"""
import csv
import logging
import os
import pickle
from enum import Enum
import numpy as np
from tqdm import tqdm
from hack.transistors.transistor_utils import (
Score,
compare_entities,
entity_level_scores,
get_gold_set,
get_implied_parts,
gold_set_to_dic,
)
# Configure logging for Hack
logging.basicConfig(
format="[%(asctime)s][%(levelname)s] %(name)s:%(lineno)s - %(message)s",
level=logging.DEBUG,
handlers=[
logging.FileHandler(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "analysis.log")
),
logging.StreamHandler(),
],
)
logger = logging.getLogger(__name__)
# Enum for tracking
class Relation(Enum):
STG_TEMP_MIN = "stg_temp_min"
STG_TEMP_MAX = "stg_temp_max"
POLARITY = "polarity"
CE_V_MAX = "ce_v_max"
def load_parts_by_doc():
dirname = os.path.dirname(os.path.abspath(__file__))
pickle_file = os.path.join(dirname, "data/parts_by_doc_new.pkl")
with open(pickle_file, "rb") as f:
return pickle.load(f)
def capitalize_filenames(filenames):
output = set()
for filename in filenames:
output.add(filename.upper())
return output
def print_score(score, description):
logger.info("===================================================")
logger.info(description)
logger.info("===================================================")
logger.info(f"Corpus Precision {score.prec:.3f}")
logger.info(f"Corpus Recall {score.rec:.3f}")
logger.info(f"Corpus F1 {score.f1:.3f}")
logger.info("---------------------------------------------------")
logger.info(
f"TP: {len(score.TP)} " f"| FP: {len(score.FP)} " f"| FN: {len(score.FN)}"
)
logger.info("===================================================\n")
def get_entity_set(file, parts_by_doc, b=0.0):
entities = set()
errors = set()
with open(file, "r") as input:
reader = csv.reader(input)
for line in reader:
try:
(doc, part, val, score) = line
if float(score) > b:
# Add implied parts as well
for p in get_implied_parts(part, doc, parts_by_doc):
entities.add((doc, p, val))
except KeyError:
if doc not in errors:
logger.warning(f"{doc} was not found in parts_by_doc.")
errors.add(doc)
continue
except Exception as e:
logger.error(f"{e} while getting entity set from {file}.")
return entities
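# Each row of the CSV read by get_entity_set() above is expected to unpack as
# (doc, part, val, score): a document name, a part number, the extracted
# attribute value, and the model's probability for that candidate. Only rows
# with probability strictly greater than `b` are kept, together with any
# implied parts returned by get_implied_parts().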
def get_parts(entities):
parts = set()
for (doc, part, val) in entities:
parts.add(part)
return parts
def get_filenames(entities):
filenames = set()
for (doc, part, val) in entities:
filenames.add(doc)
return filenames
def print_filenames_to_file(entities, outfile):
with open(outfile, "w") as outfile:
writer = csv.writer(outfile)
for (doc, part, val) in entities:
writer.writerow([doc])
def get_filenames_from_file(file):
filenames = set()
with open(file, "r") as input:
reader = csv.reader(input)
for line in reader:
filenames.add(line[0].upper())
return filenames
def filter_filenames(entities, filenames):
result = set()
for (doc, part, val) in entities:
if doc in filenames:
result.add((doc, part, val))
if len(result) == 0:
logger.debug(
f"Filtering for {len(get_filenames(entities))} "
+ "entity filenames turned up empty."
)
return result
def main(
num=100,
relation=Relation.CE_V_MAX.value,
devfile="ce_v_max_dev_probs.csv",
testfile="ce_v_max_test_probs.csv",
outfile="analysis/ce_v_max_analysis_discrepancies.csv",
debug=False,
):
# Define output
dirname = os.path.dirname(os.path.abspath(__file__))
discrepancy_file = os.path.join(dirname, outfile)
# Analysis
gold_file = os.path.join(dirname, "data/analysis/our_gold.csv")
filenames_file = os.path.join(dirname, "data/analysis/filenames.csv")
filenames = capitalize_filenames(get_filenames_from_file(filenames_file))
# logger.info(f"Analysis dataset is {len(filenames)}" + " filenames long.")
gold = filter_filenames(
get_gold_set(gold=[gold_file], attribute=relation), filenames
)
# logger.info(f"Original gold set is {len(get_filenames(gold))} filenames long.")
best_score = Score(0, 0, 0, [], [], [])
best_b = 0
best_entities = set()
# Test
test_file = os.path.join(dirname, testfile)
test_filenames = capitalize_filenames(
get_filenames_from_file(os.path.join(dirname, "data/test/filenames.csv"))
)
test_goldfile = os.path.join(dirname, "data/test/test_gold.csv")
test_gold = filter_filenames(
get_gold_set(gold=[test_goldfile], attribute=relation), test_filenames
)
best_test_score = Score(0, 0, 0, [], [], [])
best_test_b = 0
best_test_entities = set()
# Dev
dev_file = os.path.join(dirname, devfile)
dev_filenames = capitalize_filenames(
get_filenames_from_file(os.path.join(dirname, "data/dev/filenames.csv"))
)
dev_goldfile = os.path.join(dirname, "data/dev/dev_gold.csv")
dev_gold = filter_filenames(
get_gold_set(gold=[dev_goldfile], attribute=relation), dev_filenames
)
best_dev_score = Score(0, 0, 0, [], [], [])
best_dev_b = 0
best_dev_entities = set()
# Iterate over `b` values
logger.info(f"Determining best b...")
parts_by_doc = load_parts_by_doc()
for b in tqdm(np.linspace(0, 1, num=num)):
# Dev and Test
dev_entities = get_entity_set(dev_file, parts_by_doc, b=b)
test_entities = get_entity_set(test_file, parts_by_doc, b=b)
# Analysis (combo of dev and test)
entities = filter_filenames(
dev_entities.union(test_entities), get_filenames_from_file(filenames_file)
)
# Score entities against gold data and generate comparison CSV
dev_score = entity_level_scores(
dev_entities, attribute=relation, docs=dev_filenames
)
test_score = entity_level_scores(
test_entities, attribute=relation, docs=test_filenames
)
score = entity_level_scores(entities, attribute=relation, docs=filenames)
if dev_score.f1 > best_dev_score.f1:
best_dev_score = dev_score
best_dev_b = b
best_dev_entities = dev_entities
if test_score.f1 > best_test_score.f1:
best_test_score = test_score
best_test_b = b
best_test_entities = test_entities
if score.f1 > best_score.f1:
best_score = score
best_b = b
best_entities = entities
if debug:
# Test
logger.info("Scoring for test set...")
logger.info(
f"Entity set is {len(get_filenames(best_test_entities))} filenames long."
)
logger.info(f"Gold set is {len(get_filenames(test_gold))} filenames long.")
print_score(
best_test_score,
description=f"Scoring on cands > {best_test_b:.3f} "
+ "against our gold labels.",
)
# Dev
logger.info("Scoring for dev set...")
logger.info(
f"Entity set is {len(get_filenames(best_dev_entities))} filenames long."
)
logger.info(f"Gold set is {len(get_filenames(dev_gold))} filenames long.")
print_score(
best_dev_score,
description=f"Scoring on cands > {best_dev_b:.3f} against our gold labels.",
)
logger.info("Scoring for analysis set...")
# Analysis
# logger.info(f"Entity set is {len(get_filenames(best_entities))} filenames long.")
# logger.info(f"Gold set is {len(get_filenames(gold))} filenames long.")
print_score(
best_score,
description=f"Scoring on cands > {best_b:.3f} against our gold labels.",
)
compare_entities(
set(best_score.FP),
attribute=relation,
type="FP",
outfile=discrepancy_file,
gold_dic=gold_set_to_dic(gold),
)
compare_entities(
set(best_score.FN),
attribute=relation,
type="FN",
outfile=discrepancy_file,
append=True,
entity_dic=gold_set_to_dic(best_entities),
)
|
[
"os.path.abspath",
"hack.transistors.transistor_utils.Score",
"csv.reader",
"csv.writer",
"hack.transistors.transistor_utils.get_implied_parts",
"logging.StreamHandler",
"hack.transistors.transistor_utils.entity_level_scores",
"pickle.load",
"numpy.linspace",
"hack.transistors.transistor_utils.get_gold_set",
"os.path.join",
"hack.transistors.transistor_utils.gold_set_to_dic",
"logging.getLogger"
] |
[((747, 774), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (764, 774), False, 'import logging\n'), ((1041, 1091), 'os.path.join', 'os.path.join', (['dirname', '"""data/parts_by_doc_new.pkl"""'], {}), "(dirname, 'data/parts_by_doc_new.pkl')\n", (1053, 1091), False, 'import os\n'), ((4082, 4112), 'os.path.join', 'os.path.join', (['dirname', 'outfile'], {}), '(dirname, outfile)\n', (4094, 4112), False, 'import os\n'), ((4145, 4196), 'os.path.join', 'os.path.join', (['dirname', '"""data/analysis/our_gold.csv"""'], {}), "(dirname, 'data/analysis/our_gold.csv')\n", (4157, 4196), False, 'import os\n'), ((4218, 4270), 'os.path.join', 'os.path.join', (['dirname', '"""data/analysis/filenames.csv"""'], {}), "(dirname, 'data/analysis/filenames.csv')\n", (4230, 4270), False, 'import os\n'), ((4638, 4664), 'hack.transistors.transistor_utils.Score', 'Score', (['(0)', '(0)', '(0)', '[]', '[]', '[]'], {}), '(0, 0, 0, [], [], [])\n', (4643, 4664), False, 'from hack.transistors.transistor_utils import Score, compare_entities, entity_level_scores, get_gold_set, get_implied_parts, gold_set_to_dic\n'), ((4734, 4765), 'os.path.join', 'os.path.join', (['dirname', 'testfile'], {}), '(dirname, testfile)\n', (4746, 4765), False, 'import os\n'), ((4917, 4965), 'os.path.join', 'os.path.join', (['dirname', '"""data/test/test_gold.csv"""'], {}), "(dirname, 'data/test/test_gold.csv')\n", (4929, 4965), False, 'import os\n'), ((5108, 5134), 'hack.transistors.transistor_utils.Score', 'Score', (['(0)', '(0)', '(0)', '[]', '[]', '[]'], {}), '(0, 0, 0, [], [], [])\n', (5113, 5134), False, 'from hack.transistors.transistor_utils import Score, compare_entities, entity_level_scores, get_gold_set, get_implied_parts, gold_set_to_dic\n'), ((5212, 5242), 'os.path.join', 'os.path.join', (['dirname', 'devfile'], {}), '(dirname, devfile)\n', (5224, 5242), False, 'import os\n'), ((5391, 5437), 'os.path.join', 'os.path.join', (['dirname', '"""data/dev/dev_gold.csv"""'], {}), "(dirname, 'data/dev/dev_gold.csv')\n", (5403, 5437), False, 'import os\n'), ((5576, 5602), 'hack.transistors.transistor_utils.Score', 'Score', (['(0)', '(0)', '(0)', '[]', '[]', '[]'], {}), '(0, 0, 0, [], [], [])\n', (5581, 5602), False, 'from hack.transistors.transistor_utils import Score, compare_entities, entity_level_scores, get_gold_set, get_implied_parts, gold_set_to_dic\n'), ((996, 1021), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1011, 1021), False, 'import os\n'), ((1146, 1160), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1157, 1160), False, 'import pickle\n'), ((2065, 2082), 'csv.reader', 'csv.reader', (['input'], {}), '(input)\n', (2075, 2082), False, 'import csv\n'), ((3085, 3104), 'csv.writer', 'csv.writer', (['outfile'], {}), '(outfile)\n', (3095, 3104), False, 'import csv\n'), ((3293, 3310), 'csv.reader', 'csv.reader', (['input'], {}), '(input)\n', (3303, 3310), False, 'import csv\n'), ((4032, 4057), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (4047, 4057), False, 'import os\n'), ((4466, 4516), 'hack.transistors.transistor_utils.get_gold_set', 'get_gold_set', ([], {'gold': '[gold_file]', 'attribute': 'relation'}), '(gold=[gold_file], attribute=relation)\n', (4478, 4516), False, 'from hack.transistors.transistor_utils import Score, compare_entities, entity_level_scores, get_gold_set, get_implied_parts, gold_set_to_dic\n'), ((5008, 5062), 'hack.transistors.transistor_utils.get_gold_set', 'get_gold_set', ([], {'gold': '[test_goldfile]', 'attribute': 
'relation'}), '(gold=[test_goldfile], attribute=relation)\n', (5020, 5062), False, 'from hack.transistors.transistor_utils import Score, compare_entities, entity_level_scores, get_gold_set, get_implied_parts, gold_set_to_dic\n'), ((5479, 5532), 'hack.transistors.transistor_utils.get_gold_set', 'get_gold_set', ([], {'gold': '[dev_goldfile]', 'attribute': 'relation'}), '(gold=[dev_goldfile], attribute=relation)\n', (5491, 5532), False, 'from hack.transistors.transistor_utils import Score, compare_entities, entity_level_scores, get_gold_set, get_implied_parts, gold_set_to_dic\n'), ((5782, 5808), 'numpy.linspace', 'np.linspace', (['(0)', '(1)'], {'num': 'num'}), '(0, 1, num=num)\n', (5793, 5808), True, 'import numpy as np\n'), ((6240, 6313), 'hack.transistors.transistor_utils.entity_level_scores', 'entity_level_scores', (['dev_entities'], {'attribute': 'relation', 'docs': 'dev_filenames'}), '(dev_entities, attribute=relation, docs=dev_filenames)\n', (6259, 6313), False, 'from hack.transistors.transistor_utils import Score, compare_entities, entity_level_scores, get_gold_set, get_implied_parts, gold_set_to_dic\n'), ((6357, 6432), 'hack.transistors.transistor_utils.entity_level_scores', 'entity_level_scores', (['test_entities'], {'attribute': 'relation', 'docs': 'test_filenames'}), '(test_entities, attribute=relation, docs=test_filenames)\n', (6376, 6432), False, 'from hack.transistors.transistor_utils import Score, compare_entities, entity_level_scores, get_gold_set, get_implied_parts, gold_set_to_dic\n'), ((6471, 6536), 'hack.transistors.transistor_utils.entity_level_scores', 'entity_level_scores', (['entities'], {'attribute': 'relation', 'docs': 'filenames'}), '(entities, attribute=relation, docs=filenames)\n', (6490, 6536), False, 'from hack.transistors.transistor_utils import Score, compare_entities, entity_level_scores, get_gold_set, get_implied_parts, gold_set_to_dic\n'), ((704, 727), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (725, 727), False, 'import logging\n'), ((4841, 4889), 'os.path.join', 'os.path.join', (['dirname', '"""data/test/filenames.csv"""'], {}), "(dirname, 'data/test/filenames.csv')\n", (4853, 4889), False, 'import os\n'), ((5317, 5364), 'os.path.join', 'os.path.join', (['dirname', '"""data/dev/filenames.csv"""'], {}), "(dirname, 'data/dev/filenames.csv')\n", (5329, 5364), False, 'import os\n'), ((8345, 8366), 'hack.transistors.transistor_utils.gold_set_to_dic', 'gold_set_to_dic', (['gold'], {}), '(gold)\n', (8360, 8366), False, 'from hack.transistors.transistor_utils import Score, compare_entities, entity_level_scores, get_gold_set, get_implied_parts, gold_set_to_dic\n'), ((8545, 8575), 'hack.transistors.transistor_utils.gold_set_to_dic', 'gold_set_to_dic', (['best_entities'], {}), '(best_entities)\n', (8560, 8575), False, 'from hack.transistors.transistor_utils import Score, compare_entities, entity_level_scores, get_gold_set, get_implied_parts, gold_set_to_dic\n'), ((2289, 2331), 'hack.transistors.transistor_utils.get_implied_parts', 'get_implied_parts', (['part', 'doc', 'parts_by_doc'], {}), '(part, doc, parts_by_doc)\n', (2306, 2331), False, 'from hack.transistors.transistor_utils import Score, compare_entities, entity_level_scores, get_gold_set, get_implied_parts, gold_set_to_dic\n'), ((641, 666), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (656, 666), False, 'import os\n')]
|
import bcolz
import numpy as np
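# small helpers that persist and reload numpy arrays as bcolz on-disk carrays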
def save_array(fname, arr):
c=bcolz.carray(arr, rootdir=fname, mode='w')
c.flush()
def load_array(fname):
return bcolz.open(fname)[:]
probs = np.load('/home/chicm/ml/kgdata/carvana/results/single/UNet_double_1024_5/submit/049/probs-part10.8.npy')
save_array('/home/chicm/ml/kgdata/carvana/results/single/UNet_double_1024_5/submit/ensemble/probs-part10.8.npy', probs)
|
[
"numpy.load",
"bcolz.open",
"bcolz.carray"
] |
[((191, 305), 'numpy.load', 'np.load', (['"""/home/chicm/ml/kgdata/carvana/results/single/UNet_double_1024_5/submit/049/probs-part10.8.npy"""'], {}), "(\n '/home/chicm/ml/kgdata/carvana/results/single/UNet_double_1024_5/submit/049/probs-part10.8.npy'\n )\n", (198, 305), True, 'import numpy as np\n'), ((68, 110), 'bcolz.carray', 'bcolz.carray', (['arr'], {'rootdir': 'fname', 'mode': '"""w"""'}), "(arr, rootdir=fname, mode='w')\n", (80, 110), False, 'import bcolz\n'), ((160, 177), 'bcolz.open', 'bcolz.open', (['fname'], {}), '(fname)\n', (170, 177), False, 'import bcolz\n')]
|
"""Driver class for SpaceMouse controller.
This class provides driver support for SpaceMouse on Mac OS X.
In particular, we assume you are using a SpaceMouse Wireless by default.
To set up a new SpaceMouse controller:
1. Download and install driver from https://www.3dconnexion.com/service/drivers.html
2. Install hidapi library through pip
    (make sure you uninstall the hid package first if it is installed).
3. Make sure SpaceMouse is connected before running the script
4. (Optional) Based on the model of SpaceMouse, you might need to change the
vendor id and product id that correspond to the device.
For Linux support, you can find open-source Linux drivers and SDKs online.
See http://spacenav.sourceforge.net/
"""
import time
import threading
from collections import namedtuple
import numpy as np
try:
import hid
except ModuleNotFoundError as exc:
raise ImportError("Unable to load module hid, required to interface with SpaceMouse. "
"Only Mac OS X is officially supported. Install the additional "
"requirements with `pip install -r requirements-ik.txt`") from exc
from spirl.data.block_stacking.src.robosuite.utils.transform_utils import rotation_matrix
from spirl.data.block_stacking.src.robosuite import Device
AxisSpec = namedtuple("AxisSpec", ["channel", "byte1", "byte2", "scale"])
SPACE_MOUSE_SPEC = {
"x": AxisSpec(channel=1, byte1=1, byte2=2, scale=1),
"y": AxisSpec(channel=1, byte1=3, byte2=4, scale=-1),
"z": AxisSpec(channel=1, byte1=5, byte2=6, scale=-1),
"roll": AxisSpec(channel=1, byte1=7, byte2=8, scale=-1),
"pitch": AxisSpec(channel=1, byte1=9, byte2=10, scale=-1),
"yaw": AxisSpec(channel=1, byte1=11, byte2=12, scale=1),
}
def to_int16(y1, y2):
"""Convert two 8 bit bytes to a signed 16 bit integer."""
x = (y1) | (y2 << 8)
if x >= 32768:
x = -(65536 - x)
return x
def scale_to_control(x, axis_scale=350., min_v=-1.0, max_v=1.0):
"""Normalize raw HID readings to target range."""
x = x / axis_scale
x = min(max(x, min_v), max_v)
return x
def convert(b1, b2):
"""Converts SpaceMouse message to commands."""
return scale_to_control(to_int16(b1, b2))
class SpaceMouse(Device):
"""A minimalistic driver class for SpaceMouse with HID library."""
def __init__(self, vendor_id=9583, product_id=50735):
"""Initialize a SpaceMouse handler.
Args:
vendor_id: HID device vendor id
product_id: HID device product id
Note:
Use hid.enumerate() to view all USB human interface devices (HID).
Make sure SpaceMouse is detected before running the script.
You can look up its vendor/product id from this method.
"""
print("Opening SpaceMouse device")
self.device = hid.device()
self.device.open(vendor_id, product_id) # SpaceMouse
print("Manufacturer: %s" % self.device.get_manufacturer_string())
print("Product: %s" % self.device.get_product_string())
self._display_controls()
self.single_click_and_hold = False
self._control = [0., 0., 0., 0., 0., 0.]
self._reset_state = 0
self.rotation = np.array([[-1., 0., 0.], [0., 1., 0.], [0., 0., -1.]])
self._enabled = False
# launch a new listener thread to listen to SpaceMouse
self.thread = threading.Thread(target=self.run)
self.thread.daemon = True
self.thread.start()
def _display_controls(self):
"""
Method to pretty print controls.
"""
def print_command(char, info):
char += " " * (30 - len(char))
print("{}\t{}".format(char, info))
print("")
print_command("Control", "Command")
print_command("Right button", "reset simulation")
print_command("Left button (hold)", "close gripper")
print_command("Move mouse laterally", "move arm horizontally in x-y plane")
print_command("Move mouse vertically", "move arm vertically")
print_command(
"Twist mouse about an axis", "rotate arm about a corresponding axis"
)
print_command("ESC", "quit")
print("")
def _reset_internal_state(self):
"""
Resets internal state of controller, except for the reset signal.
"""
self.rotation = np.array([[-1., 0., 0.], [0., 1., 0.], [0., 0., -1.]])
def start_control(self):
"""
Method that should be called externally before controller can
start receiving commands.
"""
self._reset_internal_state()
self._reset_state = 0
self._enabled = True
def get_controller_state(self):
"""Returns the current state of the 3d mouse, a dictionary of pos, orn, grasp, and reset."""
dpos = self.control[:3] * 0.005
roll, pitch, yaw = self.control[3:] * 0.005
self.grasp = self.control_gripper
# convert RPY to an absolute orientation
drot1 = rotation_matrix(angle=-pitch, direction=[1., 0, 0], point=None)[:3, :3]
drot2 = rotation_matrix(angle=roll, direction=[0, 1., 0], point=None)[:3, :3]
drot3 = rotation_matrix(angle=yaw, direction=[0, 0, 1.], point=None)[:3, :3]
self.rotation = self.rotation.dot(drot1.dot(drot2.dot(drot3)))
return dict(
dpos=dpos, rotation=self.rotation, grasp=self.grasp, reset=self._reset_state
)
def run(self):
"""Listener method that keeps pulling new messages."""
t_last_click = -1
while True:
d = self.device.read(13)
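            # d[0] identifies the report type: 1 = 6-DoF sensor readings, 3 = side-button events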
if d is not None and self._enabled:
if d[0] == 1: ## readings from 6-DoF sensor
self.y = convert(d[1], d[2])
self.x = convert(d[3], d[4])
self.z = convert(d[5], d[6]) * -1.0
self.roll = convert(d[7], d[8])
self.pitch = convert(d[9], d[10])
self.yaw = convert(d[11], d[12])
self._control = [
self.x,
self.y,
self.z,
self.roll,
self.pitch,
self.yaw,
]
elif d[0] == 3: ## readings from the side buttons
# press left button
if d[1] == 1:
t_click = time.time()
elapsed_time = t_click - t_last_click
t_last_click = t_click
self.single_click_and_hold = True
# release left button
if d[1] == 0:
self.single_click_and_hold = False
# right button is for reset
if d[1] == 2:
self._reset_state = 1
self._enabled = False
self._reset_internal_state()
@property
def control(self):
"""Returns 6-DoF control."""
return np.array(self._control)
@property
def control_gripper(self):
"""Maps internal states into gripper commands."""
if self.single_click_and_hold:
return 1.0
return 0
if __name__ == "__main__":
space_mouse = SpaceMouse()
for i in range(100):
print(space_mouse.control, space_mouse.control_gripper)
time.sleep(0.02)
|
[
"threading.Thread",
"hid.device",
"spirl.data.block_stacking.src.robosuite.utils.transform_utils.rotation_matrix",
"time.sleep",
"time.time",
"numpy.array",
"collections.namedtuple"
] |
[((1320, 1382), 'collections.namedtuple', 'namedtuple', (['"""AxisSpec"""', "['channel', 'byte1', 'byte2', 'scale']"], {}), "('AxisSpec', ['channel', 'byte1', 'byte2', 'scale'])\n", (1330, 1382), False, 'from collections import namedtuple\n'), ((2863, 2875), 'hid.device', 'hid.device', ([], {}), '()\n', (2873, 2875), False, 'import hid\n'), ((3259, 3322), 'numpy.array', 'np.array', (['[[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]]'], {}), '([[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]])\n', (3267, 3322), True, 'import numpy as np\n'), ((3430, 3463), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.run'}), '(target=self.run)\n', (3446, 3463), False, 'import threading\n'), ((4420, 4483), 'numpy.array', 'np.array', (['[[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]]'], {}), '([[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]])\n', (4428, 4483), True, 'import numpy as np\n'), ((7167, 7190), 'numpy.array', 'np.array', (['self._control'], {}), '(self._control)\n', (7175, 7190), True, 'import numpy as np\n'), ((7532, 7548), 'time.sleep', 'time.sleep', (['(0.02)'], {}), '(0.02)\n', (7542, 7548), False, 'import time\n'), ((5067, 5131), 'spirl.data.block_stacking.src.robosuite.utils.transform_utils.rotation_matrix', 'rotation_matrix', ([], {'angle': '(-pitch)', 'direction': '[1.0, 0, 0]', 'point': 'None'}), '(angle=-pitch, direction=[1.0, 0, 0], point=None)\n', (5082, 5131), False, 'from spirl.data.block_stacking.src.robosuite.utils.transform_utils import rotation_matrix\n'), ((5155, 5217), 'spirl.data.block_stacking.src.robosuite.utils.transform_utils.rotation_matrix', 'rotation_matrix', ([], {'angle': 'roll', 'direction': '[0, 1.0, 0]', 'point': 'None'}), '(angle=roll, direction=[0, 1.0, 0], point=None)\n', (5170, 5217), False, 'from spirl.data.block_stacking.src.robosuite.utils.transform_utils import rotation_matrix\n'), ((5241, 5302), 'spirl.data.block_stacking.src.robosuite.utils.transform_utils.rotation_matrix', 'rotation_matrix', ([], {'angle': 'yaw', 'direction': '[0, 0, 1.0]', 'point': 'None'}), '(angle=yaw, direction=[0, 0, 1.0], point=None)\n', (5256, 5302), False, 'from spirl.data.block_stacking.src.robosuite.utils.transform_utils import rotation_matrix\n'), ((6534, 6545), 'time.time', 'time.time', ([], {}), '()\n', (6543, 6545), False, 'import time\n')]
|
import numpy as np
from .combine_data import combination_column_range_map
from .load_dataset import load_dataset_np
def statistics(dataset):
if isinstance(dataset, str):
dataset = load_dataset_np(dataset_name=dataset)
if not isinstance(dataset, np.ndarray):
raise TypeError('dataset must be np.ndarray or the name of dataset(string).')
min = np.nanmin(dataset)
max = np.nanmax(dataset)
median = np.nanmedian(dataset)
mean = np.nanmean(dataset)
std = np.nanstd(dataset)
var = np.nanvar(dataset)
result = {
'min': min,
'max': max,
'median': median,
'mean': mean,
'std': std,
'var': var,
}
return result
def statistics_on_every_fields(dataset):
if isinstance(dataset, str):
dataset = load_dataset_np(dataset_name=dataset)
if not isinstance(dataset, np.ndarray):
raise TypeError('dataset must be np.ndarray or the name of dataset(string).')
result = dict()
inner_size = dataset.shape[-1]
for field_name, column_range in combination_column_range_map.items():
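        # skip field groups whose column range lies beyond this dataset's width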
if column_range[1] - 1 > inner_size:
continue
sub_dataset = dataset[:, column_range[0]-1: column_range[1]-1]
min = np.nanmin(sub_dataset)
max = np.nanmax(sub_dataset)
median = np.nanmedian(sub_dataset)
mean = np.nanmean(sub_dataset)
std = np.nanstd(sub_dataset)
var = np.nanvar(sub_dataset)
d = {
'min': min,
'max': max,
'median': median,
'mean': mean,
'std': std,
'var': var,
}
result[field_name] = d
return result
def show_statistics(dataset):
result = statistics(dataset)
print('Dataset:', dataset if isinstance(dataset, str) else dataset.shape)
print(result)
def show_statistics_on_every_fields(dataset):
result = statistics_on_every_fields(dataset)
print('Dataset:', dataset if isinstance(dataset, str) else dataset.shape)
print(result)
|
[
"numpy.nanmedian",
"numpy.nanstd",
"numpy.nanmin",
"numpy.nanvar",
"numpy.nanmax",
"numpy.nanmean"
] |
[((373, 391), 'numpy.nanmin', 'np.nanmin', (['dataset'], {}), '(dataset)\n', (382, 391), True, 'import numpy as np\n'), ((402, 420), 'numpy.nanmax', 'np.nanmax', (['dataset'], {}), '(dataset)\n', (411, 420), True, 'import numpy as np\n'), ((434, 455), 'numpy.nanmedian', 'np.nanmedian', (['dataset'], {}), '(dataset)\n', (446, 455), True, 'import numpy as np\n'), ((467, 486), 'numpy.nanmean', 'np.nanmean', (['dataset'], {}), '(dataset)\n', (477, 486), True, 'import numpy as np\n'), ((497, 515), 'numpy.nanstd', 'np.nanstd', (['dataset'], {}), '(dataset)\n', (506, 515), True, 'import numpy as np\n'), ((526, 544), 'numpy.nanvar', 'np.nanvar', (['dataset'], {}), '(dataset)\n', (535, 544), True, 'import numpy as np\n'), ((1254, 1276), 'numpy.nanmin', 'np.nanmin', (['sub_dataset'], {}), '(sub_dataset)\n', (1263, 1276), True, 'import numpy as np\n'), ((1291, 1313), 'numpy.nanmax', 'np.nanmax', (['sub_dataset'], {}), '(sub_dataset)\n', (1300, 1313), True, 'import numpy as np\n'), ((1331, 1356), 'numpy.nanmedian', 'np.nanmedian', (['sub_dataset'], {}), '(sub_dataset)\n', (1343, 1356), True, 'import numpy as np\n'), ((1372, 1395), 'numpy.nanmean', 'np.nanmean', (['sub_dataset'], {}), '(sub_dataset)\n', (1382, 1395), True, 'import numpy as np\n'), ((1410, 1432), 'numpy.nanstd', 'np.nanstd', (['sub_dataset'], {}), '(sub_dataset)\n', (1419, 1432), True, 'import numpy as np\n'), ((1447, 1469), 'numpy.nanvar', 'np.nanvar', (['sub_dataset'], {}), '(sub_dataset)\n', (1456, 1469), True, 'import numpy as np\n')]
|
import os
import functools
import numpy as np
from keras.models import Sequential
from keras.layers import Embedding, SimpleRNN, Dense, Dropout
from keras.callbacks import EarlyStopping
from utilnn import accuracy, fscore, coef
def load_data(labels_prefix):
"""
    @param labels_prefix: 'classification' or 'regression'
@return: (inputs_train, outputs_train, inputs_test, outputs_test)
"""
# Generate file path
inputs_train_file_path = os.path.join(
os.path.pardir, "data", "word-embedding" + "_train")
outputs_train_file_path = os.path.join(
os.path.pardir, "data", labels_prefix + "_train")
inputs_test_file_path = os.path.join(
os.path.pardir, "data", "word-embedding" + "_test")
outputs_test_file_path = os.path.join(
os.path.pardir, "data", "regression" + "_test") # test label always use regression
# Get data
with open(inputs_train_file_path, 'rb') as inputs_train_file:
inputs_train = np.load(inputs_train_file)
with open(outputs_train_file_path, 'rb') as outputs_train_file:
outputs_train = np.load(outputs_train_file)
with open(inputs_test_file_path, 'rb') as inputs_test_file:
inputs_test = np.load(inputs_test_file)
with open(outputs_test_file_path, 'rb') as outputs_test_file:
outputs_test = np.load(outputs_test_file)
# Return data
return (inputs_train, outputs_train, inputs_test, outputs_test)
def save_model(model, model_file_name):
model_file_path = os.path.join(
os.path.pardir, "models", model_file_name + ".h5")
model.save(model_file_path)
def rnn(inputs_train, outputs_train, inputs_test, outputs_test, loss, train_embedding):
"""
Recurrent neural network.
    @param loss: 'categorical_crossentropy' or 'mean_squared_error'
@param train_embedding: 0 - initialize with word_embedding_matrix, trainable=False
1 - initialize with word_embedding_matrix, trainable=True
2 - initialize with random matrix, trainable=True
"""
# Load word-embedding matrix
word_embedding_matrix_file_path = os.path.join(
os.path.pardir, "data", "word-embedding_matrix")
with open(word_embedding_matrix_file_path, 'rb') as word_embedding_matrix_file:
word_embedding_matrix = np.load(word_embedding_matrix_file)
# Split to train-set and validation-set
split_at = len(inputs_train) - len(inputs_train) * 2 // 10
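    # keep the first 80% of the training samples for training, the last 20% for validation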
(inputs_train, inputs_validation) = \
(inputs_train[:split_at], inputs_train[split_at:])
(outputs_train, outputs_validation) = \
(outputs_train[:split_at], outputs_train[split_at:])
# Build RNN model
if train_embedding == 0:
embedding_layer = Embedding(word_embedding_matrix.shape[0], word_embedding_matrix.shape[1], weights=[word_embedding_matrix],
input_length=inputs_train.shape[1], trainable=False)
elif train_embedding == 1:
embedding_layer = Embedding(word_embedding_matrix.shape[0], word_embedding_matrix.shape[1], weights=[word_embedding_matrix],
input_length=inputs_train.shape[1], trainable=True)
elif train_embedding == 2:
embedding_layer = Embedding(word_embedding_matrix.shape[0], word_embedding_matrix.shape[1],
input_length=inputs_train.shape[1], trainable=True)
else:
raise ValueError("train_embedding should be 0 or 1 or 2.")
model = Sequential()
model.add(embedding_layer)
model.add(SimpleRNN(128, unroll=True))
model.add(Dropout(0.5))
model.add(Dense(outputs_train.shape[1], activation='softmax'))
print(model.summary())
# compile
model.compile(loss=loss, optimizer='adam', metrics=['accuracy'])
# train
if loss == 'categorical_crossentropy':
early_stopping = EarlyStopping(
min_delta=0.005, patience=3, restore_best_weights=True)
elif loss == 'mean_squared_error':
early_stopping = EarlyStopping(
min_delta=0.0005, patience=3, restore_best_weights=True)
else:
raise ValueError(
"loss should be 'categorical_crossentropy' or 'mean_squared_error'.")
model.fit(inputs_train, outputs_train, epochs=100, batch_size=128,
validation_data=(inputs_validation, outputs_validation), callbacks=[early_stopping])
# evaluate
outputs_test_pred = np.asarray(model.predict(inputs_test))
acc_eval = accuracy(outputs_test, outputs_test_pred)
fscore_eval = fscore(outputs_test, outputs_test_pred)
coef_eval = coef(outputs_test, outputs_test_pred)
print("Evaluation: acc - %.4f - fscore: %.4f - coef: %.4f - pvalue: %.4f" %
(acc_eval, fscore_eval, coef_eval[0], coef_eval[1]))
# return model
return model
rnn_static = functools.partial(rnn, train_embedding=0)
rnn_non_static = functools.partial(rnn, train_embedding=1)
rnn_rand = functools.partial(rnn, train_embedding=2)
if __name__ == "__main__":
inputs_train, outputs_train, inputs_test, outputs_test = \
load_data("classification")
model = rnn_static(inputs_train, outputs_train, inputs_test, outputs_test,
loss='categorical_crossentropy')
save_model(model, "rnn_static_classification")
model = rnn_non_static(inputs_train, outputs_train, inputs_test, outputs_test,
loss='categorical_crossentropy')
save_model(model, "rnn_non_static_classification")
model = rnn_rand(inputs_train, outputs_train, inputs_test, outputs_test,
loss='categorical_crossentropy')
save_model(model, "rnn_rand_classification")
inputs_train, outputs_train, inputs_test, outputs_test = \
load_data("regression")
model = rnn_static(inputs_train, outputs_train, inputs_test, outputs_test,
loss='mean_squared_error')
save_model(model, "rnn_static_regression")
model = rnn_non_static(inputs_train, outputs_train, inputs_test, outputs_test,
loss='mean_squared_error')
save_model(model, "rnn_non_static_regression")
model = rnn_rand(inputs_train, outputs_train, inputs_test, outputs_test,
loss='mean_squared_error')
save_model(model, "rnn_rand_regression")
|
[
"keras.layers.SimpleRNN",
"functools.partial",
"numpy.load",
"utilnn.coef",
"utilnn.fscore",
"keras.layers.Dropout",
"utilnn.accuracy",
"keras.layers.Dense",
"keras.callbacks.EarlyStopping",
"keras.layers.Embedding",
"keras.models.Sequential",
"os.path.join"
] |
[((4815, 4856), 'functools.partial', 'functools.partial', (['rnn'], {'train_embedding': '(0)'}), '(rnn, train_embedding=0)\n', (4832, 4856), False, 'import functools\n'), ((4874, 4915), 'functools.partial', 'functools.partial', (['rnn'], {'train_embedding': '(1)'}), '(rnn, train_embedding=1)\n', (4891, 4915), False, 'import functools\n'), ((4927, 4968), 'functools.partial', 'functools.partial', (['rnn'], {'train_embedding': '(2)'}), '(rnn, train_embedding=2)\n', (4944, 4968), False, 'import functools\n'), ((458, 523), 'os.path.join', 'os.path.join', (['os.path.pardir', '"""data"""', "('word-embedding' + '_train')"], {}), "(os.path.pardir, 'data', 'word-embedding' + '_train')\n", (470, 523), False, 'import os\n'), ((563, 625), 'os.path.join', 'os.path.join', (['os.path.pardir', '"""data"""', "(labels_prefix + '_train')"], {}), "(os.path.pardir, 'data', labels_prefix + '_train')\n", (575, 625), False, 'import os\n'), ((663, 727), 'os.path.join', 'os.path.join', (['os.path.pardir', '"""data"""', "('word-embedding' + '_test')"], {}), "(os.path.pardir, 'data', 'word-embedding' + '_test')\n", (675, 727), False, 'import os\n'), ((766, 826), 'os.path.join', 'os.path.join', (['os.path.pardir', '"""data"""', "('regression' + '_test')"], {}), "(os.path.pardir, 'data', 'regression' + '_test')\n", (778, 826), False, 'import os\n'), ((1501, 1564), 'os.path.join', 'os.path.join', (['os.path.pardir', '"""models"""', "(model_file_name + '.h5')"], {}), "(os.path.pardir, 'models', model_file_name + '.h5')\n", (1513, 1564), False, 'import os\n'), ((2114, 2175), 'os.path.join', 'os.path.join', (['os.path.pardir', '"""data"""', '"""word-embedding_matrix"""'], {}), "(os.path.pardir, 'data', 'word-embedding_matrix')\n", (2126, 2175), False, 'import os\n'), ((3483, 3495), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3493, 3495), False, 'from keras.models import Sequential\n'), ((4467, 4508), 'utilnn.accuracy', 'accuracy', (['outputs_test', 'outputs_test_pred'], {}), '(outputs_test, outputs_test_pred)\n', (4475, 4508), False, 'from utilnn import accuracy, fscore, coef\n'), ((4527, 4566), 'utilnn.fscore', 'fscore', (['outputs_test', 'outputs_test_pred'], {}), '(outputs_test, outputs_test_pred)\n', (4533, 4566), False, 'from utilnn import accuracy, fscore, coef\n'), ((4583, 4620), 'utilnn.coef', 'coef', (['outputs_test', 'outputs_test_pred'], {}), '(outputs_test, outputs_test_pred)\n', (4587, 4620), False, 'from utilnn import accuracy, fscore, coef\n'), ((976, 1002), 'numpy.load', 'np.load', (['inputs_train_file'], {}), '(inputs_train_file)\n', (983, 1002), True, 'import numpy as np\n'), ((1095, 1122), 'numpy.load', 'np.load', (['outputs_train_file'], {}), '(outputs_train_file)\n', (1102, 1122), True, 'import numpy as np\n'), ((1209, 1234), 'numpy.load', 'np.load', (['inputs_test_file'], {}), '(inputs_test_file)\n', (1216, 1234), True, 'import numpy as np\n'), ((1324, 1350), 'numpy.load', 'np.load', (['outputs_test_file'], {}), '(outputs_test_file)\n', (1331, 1350), True, 'import numpy as np\n'), ((2301, 2336), 'numpy.load', 'np.load', (['word_embedding_matrix_file'], {}), '(word_embedding_matrix_file)\n', (2308, 2336), True, 'import numpy as np\n'), ((2727, 2894), 'keras.layers.Embedding', 'Embedding', (['word_embedding_matrix.shape[0]', 'word_embedding_matrix.shape[1]'], {'weights': '[word_embedding_matrix]', 'input_length': 'inputs_train.shape[1]', 'trainable': '(False)'}), '(word_embedding_matrix.shape[0], word_embedding_matrix.shape[1],\n weights=[word_embedding_matrix], 
input_length=inputs_train.shape[1],\n trainable=False)\n', (2736, 2894), False, 'from keras.layers import Embedding, SimpleRNN, Dense, Dropout\n'), ((3541, 3568), 'keras.layers.SimpleRNN', 'SimpleRNN', (['(128)'], {'unroll': '(True)'}), '(128, unroll=True)\n', (3550, 3568), False, 'from keras.layers import Embedding, SimpleRNN, Dense, Dropout\n'), ((3584, 3596), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3591, 3596), False, 'from keras.layers import Embedding, SimpleRNN, Dense, Dropout\n'), ((3612, 3663), 'keras.layers.Dense', 'Dense', (['outputs_train.shape[1]'], {'activation': '"""softmax"""'}), "(outputs_train.shape[1], activation='softmax')\n", (3617, 3663), False, 'from keras.layers import Embedding, SimpleRNN, Dense, Dropout\n'), ((3855, 3924), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'min_delta': '(0.005)', 'patience': '(3)', 'restore_best_weights': '(True)'}), '(min_delta=0.005, patience=3, restore_best_weights=True)\n', (3868, 3924), False, 'from keras.callbacks import EarlyStopping\n'), ((2980, 3146), 'keras.layers.Embedding', 'Embedding', (['word_embedding_matrix.shape[0]', 'word_embedding_matrix.shape[1]'], {'weights': '[word_embedding_matrix]', 'input_length': 'inputs_train.shape[1]', 'trainable': '(True)'}), '(word_embedding_matrix.shape[0], word_embedding_matrix.shape[1],\n weights=[word_embedding_matrix], input_length=inputs_train.shape[1],\n trainable=True)\n', (2989, 3146), False, 'from keras.layers import Embedding, SimpleRNN, Dense, Dropout\n'), ((4002, 4072), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'min_delta': '(0.0005)', 'patience': '(3)', 'restore_best_weights': '(True)'}), '(min_delta=0.0005, patience=3, restore_best_weights=True)\n', (4015, 4072), False, 'from keras.callbacks import EarlyStopping\n'), ((3232, 3361), 'keras.layers.Embedding', 'Embedding', (['word_embedding_matrix.shape[0]', 'word_embedding_matrix.shape[1]'], {'input_length': 'inputs_train.shape[1]', 'trainable': '(True)'}), '(word_embedding_matrix.shape[0], word_embedding_matrix.shape[1],\n input_length=inputs_train.shape[1], trainable=True)\n', (3241, 3361), False, 'from keras.layers import Embedding, SimpleRNN, Dense, Dropout\n')]
|
#!/usr/bin/env python
"""test_atom.py: Verify the Atom class functions as it's meant to."""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import unittest as ut
import numpy as np
from miniapp.system import atom
class TestAtom(ut.TestCase):
"""Unit tests for the Atom class.
"""
def test_1(self):
"""Make a Neon atom and verify the object construction.
"""
atom_type = "Ne"
position_vector = np.array([1.0, 1.0, 1.0])
test_atom = atom.Atom(atom_type, position_vector)
self.assertEqual(test_atom.atom_type, atom_type)
self.assertEqual(test_atom.pos.tolist(), position_vector.tolist())
self.assertEqual(test_atom.Z, atom.nuclear_charge[atom_type])
|
[
"miniapp.system.atom.Atom",
"numpy.array"
] |
[((450, 475), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (458, 475), True, 'import numpy as np\n'), ((497, 534), 'miniapp.system.atom.Atom', 'atom.Atom', (['atom_type', 'position_vector'], {}), '(atom_type, position_vector)\n', (506, 534), False, 'from miniapp.system import atom\n')]
|
import sys
import teca_py
import numpy as np
class teca_tc_wind_radii_stats(teca_py.teca_python_algorithm):
"""
Computes statistics using track wind radii
"""
def __init__(self):
self.basename = 'stats'
self.dpi = 100
self.interactive = False
self.wind_column = 'surface_wind'
self.output_prefix = ''
def __str__(self):
        return 'basename=%s, dpi=%d, interactive=%s, wind_column=%s'%( \
            self.basename, self.dpi, str(self.interactive), self.wind_column)
def set_basename(self, basename):
"""
All output files are prefixed by the basename. default 'stats'
"""
self.basename = basename
def set_dpi(self, dpi):
"""
set the DPI resolution for image output. default 100
"""
self.dpi = dpi
def set_interactive(self, interactive):
"""
        plots are rendered to an on-screen window when enabled.
when disabled plots are written directly to disk. default False
"""
self.interactive = interactive
def set_wind_column(self, wind_column):
"""
set the name of the column to obtain wind speed from
"""
self.wind_column = wind_column
def set_output_prefix(self, output_prefix):
"""
set the path to prepend to output files
"""
self.output_prefix = output_prefix
def execute(self, port, data_in, req):
"""
expects a table with track data containing wind radii computed
along each point of the track. produces statistical plots showing
the global distribution of wind radii.
"""
track_table = teca_py.as_teca_table(data_in[0])
# plot stats
import matplotlib.pyplot as plt
import matplotlib.patches as plt_mp
from matplotlib.colors import LogNorm
red_cmap = ['#ffd2a3','#ffa749','#ff7c04', \
'#ea4f00','#c92500','#a80300']
km_per_deg_lat = 111
km_s_per_m_hr = 3.6
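        # ~111 km per degree of latitude; factor 3.6 converts m/s to km/hr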
fig = plt.figure(figsize=(9.25,6.75),dpi=self.dpi)
# scatter
plt.subplot('331')
if not track_table.has_column(self.wind_column):
sys.stderr.write('ERROR: track table missing %s\n'%(self.wind_column))
sys.exit(-1)
year = track_table.get_column('year').as_array()
month = track_table.get_column('month').as_array()
day = track_table.get_column('day').as_array()
ws = km_s_per_m_hr*track_table.get_column(self.wind_column).as_array()
wr = []
nwr = 0
while track_table.has_column('wind_radius_%d'%(nwr)):
wr.append(km_per_deg_lat*track_table.get_column('wind_radius_%d'%(nwr)).as_array())
nwr += 1
i = 0
while i < nwr:
wc = teca_py.teca_tc_saffir_simpson.get_upper_bound_kmph(i-1)
wri = wr[i]
ii = np.where(wri > 0.0)
plt.scatter(wri[ii], ws[ii], c=red_cmap[i], alpha=0.25, marker='.', zorder=3+i)
i += 1
plt.ylabel('Wind speed (km/hr)', fontweight='normal', fontsize=10)
plt.title('R0 - R5 vs Wind speed', fontweight='bold', fontsize=11)
plt.grid(True)
ax = plt.gca()
ax.set_xlim([0.0, 6.0*km_per_deg_lat])
# all
plt.subplot('332')
i = 0
while i < nwr:
wc = teca_py.teca_tc_saffir_simpson.get_upper_bound_kmph(i-1)
wri = wr[i]
n,bins,pats = plt.hist(wri[np.where(wri > 0.0)], 32, range=[0,6.0*km_per_deg_lat], \
facecolor=red_cmap[i], alpha=0.95, edgecolor='black', \
linewidth=2, zorder=3+i)
i += 1
plt.ylabel('Number', fontweight='normal', fontsize=10)
plt.title('All R0 - R5', fontweight='bold', fontsize=11)
plt.grid(True)
ax = plt.gca()
ax.set_xlim([0.0, 6.0*km_per_deg_lat])
# r0 - r5
i = 0
while i < nwr:
plt.subplot(333+i)
wc = teca_py.teca_tc_saffir_simpson.get_upper_bound_kmph(i-1)
wri = wr[i]
wrii=wri[np.where(wri > 0.0)]
n,bins,pats = plt.hist(wrii, 32, \
facecolor=red_cmap[i], alpha=1.00, edgecolor='black', \
linewidth=2, zorder=3)
if ((i % 3) == 1):
plt.ylabel('Number', fontweight='normal', fontsize=10)
if (i >= 3):
plt.xlabel('Radius (km)', fontweight='normal', fontsize=10)
plt.title('R%d (%0.1f km/hr)'%(i,wc), fontweight='bold', fontsize=11)
plt.grid(True)
ax = plt.gca()
try:
ax.set_xlim([np.min(wrii), np.max(wrii)])
except:
pass
i += 1
# legend
plt.subplot('339')
red_cmap_pats = []
q = 0
while q < nwr:
red_cmap_pats.append( \
plt_mp.Patch(color=red_cmap[q], label='R%d'%(q)))
q += 1
l = plt.legend(handles=red_cmap_pats, loc=2, bbox_to_anchor=(-0.1, 1.0), fancybox=True)
plt.axis('off')
plt.suptitle('Wind Radii %s/%d/%d - %s/%d/%d'%(month[0],day[0],year[0], \
month[-1],day[-1],year[-1]), fontweight='bold', fontsize=12)
plt.subplots_adjust(hspace=0.35, wspace=0.35, top=0.90)
plt.savefig(self.output_prefix + 'wind_radii_stats.png')
fig = plt.figure(figsize=(7.5,4.0),dpi=100)
# peak radius
pr = km_per_deg_lat*track_table.get_column('peak_radius').as_array()
# peak radius is only valid if one of the other wind radii
# exist
kk = wr[0] > 1.0e-6
q = 1
while q < nwr:
kk = np.logical_or(kk, wr[q] > 1.0e-6)
q += 1
pr = pr[kk]
plt.subplot(121)
n,bins,pats = plt.hist(pr[np.where(pr > 0.0)], 24, \
facecolor='steelblue', alpha=0.95, edgecolor='black', \
linewidth=2, zorder=3)
plt.ylabel('Number', fontweight='normal', fontsize=10)
plt.xlabel('Radius (km)', fontweight='normal', fontsize=10)
plt.title('RP (radius at peak wind)', fontweight='bold', fontsize=11)
plt.grid(True)
ax = plt.gca()
ax.set_xlim([0.0, np.max(pr)])
# scatter
plt.subplot('122')
ii = np.where(pr > 0.0)
cnts,xe,ye,im = plt.hist2d(pr[ii], ws[ii], bins=24, norm=LogNorm(), zorder=2)
plt.ylabel('Wind speed (km/hr)', fontweight='normal', fontsize=10)
plt.xlabel('Radius (km)', fontweight='normal', fontsize=10)
plt.title('RP vs Wind speed', fontweight='bold', fontsize=11)
plt.grid(True)
ax = plt.gca()
ax.set_xlim([0.0, np.max(pr)])
fig.subplots_adjust(right=0.85)
cbar_ax = fig.add_axes([0.88, 0.35, 0.05, 0.5])
fig.colorbar(im, cax=cbar_ax)
plt.suptitle('Wind Radii %s/%d/%d - %s/%d/%d'%(month[0],day[0],year[0], \
month[-1],day[-1],year[-1]), fontweight='bold', fontsize=12)
plt.subplots_adjust(hspace=0.3, wspace=0.3, top=0.85)
plt.savefig(self.output_prefix + 'peak_radius_stats.png')
if self.interactive:
plt.show()
# send data downstream
return track_table
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.figure",
"matplotlib.colors.LogNorm",
"matplotlib.pyplot.gca",
"matplotlib.patches.Patch",
"numpy.max",
"teca_py.as_teca_table",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"teca_py.teca_tc_saffir_simpson.get_upper_bound_kmph",
"numpy.min",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid",
"sys.exit",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.axis",
"numpy.where",
"numpy.logical_or",
"sys.stderr.write",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((1697, 1730), 'teca_py.as_teca_table', 'teca_py.as_teca_table', (['data_in[0]'], {}), '(data_in[0])\n', (1718, 1730), False, 'import teca_py\n'), ((2053, 2099), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9.25, 6.75)', 'dpi': 'self.dpi'}), '(figsize=(9.25, 6.75), dpi=self.dpi)\n', (2063, 2099), True, 'import matplotlib.pyplot as plt\n'), ((2125, 2143), 'matplotlib.pyplot.subplot', 'plt.subplot', (['"""331"""'], {}), "('331')\n", (2136, 2143), True, 'import matplotlib.pyplot as plt\n'), ((3068, 3134), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Wind speed (km/hr)"""'], {'fontweight': '"""normal"""', 'fontsize': '(10)'}), "('Wind speed (km/hr)', fontweight='normal', fontsize=10)\n", (3078, 3134), True, 'import matplotlib.pyplot as plt\n'), ((3143, 3209), 'matplotlib.pyplot.title', 'plt.title', (['"""R0 - R5 vs Wind speed"""'], {'fontweight': '"""bold"""', 'fontsize': '(11)'}), "('R0 - R5 vs Wind speed', fontweight='bold', fontsize=11)\n", (3152, 3209), True, 'import matplotlib.pyplot as plt\n'), ((3218, 3232), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3226, 3232), True, 'import matplotlib.pyplot as plt\n'), ((3246, 3255), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3253, 3255), True, 'import matplotlib.pyplot as plt\n'), ((3326, 3344), 'matplotlib.pyplot.subplot', 'plt.subplot', (['"""332"""'], {}), "('332')\n", (3337, 3344), True, 'import matplotlib.pyplot as plt\n'), ((3717, 3771), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number"""'], {'fontweight': '"""normal"""', 'fontsize': '(10)'}), "('Number', fontweight='normal', fontsize=10)\n", (3727, 3771), True, 'import matplotlib.pyplot as plt\n'), ((3780, 3836), 'matplotlib.pyplot.title', 'plt.title', (['"""All R0 - R5"""'], {'fontweight': '"""bold"""', 'fontsize': '(11)'}), "('All R0 - R5', fontweight='bold', fontsize=11)\n", (3789, 3836), True, 'import matplotlib.pyplot as plt\n'), ((3845, 3859), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3853, 3859), True, 'import matplotlib.pyplot as plt\n'), ((3873, 3882), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3880, 3882), True, 'import matplotlib.pyplot as plt\n'), ((4815, 4833), 'matplotlib.pyplot.subplot', 'plt.subplot', (['"""339"""'], {}), "('339')\n", (4826, 4833), True, 'import matplotlib.pyplot as plt\n'), ((5031, 5118), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': 'red_cmap_pats', 'loc': '(2)', 'bbox_to_anchor': '(-0.1, 1.0)', 'fancybox': '(True)'}), '(handles=red_cmap_pats, loc=2, bbox_to_anchor=(-0.1, 1.0),\n fancybox=True)\n', (5041, 5118), True, 'import matplotlib.pyplot as plt\n'), ((5123, 5138), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (5131, 5138), True, 'import matplotlib.pyplot as plt\n'), ((5149, 5291), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (["('Wind Radii %s/%d/%d - %s/%d/%d' % (month[0], day[0], year[0], month[-1],\n day[-1], year[-1]))"], {'fontweight': '"""bold"""', 'fontsize': '(12)'}), "('Wind Radii %s/%d/%d - %s/%d/%d' % (month[0], day[0], year[0],\n month[-1], day[-1], year[-1]), fontweight='bold', fontsize=12)\n", (5161, 5291), True, 'import matplotlib.pyplot as plt\n'), ((5304, 5358), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.35)', 'wspace': '(0.35)', 'top': '(0.9)'}), '(hspace=0.35, wspace=0.35, top=0.9)\n', (5323, 5358), True, 'import matplotlib.pyplot as plt\n'), ((5369, 5425), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(self.output_prefix + 'wind_radii_stats.png')"], {}), 
"(self.output_prefix + 'wind_radii_stats.png')\n", (5380, 5425), True, 'import matplotlib.pyplot as plt\n'), ((5441, 5480), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7.5, 4.0)', 'dpi': '(100)'}), '(figsize=(7.5, 4.0), dpi=100)\n', (5451, 5480), True, 'import matplotlib.pyplot as plt\n'), ((5825, 5841), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (5836, 5841), True, 'import matplotlib.pyplot as plt\n'), ((6014, 6068), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number"""'], {'fontweight': '"""normal"""', 'fontsize': '(10)'}), "('Number', fontweight='normal', fontsize=10)\n", (6024, 6068), True, 'import matplotlib.pyplot as plt\n'), ((6077, 6136), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Radius (km)"""'], {'fontweight': '"""normal"""', 'fontsize': '(10)'}), "('Radius (km)', fontweight='normal', fontsize=10)\n", (6087, 6136), True, 'import matplotlib.pyplot as plt\n'), ((6145, 6214), 'matplotlib.pyplot.title', 'plt.title', (['"""RP (radius at peak wind)"""'], {'fontweight': '"""bold"""', 'fontsize': '(11)'}), "('RP (radius at peak wind)', fontweight='bold', fontsize=11)\n", (6154, 6214), True, 'import matplotlib.pyplot as plt\n'), ((6223, 6237), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (6231, 6237), True, 'import matplotlib.pyplot as plt\n'), ((6251, 6260), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6258, 6260), True, 'import matplotlib.pyplot as plt\n'), ((6327, 6345), 'matplotlib.pyplot.subplot', 'plt.subplot', (['"""122"""'], {}), "('122')\n", (6338, 6345), True, 'import matplotlib.pyplot as plt\n'), ((6359, 6377), 'numpy.where', 'np.where', (['(pr > 0.0)'], {}), '(pr > 0.0)\n', (6367, 6377), True, 'import numpy as np\n'), ((6472, 6538), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Wind speed (km/hr)"""'], {'fontweight': '"""normal"""', 'fontsize': '(10)'}), "('Wind speed (km/hr)', fontweight='normal', fontsize=10)\n", (6482, 6538), True, 'import matplotlib.pyplot as plt\n'), ((6547, 6606), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Radius (km)"""'], {'fontweight': '"""normal"""', 'fontsize': '(10)'}), "('Radius (km)', fontweight='normal', fontsize=10)\n", (6557, 6606), True, 'import matplotlib.pyplot as plt\n'), ((6615, 6676), 'matplotlib.pyplot.title', 'plt.title', (['"""RP vs Wind speed"""'], {'fontweight': '"""bold"""', 'fontsize': '(11)'}), "('RP vs Wind speed', fontweight='bold', fontsize=11)\n", (6624, 6676), True, 'import matplotlib.pyplot as plt\n'), ((6685, 6699), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (6693, 6699), True, 'import matplotlib.pyplot as plt\n'), ((6713, 6722), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6720, 6722), True, 'import matplotlib.pyplot as plt\n'), ((6906, 7048), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (["('Wind Radii %s/%d/%d - %s/%d/%d' % (month[0], day[0], year[0], month[-1],\n day[-1], year[-1]))"], {'fontweight': '"""bold"""', 'fontsize': '(12)'}), "('Wind Radii %s/%d/%d - %s/%d/%d' % (month[0], day[0], year[0],\n month[-1], day[-1], year[-1]), fontweight='bold', fontsize=12)\n", (6918, 7048), True, 'import matplotlib.pyplot as plt\n'), ((7061, 7114), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.3)', 'wspace': '(0.3)', 'top': '(0.85)'}), '(hspace=0.3, wspace=0.3, top=0.85)\n', (7080, 7114), True, 'import matplotlib.pyplot as plt\n'), ((7125, 7182), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(self.output_prefix + 'peak_radius_stats.png')"], {}), 
"(self.output_prefix + 'peak_radius_stats.png')\n", (7136, 7182), True, 'import matplotlib.pyplot as plt\n'), ((2214, 2284), 'sys.stderr.write', 'sys.stderr.write', (["('ERROR: track table missing %s\\n' % self.wind_column)"], {}), "('ERROR: track table missing %s\\n' % self.wind_column)\n", (2230, 2284), False, 'import sys\n'), ((2297, 2309), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (2305, 2309), False, 'import sys\n'), ((2830, 2888), 'teca_py.teca_tc_saffir_simpson.get_upper_bound_kmph', 'teca_py.teca_tc_saffir_simpson.get_upper_bound_kmph', (['(i - 1)'], {}), '(i - 1)\n', (2881, 2888), False, 'import teca_py\n'), ((2928, 2947), 'numpy.where', 'np.where', (['(wri > 0.0)'], {}), '(wri > 0.0)\n', (2936, 2947), True, 'import numpy as np\n'), ((2960, 3046), 'matplotlib.pyplot.scatter', 'plt.scatter', (['wri[ii]', 'ws[ii]'], {'c': 'red_cmap[i]', 'alpha': '(0.25)', 'marker': '"""."""', 'zorder': '(3 + i)'}), "(wri[ii], ws[ii], c=red_cmap[i], alpha=0.25, marker='.', zorder=\n 3 + i)\n", (2971, 3046), True, 'import matplotlib.pyplot as plt\n'), ((3399, 3457), 'teca_py.teca_tc_saffir_simpson.get_upper_bound_kmph', 'teca_py.teca_tc_saffir_simpson.get_upper_bound_kmph', (['(i - 1)'], {}), '(i - 1)\n', (3450, 3457), False, 'import teca_py\n'), ((3998, 4018), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(333 + i)'], {}), '(333 + i)\n', (4009, 4018), True, 'import matplotlib.pyplot as plt\n'), ((4034, 4092), 'teca_py.teca_tc_saffir_simpson.get_upper_bound_kmph', 'teca_py.teca_tc_saffir_simpson.get_upper_bound_kmph', (['(i - 1)'], {}), '(i - 1)\n', (4085, 4092), False, 'import teca_py\n'), ((4183, 4281), 'matplotlib.pyplot.hist', 'plt.hist', (['wrii', '(32)'], {'facecolor': 'red_cmap[i]', 'alpha': '(1.0)', 'edgecolor': '"""black"""', 'linewidth': '(2)', 'zorder': '(3)'}), "(wrii, 32, facecolor=red_cmap[i], alpha=1.0, edgecolor='black',\n linewidth=2, zorder=3)\n", (4191, 4281), True, 'import matplotlib.pyplot as plt\n'), ((4530, 4602), 'matplotlib.pyplot.title', 'plt.title', (["('R%d (%0.1f km/hr)' % (i, wc))"], {'fontweight': '"""bold"""', 'fontsize': '(11)'}), "('R%d (%0.1f km/hr)' % (i, wc), fontweight='bold', fontsize=11)\n", (4539, 4602), True, 'import matplotlib.pyplot as plt\n'), ((4612, 4626), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4620, 4626), True, 'import matplotlib.pyplot as plt\n'), ((4644, 4653), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4651, 4653), True, 'import matplotlib.pyplot as plt\n'), ((5743, 5775), 'numpy.logical_or', 'np.logical_or', (['kk', '(wr[q] > 1e-06)'], {}), '(kk, wr[q] > 1e-06)\n', (5756, 5775), True, 'import numpy as np\n'), ((7225, 7235), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7233, 7235), True, 'import matplotlib.pyplot as plt\n'), ((4136, 4155), 'numpy.where', 'np.where', (['(wri > 0.0)'], {}), '(wri > 0.0)\n', (4144, 4155), True, 'import numpy as np\n'), ((4362, 4416), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number"""'], {'fontweight': '"""normal"""', 'fontsize': '(10)'}), "('Number', fontweight='normal', fontsize=10)\n", (4372, 4416), True, 'import matplotlib.pyplot as plt\n'), ((4458, 4517), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Radius (km)"""'], {'fontweight': '"""normal"""', 'fontsize': '(10)'}), "('Radius (km)', fontweight='normal', fontsize=10)\n", (4468, 4517), True, 'import matplotlib.pyplot as plt\n'), ((4950, 4998), 'matplotlib.patches.Patch', 'plt_mp.Patch', ([], {'color': 'red_cmap[q]', 'label': "('R%d' % q)"}), "(color=red_cmap[q], label='R%d' % q)\n", (4962, 
4998), True, 'import matplotlib.patches as plt_mp\n'), ((5876, 5894), 'numpy.where', 'np.where', (['(pr > 0.0)'], {}), '(pr > 0.0)\n', (5884, 5894), True, 'import numpy as np\n'), ((6287, 6297), 'numpy.max', 'np.max', (['pr'], {}), '(pr)\n', (6293, 6297), True, 'import numpy as np\n'), ((6443, 6452), 'matplotlib.colors.LogNorm', 'LogNorm', ([], {}), '()\n', (6450, 6452), False, 'from matplotlib.colors import LogNorm\n'), ((6749, 6759), 'numpy.max', 'np.max', (['pr'], {}), '(pr)\n', (6755, 6759), True, 'import numpy as np\n'), ((3519, 3538), 'numpy.where', 'np.where', (['(wri > 0.0)'], {}), '(wri > 0.0)\n', (3527, 3538), True, 'import numpy as np\n'), ((4700, 4712), 'numpy.min', 'np.min', (['wrii'], {}), '(wrii)\n', (4706, 4712), True, 'import numpy as np\n'), ((4714, 4726), 'numpy.max', 'np.max', (['wrii'], {}), '(wrii)\n', (4720, 4726), True, 'import numpy as np\n')]
|
# set the matplotlib backend so figures can be saved in the background
import matplotlib
matplotlib.use("Agg")
# import the necessary packages
from pyimagesearch.convautoencoder import ConvAutoencoder
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.datasets import mnist
import matplotlib.pyplot as plt
import numpy as np
import argparse
import cv2
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--samples", type=int, default=8,
help="# number of samples to visualize when decoding")
ap.add_argument("-o", "--output", type=str, default="output.png",
help="path to output visualization file")
ap.add_argument("-p", "--plot", type=str, default="plot.png",
help="path to output plot file")
args = vars(ap.parse_args())
# initialize the number of epochs to train for and batch size
EPOCHS = 25
BS = 32
# load the MNIST dataset
print("[INFO] loading MNIST dataset...")
((trainX, _), (testX, _)) = mnist.load_data()
# add a channel dimension to every image in the dataset, then scale
# the pixel intensities to the range [0, 1]
trainX = np.expand_dims(trainX, axis=-1)
testX = np.expand_dims(testX, axis=-1)
trainX = trainX.astype("float32") / 255.0
testX = testX.astype("float32") / 255.0
# construct our convolutional autoencoder
print("[INFO] building autoencoder...")
(encoder, decoder, autoencoder) = ConvAutoencoder.build(28, 28, 1)
opt = Adam(lr=1e-3)
autoencoder.compile(loss="mse", optimizer=opt)
# train the convolutional autoencoder
H = autoencoder.fit(
trainX, trainX,
validation_data=(testX, testX),
epochs=EPOCHS,
batch_size=BS)
# construct a plot that plots and saves the training history
N = np.arange(0, EPOCHS)
plt.style.use("ggplot")
plt.figure()
plt.plot(N, H.history["loss"], label="train_loss")
plt.plot(N, H.history["val_loss"], label="val_loss")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.savefig(args["plot"])
# use the convolutional autoencoder to make predictions on the
# testing images, then initialize our list of output images
print("[INFO] making predictions...")
decoded = autoencoder.predict(testX)
outputs = None
# loop over our number of output samples
for i in range(0, args["samples"]):
# grab the original image and reconstructed image
original = (testX[i] * 255).astype("uint8")
recon = (decoded[i] * 255).astype("uint8")
# stack the original and reconstructed image side-by-side
output = np.hstack([original, recon])
# if the outputs array is empty, initialize it as the current
# side-by-side image display
if outputs is None:
outputs = output
# otherwise, vertically stack the outputs
else:
outputs = np.vstack([outputs, output])
# save the outputs image to disk
cv2.imwrite(args["output"], outputs)
|
[
"matplotlib.pyplot.title",
"pyimagesearch.convautoencoder.ConvAutoencoder.build",
"argparse.ArgumentParser",
"matplotlib.pyplot.plot",
"cv2.imwrite",
"matplotlib.pyplot.legend",
"numpy.expand_dims",
"tensorflow.keras.datasets.mnist.load_data",
"numpy.hstack",
"matplotlib.pyplot.style.use",
"matplotlib.use",
"tensorflow.keras.optimizers.Adam",
"numpy.arange",
"matplotlib.pyplot.figure",
"numpy.vstack",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((89, 110), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (103, 110), False, 'import matplotlib\n'), ((430, 455), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (453, 455), False, 'import argparse\n'), ((980, 997), 'tensorflow.keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (995, 997), False, 'from tensorflow.keras.datasets import mnist\n'), ((1120, 1151), 'numpy.expand_dims', 'np.expand_dims', (['trainX'], {'axis': '(-1)'}), '(trainX, axis=-1)\n', (1134, 1151), True, 'import numpy as np\n'), ((1160, 1190), 'numpy.expand_dims', 'np.expand_dims', (['testX'], {'axis': '(-1)'}), '(testX, axis=-1)\n', (1174, 1190), True, 'import numpy as np\n'), ((1390, 1422), 'pyimagesearch.convautoencoder.ConvAutoencoder.build', 'ConvAutoencoder.build', (['(28)', '(28)', '(1)'], {}), '(28, 28, 1)\n', (1411, 1422), False, 'from pyimagesearch.convautoencoder import ConvAutoencoder\n'), ((1429, 1443), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (1433, 1443), False, 'from tensorflow.keras.optimizers import Adam\n'), ((1698, 1718), 'numpy.arange', 'np.arange', (['(0)', 'EPOCHS'], {}), '(0, EPOCHS)\n', (1707, 1718), True, 'import numpy as np\n'), ((1719, 1742), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (1732, 1742), True, 'import matplotlib.pyplot as plt\n'), ((1743, 1755), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1753, 1755), True, 'import matplotlib.pyplot as plt\n'), ((1756, 1806), 'matplotlib.pyplot.plot', 'plt.plot', (['N', "H.history['loss']"], {'label': '"""train_loss"""'}), "(N, H.history['loss'], label='train_loss')\n", (1764, 1806), True, 'import matplotlib.pyplot as plt\n'), ((1807, 1859), 'matplotlib.pyplot.plot', 'plt.plot', (['N', "H.history['val_loss']"], {'label': '"""val_loss"""'}), "(N, H.history['val_loss'], label='val_loss')\n", (1815, 1859), True, 'import matplotlib.pyplot as plt\n'), ((1860, 1899), 'matplotlib.pyplot.title', 'plt.title', (['"""Training Loss and Accuracy"""'], {}), "('Training Loss and Accuracy')\n", (1869, 1899), True, 'import matplotlib.pyplot as plt\n'), ((1900, 1921), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch #"""'], {}), "('Epoch #')\n", (1910, 1921), True, 'import matplotlib.pyplot as plt\n'), ((1922, 1949), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss/Accuracy"""'], {}), "('Loss/Accuracy')\n", (1932, 1949), True, 'import matplotlib.pyplot as plt\n'), ((1950, 1978), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower left"""'}), "(loc='lower left')\n", (1960, 1978), True, 'import matplotlib.pyplot as plt\n'), ((1979, 2004), 'matplotlib.pyplot.savefig', 'plt.savefig', (["args['plot']"], {}), "(args['plot'])\n", (1990, 2004), True, 'import matplotlib.pyplot as plt\n'), ((2796, 2832), 'cv2.imwrite', 'cv2.imwrite', (["args['output']", 'outputs'], {}), "(args['output'], outputs)\n", (2807, 2832), False, 'import cv2\n'), ((2507, 2535), 'numpy.hstack', 'np.hstack', (['[original, recon]'], {}), '([original, recon])\n', (2516, 2535), True, 'import numpy as np\n'), ((2733, 2761), 'numpy.vstack', 'np.vstack', (['[outputs, output]'], {}), '([outputs, output])\n', (2742, 2761), True, 'import numpy as np\n')]
|
from typing import List, Tuple, Type
import numpy as np
from continuum.datasets.base import _ContinuumDataset
from continuum.datasets.pytorch import (CIFAR10, CIFAR100, KMNIST, MNIST, FashionMNIST)
class Fellowship(_ContinuumDataset):
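    """Concatenate several continuum datasets into a single dataset, shifting class labels so that classes from different datasets do not collide."""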
def __init__(
self,
dataset_list: List[Type[_ContinuumDataset]],
data_path: str = "",
download: bool = True,
):
super().__init__(data_path, download)
self.datasets = [dataset(data_path, download) for dataset in dataset_list]
def init(self, train: bool) -> Tuple[np.ndarray, np.ndarray, None]:
x, y = [], []
class_counter = 0
for dataset in self.datasets:
data = dataset.init(train)
x.append(data[0])
y.append(data[1] + class_counter)
class_counter += len(np.unique(data[1]))
x = np.concatenate(x)
y = np.concatenate(y)
return x, y, None
class MNISTFellowship(Fellowship):
def __init__(self, data_path: str = "", download: bool = True) -> None:
super().__init__([MNIST, FashionMNIST, KMNIST], data_path, download)
class CIFARFellowship(Fellowship):
def __init__(self, data_path: str = "", download: bool = True) -> None:
super().__init__([CIFAR10, CIFAR100], data_path, download)
|
[
"numpy.unique",
"numpy.concatenate"
] |
[((865, 882), 'numpy.concatenate', 'np.concatenate', (['x'], {}), '(x)\n', (879, 882), True, 'import numpy as np\n'), ((895, 912), 'numpy.concatenate', 'np.concatenate', (['y'], {}), '(y)\n', (909, 912), True, 'import numpy as np\n'), ((832, 850), 'numpy.unique', 'np.unique', (['data[1]'], {}), '(data[1])\n', (841, 850), True, 'import numpy as np\n')]
|
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
from math import log
from random import randint #, shuffle, sample
from functools import reduce
from operator import __or__
import numpy as np
import pytest
from mmgroup.bitfunctions import bit_mat_mul, bit_mat_inverse
from mmgroup.clifford12 import bitmatrix64_t
from mmgroup.clifford12 import bitmatrix64_echelon_h
from mmgroup.clifford12 import bitmatrix64_echelon_l
from mmgroup.clifford12 import bitmatrix64_cap_h
from mmgroup.clifford12 import bitmatrix64_mul
from mmgroup.clifford12 import bitmatrix64_inv
from mmgroup.clifford12 import bitmatrix64_error_pool
#####################################################################
# Test function bitmatrix64_t()
#####################################################################
def rand_bit_matrix(rows, cols):
m = (1 << cols) - 1
return [randint(0, m) for i in range(rows)]
def as_bit_array(m, cols):
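    # Expand a list of integers into a (len(m), cols) numpy array holding one bit per entry.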
a = np.zeros( (len(m), cols), dtype = np.uint8)
for i in range(len(m)):
for j in range(cols):
a[i,j] = (int(m[i]) >> j) & 1
return a
def create_bitmatrices():
"""yield a bit matrix as a list of integers """
for rows in range(8):
for cols in range(8):
m = rand_bit_matrix(rows, cols)
yield m, cols
@pytest.mark.qstate
def test_bitmatrix_t(verbose = 0):
"""Test the transposition of a bit matrix"""
for ntest, (m, cols) in enumerate(create_bitmatrices()):
t = bitmatrix64_t(m, cols)
m1 = as_bit_array(m, cols)
#print(m1, "shp", m1.shape)
t1 = as_bit_array(t, len(m))
if verbose:
print("Test %d: " % ntest)
print("Transpose %s bit matrix, m =" % str(m1.shape))
print(m1, "\nTransposed: %s\n%s" % (str(t1.shape), t1))
assert m1.T.shape == t1.shape, (m1.shape, t1.shape)
assert (m1.T == t1).all()
#####################################################################
# Test functions bitmatrix64_echelon_h(), bitmatrix64_echelon_l()
#####################################################################
def create_echelon_matrices():
"""yield a bit matrix as a list of integers """
for rows in range(8):
for _ in range(8):
m = rand_bit_matrix(rows, 64)
i, j = randint(0,63), randint(0,63)
yield m, min(i,j), max(i,j)
def ref_echelon(m, j0 = 64, n = 64, high = True):
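    """Reference echelonization over GF(2): reduce the n columns just below j0 (high=True) or from j0 upward (high=False) and return (number of pivot rows, reduced matrix)."""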
m = [int(x) for x in m]
row = 0
if high:
assert n <= j0
range_ = range(j0-1, j0-n-1, -1)
mask = ((1 << n) - 1) << (j0 - n)
else:
range_ = range(j0, j0+n)
mask = ((1 << n) - 1) << (j0)
for col in range_:
col_mask = 1 << col
for i in range(len(m)-1, row-1, -1):
if m[i] & col_mask:
for i1 in range(i-1, -1, -1):
if m[i1] & col_mask:
m[i1] ^= m[i]
m[i], m[row] = m[row], m[i]
row += 1
break
return row, m
@pytest.mark.qstate
def test_bitmatrix_echelon(verbose = 0):
for ntest, (m, imin, imax) in enumerate(create_echelon_matrices()):
m1h = np.array(m, dtype = np.uint64, copy = True)
j0h, nh = imax + 1, imax - imin
lenh = bitmatrix64_echelon_h(m1h, len(m1h), j0h, nh)
m1h = list(m1h)
lenh_ref, m1h_ref = ref_echelon(m, j0h, nh)
ok_h = m1h == m1h_ref and lenh == lenh_ref
m1l = np.array(m, dtype = np.uint64, copy = True)
j0l, nl = imin, imax - imin
lenl = bitmatrix64_echelon_l(m1l, len(m1l), j0l, nl)
m1l = list(m1l)
lenl_ref, m1l_ref = ref_echelon(m, j0l, nl, False)
ok_l = m1l == m1l_ref and lenl == lenl_ref
ok = ok_h and ok_l
if verbose or not ok:
print("Test ", ntest)
print("m =", [hex(x) for x in m], j0h, nh)
print("echelon h", [hex(x) for x in m1h], lenh)
print("expected", [hex(x) for x in m1h_ref], lenh_ref)
if not ok_h:
err = "Error in function bitmatrix64_echelon_h"
raise ValueError(err)
print("m =", [hex(x) for x in m], j0l, nl)
print("echelon l", [hex(x) for x in m1l], lenl)
print("expected", [hex(x) for x in m1l_ref], lenl_ref)
if not ok_l:
err = "Error in function bitmatrix64_echelon_l"
raise ValueError(err)
#####################################################################
# Test functions bitmatrix64_cap_h()
#####################################################################
def create_cap_matrices():
"""yield a bit matrix as a list of integers """
test_matrices = [
([3,7,11], [3,1], 0,4)
]
for t in test_matrices:
yield t
for rows1 in range(5):
for rows2 in range(5):
m1 = rand_bit_matrix(rows1, 5)
m2 = rand_bit_matrix(rows2, 5)
i, j = randint(0,6), randint(0,6)
yield m1, m2, min(i,j), max(i,j)
for rows1 in range(1,65,7):
for rows2 in range(1,65,7):
m1 = rand_bit_matrix(rows1, 64)
m2 = rand_bit_matrix(rows2, 64)
i, j = randint(0,63), randint(0,63)
yield m1, m2, min(i,j), max(i,j)
def basis_to_set(b):
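    # Return the GF(2) span of the basis vectors in b as a set of integers (all XOR combinations).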
if len(b) == 0:
return set([0])
s0 = basis_to_set(b[1:])
b0 = b[0]
return s0 | set((x ^ b0 for x in s0))
def as_set(m, mask):
m = [int(x) & int(mask) for x in m]
return basis_to_set(m)
@pytest.mark.qstate
def test_bitmatrix_cap(verbose = 0):
for ntest, (m1, m2, imin, imax) in enumerate(create_cap_matrices()):
if verbose:
print("Test", ntest+1, "imin =", imin, "imax =", imax )
print("m1 =", [hex(x) for x in m1])
print("m2 =", [hex(x) for x in m2])
m1h = np.array(m1, dtype = np.uint64, copy = True)
m2h = np.array(m2, dtype = np.uint64, copy = True)
j0h, nh = imax + 1, imax - imin + 1
l1h, l2h = bitmatrix64_cap_h(m1h, m2h, j0h, nh)
m1h, m2h = list(m1h), list (m2h)
if verbose:
print("Non intersecting parts of m1 and m2")
print("out1 =", [hex(x) for x in m1h[:l1h]])
print("out2 =", [hex(x) for x in m2h[:l2h]])
mask = (1 << (imax + 1)) - (1 << imin)
print("Intersecting parts (mask = %s):" % hex(mask))
print("out1 =", [hex(x) for x in m1h[l1h:]])
print("out2 =", [hex(x) for x in m2h[l2h:]])
if verbose > 1:
print("Intermediate results")
pool = np.zeros(20, dtype = np.uint64)
bitmatrix64_error_pool(pool, 20)
for i in range(20): print(i, hex(pool[i]))
assert len(m1) == len(m1h) and len(m2) == len(m2h)
assert ref_echelon(m1) == ref_echelon(m1h)
assert ref_echelon(m2) == ref_echelon(m2h)
mask = (1 << (imax + 1)) - (1 << imin)
l1, l2 = len(m1), len(m2)
while l1 and int(m1h[l1-1]) & mask == 0:
l1 -= 1
while l2 and int(m2h[l2-1]) & mask == 0:
l2 -= 1
assert l1-l1h == l2-l2h, (l1, l1h, l2, l2h)
if len(m1) < 5 and len(m2) < 5:
set1 = as_set(m1h, mask)
set2 = as_set(m2h, mask)
cap = set1 & set2
set_cap1 = as_set(m1h[l1h:], mask)
set_cap2 = as_set(m2h[l2h:], mask)
assert cap == set_cap1 , (set1, set2, cap, set_cap1)
assert cap == set_cap2 , (set1, set2, cap, set_cap2)
if verbose:
print("Intersection testd successfully")
#####################################################################
# Test functions bitmatrix64_mul() and bitmatrix64_inv()
#####################################################################
# This tests also function bitmatrix64_mask_rows()
# and bitmatrix64_add_diag().
def create_mul_inv_matrices():
"""yield a bit matrix as a list of integers """
for i in [1, 3, 5, 7, 17, 24, 31, 32]:
m2i = None
for _ in range(1000):
m2 = rand_bit_matrix(i, i)
try:
m2i = bit_mat_inverse(m2)
break
except ZeroDivisionError:
pass
for j in [1,2,7,32, 63, 64]:
m1 = rand_bit_matrix(j, i)
yield m1, m2, m2i
m2i = None
@pytest.mark.qstate
def test_bitmatrix_mul_inv(verbose = 0):
for ntest, (m1, m2, m2i) in enumerate(create_mul_inv_matrices()):
#print(m1, m2, m2i)
m1a = np.array(m1, dtype = np.uint64, copy = True)
m2a = np.array(m2, dtype = np.uint64, copy = True)
m3 = bit_mat_mul(m1, m2)
m3a = bitmatrix64_mul(m1a, m2a)
assert list(m3a) == m3
if m2i is not None:
m2ia = bitmatrix64_inv(m2)
assert list(m2ia) == m2i
|
[
"mmgroup.clifford12.bitmatrix64_error_pool",
"random.randint",
"mmgroup.clifford12.bitmatrix64_cap_h",
"mmgroup.bitfunctions.bit_mat_inverse",
"mmgroup.clifford12.bitmatrix64_mul",
"mmgroup.clifford12.bitmatrix64_t",
"numpy.zeros",
"mmgroup.clifford12.bitmatrix64_inv",
"numpy.array",
"mmgroup.bitfunctions.bit_mat_mul"
] |
[((929, 942), 'random.randint', 'randint', (['(0)', 'm'], {}), '(0, m)\n', (936, 942), False, 'from random import randint\n'), ((1575, 1597), 'mmgroup.clifford12.bitmatrix64_t', 'bitmatrix64_t', (['m', 'cols'], {}), '(m, cols)\n', (1588, 1597), False, 'from mmgroup.clifford12 import bitmatrix64_t\n'), ((3304, 3343), 'numpy.array', 'np.array', (['m'], {'dtype': 'np.uint64', 'copy': '(True)'}), '(m, dtype=np.uint64, copy=True)\n', (3312, 3343), True, 'import numpy as np\n'), ((3598, 3637), 'numpy.array', 'np.array', (['m'], {'dtype': 'np.uint64', 'copy': '(True)'}), '(m, dtype=np.uint64, copy=True)\n', (3606, 3637), True, 'import numpy as np\n'), ((6050, 6090), 'numpy.array', 'np.array', (['m1'], {'dtype': 'np.uint64', 'copy': '(True)'}), '(m1, dtype=np.uint64, copy=True)\n', (6058, 6090), True, 'import numpy as np\n'), ((6110, 6150), 'numpy.array', 'np.array', (['m2'], {'dtype': 'np.uint64', 'copy': '(True)'}), '(m2, dtype=np.uint64, copy=True)\n', (6118, 6150), True, 'import numpy as np\n'), ((6221, 6257), 'mmgroup.clifford12.bitmatrix64_cap_h', 'bitmatrix64_cap_h', (['m1h', 'm2h', 'j0h', 'nh'], {}), '(m1h, m2h, j0h, nh)\n', (6238, 6257), False, 'from mmgroup.clifford12 import bitmatrix64_cap_h\n'), ((8837, 8877), 'numpy.array', 'np.array', (['m1'], {'dtype': 'np.uint64', 'copy': '(True)'}), '(m1, dtype=np.uint64, copy=True)\n', (8845, 8877), True, 'import numpy as np\n'), ((8897, 8937), 'numpy.array', 'np.array', (['m2'], {'dtype': 'np.uint64', 'copy': '(True)'}), '(m2, dtype=np.uint64, copy=True)\n', (8905, 8937), True, 'import numpy as np\n'), ((8957, 8976), 'mmgroup.bitfunctions.bit_mat_mul', 'bit_mat_mul', (['m1', 'm2'], {}), '(m1, m2)\n', (8968, 8976), False, 'from mmgroup.bitfunctions import bit_mat_mul, bit_mat_inverse\n'), ((8992, 9017), 'mmgroup.clifford12.bitmatrix64_mul', 'bitmatrix64_mul', (['m1a', 'm2a'], {}), '(m1a, m2a)\n', (9007, 9017), False, 'from mmgroup.clifford12 import bitmatrix64_mul\n'), ((6822, 6851), 'numpy.zeros', 'np.zeros', (['(20)'], {'dtype': 'np.uint64'}), '(20, dtype=np.uint64)\n', (6830, 6851), True, 'import numpy as np\n'), ((6867, 6899), 'mmgroup.clifford12.bitmatrix64_error_pool', 'bitmatrix64_error_pool', (['pool', '(20)'], {}), '(pool, 20)\n', (6889, 6899), False, 'from mmgroup.clifford12 import bitmatrix64_error_pool\n'), ((9098, 9117), 'mmgroup.clifford12.bitmatrix64_inv', 'bitmatrix64_inv', (['m2'], {}), '(m2)\n', (9113, 9117), False, 'from mmgroup.clifford12 import bitmatrix64_inv\n'), ((2401, 2415), 'random.randint', 'randint', (['(0)', '(63)'], {}), '(0, 63)\n', (2408, 2415), False, 'from random import randint\n'), ((2416, 2430), 'random.randint', 'randint', (['(0)', '(63)'], {}), '(0, 63)\n', (2423, 2430), False, 'from random import randint\n'), ((5142, 5155), 'random.randint', 'randint', (['(0)', '(6)'], {}), '(0, 6)\n', (5149, 5155), False, 'from random import randint\n'), ((5156, 5169), 'random.randint', 'randint', (['(0)', '(6)'], {}), '(0, 6)\n', (5163, 5169), False, 'from random import randint\n'), ((5389, 5403), 'random.randint', 'randint', (['(0)', '(63)'], {}), '(0, 63)\n', (5396, 5403), False, 'from random import randint\n'), ((5404, 5418), 'random.randint', 'randint', (['(0)', '(63)'], {}), '(0, 63)\n', (5411, 5418), False, 'from random import randint\n'), ((8411, 8430), 'mmgroup.bitfunctions.bit_mat_inverse', 'bit_mat_inverse', (['m2'], {}), '(m2)\n', (8426, 8430), False, 'from mmgroup.bitfunctions import bit_mat_mul, bit_mat_inverse\n')]
|
import numpy as np
import colorlover as cl
from multiagent.scenario import BaseScenario
from mdac.utils.entities import Drone, TargetLandmark, SupplyEntity
from mdac.utils.worlds import DroneWorld
class Scenario(BaseScenario):
def make_world(self):
n_lidar_per_agent = 256
world = DroneWorld(n_lidar_per_agent=n_lidar_per_agent,
mem_frames=1, dt=0.08)
num_agents = 5
num_targets = num_agents
world.collaborative = False
world.agents = [Drone(uid=i) for i in range(num_agents)]
world.landmarks = [TargetLandmark() for i in range(num_targets)]
for i, agent in enumerate(world.agents):
agent.name = 'agent %d' % i
agent.collide = True
agent.silent = True
agent.lidar_range = 4.0
agent.target = world.landmarks[i]
agent.construct_range = 0.1
for i, landmark in enumerate(world.landmarks):
landmark.collide = False
landmark.movable = False
if isinstance(landmark, TargetLandmark):
landmark.name = 'landmark %d' % i
landmark.size = 0.05
if isinstance(landmark, SupplyEntity):
landmark.name = 'supply %d' % i
landmark.size = 1.5
# make initial conditions
self.reset_world(world)
return world
def generate_random_pose(self, agent):
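        # Rejection-sample a start position uniformly from the disc of radius 7 around the origin.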
pos = np.random.uniform(-7, +7, 2)
dis = np.linalg.norm(pos)
while (dis > 7):
pos = np.random.uniform(-7, +7, 2)
dis = np.linalg.norm(pos)
agent.state.p_pos = pos
def generate_random_goal(self, agent):
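        # Sample a goal inside the same disc, between 6 and 8 units away from the agent's current position.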
goal_pos = np.random.uniform(-7, +7, 2)
dis_origin = np.linalg.norm(goal_pos)
dis_goal = np.linalg.norm(agent.state.p_pos - goal_pos)
while (dis_origin > 7 or dis_goal > 8 or dis_goal < 6):
goal_pos = np.random.uniform(-7, +7, 2)
dis_origin = np.linalg.norm(goal_pos)
dis_goal = np.linalg.norm(agent.state.p_pos - goal_pos)
agent.target.state.p_pos = goal_pos
def reset_world(self, world):
colors = np.array(cl.to_numeric(cl.scales['5']['div']['RdYlBu']))/255
for i, agent in enumerate(world.agents):
agent.size = np.random.uniform(0.2, 0.3)
agent.pseudo_collision_range = agent.size + 0.1
agent.color = colors[i%5]
agent.target.color = colors[i%5]
self.generate_random_pose(agent)
self.generate_random_goal(agent)
agent.state.p_vel = np.zeros(world.dim_p)
agent.previous_state.p_pos = np.copy(agent.state.p_pos)
agent.previous_state.p_vel = np.copy(agent.state.p_vel)
agent.state.c = np.zeros(world.dim_c)
agent.terminate = False
for agent in world.agents:
agent.agents_lidar = world.lidar.get_ray_lidar(agent)
agent.lidar_memory = [agent.agents_lidar, agent.agents_lidar]
def is_collision(self, agent1, agent2):
dist = np.linalg.norm(agent1.state.p_pos - agent2.state.p_pos)
dist_min = agent1.size + agent2.size
return True if dist < dist_min else False
def collision_reward(self, agent, entity):
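        # Penalty ramping linearly from 0 at the pseudo-collision range down to -15 at contact.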
if agent is entity:
return 0
if agent.pseudo_collision_range is not None:
p_range = agent.pseudo_collision_range
else:
p_range = agent.size
d = np.linalg.norm(agent.state.p_pos-entity.state.p_pos)
s = agent.size + entity.size
if d > p_range + entity.size:
return 0
if d <= s:
return -15
return ((d - s) / (p_range - agent.size) - 1) * 15
def reward(self, agent, world):
prev_d = np.linalg.norm(agent.previous_state.p_pos - agent.target.state.p_pos)
d = np.linalg.norm(agent.state.p_pos - agent.target.state.p_pos)
reward_g = (prev_d - d) * 2.5
reward_c = 0
if agent.collide:
for a in world.agents:
if a is agent: continue
if self.is_collision(agent, a):
print(agent.name, 'collided')
reward_c -= 15
agent.terminate = True
else:
reward_c += self.collision_reward(agent, a)
if d < agent.construct_range and (np.abs(agent.state.p_vel) < 0.2).all():
print(agent.name, 'reached target')
reward_g += 15
# agent.target.state.p_pos = np.random.uniform(-6, +6, world.dim_p)
self.generate_random_goal(agent)
agent.terminate = True
return reward_c + reward_g
def observation(self, agent, world):
out = [np.concatenate(agent.lidar_memory + [agent.agents_lidar]),
agent.state.p_vel,
agent.target.state.p_pos - agent.state.p_pos,
]
return np.concatenate(out)
def done(self, agent, world):
return agent.terminate
|
[
"numpy.random.uniform",
"numpy.abs",
"numpy.copy",
"numpy.zeros",
"mdac.utils.entities.TargetLandmark",
"colorlover.to_numeric",
"mdac.utils.entities.Drone",
"numpy.linalg.norm",
"mdac.utils.worlds.DroneWorld",
"numpy.concatenate"
] |
[((303, 373), 'mdac.utils.worlds.DroneWorld', 'DroneWorld', ([], {'n_lidar_per_agent': 'n_lidar_per_agent', 'mem_frames': '(1)', 'dt': '(0.08)'}), '(n_lidar_per_agent=n_lidar_per_agent, mem_frames=1, dt=0.08)\n', (313, 373), False, 'from mdac.utils.worlds import DroneWorld\n'), ((1458, 1486), 'numpy.random.uniform', 'np.random.uniform', (['(-7)', '(+7)', '(2)'], {}), '(-7, +7, 2)\n', (1475, 1486), True, 'import numpy as np\n'), ((1501, 1520), 'numpy.linalg.norm', 'np.linalg.norm', (['pos'], {}), '(pos)\n', (1515, 1520), True, 'import numpy as np\n'), ((1726, 1754), 'numpy.random.uniform', 'np.random.uniform', (['(-7)', '(+7)', '(2)'], {}), '(-7, +7, 2)\n', (1743, 1754), True, 'import numpy as np\n'), ((1776, 1800), 'numpy.linalg.norm', 'np.linalg.norm', (['goal_pos'], {}), '(goal_pos)\n', (1790, 1800), True, 'import numpy as np\n'), ((1820, 1864), 'numpy.linalg.norm', 'np.linalg.norm', (['(agent.state.p_pos - goal_pos)'], {}), '(agent.state.p_pos - goal_pos)\n', (1834, 1864), True, 'import numpy as np\n'), ((3103, 3158), 'numpy.linalg.norm', 'np.linalg.norm', (['(agent1.state.p_pos - agent2.state.p_pos)'], {}), '(agent1.state.p_pos - agent2.state.p_pos)\n', (3117, 3158), True, 'import numpy as np\n'), ((3514, 3568), 'numpy.linalg.norm', 'np.linalg.norm', (['(agent.state.p_pos - entity.state.p_pos)'], {}), '(agent.state.p_pos - entity.state.p_pos)\n', (3528, 3568), True, 'import numpy as np\n'), ((3819, 3888), 'numpy.linalg.norm', 'np.linalg.norm', (['(agent.previous_state.p_pos - agent.target.state.p_pos)'], {}), '(agent.previous_state.p_pos - agent.target.state.p_pos)\n', (3833, 3888), True, 'import numpy as np\n'), ((3901, 3961), 'numpy.linalg.norm', 'np.linalg.norm', (['(agent.state.p_pos - agent.target.state.p_pos)'], {}), '(agent.state.p_pos - agent.target.state.p_pos)\n', (3915, 3961), True, 'import numpy as np\n'), ((4983, 5002), 'numpy.concatenate', 'np.concatenate', (['out'], {}), '(out)\n', (4997, 5002), True, 'import numpy as np\n'), ((518, 530), 'mdac.utils.entities.Drone', 'Drone', ([], {'uid': 'i'}), '(uid=i)\n', (523, 530), False, 'from mdac.utils.entities import Drone, TargetLandmark, SupplyEntity\n'), ((586, 602), 'mdac.utils.entities.TargetLandmark', 'TargetLandmark', ([], {}), '()\n', (600, 602), False, 'from mdac.utils.entities import Drone, TargetLandmark, SupplyEntity\n'), ((1564, 1592), 'numpy.random.uniform', 'np.random.uniform', (['(-7)', '(+7)', '(2)'], {}), '(-7, +7, 2)\n', (1581, 1592), True, 'import numpy as np\n'), ((1611, 1630), 'numpy.linalg.norm', 'np.linalg.norm', (['pos'], {}), '(pos)\n', (1625, 1630), True, 'import numpy as np\n'), ((1952, 1980), 'numpy.random.uniform', 'np.random.uniform', (['(-7)', '(+7)', '(2)'], {}), '(-7, +7, 2)\n', (1969, 1980), True, 'import numpy as np\n'), ((2006, 2030), 'numpy.linalg.norm', 'np.linalg.norm', (['goal_pos'], {}), '(goal_pos)\n', (2020, 2030), True, 'import numpy as np\n'), ((2054, 2098), 'numpy.linalg.norm', 'np.linalg.norm', (['(agent.state.p_pos - goal_pos)'], {}), '(agent.state.p_pos - goal_pos)\n', (2068, 2098), True, 'import numpy as np\n'), ((2331, 2358), 'numpy.random.uniform', 'np.random.uniform', (['(0.2)', '(0.3)'], {}), '(0.2, 0.3)\n', (2348, 2358), True, 'import numpy as np\n'), ((2624, 2645), 'numpy.zeros', 'np.zeros', (['world.dim_p'], {}), '(world.dim_p)\n', (2632, 2645), True, 'import numpy as np\n'), ((2687, 2713), 'numpy.copy', 'np.copy', (['agent.state.p_pos'], {}), '(agent.state.p_pos)\n', (2694, 2713), True, 'import numpy as np\n'), ((2755, 2781), 'numpy.copy', 'np.copy', 
(['agent.state.p_vel'], {}), '(agent.state.p_vel)\n', (2762, 2781), True, 'import numpy as np\n'), ((2810, 2831), 'numpy.zeros', 'np.zeros', (['world.dim_c'], {}), '(world.dim_c)\n', (2818, 2831), True, 'import numpy as np\n'), ((4797, 4854), 'numpy.concatenate', 'np.concatenate', (['(agent.lidar_memory + [agent.agents_lidar])'], {}), '(agent.lidar_memory + [agent.agents_lidar])\n', (4811, 4854), True, 'import numpy as np\n'), ((2205, 2251), 'colorlover.to_numeric', 'cl.to_numeric', (["cl.scales['5']['div']['RdYlBu']"], {}), "(cl.scales['5']['div']['RdYlBu'])\n", (2218, 2251), True, 'import colorlover as cl\n'), ((4428, 4453), 'numpy.abs', 'np.abs', (['agent.state.p_vel'], {}), '(agent.state.p_vel)\n', (4434, 4453), True, 'import numpy as np\n')]
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for conv_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from absl.testing import parameterized
import numpy as np
from tensorflow.python.frozen_keras.utils import conv_utils
from tensorflow.python.platform import test
def _get_const_output_shape(input_shape, dim):
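  # Clamp every entry of input_shape to at most dim.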
return tuple([min(d, dim) for d in input_shape])
input_shapes = [
(0,),
(0, 0),
(1,),
(2,),
(3,),
(1, 0),
(0, 3),
(1, 1),
(1, 2),
(3, 1),
(2, 2),
(3, 3),
(1, 0, 1),
(5, 2, 3),
(3, 5, 6, 7, 0),
(3, 2, 2, 4, 4),
(1, 2, 3, 4, 7, 2),
]
class TestBasicConvUtilsTest(test.TestCase):
def test_convert_data_format(self):
self.assertEqual('NCDHW', conv_utils.convert_data_format(
'channels_first', 5))
self.assertEqual('NCHW', conv_utils.convert_data_format(
'channels_first', 4))
self.assertEqual('NCW', conv_utils.convert_data_format('channels_first', 3))
self.assertEqual('NHWC', conv_utils.convert_data_format('channels_last', 4))
self.assertEqual('NWC', conv_utils.convert_data_format('channels_last', 3))
self.assertEqual('NDHWC', conv_utils.convert_data_format(
'channels_last', 5))
with self.assertRaises(ValueError):
conv_utils.convert_data_format('invalid', 2)
def test_normalize_tuple(self):
self.assertEqual((2, 2, 2),
conv_utils.normalize_tuple(2, n=3, name='strides'))
self.assertEqual((2, 1, 2),
conv_utils.normalize_tuple((2, 1, 2), n=3, name='strides'))
with self.assertRaises(ValueError):
conv_utils.normalize_tuple((2, 1), n=3, name='strides')
with self.assertRaises(ValueError):
conv_utils.normalize_tuple(None, n=3, name='strides')
def test_normalize_data_format(self):
self.assertEqual('channels_last',
conv_utils.normalize_data_format('Channels_Last'))
self.assertEqual('channels_first',
conv_utils.normalize_data_format('CHANNELS_FIRST'))
with self.assertRaises(ValueError):
conv_utils.normalize_data_format('invalid')
def test_normalize_padding(self):
self.assertEqual('same', conv_utils.normalize_padding('SAME'))
self.assertEqual('valid', conv_utils.normalize_padding('VALID'))
with self.assertRaises(ValueError):
conv_utils.normalize_padding('invalid')
def test_conv_output_length(self):
self.assertEqual(4, conv_utils.conv_output_length(4, 2, 'same', 1, 1))
self.assertEqual(2, conv_utils.conv_output_length(4, 2, 'same', 2, 1))
self.assertEqual(3, conv_utils.conv_output_length(4, 2, 'valid', 1, 1))
self.assertEqual(2, conv_utils.conv_output_length(4, 2, 'valid', 2, 1))
self.assertEqual(5, conv_utils.conv_output_length(4, 2, 'full', 1, 1))
self.assertEqual(3, conv_utils.conv_output_length(4, 2, 'full', 2, 1))
self.assertEqual(2, conv_utils.conv_output_length(5, 2, 'valid', 2, 2))
def test_conv_input_length(self):
self.assertEqual(3, conv_utils.conv_input_length(4, 2, 'same', 1))
self.assertEqual(2, conv_utils.conv_input_length(2, 2, 'same', 2))
self.assertEqual(4, conv_utils.conv_input_length(3, 2, 'valid', 1))
self.assertEqual(4, conv_utils.conv_input_length(2, 2, 'valid', 2))
self.assertEqual(3, conv_utils.conv_input_length(4, 2, 'full', 1))
self.assertEqual(4, conv_utils.conv_input_length(3, 2, 'full', 2))
def test_deconv_output_length(self):
self.assertEqual(4, conv_utils.deconv_output_length(4, 2, 'same', stride=1))
self.assertEqual(8, conv_utils.deconv_output_length(4, 2, 'same', stride=2))
self.assertEqual(5, conv_utils.deconv_output_length(
4, 2, 'valid', stride=1))
self.assertEqual(8, conv_utils.deconv_output_length(
4, 2, 'valid', stride=2))
self.assertEqual(3, conv_utils.deconv_output_length(4, 2, 'full', stride=1))
self.assertEqual(6, conv_utils.deconv_output_length(4, 2, 'full', stride=2))
self.assertEqual(
5,
conv_utils.deconv_output_length(
4, 2, 'same', output_padding=2, stride=1))
self.assertEqual(
7,
conv_utils.deconv_output_length(
4, 2, 'same', output_padding=1, stride=2))
self.assertEqual(
7,
conv_utils.deconv_output_length(
4, 2, 'valid', output_padding=2, stride=1))
self.assertEqual(
9,
conv_utils.deconv_output_length(
4, 2, 'valid', output_padding=1, stride=2))
self.assertEqual(
5,
conv_utils.deconv_output_length(
4, 2, 'full', output_padding=2, stride=1))
self.assertEqual(
7,
conv_utils.deconv_output_length(
4, 2, 'full', output_padding=1, stride=2))
self.assertEqual(
5,
conv_utils.deconv_output_length(
4, 2, 'same', output_padding=1, stride=1, dilation=2))
self.assertEqual(
12,
conv_utils.deconv_output_length(
4, 2, 'valid', output_padding=2, stride=2, dilation=3))
self.assertEqual(
6,
conv_utils.deconv_output_length(
4, 2, 'full', output_padding=2, stride=2, dilation=3))
@parameterized.parameters(input_shapes)
class TestConvUtils(test.TestCase, parameterized.TestCase):
def test_conv_kernel_mask_fc(self, *input_shape):
padding = 'valid'
kernel_shape = input_shape
ndims = len(input_shape)
strides = (1,) * ndims
output_shape = _get_const_output_shape(input_shape, dim=1)
mask = np.ones(input_shape + output_shape, np.bool)
self.assertAllEqual(
mask,
conv_utils.conv_kernel_mask(
input_shape,
kernel_shape,
strides,
padding
)
)
def test_conv_kernel_mask_diag(self, *input_shape):
ndims = len(input_shape)
kernel_shape = (1,) * ndims
strides = (1,) * ndims
for padding in ['valid', 'same']:
mask = np.identity(int(np.prod(input_shape)), np.bool)
mask = np.reshape(mask, input_shape * 2)
self.assertAllEqual(
mask,
conv_utils.conv_kernel_mask(
input_shape,
kernel_shape,
strides,
padding
)
)
def test_conv_kernel_mask_full_stride(self, *input_shape):
padding = 'valid'
ndims = len(input_shape)
kernel_shape = (1,) * ndims
strides = tuple([max(d, 1) for d in input_shape])
output_shape = _get_const_output_shape(input_shape, dim=1)
mask = np.zeros(input_shape + output_shape, np.bool)
if all(d > 0 for d in mask.shape):
mask[(0,) * len(output_shape)] = True
self.assertAllEqual(
mask,
conv_utils.conv_kernel_mask(
input_shape,
kernel_shape,
strides,
padding
)
)
def test_conv_kernel_mask_almost_full_stride(self, *input_shape):
padding = 'valid'
ndims = len(input_shape)
kernel_shape = (1,) * ndims
strides = tuple([max(d - 1, 1) for d in input_shape])
output_shape = _get_const_output_shape(input_shape, dim=2)
mask = np.zeros(input_shape + output_shape, np.bool)
if all(d > 0 for d in mask.shape):
for in_position in itertools.product(*[[0, d - 1] for d in input_shape]):
out_position = tuple([min(p, 1) for p in in_position])
mask[in_position + out_position] = True
self.assertAllEqual(
mask,
conv_utils.conv_kernel_mask(
input_shape,
kernel_shape,
strides,
padding
)
)
def test_conv_kernel_mask_rect_kernel(self, *input_shape):
padding = 'valid'
ndims = len(input_shape)
strides = (1,) * ndims
for d in range(ndims):
kernel_shape = [1] * ndims
kernel_shape[d] = input_shape[d]
output_shape = list(input_shape)
output_shape[d] = min(1, input_shape[d])
mask = np.identity(int(np.prod(input_shape)), np.bool)
mask = np.reshape(mask, input_shape * 2)
for p in itertools.product(*[range(input_shape[dim])
for dim in range(ndims)]):
p = list(p)
p[d] = slice(None)
mask[p * 2] = True
mask = np.take(mask, range(0, min(1, input_shape[d])), ndims + d)
self.assertAllEqual(
mask,
conv_utils.conv_kernel_mask(
input_shape,
kernel_shape,
strides,
padding
)
)
def test_conv_kernel_mask_wrong_padding(self, *input_shape):
ndims = len(input_shape)
kernel_shape = (1,) * ndims
strides = (1,) * ndims
conv_utils.conv_kernel_mask(
input_shape,
kernel_shape,
strides,
'valid'
)
conv_utils.conv_kernel_mask(
input_shape,
kernel_shape,
strides,
'same'
)
self.assertRaises(NotImplementedError,
conv_utils.conv_kernel_mask,
input_shape, kernel_shape, strides, 'full')
def test_conv_kernel_mask_wrong_dims(self, *input_shape):
kernel_shape = 1
strides = 1
conv_utils.conv_kernel_mask(
input_shape,
kernel_shape,
strides,
'valid'
)
ndims = len(input_shape)
kernel_shape = (2,) * (ndims + 1)
self.assertRaises(ValueError,
conv_utils.conv_kernel_mask,
input_shape, kernel_shape, strides, 'same')
strides = (1,) * ndims
self.assertRaises(ValueError,
conv_utils.conv_kernel_mask,
input_shape, kernel_shape, strides, 'valid')
kernel_shape = (1,) * ndims
strides = (2,) * (ndims - 1)
self.assertRaises(ValueError,
conv_utils.conv_kernel_mask,
input_shape, kernel_shape, strides, 'valid')
strides = (2,) * ndims
conv_utils.conv_kernel_mask(
input_shape,
kernel_shape,
strides,
'valid'
)
if __name__ == '__main__':
test.main()
|
[
"tensorflow.python.platform.test.main",
"tensorflow.python.frozen_keras.utils.conv_utils.convert_data_format",
"tensorflow.python.frozen_keras.utils.conv_utils.normalize_data_format",
"tensorflow.python.frozen_keras.utils.conv_utils.conv_input_length",
"numpy.zeros",
"numpy.ones",
"absl.testing.parameterized.parameters",
"tensorflow.python.frozen_keras.utils.conv_utils.conv_kernel_mask",
"tensorflow.python.frozen_keras.utils.conv_utils.deconv_output_length",
"numpy.reshape",
"itertools.product",
"tensorflow.python.frozen_keras.utils.conv_utils.normalize_padding",
"tensorflow.python.frozen_keras.utils.conv_utils.normalize_tuple",
"tensorflow.python.frozen_keras.utils.conv_utils.conv_output_length",
"numpy.prod"
] |
[((5908, 5946), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['input_shapes'], {}), '(input_shapes)\n', (5932, 5946), False, 'from absl.testing import parameterized\n'), ((10756, 10767), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (10765, 10767), False, 'from tensorflow.python.platform import test\n'), ((6243, 6287), 'numpy.ones', 'np.ones', (['(input_shape + output_shape)', 'np.bool'], {}), '(input_shape + output_shape, np.bool)\n', (6250, 6287), True, 'import numpy as np\n'), ((7238, 7283), 'numpy.zeros', 'np.zeros', (['(input_shape + output_shape)', 'np.bool'], {}), '(input_shape + output_shape, np.bool)\n', (7246, 7283), True, 'import numpy as np\n'), ((7837, 7882), 'numpy.zeros', 'np.zeros', (['(input_shape + output_shape)', 'np.bool'], {}), '(input_shape + output_shape, np.bool)\n', (7845, 7882), True, 'import numpy as np\n'), ((9363, 9435), 'tensorflow.python.frozen_keras.utils.conv_utils.conv_kernel_mask', 'conv_utils.conv_kernel_mask', (['input_shape', 'kernel_shape', 'strides', '"""valid"""'], {}), "(input_shape, kernel_shape, strides, 'valid')\n", (9390, 9435), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((9479, 9550), 'tensorflow.python.frozen_keras.utils.conv_utils.conv_kernel_mask', 'conv_utils.conv_kernel_mask', (['input_shape', 'kernel_shape', 'strides', '"""same"""'], {}), "(input_shape, kernel_shape, strides, 'same')\n", (9506, 9550), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((9853, 9925), 'tensorflow.python.frozen_keras.utils.conv_utils.conv_kernel_mask', 'conv_utils.conv_kernel_mask', (['input_shape', 'kernel_shape', 'strides', '"""valid"""'], {}), "(input_shape, kernel_shape, strides, 'valid')\n", (9880, 9925), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((10614, 10686), 'tensorflow.python.frozen_keras.utils.conv_utils.conv_kernel_mask', 'conv_utils.conv_kernel_mask', (['input_shape', 'kernel_shape', 'strides', '"""valid"""'], {}), "(input_shape, kernel_shape, strides, 'valid')\n", (10641, 10686), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((1478, 1529), 'tensorflow.python.frozen_keras.utils.conv_utils.convert_data_format', 'conv_utils.convert_data_format', (['"""channels_first"""', '(5)'], {}), "('channels_first', 5)\n", (1508, 1529), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((1569, 1620), 'tensorflow.python.frozen_keras.utils.conv_utils.convert_data_format', 'conv_utils.convert_data_format', (['"""channels_first"""', '(4)'], {}), "('channels_first', 4)\n", (1599, 1620), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((1659, 1710), 'tensorflow.python.frozen_keras.utils.conv_utils.convert_data_format', 'conv_utils.convert_data_format', (['"""channels_first"""', '(3)'], {}), "('channels_first', 3)\n", (1689, 1710), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((1741, 1791), 'tensorflow.python.frozen_keras.utils.conv_utils.convert_data_format', 'conv_utils.convert_data_format', (['"""channels_last"""', '(4)'], {}), "('channels_last', 4)\n", (1771, 1791), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((1821, 1871), 'tensorflow.python.frozen_keras.utils.conv_utils.convert_data_format', 'conv_utils.convert_data_format', (['"""channels_last"""', '(3)'], {}), "('channels_last', 3)\n", (1851, 1871), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((1903, 1953), 
'tensorflow.python.frozen_keras.utils.conv_utils.convert_data_format', 'conv_utils.convert_data_format', (['"""channels_last"""', '(5)'], {}), "('channels_last', 5)\n", (1933, 1953), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((2011, 2055), 'tensorflow.python.frozen_keras.utils.conv_utils.convert_data_format', 'conv_utils.convert_data_format', (['"""invalid"""', '(2)'], {}), "('invalid', 2)\n", (2041, 2055), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((2144, 2194), 'tensorflow.python.frozen_keras.utils.conv_utils.normalize_tuple', 'conv_utils.normalize_tuple', (['(2)'], {'n': '(3)', 'name': '"""strides"""'}), "(2, n=3, name='strides')\n", (2170, 2194), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((2249, 2307), 'tensorflow.python.frozen_keras.utils.conv_utils.normalize_tuple', 'conv_utils.normalize_tuple', (['(2, 1, 2)'], {'n': '(3)', 'name': '"""strides"""'}), "((2, 1, 2), n=3, name='strides')\n", (2275, 2307), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((2356, 2411), 'tensorflow.python.frozen_keras.utils.conv_utils.normalize_tuple', 'conv_utils.normalize_tuple', (['(2, 1)'], {'n': '(3)', 'name': '"""strides"""'}), "((2, 1), n=3, name='strides')\n", (2382, 2411), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((2459, 2512), 'tensorflow.python.frozen_keras.utils.conv_utils.normalize_tuple', 'conv_utils.normalize_tuple', (['None'], {'n': '(3)', 'name': '"""strides"""'}), "(None, n=3, name='strides')\n", (2485, 2512), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((2613, 2662), 'tensorflow.python.frozen_keras.utils.conv_utils.normalize_data_format', 'conv_utils.normalize_data_format', (['"""Channels_Last"""'], {}), "('Channels_Last')\n", (2645, 2662), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((2724, 2774), 'tensorflow.python.frozen_keras.utils.conv_utils.normalize_data_format', 'conv_utils.normalize_data_format', (['"""CHANNELS_FIRST"""'], {}), "('CHANNELS_FIRST')\n", (2756, 2774), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((2823, 2866), 'tensorflow.python.frozen_keras.utils.conv_utils.normalize_data_format', 'conv_utils.normalize_data_format', (['"""invalid"""'], {}), "('invalid')\n", (2855, 2866), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((2933, 2969), 'tensorflow.python.frozen_keras.utils.conv_utils.normalize_padding', 'conv_utils.normalize_padding', (['"""SAME"""'], {}), "('SAME')\n", (2961, 2969), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((3001, 3038), 'tensorflow.python.frozen_keras.utils.conv_utils.normalize_padding', 'conv_utils.normalize_padding', (['"""VALID"""'], {}), "('VALID')\n", (3029, 3038), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((3087, 3126), 'tensorflow.python.frozen_keras.utils.conv_utils.normalize_padding', 'conv_utils.normalize_padding', (['"""invalid"""'], {}), "('invalid')\n", (3115, 3126), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((3189, 3238), 'tensorflow.python.frozen_keras.utils.conv_utils.conv_output_length', 'conv_utils.conv_output_length', (['(4)', '(2)', '"""same"""', '(1)', '(1)'], {}), "(4, 2, 'same', 1, 1)\n", (3218, 3238), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((3264, 3313), 'tensorflow.python.frozen_keras.utils.conv_utils.conv_output_length', 
'conv_utils.conv_output_length', (['(4)', '(2)', '"""same"""', '(2)', '(1)'], {}), "(4, 2, 'same', 2, 1)\n", (3293, 3313), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((3339, 3389), 'tensorflow.python.frozen_keras.utils.conv_utils.conv_output_length', 'conv_utils.conv_output_length', (['(4)', '(2)', '"""valid"""', '(1)', '(1)'], {}), "(4, 2, 'valid', 1, 1)\n", (3368, 3389), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((3415, 3465), 'tensorflow.python.frozen_keras.utils.conv_utils.conv_output_length', 'conv_utils.conv_output_length', (['(4)', '(2)', '"""valid"""', '(2)', '(1)'], {}), "(4, 2, 'valid', 2, 1)\n", (3444, 3465), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((3491, 3540), 'tensorflow.python.frozen_keras.utils.conv_utils.conv_output_length', 'conv_utils.conv_output_length', (['(4)', '(2)', '"""full"""', '(1)', '(1)'], {}), "(4, 2, 'full', 1, 1)\n", (3520, 3540), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((3566, 3615), 'tensorflow.python.frozen_keras.utils.conv_utils.conv_output_length', 'conv_utils.conv_output_length', (['(4)', '(2)', '"""full"""', '(2)', '(1)'], {}), "(4, 2, 'full', 2, 1)\n", (3595, 3615), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((3641, 3691), 'tensorflow.python.frozen_keras.utils.conv_utils.conv_output_length', 'conv_utils.conv_output_length', (['(5)', '(2)', '"""valid"""', '(2)', '(2)'], {}), "(5, 2, 'valid', 2, 2)\n", (3670, 3691), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((3754, 3799), 'tensorflow.python.frozen_keras.utils.conv_utils.conv_input_length', 'conv_utils.conv_input_length', (['(4)', '(2)', '"""same"""', '(1)'], {}), "(4, 2, 'same', 1)\n", (3782, 3799), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((3825, 3870), 'tensorflow.python.frozen_keras.utils.conv_utils.conv_input_length', 'conv_utils.conv_input_length', (['(2)', '(2)', '"""same"""', '(2)'], {}), "(2, 2, 'same', 2)\n", (3853, 3870), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((3896, 3942), 'tensorflow.python.frozen_keras.utils.conv_utils.conv_input_length', 'conv_utils.conv_input_length', (['(3)', '(2)', '"""valid"""', '(1)'], {}), "(3, 2, 'valid', 1)\n", (3924, 3942), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((3968, 4014), 'tensorflow.python.frozen_keras.utils.conv_utils.conv_input_length', 'conv_utils.conv_input_length', (['(2)', '(2)', '"""valid"""', '(2)'], {}), "(2, 2, 'valid', 2)\n", (3996, 4014), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((4040, 4085), 'tensorflow.python.frozen_keras.utils.conv_utils.conv_input_length', 'conv_utils.conv_input_length', (['(4)', '(2)', '"""full"""', '(1)'], {}), "(4, 2, 'full', 1)\n", (4068, 4085), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((4111, 4156), 'tensorflow.python.frozen_keras.utils.conv_utils.conv_input_length', 'conv_utils.conv_input_length', (['(3)', '(2)', '"""full"""', '(2)'], {}), "(3, 2, 'full', 2)\n", (4139, 4156), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((4222, 4277), 'tensorflow.python.frozen_keras.utils.conv_utils.deconv_output_length', 'conv_utils.deconv_output_length', (['(4)', '(2)', '"""same"""'], {'stride': '(1)'}), "(4, 2, 'same', stride=1)\n", (4253, 4277), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((4303, 4358), 
'tensorflow.python.frozen_keras.utils.conv_utils.deconv_output_length', 'conv_utils.deconv_output_length', (['(4)', '(2)', '"""same"""'], {'stride': '(2)'}), "(4, 2, 'same', stride=2)\n", (4334, 4358), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((4384, 4440), 'tensorflow.python.frozen_keras.utils.conv_utils.deconv_output_length', 'conv_utils.deconv_output_length', (['(4)', '(2)', '"""valid"""'], {'stride': '(1)'}), "(4, 2, 'valid', stride=1)\n", (4415, 4440), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((4475, 4531), 'tensorflow.python.frozen_keras.utils.conv_utils.deconv_output_length', 'conv_utils.deconv_output_length', (['(4)', '(2)', '"""valid"""'], {'stride': '(2)'}), "(4, 2, 'valid', stride=2)\n", (4506, 4531), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((4566, 4621), 'tensorflow.python.frozen_keras.utils.conv_utils.deconv_output_length', 'conv_utils.deconv_output_length', (['(4)', '(2)', '"""full"""'], {'stride': '(1)'}), "(4, 2, 'full', stride=1)\n", (4597, 4621), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((4647, 4702), 'tensorflow.python.frozen_keras.utils.conv_utils.deconv_output_length', 'conv_utils.deconv_output_length', (['(4)', '(2)', '"""full"""'], {'stride': '(2)'}), "(4, 2, 'full', stride=2)\n", (4678, 4702), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((4745, 4818), 'tensorflow.python.frozen_keras.utils.conv_utils.deconv_output_length', 'conv_utils.deconv_output_length', (['(4)', '(2)', '"""same"""'], {'output_padding': '(2)', 'stride': '(1)'}), "(4, 2, 'same', output_padding=2, stride=1)\n", (4776, 4818), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((4874, 4947), 'tensorflow.python.frozen_keras.utils.conv_utils.deconv_output_length', 'conv_utils.deconv_output_length', (['(4)', '(2)', '"""same"""'], {'output_padding': '(1)', 'stride': '(2)'}), "(4, 2, 'same', output_padding=1, stride=2)\n", (4905, 4947), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((5003, 5077), 'tensorflow.python.frozen_keras.utils.conv_utils.deconv_output_length', 'conv_utils.deconv_output_length', (['(4)', '(2)', '"""valid"""'], {'output_padding': '(2)', 'stride': '(1)'}), "(4, 2, 'valid', output_padding=2, stride=1)\n", (5034, 5077), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((5133, 5207), 'tensorflow.python.frozen_keras.utils.conv_utils.deconv_output_length', 'conv_utils.deconv_output_length', (['(4)', '(2)', '"""valid"""'], {'output_padding': '(1)', 'stride': '(2)'}), "(4, 2, 'valid', output_padding=1, stride=2)\n", (5164, 5207), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((5263, 5336), 'tensorflow.python.frozen_keras.utils.conv_utils.deconv_output_length', 'conv_utils.deconv_output_length', (['(4)', '(2)', '"""full"""'], {'output_padding': '(2)', 'stride': '(1)'}), "(4, 2, 'full', output_padding=2, stride=1)\n", (5294, 5336), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((5392, 5465), 'tensorflow.python.frozen_keras.utils.conv_utils.deconv_output_length', 'conv_utils.deconv_output_length', (['(4)', '(2)', '"""full"""'], {'output_padding': '(1)', 'stride': '(2)'}), "(4, 2, 'full', output_padding=1, stride=2)\n", (5423, 5465), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((5521, 5610), 'tensorflow.python.frozen_keras.utils.conv_utils.deconv_output_length', 
'conv_utils.deconv_output_length', (['(4)', '(2)', '"""same"""'], {'output_padding': '(1)', 'stride': '(1)', 'dilation': '(2)'}), "(4, 2, 'same', output_padding=1, stride=1,\n dilation=2)\n", (5552, 5610), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((5663, 5753), 'tensorflow.python.frozen_keras.utils.conv_utils.deconv_output_length', 'conv_utils.deconv_output_length', (['(4)', '(2)', '"""valid"""'], {'output_padding': '(2)', 'stride': '(2)', 'dilation': '(3)'}), "(4, 2, 'valid', output_padding=2, stride=2,\n dilation=3)\n", (5694, 5753), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((5805, 5894), 'tensorflow.python.frozen_keras.utils.conv_utils.deconv_output_length', 'conv_utils.deconv_output_length', (['(4)', '(2)', '"""full"""'], {'output_padding': '(2)', 'stride': '(2)', 'dilation': '(3)'}), "(4, 2, 'full', output_padding=2, stride=2,\n dilation=3)\n", (5836, 5894), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((6335, 6407), 'tensorflow.python.frozen_keras.utils.conv_utils.conv_kernel_mask', 'conv_utils.conv_kernel_mask', (['input_shape', 'kernel_shape', 'strides', 'padding'], {}), '(input_shape, kernel_shape, strides, padding)\n', (6362, 6407), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((6728, 6761), 'numpy.reshape', 'np.reshape', (['mask', '(input_shape * 2)'], {}), '(mask, input_shape * 2)\n', (6738, 6761), True, 'import numpy as np\n'), ((7415, 7487), 'tensorflow.python.frozen_keras.utils.conv_utils.conv_kernel_mask', 'conv_utils.conv_kernel_mask', (['input_shape', 'kernel_shape', 'strides', 'padding'], {}), '(input_shape, kernel_shape, strides, padding)\n', (7442, 7487), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((7947, 8000), 'itertools.product', 'itertools.product', (['*[[0, d - 1] for d in input_shape]'], {}), '(*[[0, d - 1] for d in input_shape])\n', (7964, 8000), False, 'import itertools\n'), ((8161, 8233), 'tensorflow.python.frozen_keras.utils.conv_utils.conv_kernel_mask', 'conv_utils.conv_kernel_mask', (['input_shape', 'kernel_shape', 'strides', 'padding'], {}), '(input_shape, kernel_shape, strides, padding)\n', (8188, 8233), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((8700, 8733), 'numpy.reshape', 'np.reshape', (['mask', '(input_shape * 2)'], {}), '(mask, input_shape * 2)\n', (8710, 8733), True, 'import numpy as np\n'), ((6815, 6887), 'tensorflow.python.frozen_keras.utils.conv_utils.conv_kernel_mask', 'conv_utils.conv_kernel_mask', (['input_shape', 'kernel_shape', 'strides', 'padding'], {}), '(input_shape, kernel_shape, strides, padding)\n', (6842, 6887), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((9057, 9129), 'tensorflow.python.frozen_keras.utils.conv_utils.conv_kernel_mask', 'conv_utils.conv_kernel_mask', (['input_shape', 'kernel_shape', 'strides', 'padding'], {}), '(input_shape, kernel_shape, strides, padding)\n', (9084, 9129), False, 'from tensorflow.python.frozen_keras.utils import conv_utils\n'), ((6683, 6703), 'numpy.prod', 'np.prod', (['input_shape'], {}), '(input_shape)\n', (6690, 6703), True, 'import numpy as np\n'), ((8655, 8675), 'numpy.prod', 'np.prod', (['input_shape'], {}), '(input_shape)\n', (8662, 8675), True, 'import numpy as np\n')]
|
import numpy as np
from numpy.testing import *
from supreme.register.radial_sum import radial_sum
def test_basic():
x = np.array([[1, 0, 2],
[0, 5, 0],
[3, 0, 4]], dtype=np.double)
R = radial_sum(x)
pi = np.pi
assert_array_equal(R[[135, 45, 225, 315]], [1, 2, 3, 4])
if __name__ == "__main__":
run_module_suite()
|
[
"supreme.register.radial_sum.radial_sum",
"numpy.array"
] |
[((126, 186), 'numpy.array', 'np.array', (['[[1, 0, 2], [0, 5, 0], [3, 0, 4]]'], {'dtype': 'np.double'}), '([[1, 0, 2], [0, 5, 0], [3, 0, 4]], dtype=np.double)\n', (134, 186), True, 'import numpy as np\n'), ((231, 244), 'supreme.register.radial_sum.radial_sum', 'radial_sum', (['x'], {}), '(x)\n', (241, 244), False, 'from supreme.register.radial_sum import radial_sum\n')]
|
from __future__ import print_function
import torch.nn.init as init
import argparse
import time
import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.utils.data
from torch.autograd import Variable
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
from adamPre import AdamPre
from mogdata import generate_data_SingleBatch, loglikelihood
# TODO: Needed while running on a headless server. Change the matplotlib backend accordingly.
plt.switch_backend('agg')
parser = argparse.ArgumentParser()
# Information regarding data input
parser.add_argument('--batchSize', type=int, default=512, help='input batch size')
parser.add_argument('--modes', type=int, default=8, help='total number of gaussian modes to consider')
parser.add_argument('--radius', type=int, default=1, help='radius of circle with MoG')
parser.add_argument('--sigma', type=float, default=0.01, help='variance of gaussians, default=0.01')
# Information regarding network
parser.add_argument('--ngf', type=int, default=128)
parser.add_argument('--ndf', type=int, default=128)
parser.add_argument('--nz', type=int, default=2, help='size of the latent z vector')
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
parser.add_argument('--netG', default='', help="path to netG (to continue training)")
parser.add_argument('--netD', default='', help="path to netD (to continue training)")
# Training/Optimizer information
parser.add_argument('--niter', type=int, default=50000, help='number of training iterations')
parser.add_argument('--lr', type=float, default=1e-3, help='learning rate, default=0.001')
parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for adam. default=0.9')
parser.add_argument('--pdhgGLookAhead', action='store_true', help='enables generator lookahead')
parser.add_argument('--pdhgDLookAhead', action='store_true', help='enables discriminator lookahead')
parser.add_argument('--GLRatio', type=float, default=1.0, help='scaling factor for lr of generator')
parser.add_argument('--DLRatio', type=float, default=1.0, help='scaling factor for lr of discriminator')
# Miscellaneous information
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--outf', default='.', help='folder to output images and model checkpoints')
parser.add_argument('--manualSeed', type=int, help='manual seed')
parser.add_argument('--deviceID', type=int, help='deviceID', default=0)
parser.add_argument('--verbose', action='store_true', help='displays additional information')
# Options for visualization
parser.add_argument('--viz_every', type=int, default=10000, help='plot visualizations every this many iterations')
parser.add_argument('--n_batches_viz', type=int, default=10, help='number of samples used for visualization')
parser.add_argument('--markerSize', type=float, help='marker size for the generated scatter plots')
parser.add_argument('--plotRealData', action='store_true', help='saves real samples')
parser.add_argument('--plotLoss', action='store_true', help='Enables plotting of loss function')
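# Generator: a small fully-connected tanh network mapping an nz-dimensional latent vector to a 2-D sample.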
class _netG(nn.Module):
def __init__(self,ngpu,nz,ngf):
super(_netG, self).__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
# input is Z, going into a convolution
nn.Linear(nz, ngf),
nn.Tanh(),
nn.Linear(ngf, ngf),
nn.Tanh(),
nn.Linear(ngf, 2),
)
def forward(self, input):
if self.ngpu > 1 and isinstance(input.data, torch.cuda.FloatTensor):
output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
else:
output = self.main(input)
return output
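# Discriminator: a small fully-connected tanh network mapping a 2-D point to the probability of it being real.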
class _netD(nn.Module):
def __init__(self, ngpu, ndf):
super(_netD, self).__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
nn.Linear(2, ndf),
nn.Tanh(),
nn.Linear(ndf, ndf),
nn.Tanh(),
nn.Linear(ndf, 1),
nn.Sigmoid()
)
def forward(self, input):
if self.ngpu > 1 and isinstance(input.data, torch.cuda.FloatTensor):
output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
else:
output = self.main(input)
return output.view(-1, 1)
def main():
opt = parser.parse_args()
print(opt)
try:
os.makedirs(opt.outf)
except OSError:
pass
if opt.manualSeed is None:
opt.manualSeed = random.randint(1, 10000)
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
np.random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
if opt.cuda:
torch.cuda.manual_seed_all(opt.manualSeed)
torch.backends.cudnn.enabled = False
print("torch.backends.cudnn.enabled is: ", torch.backends.cudnn.enabled)
cudnn.benchmark = True
if torch.cuda.is_available():
ngpu = int(opt.ngpu)
if not opt.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
if int(opt.ngpu) > 0:
print("WARNING: CUDA not available, cannot use --ngpu =", opt.ngpu)
ngpu = 0
# Initializing Generator and Discriminator Network
nz = int(opt.nz)
ngf = int(opt.ngf)
ndf = int(opt.ndf)
netG = _netG(ngpu,nz,ngf)
netG.apply(weights_init)
if opt.netG != '':
netG.load_state_dict(torch.load(opt.netG))
print(netG)
netD = _netD(ngpu,ndf)
netD.apply(weights_init)
if opt.netD != '':
netD.load_state_dict(torch.load(opt.netD))
print(netD)
criterion = nn.BCELoss()
input = torch.FloatTensor(opt.batchSize, 2)
noise = torch.FloatTensor(opt.batchSize, nz)
fixed_noise = torch.FloatTensor(opt.batchSize * opt.n_batches_viz, nz).normal_(0, 1)
label = torch.FloatTensor(opt.batchSize)
real_label = 1
fake_label = 0
if opt.cuda:
netD.cuda()
netG.cuda()
criterion.cuda()
input, label = input.cuda(), label.cuda()
noise, fixed_noise = noise.cuda(), fixed_noise.cuda()
input = Variable(input)
label = Variable(label)
noise = Variable(noise)
fixed_noise = Variable(fixed_noise)
# Flag for disabling prediction step in the first iterate
firstTime = True
# setup optimizer
optimizerD = AdamPre(netD.parameters(), lr=opt.lr/opt.DLRatio, betas=(opt.beta1, 0.999), name='optD')
optimizerG = AdamPre(netG.parameters(), lr=opt.lr/opt.GLRatio, betas=(opt.beta1, 0.999), name='optG')
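    # AdamPre exposes stepLookAhead()/restoreStepLookAhead(), used below to temporarily apply and then undo a predicted parameter update of the opposing network.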
fs = []
np_samples = []
np_samples_real = []
for i in range(opt.niter):
if opt.verbose:
c1 = time.clock()
############################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
###########################
# sampling input batch
real_cpu = generate_data_SingleBatch(num_mode=opt.modes, radius=opt.radius, center=(0, 0), sigma=opt.sigma,
batchSize=opt.batchSize)
batch_size = real_cpu.size(0)
input.data.resize_(real_cpu.size()).copy_(real_cpu)
label.data.resize_(batch_size).fill_(real_label)
netD.zero_grad()
output = netD(input)
errD_real = criterion(output, label)
errD_real.backward()
D_x = output.data.mean()
# Update the generator weights with prediction
# We avoid update during the first iteration
if not firstTime and opt.pdhgGLookAhead:
optimizerG.stepLookAhead()
# train with fake
noise.data.resize_(batch_size, nz)
noise.data.normal_(0, 1)
label.data.resize_(batch_size)
label.data.fill_(fake_label)
fake = netG(noise)
output = netD(fake.detach())
errD_fake = criterion(output, label)
errD_fake.backward()
D_G_z1 = output.data.mean()
errD = errD_real + errD_fake
optimizerD.step()
# restore the previous (non-predicted) weights of Generator
if not firstTime and opt.pdhgGLookAhead:
optimizerG.restoreStepLookAhead()
# Set the flag to false after the first iter
firstTime = False
############################
# (2) Update G network: maximize -log(1 - D(G(z)))
###########################
# Update discriminator weights with prediction; restore after the generator update.
if opt.pdhgDLookAhead:
optimizerD.stepLookAhead()
# Unlike DCGAN code, we use original loss for generator. Hence we fill fake labels.
label.data.fill_(fake_label)
netG.zero_grad()
fake = netG(noise)
output = netD(fake)
errG = -criterion(output, label)
errG.backward()
D_G_z2 = output.data.mean()
optimizerG.step()
# restore back discriminator weights
if opt.pdhgDLookAhead:
optimizerD.restoreStepLookAhead()
if opt.plotLoss:
f = [errD.data[0], errG.data[0]]
fs.append(f)
print('[%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
% (i, opt.niter, errD.data[0], errG.data[0], D_x, D_G_z1, D_G_z2))
if opt.verbose:
print("itr=", i, "clock time elapsed=", time.clock() - c1)
if i % opt.viz_every == 0 or i == opt.niter - 1:
# save checkpoints
torch.save(netG.state_dict(), '{0}/netG_epoch_{1}.pth'.format(opt.outf, i))
torch.save(netD.state_dict(), '{0}/netD_epoch_{1}.pth'.format(opt.outf, i))
tmp_cpu = ((netG(fixed_noise)).data).cpu().numpy()
np_samples.append(tmp_cpu)
fig = plt.figure(figsize=(5, 5))
if opt.markerSize:
plt.scatter(tmp_cpu[:, 0], tmp_cpu[:, 1], c='g', edgecolor='none', s=opt.markerSize)
else:
plt.scatter(tmp_cpu[:, 0], tmp_cpu[:, 1], c='g', edgecolor='none')
plt.axis('off')
plt.savefig('%s/MoG_Fake_withP_%03d.pdf' % (opt.outf, i))
plt.close()
if opt.plotRealData:
real_cpu_temp = generate_data_SingleBatch(num_mode=opt.modes, radius=opt.radius, center=(0, 0), sigma=opt.sigma,
batchSize=opt.batchSize * opt.n_batches_viz)
tmp_cpu = real_cpu_temp.numpy()
np_samples_real.append(tmp_cpu)
fig = plt.figure(figsize=(5, 5))
if opt.markerSize:
plt.scatter(tmp_cpu[:, 0], tmp_cpu[:, 1], c='g', edgecolor='none', s=opt.markerSize) # green is ground truth
else:
plt.scatter(tmp_cpu[:, 0], tmp_cpu[:, 1], c='g', edgecolor='none') # green is ground truth
plt.axis('off')
plt.savefig('%s/MoG_Real.pdf' % (opt.outf))
plt.close()
# Final KDE plot for paper. It also plots log likelihood
xmax = 1.3
nLevels = 20
np_samples_ = np_samples[::1]
cols = len(np_samples_)
bg_color = sns.color_palette('Greens', n_colors=256)[0]
plt.figure(figsize=(2*cols, 2))
for i, samps in enumerate(np_samples_):
if i == 0:
ax = plt.subplot(1,cols,1)
else:
plt.subplot(1,cols,i+1, sharex=ax, sharey=ax)
ax2 = sns.kdeplot(samps[:, 0], samps[:, 1], shade=True, cmap='Greens', n_levels=nLevels, clip=[[-xmax,xmax]]*2)
ax2.set_facecolor(bg_color)
plt.xticks([]); plt.yticks([])
plt.title('step %d'%(i*opt.viz_every))
plt.gcf().tight_layout()
plt.savefig('{0}/all.png'.format(opt.outf))
if opt.plotLoss:
plt.figure()
fs = np.array(fs)
plt.plot(fs)
plt.legend(('Discriminator loss', 'Generator loss'))
plt.savefig('{0}/losses.pdf'.format(opt.outf))
plt.close('all')
# custom weights initialization called on netG and netD
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
init.orthogonal(m.weight)
init.constant(m.bias, 0.1)
if __name__ == '__main__':
main()
|
[
"matplotlib.pyplot.title",
"numpy.random.seed",
"argparse.ArgumentParser",
"seaborn.kdeplot",
"matplotlib.pyplot.figure",
"torch.nn.BCELoss",
"random.randint",
"matplotlib.pyplot.close",
"torch.load",
"matplotlib.pyplot.yticks",
"torch.FloatTensor",
"torch.nn.init.orthogonal",
"time.clock",
"random.seed",
"torch.nn.Linear",
"matplotlib.pyplot.xticks",
"mogdata.generate_data_SingleBatch",
"torch.nn.Tanh",
"torch.manual_seed",
"torch.autograd.Variable",
"matplotlib.pyplot.legend",
"torch.cuda.is_available",
"torch.nn.init.constant",
"matplotlib.pyplot.gcf",
"torch.nn.Sigmoid",
"matplotlib.pyplot.switch_backend",
"matplotlib.pyplot.subplot",
"os.makedirs",
"matplotlib.pyplot.plot",
"warnings.filterwarnings",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.axis",
"torch.cuda.manual_seed_all",
"numpy.array",
"seaborn.color_palette",
"matplotlib.pyplot.savefig"
] |
[((365, 398), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (388, 398), False, 'import warnings\n'), ((558, 583), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (576, 583), True, 'import matplotlib.pyplot as plt\n'), ((594, 619), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (617, 619), False, 'import argparse\n'), ((4657, 4684), 'random.seed', 'random.seed', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (4668, 4684), False, 'import random\n'), ((4689, 4719), 'numpy.random.seed', 'np.random.seed', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (4703, 4719), True, 'import numpy as np\n'), ((4724, 4757), 'torch.manual_seed', 'torch.manual_seed', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (4741, 4757), False, 'import torch\n'), ((4989, 5014), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5012, 5014), False, 'import torch\n'), ((5737, 5749), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (5747, 5749), True, 'import torch.nn as nn\n'), ((5763, 5798), 'torch.FloatTensor', 'torch.FloatTensor', (['opt.batchSize', '(2)'], {}), '(opt.batchSize, 2)\n', (5780, 5798), False, 'import torch\n'), ((5811, 5847), 'torch.FloatTensor', 'torch.FloatTensor', (['opt.batchSize', 'nz'], {}), '(opt.batchSize, nz)\n', (5828, 5847), False, 'import torch\n'), ((5950, 5982), 'torch.FloatTensor', 'torch.FloatTensor', (['opt.batchSize'], {}), '(opt.batchSize)\n', (5967, 5982), False, 'import torch\n'), ((6229, 6244), 'torch.autograd.Variable', 'Variable', (['input'], {}), '(input)\n', (6237, 6244), False, 'from torch.autograd import Variable\n'), ((6257, 6272), 'torch.autograd.Variable', 'Variable', (['label'], {}), '(label)\n', (6265, 6272), False, 'from torch.autograd import Variable\n'), ((6285, 6300), 'torch.autograd.Variable', 'Variable', (['noise'], {}), '(noise)\n', (6293, 6300), False, 'from torch.autograd import Variable\n'), ((6319, 6340), 'torch.autograd.Variable', 'Variable', (['fixed_noise'], {}), '(fixed_noise)\n', (6327, 6340), False, 'from torch.autograd import Variable\n'), ((11482, 11515), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2 * cols, 2)'}), '(figsize=(2 * cols, 2))\n', (11492, 11515), True, 'import matplotlib.pyplot as plt\n'), ((12219, 12235), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (12228, 12235), True, 'import matplotlib.pyplot as plt\n'), ((4472, 4493), 'os.makedirs', 'os.makedirs', (['opt.outf'], {}), '(opt.outf)\n', (4483, 4493), False, 'import os\n'), ((4584, 4608), 'random.randint', 'random.randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (4598, 4608), False, 'import random\n'), ((4784, 4826), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (4810, 4826), False, 'import torch\n'), ((7027, 7152), 'mogdata.generate_data_SingleBatch', 'generate_data_SingleBatch', ([], {'num_mode': 'opt.modes', 'radius': 'opt.radius', 'center': '(0, 0)', 'sigma': 'opt.sigma', 'batchSize': 'opt.batchSize'}), '(num_mode=opt.modes, radius=opt.radius, center=(0,\n 0), sigma=opt.sigma, batchSize=opt.batchSize)\n', (7052, 7152), False, 'from mogdata import generate_data_SingleBatch, loglikelihood\n'), ((10582, 10727), 'mogdata.generate_data_SingleBatch', 'generate_data_SingleBatch', ([], {'num_mode': 'opt.modes', 'radius': 'opt.radius', 'center': '(0, 0)', 'sigma': 'opt.sigma', 'batchSize': '(opt.batchSize * opt.n_batches_viz)'}), 
'(num_mode=opt.modes, radius=opt.radius, center=(0,\n 0), sigma=opt.sigma, batchSize=opt.batchSize * opt.n_batches_viz)\n', (10607, 10727), False, 'from mogdata import generate_data_SingleBatch, loglikelihood\n'), ((10869, 10895), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (10879, 10895), True, 'import matplotlib.pyplot as plt\n'), ((11172, 11187), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (11180, 11187), True, 'import matplotlib.pyplot as plt\n'), ((11196, 11237), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s/MoG_Real.pdf' % opt.outf)"], {}), "('%s/MoG_Real.pdf' % opt.outf)\n", (11207, 11237), True, 'import matplotlib.pyplot as plt\n'), ((11248, 11259), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11257, 11259), True, 'import matplotlib.pyplot as plt\n'), ((11433, 11474), 'seaborn.color_palette', 'sns.color_palette', (['"""Greens"""'], {'n_colors': '(256)'}), "('Greens', n_colors=256)\n", (11450, 11474), True, 'import seaborn as sns\n'), ((11702, 11815), 'seaborn.kdeplot', 'sns.kdeplot', (['samps[:, 0]', 'samps[:, 1]'], {'shade': '(True)', 'cmap': '"""Greens"""', 'n_levels': 'nLevels', 'clip': '([[-xmax, xmax]] * 2)'}), "(samps[:, 0], samps[:, 1], shade=True, cmap='Greens', n_levels=\n nLevels, clip=[[-xmax, xmax]] * 2)\n", (11713, 11815), True, 'import seaborn as sns\n'), ((11852, 11866), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (11862, 11866), True, 'import matplotlib.pyplot as plt\n'), ((11868, 11882), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (11878, 11882), True, 'import matplotlib.pyplot as plt\n'), ((11891, 11933), 'matplotlib.pyplot.title', 'plt.title', (["('step %d' % (i * opt.viz_every))"], {}), "('step %d' % (i * opt.viz_every))\n", (11900, 11933), True, 'import matplotlib.pyplot as plt\n'), ((12038, 12050), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12048, 12050), True, 'import matplotlib.pyplot as plt\n'), ((12064, 12076), 'numpy.array', 'np.array', (['fs'], {}), '(fs)\n', (12072, 12076), True, 'import numpy as np\n'), ((12085, 12097), 'matplotlib.pyplot.plot', 'plt.plot', (['fs'], {}), '(fs)\n', (12093, 12097), True, 'import matplotlib.pyplot as plt\n'), ((12106, 12158), 'matplotlib.pyplot.legend', 'plt.legend', (["('Discriminator loss', 'Generator loss')"], {}), "(('Discriminator loss', 'Generator loss'))\n", (12116, 12158), True, 'import matplotlib.pyplot as plt\n'), ((12399, 12424), 'torch.nn.init.orthogonal', 'init.orthogonal', (['m.weight'], {}), '(m.weight)\n', (12414, 12424), True, 'import torch.nn.init as init\n'), ((12433, 12459), 'torch.nn.init.constant', 'init.constant', (['m.bias', '(0.1)'], {}), '(m.bias, 0.1)\n', (12446, 12459), True, 'import torch.nn.init as init\n'), ((3378, 3396), 'torch.nn.Linear', 'nn.Linear', (['nz', 'ngf'], {}), '(nz, ngf)\n', (3387, 3396), True, 'import torch.nn as nn\n'), ((3410, 3419), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (3417, 3419), True, 'import torch.nn as nn\n'), ((3433, 3452), 'torch.nn.Linear', 'nn.Linear', (['ngf', 'ngf'], {}), '(ngf, ngf)\n', (3442, 3452), True, 'import torch.nn as nn\n'), ((3466, 3475), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (3473, 3475), True, 'import torch.nn as nn\n'), ((3489, 3506), 'torch.nn.Linear', 'nn.Linear', (['ngf', '(2)'], {}), '(ngf, 2)\n', (3498, 3506), True, 'import torch.nn as nn\n'), ((3954, 3971), 'torch.nn.Linear', 'nn.Linear', (['(2)', 'ndf'], {}), '(2, ndf)\n', (3963, 3971), True, 'import torch.nn as 
nn\n'), ((3985, 3994), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (3992, 3994), True, 'import torch.nn as nn\n'), ((4008, 4027), 'torch.nn.Linear', 'nn.Linear', (['ndf', 'ndf'], {}), '(ndf, ndf)\n', (4017, 4027), True, 'import torch.nn as nn\n'), ((4041, 4050), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (4048, 4050), True, 'import torch.nn as nn\n'), ((4064, 4081), 'torch.nn.Linear', 'nn.Linear', (['ndf', '(1)'], {}), '(ndf, 1)\n', (4073, 4081), True, 'import torch.nn as nn\n'), ((4095, 4107), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (4105, 4107), True, 'import torch.nn as nn\n'), ((5535, 5555), 'torch.load', 'torch.load', (['opt.netG'], {}), '(opt.netG)\n', (5545, 5555), False, 'import torch\n'), ((5682, 5702), 'torch.load', 'torch.load', (['opt.netD'], {}), '(opt.netD)\n', (5692, 5702), False, 'import torch\n'), ((5866, 5922), 'torch.FloatTensor', 'torch.FloatTensor', (['(opt.batchSize * opt.n_batches_viz)', 'nz'], {}), '(opt.batchSize * opt.n_batches_viz, nz)\n', (5883, 5922), False, 'import torch\n'), ((6799, 6811), 'time.clock', 'time.clock', ([], {}), '()\n', (6809, 6811), False, 'import time\n'), ((10121, 10147), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (10131, 10147), True, 'import matplotlib.pyplot as plt\n'), ((10414, 10429), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (10422, 10429), True, 'import matplotlib.pyplot as plt\n'), ((10446, 10503), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s/MoG_Fake_withP_%03d.pdf' % (opt.outf, i))"], {}), "('%s/MoG_Fake_withP_%03d.pdf' % (opt.outf, i))\n", (10457, 10503), True, 'import matplotlib.pyplot as plt\n'), ((10520, 10531), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10529, 10531), True, 'import matplotlib.pyplot as plt\n'), ((10935, 11024), 'matplotlib.pyplot.scatter', 'plt.scatter', (['tmp_cpu[:, 0]', 'tmp_cpu[:, 1]'], {'c': '"""g"""', 'edgecolor': '"""none"""', 's': 'opt.markerSize'}), "(tmp_cpu[:, 0], tmp_cpu[:, 1], c='g', edgecolor='none', s=opt.\n markerSize)\n", (10946, 11024), True, 'import matplotlib.pyplot as plt\n'), ((11071, 11137), 'matplotlib.pyplot.scatter', 'plt.scatter', (['tmp_cpu[:, 0]', 'tmp_cpu[:, 1]'], {'c': '"""g"""', 'edgecolor': '"""none"""'}), "(tmp_cpu[:, 0], tmp_cpu[:, 1], c='g', edgecolor='none')\n", (11082, 11137), True, 'import matplotlib.pyplot as plt\n'), ((11594, 11617), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', 'cols', '(1)'], {}), '(1, cols, 1)\n', (11605, 11617), True, 'import matplotlib.pyplot as plt\n'), ((11642, 11691), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', 'cols', '(i + 1)'], {'sharex': 'ax', 'sharey': 'ax'}), '(1, cols, i + 1, sharex=ax, sharey=ax)\n', (11653, 11691), True, 'import matplotlib.pyplot as plt\n'), ((11935, 11944), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (11942, 11944), True, 'import matplotlib.pyplot as plt\n'), ((10203, 10292), 'matplotlib.pyplot.scatter', 'plt.scatter', (['tmp_cpu[:, 0]', 'tmp_cpu[:, 1]'], {'c': '"""g"""', 'edgecolor': '"""none"""', 's': 'opt.markerSize'}), "(tmp_cpu[:, 0], tmp_cpu[:, 1], c='g', edgecolor='none', s=opt.\n markerSize)\n", (10214, 10292), True, 'import matplotlib.pyplot as plt\n'), ((10330, 10396), 'matplotlib.pyplot.scatter', 'plt.scatter', (['tmp_cpu[:, 0]', 'tmp_cpu[:, 1]'], {'c': '"""g"""', 'edgecolor': '"""none"""'}), "(tmp_cpu[:, 0], tmp_cpu[:, 1], c='g', edgecolor='none')\n", (10341, 10396), True, 'import matplotlib.pyplot as plt\n'), ((9686, 9698), 'time.clock', 'time.clock', ([], {}), 
'()\n', (9696, 9698), False, 'import time\n')]
|
"""
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import scipy.optimize
from nncf.compression_method_api import CompressionScheduler
from nncf.config import NNCFConfig
from nncf.registry import Registry
PRUNING_SCHEDULERS = Registry("pruning_schedulers")
class PruningScheduler(CompressionScheduler):
def __init__(self, pruning_algo, params: NNCFConfig = None):
super().__init__()
if params is None:
self._params = NNCFConfig()
else:
self._params = params
self.algo = pruning_algo
# Number of initial steps of training before pruning
self.num_init_steps = self._params.get('num_init_steps', 0)
self.pruning_steps = self._params.get('pruning_steps', 100)
# Pruning rates
self.initial_pruning = self.algo.pruning_init
self.pruning_target = self._params.get('pruning_target', 0.5)
def load_state_dict(self, state_dict):
super().load_state_dict(state_dict)
self._set_pruning_level()
def epoch_step(self, next_epoch=None):
super().epoch_step(next_epoch)
self._set_pruning_level()
def _set_pruning_level(self):
self.algo.set_pruning_rate(self.current_pruning_level)
if self.current_epoch >= (self.pruning_steps + self.num_init_steps):
self.algo.freeze()
def _calc_pruning_level(self):
raise NotImplementedError
@property
def current_pruning_level(self):
if self.current_epoch >= self.num_init_steps:
return self._calc_pruning_level()
return 0
def _calc_density_level(self):
        return 1 - self.current_pruning_level
@PRUNING_SCHEDULERS.register("baseline")
class BaselinePruningScheduler(PruningScheduler):
"""
    Baseline scheduler that sets the maximum pruning rate after num_init_steps epochs
    and freezes the algorithm afterwards.
"""
def __init__(self, pruning_algo, config=None):
super().__init__(pruning_algo, config)
self._set_pruning_level()
def _calc_pruning_level(self):
return self.pruning_target
def _set_pruning_level(self):
self.algo.set_pruning_rate(self.current_pruning_level)
if self.current_epoch >= self.num_init_steps:
self.algo.freeze()
@PRUNING_SCHEDULERS.register("exponential")
class ExponentialPruningScheduler(PruningScheduler):
"""
Calculates pruning rate progressively according to the formula
P = 1 - a * exp(- k * epoch)
Where:
epoch - epoch number
P - pruning rate for current epoch
a, k - params
"""
def __init__(self, pruning_algo, config=None):
super().__init__(pruning_algo, config)
self.a, self.k = self._init_exp(self.initial_pruning, self.pruning_target, pruning_steps=self.pruning_steps)
self._set_pruning_level()
def _calc_pruning_level(self):
curr_pruning = 1 - self.a * np.exp(-self.k * (self.current_epoch - self.num_init_steps))
max_pruning = self.pruning_target
return max_pruning if curr_pruning >= max_pruning else curr_pruning
@staticmethod
def _init_exp(initial_pruning, max_pruning, pruning_steps=20):
p1 = (0, 1 - initial_pruning)
p2 = (pruning_steps, 1 - max_pruning)
k = np.log(p2[1] / p1[1]) / (p1[0] - p2[0])
a = p1[1] / np.exp(-k * p1[0])
return a, k
@PRUNING_SCHEDULERS.register("exponential_with_bias")
class ExponentialWithBiasPruningScheduler(PruningScheduler):
"""
Calculates pruning rate progressively according to the formula
P = a * exp(- k * epoch) + b
Where:
epoch - epoch number
P - pruning rate for current epoch
a, b, k - params
"""
def __init__(self, pruning_algo, config=None):
super().__init__(pruning_algo, config)
self.a, self.b, self.k = self._init_exp(self.pruning_steps, self.initial_pruning, self.pruning_target)
self._set_pruning_level()
def _calc_pruning_level(self):
curr_pruning = self.a * np.exp(-self.k * (self.current_epoch - self.num_init_steps - 1)) + self.b
max_pruning = self.pruning_target
return max_pruning if curr_pruning >= max_pruning else curr_pruning
@staticmethod
def _init_exp(E_max, P_min, P_max, D=1 / 8):
"""
Find a, b, k for system (from SPFP paper):
1. P_min = a + b
2. P_max = a * exp(-k * E_max) + b
3. 3/4 * P_max = a * exp(-k * E_max * D) + b
Where P_min, P_max - minimal and goal levels of pruning rate
E_max - number of epochs for pruning
"""
def get_b(a, k):
return P_min - a
def get_a(k):
return (3 / 4 * P_max - P_min) / (np.exp(- D * k * E_max) - 1)
def f_to_solve(x):
y = np.exp(D * x * E_max)
return 1 / 3 * y + 1 / (y ** 7) - 4 / 3
k = scipy.optimize.fsolve(f_to_solve, [1])[0]
a = get_a(k)
b = get_b(a, k)
return a, b, k
|
[
"numpy.exp",
"nncf.registry.Registry",
"nncf.config.NNCFConfig",
"numpy.log"
] |
[((772, 802), 'nncf.registry.Registry', 'Registry', (['"""pruning_schedulers"""'], {}), "('pruning_schedulers')\n", (780, 802), False, 'from nncf.registry import Registry\n'), ((997, 1009), 'nncf.config.NNCFConfig', 'NNCFConfig', ([], {}), '()\n', (1007, 1009), False, 'from nncf.config import NNCFConfig\n'), ((3807, 3828), 'numpy.log', 'np.log', (['(p2[1] / p1[1])'], {}), '(p2[1] / p1[1])\n', (3813, 3828), True, 'import numpy as np\n'), ((3867, 3885), 'numpy.exp', 'np.exp', (['(-k * p1[0])'], {}), '(-k * p1[0])\n', (3873, 3885), True, 'import numpy as np\n'), ((5313, 5334), 'numpy.exp', 'np.exp', (['(D * x * E_max)'], {}), '(D * x * E_max)\n', (5319, 5334), True, 'import numpy as np\n'), ((3446, 3506), 'numpy.exp', 'np.exp', (['(-self.k * (self.current_epoch - self.num_init_steps))'], {}), '(-self.k * (self.current_epoch - self.num_init_steps))\n', (3452, 3506), True, 'import numpy as np\n'), ((4546, 4610), 'numpy.exp', 'np.exp', (['(-self.k * (self.current_epoch - self.num_init_steps - 1))'], {}), '(-self.k * (self.current_epoch - self.num_init_steps - 1))\n', (4552, 4610), True, 'import numpy as np\n'), ((5240, 5262), 'numpy.exp', 'np.exp', (['(-D * k * E_max)'], {}), '(-D * k * E_max)\n', (5246, 5262), True, 'import numpy as np\n')]
|
import geopandas as gpd
import numpy as np
import pandas as pd
from shapely.geometry import Point, LineString, Polygon
def convert_geo_list_to_geoseries(geo_list):
for i in range(0, len(geo_list)):
try:
out = out.append(geo_list[i])
        except NameError:  # first element: out does not exist yet
out = gpd.GeoSeries(geo_list[i])
return out
def make_vertical_segments(scanline_row, step_increment = 0.1, segment_width = 1):
    # Note: np.arange steps by step_increment rather than dividing the line into a fixed
    # number of pieces. We build the candidate window start points along y and then offset
    # them by the window width (say 1 m): y_coords[:-n] are the segment starts and
    # y_coords[n:] the segment ends, where n = floor(segment_width / step_increment).
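    # Worked example (illustrative numbers only): with step_increment=0.1 and
    # segment_width=1, n = 10. A vertical scanline from y=0 to y=5 gives
    # y_coords = [0.0, 0.1, ..., 4.9] (50 values; np.arange excludes the endpoint),
    # so pairing y_coords[:-10] with y_coords[10:] yields 40 overlapping 1 m segments
    # whose start points advance by 0.1.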
n = int(segment_width/step_increment)
scanline = scanline_row[1].loc['orig_geom']
x_coord = np.unique(scanline.xy[0])
y_coords = np.arange(np.min(scanline.xy[1]), np.max(scanline.xy[1]), step_increment)
seg_start_point = list(zip(np.repeat(x_coord,len(y_coords[0:-n])), y_coords[0:-n]))
seg_end_point = list(zip(np.repeat(x_coord,len(y_coords[n:])), y_coords[n:]))
seg_points = list(zip(seg_start_point,seg_end_point))
scanline_segments = gpd.GeoSeries(map(LineString, seg_points))
name = scanline_row[1]['name']
names = [name + '_seg_' + str(i) for i in np.arange(0,len(scanline_segments))+1]
segment_df = gpd.GeoDataFrame({
'name': names,
'x_coord': np.repeat(x_coord, len(names)),
'y_midpoint': (y_coords[0:-n] + y_coords[n:])/2},
geometry = scanline_segments
)
segment_df['orig_length'] = segment_df.length
segment_df['orig_geom'] = segment_df['geometry']
return segment_df
def make_horizontal_segments(scanline_row, step_increment = 0.1, segment_width = 1):
n = int(segment_width/step_increment)
scanline = scanline_row[1].loc['orig_geom']
y_coord = np.unique(scanline.xy[1])
x_coords = np.arange(np.min(scanline.xy[0]), np.max(scanline.xy[0]), step_increment)
seg_start_point = list(zip(x_coords[0:-n], np.repeat(y_coord,len(x_coords[0:-n]))))
seg_end_point = list(zip(x_coords[n:],np.repeat(y_coord,len(x_coords[n:]))))
seg_points = list(zip(seg_start_point,seg_end_point))
scanline_segments = gpd.GeoSeries(map(LineString, seg_points))
name = scanline_row[1]['name']
names = [name + '_seg_' + str(i) for i in np.arange(0,len(scanline_segments))+1]
segment_df = gpd.GeoDataFrame({
'name': names,
'y_coord': np.repeat(y_coord, len(names)),
'x_midpoint': (x_coords[0:-n] + x_coords[n:])/2},
geometry = scanline_segments
)
segment_df['orig_length'] = segment_df.length
segment_df['orig_geom'] = segment_df['geometry']
return segment_df
def make_polygon_from_tuple(x,y,w):
return Polygon([[x - w/2, y - w/2], [x - w/2, y + w/2],
[x + w/2, y + w/2], [x + w/2, y - w/2]])
|
[
"geopandas.GeoSeries",
"shapely.geometry.Polygon",
"numpy.min",
"numpy.max",
"numpy.unique"
] |
[((890, 915), 'numpy.unique', 'np.unique', (['scanline.xy[0]'], {}), '(scanline.xy[0])\n', (899, 915), True, 'import numpy as np\n'), ((2032, 2057), 'numpy.unique', 'np.unique', (['scanline.xy[1]'], {}), '(scanline.xy[1])\n', (2041, 2057), True, 'import numpy as np\n'), ((3027, 3136), 'shapely.geometry.Polygon', 'Polygon', (['[[x - w / 2, y - w / 2], [x - w / 2, y + w / 2], [x + w / 2, y + w / 2], [x +\n w / 2, y - w / 2]]'], {}), '([[x - w / 2, y - w / 2], [x - w / 2, y + w / 2], [x + w / 2, y + w /\n 2], [x + w / 2, y - w / 2]])\n', (3034, 3136), False, 'from shapely.geometry import Point, LineString, Polygon\n'), ((941, 963), 'numpy.min', 'np.min', (['scanline.xy[1]'], {}), '(scanline.xy[1])\n', (947, 963), True, 'import numpy as np\n'), ((965, 987), 'numpy.max', 'np.max', (['scanline.xy[1]'], {}), '(scanline.xy[1])\n', (971, 987), True, 'import numpy as np\n'), ((2083, 2105), 'numpy.min', 'np.min', (['scanline.xy[0]'], {}), '(scanline.xy[0])\n', (2089, 2105), True, 'import numpy as np\n'), ((2107, 2129), 'numpy.max', 'np.max', (['scanline.xy[0]'], {}), '(scanline.xy[0])\n', (2113, 2129), True, 'import numpy as np\n'), ((294, 320), 'geopandas.GeoSeries', 'gpd.GeoSeries', (['geo_list[i]'], {}), '(geo_list[i])\n', (307, 320), True, 'import geopandas as gpd\n')]
|
import os,copy
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from collections import OrderedDict
def read_spectral_k(filename="tc_dos.dat"):
"""
    Reads the spectral thermal conductivity information
"""
tcdos_labels = [
"wavelength",
"k_xx_raw","k_yy_raw","k_zz_raw",
"k_xx_smooth","k_yy_smooth","k_zz_smooth",
"lifetime_dos1 ","lifetime_dos2"]
def subselect_table_block(i_start,lines):
i = i_start + 1
table = []
while(lines[i].strip() != ""):
args = lines[i].split()
args = [arg.strip() for arg in args]
args = [float(arg) for arg in args]
table.append(args)
i += 1
return np.array(table)
line = None # initialize
with open(filename,'r') as f:
lines = f.readlines()
lines = [s.strip() for s in lines]
temperatures = []
tcdos_dict = OrderedDict()
for il,line in enumerate(lines):
if line.startswith('# Temp:'):
args = line.split(':')
T = int(float(args[1].strip()))
temperatures.append(T)
tcdos_dict[T] = subselect_table_block(il,lines)
tcdos_df_dict = OrderedDict()
for temp in temperatures:
tcdos_df_dict[temp] = pd.DataFrame(
copy.deepcopy(tcdos_dict[temp]),
columns=list(tcdos_labels))
return {k:v.copy() for k,v in tcdos_df_dict.items()}
def normalize_tcdos(
data_filename='tc_dos.dat'):
tcdos_df_dict = read_spectral_k(filename=data_filename)
tcdos_df_dict_n = tcdos_df_dict
for k, v in tcdos_df_dict.items():
k_xx_raw = sum(list(tcdos_df_dict[k]['k_xx_raw']))
k_yy_raw = sum(list(tcdos_df_dict[k]['k_yy_raw']))
k_zz_raw = sum(list(tcdos_df_dict[k]['k_zz_raw']))
k_xx_smooth = sum(list(tcdos_df_dict[k]['k_xx_smooth']))
k_yy_smooth = sum(list(tcdos_df_dict[k]['k_yy_smooth']))
k_zz_smooth = sum(list(tcdos_df_dict[k]['k_zz_smooth']))
tcdos_df_dict_n[k]['k_xx_raw'] = tcdos_df_dict[k]['k_xx_raw']/k_xx_raw
tcdos_df_dict_n[k]['k_yy_raw'] = tcdos_df_dict[k]['k_yy_raw']/k_yy_raw
tcdos_df_dict_n[k]['k_zz_raw'] = tcdos_df_dict[k]['k_zz_raw']/k_zz_raw
tcdos_df_dict_n[k]['k_xx_smooth'] = tcdos_df_dict[k]['k_xx_smooth']/k_xx_smooth
tcdos_df_dict_n[k]['k_yy_smooth'] = tcdos_df_dict[k]['k_yy_smooth']/k_yy_smooth
tcdos_df_dict_n[k]['k_zz_smooth'] = tcdos_df_dict[k]['k_zz_smooth']/k_zz_smooth
return {k:v.copy() for k,v in tcdos_df_dict_n.items()}
def make_tcdos_plot(
data_filename='tc_dos.dat',
figure_prefix='tc_dos',
xlim=None,
ylim=None):
tcdos_df_dict = normalize_tcdos(data_filename=data_filename)
for keys in tcdos_df_dict.keys():
        tcdos_figure_filename = figure_prefix + '_' + str(keys) + 'K' + '.png'
figure = plt.figure()
tcdos_plot = figure.add_subplot(111)
tcdos_plot.plot(tcdos_df_dict[keys]['wavelength'],tcdos_df_dict[keys]['k_xx_raw'], label='k_xx_raw', color='g')
tcdos_plot.plot(tcdos_df_dict[keys]['wavelength'],tcdos_df_dict[keys]['k_yy_raw'], label='k_yy_raw', color='b')
tcdos_plot.plot(tcdos_df_dict[keys]['wavelength'],tcdos_df_dict[keys]['k_zz_raw'], label='k_zz_raw', color='c')
tcdos_plot.plot(tcdos_df_dict[keys]['wavelength'],tcdos_df_dict[keys]['k_xx_smooth'], label='k_xx_smooth', color='y')
tcdos_plot.plot(tcdos_df_dict[keys]['wavelength'],tcdos_df_dict[keys]['k_yy_smooth'], label='k_yy_smooth', color='m')
tcdos_plot.plot(tcdos_df_dict[keys]['wavelength'],tcdos_df_dict[keys]['k_zz_smooth'], label='k_zz_smooth', color='r')
tcdos_title=plt.title('Spectral thermal conductivity'+ ' at ' + str(keys)+ 'K', fontname='Times New Roman')
tcdos_xlabel=plt.xlabel('Frequency (THz)', fontname='Times New Roman')
tcdos_ylabel=plt.ylabel('Thermal conductivity (W/mK)', fontname='Times New Roman')
tcdos_legend=plt.legend(loc='upper right', prop={'size':8})
tcdos_font=plt.rc('font', family='Times New Roman')
#set axis here
if xlim is not None:
tcdos_plot.set_xlim(xlim)
if ylim is not None:
tcdos_plot.set_ylim(ylim)
figure.savefig(tcdos_figure_filename)
        plt.close(figure)
if __name__ == "__main__":
phonts_sim_dir = 'Ar_result'
tcdos_data_filename = os.path.join(phonts_sim_dir,'tc_dos.dat')
tcdos_figure_prefix = 'tc_dos'
    assert isinstance(tcdos_data_filename, str)
assert os.path.isfile(tcdos_data_filename)
tcdos_df_dict = read_spectral_k(filename=tcdos_data_filename)
    # example of how to use make_tcdos_plot()
make_tcdos_plot(
data_filename = tcdos_data_filename,
figure_prefix = tcdos_figure_prefix,
xlim = [0,15],
ylim = [0,0.06])
|
[
"copy.deepcopy",
"matplotlib.pyplot.legend",
"os.path.isfile",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.pyplot.rc",
"collections.OrderedDict",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"os.path.join"
] |
[((944, 957), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (955, 957), False, 'from collections import OrderedDict\n'), ((1230, 1243), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1241, 1243), False, 'from collections import OrderedDict\n'), ((4479, 4521), 'os.path.join', 'os.path.join', (['phonts_sim_dir', '"""tc_dos.dat"""'], {}), "(phonts_sim_dir, 'tc_dos.dat')\n", (4491, 4521), False, 'import os, copy\n'), ((4609, 4644), 'os.path.isfile', 'os.path.isfile', (['tcdos_data_filename'], {}), '(tcdos_data_filename)\n', (4623, 4644), False, 'import os, copy\n'), ((755, 770), 'numpy.array', 'np.array', (['table'], {}), '(table)\n', (763, 770), True, 'import numpy as np\n'), ((2943, 2955), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2953, 2955), True, 'import matplotlib.pyplot as plt\n'), ((3876, 3933), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency (THz)"""'], {'fontname': '"""Times New Roman"""'}), "('Frequency (THz)', fontname='Times New Roman')\n", (3886, 3933), True, 'import matplotlib.pyplot as plt\n'), ((3955, 4024), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Thermal conductivity (W/mK)"""'], {'fontname': '"""Times New Roman"""'}), "('Thermal conductivity (W/mK)', fontname='Times New Roman')\n", (3965, 4024), True, 'import matplotlib.pyplot as plt\n'), ((4046, 4093), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'prop': "{'size': 8}"}), "(loc='upper right', prop={'size': 8})\n", (4056, 4093), True, 'import matplotlib.pyplot as plt\n'), ((4121, 4161), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""Times New Roman"""'}), "('font', family='Times New Roman')\n", (4127, 4161), True, 'import matplotlib.pyplot as plt\n'), ((1330, 1361), 'copy.deepcopy', 'copy.deepcopy', (['tcdos_dict[temp]'], {}), '(tcdos_dict[temp])\n', (1343, 1361), False, 'import os, copy\n')]
|
import sys
import time
import numpy as np
import pyaudio
from scipy import fftpack
from scipy.io import wavfile
import notes
from player import play
if len(sys.argv) < 2:
print(f'Usage: python app.py <WAV_FILE>')
sys.exit(1)
audio_input_file = sys.argv[1]
fs, audio_input = wavfile.read(audio_input_file)
print(f'Playing input file: {audio_input_file}')
play(audio_input, volume=1, rate=fs, channels=2, format=pyaudio.paInt32)
start = time.time()
audio_input_normalized = np.mean(audio_input, axis=1) / 2 ** 32
freqs = fftpack.fftfreq(audio_input_normalized.size) * fs
X = fftpack.fft(audio_input_normalized)
indices = np.flip(np.argsort(np.abs(X)))
freqs_ordered = np.abs(freqs[indices])[:100]
dist = np.vectorize(lambda f1, f2: abs(f1 - f2))
closest_frequencies_idx = np.fromiter(
(np.argmin(dist(f, notes.frequencies())) for f in freqs_ordered),
int,
)
_, idx = np.unique(closest_frequencies_idx, return_index=True)
closest_frequencies_idx = closest_frequencies_idx[np.sort(idx)]
closest_frequencies = notes.frequencies()[closest_frequencies_idx]
closest_notes = notes.frequencies_to_notes(closest_frequencies)
elapsed_time = time.time() - start
print(f'Playing detected notes (ordered by overall amplitude): {closest_notes} (took {elapsed_time:.3f}s)')
audio_output = np.concatenate([
notes.generate_sound_from_note(note, fs, 500)
for note in closest_notes
], axis=0).astype(np.float32)
play(audio_output, volume=1, rate=fs)
|
[
"numpy.abs",
"notes.generate_sound_from_note",
"player.play",
"notes.frequencies_to_notes",
"time.time",
"scipy.io.wavfile.read",
"scipy.fftpack.fft",
"numpy.sort",
"numpy.mean",
"notes.frequencies",
"sys.exit",
"scipy.fftpack.fftfreq",
"numpy.unique"
] |
[((286, 316), 'scipy.io.wavfile.read', 'wavfile.read', (['audio_input_file'], {}), '(audio_input_file)\n', (298, 316), False, 'from scipy.io import wavfile\n'), ((366, 438), 'player.play', 'play', (['audio_input'], {'volume': '(1)', 'rate': 'fs', 'channels': '(2)', 'format': 'pyaudio.paInt32'}), '(audio_input, volume=1, rate=fs, channels=2, format=pyaudio.paInt32)\n', (370, 438), False, 'from player import play\n'), ((448, 459), 'time.time', 'time.time', ([], {}), '()\n', (457, 459), False, 'import time\n'), ((587, 622), 'scipy.fftpack.fft', 'fftpack.fft', (['audio_input_normalized'], {}), '(audio_input_normalized)\n', (598, 622), False, 'from scipy import fftpack\n'), ((887, 940), 'numpy.unique', 'np.unique', (['closest_frequencies_idx'], {'return_index': '(True)'}), '(closest_frequencies_idx, return_index=True)\n', (896, 940), True, 'import numpy as np\n'), ((1088, 1135), 'notes.frequencies_to_notes', 'notes.frequencies_to_notes', (['closest_frequencies'], {}), '(closest_frequencies)\n', (1114, 1135), False, 'import notes\n'), ((1424, 1461), 'player.play', 'play', (['audio_output'], {'volume': '(1)', 'rate': 'fs'}), '(audio_output, volume=1, rate=fs)\n', (1428, 1461), False, 'from player import play\n'), ((224, 235), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (232, 235), False, 'import sys\n'), ((486, 514), 'numpy.mean', 'np.mean', (['audio_input'], {'axis': '(1)'}), '(audio_input, axis=1)\n', (493, 514), True, 'import numpy as np\n'), ((533, 577), 'scipy.fftpack.fftfreq', 'fftpack.fftfreq', (['audio_input_normalized.size'], {}), '(audio_input_normalized.size)\n', (548, 577), False, 'from scipy import fftpack\n'), ((680, 702), 'numpy.abs', 'np.abs', (['freqs[indices]'], {}), '(freqs[indices])\n', (686, 702), True, 'import numpy as np\n'), ((991, 1003), 'numpy.sort', 'np.sort', (['idx'], {}), '(idx)\n', (998, 1003), True, 'import numpy as np\n'), ((1027, 1046), 'notes.frequencies', 'notes.frequencies', ([], {}), '()\n', (1044, 1046), False, 'import notes\n'), ((1152, 1163), 'time.time', 'time.time', ([], {}), '()\n', (1161, 1163), False, 'import time\n'), ((652, 661), 'numpy.abs', 'np.abs', (['X'], {}), '(X)\n', (658, 661), True, 'import numpy as np\n'), ((820, 839), 'notes.frequencies', 'notes.frequencies', ([], {}), '()\n', (837, 839), False, 'import notes\n'), ((1318, 1363), 'notes.generate_sound_from_note', 'notes.generate_sound_from_note', (['note', 'fs', '(500)'], {}), '(note, fs, 500)\n', (1348, 1363), False, 'import notes\n')]
|
import torch
import numpy as np
def mpjpe(predicted, target):
"""
Mean per-joint position error (i.e. mean Euclidean distance),
often referred to as "Protocol #1" in many papers.
"""
assert predicted.shape == target.shape
return torch.mean(torch.norm(predicted - target, dim=len(target.shape)-1))
def weighted_mpjpe(predicted, target, w):
"""
Weighted mean per-joint position error (i.e. mean Euclidean distance)
"""
assert predicted.shape == target.shape
assert w.shape[0] == predicted.shape[0]
return torch.mean(w * torch.norm(predicted - target, dim=len(target.shape)-1))
def p_mpjpe_torch(predicted, target, with_sRt=False,full_torch=False,with_aligned=False):
"""
Pose error: MPJPE after rigid alignment (scale, rotation, and translation),
often referred to as "Protocol #2" in many papers.
"""
assert predicted.shape == target.shape
muX = torch.mean(target, dim=1, keepdim=True)
muY = torch.mean(predicted, dim=1, keepdim=True)
X0 = target - muX
Y0 = predicted - muY
X0[X0**2<1e-6]=1e-3
normX = torch.sqrt(torch.sum(X0**2, dim=(1, 2), keepdim=True))
normY = torch.sqrt(torch.sum(Y0**2, dim=(1, 2), keepdim=True))
normX[normX<1e-3]=1e-3
X0 /= normX
Y0 /= normY
H = torch.matmul(X0.transpose(1,2), Y0)
if full_torch:
U, s, V = batch_svd(H)
else:
U, s, Vt = np.linalg.svd(H.cpu().numpy())
V = torch.from_numpy(Vt.transpose(0, 2, 1)).cuda()
U = torch.from_numpy(U).cuda()
s = torch.from_numpy(s).cuda()
R = torch.matmul(V, U.transpose(2, 1))
# Avoid improper rotations (reflections), i.e. rotations with det(R) = -1
sign_detR = torch.sign(torch.unsqueeze(torch.det(R[0]), 0))
V[:, :, -1] *= sign_detR.unsqueeze(0)
s[:, -1] *= sign_detR.flatten()
R = torch.matmul(V, U.transpose(2, 1)) # Rotation
tr = torch.unsqueeze(torch.sum(s, dim=1, keepdim=True), 2)
a = tr * normX / normY # Scale
t = muX - a*torch.matmul(muY, R) # Translation
if (a!=a).sum()>0:
print('NaN Error!!')
print('UsV:',U,s,V)
print('aRt:',a,R,t)
a[a!=a]=1.
R[R!=R]=0.
t[t!=t]=0.
# Perform rigid transformation on the input
predicted_aligned = a*torch.matmul(predicted, R) + t
if with_sRt:
return torch.sqrt(((predicted_aligned - target)**2).sum(-1)).mean(),(a,R,t)#torch.mean(torch.norm(predicted_aligned - target, dim=len(target.shape)-1))
if with_aligned:
return torch.sqrt(((predicted_aligned - target)**2).sum(-1)).mean(),predicted_aligned
# Return MPJPE
return torch.sqrt(((predicted_aligned - target)**2).sum(-1)).mean()#torch.mean(torch.norm(predicted_aligned - target, dim=len(target.shape)-1))#,(a,R,t),predicted_aligned
def batch_svd(H):
num = H.shape[0]
U_batch, s_batch, V_batch = [],[],[]
for i in range(num):
U, s, V = H[i].svd(some=False)
U_batch.append(U.unsqueeze(0))
s_batch.append(s.unsqueeze(0))
V_batch.append(V.unsqueeze(0))
return torch.cat(U_batch,0),torch.cat(s_batch,0),torch.cat(V_batch,0)
def p_mpjpe(predicted, target, with_sRt=False,full_torch=False,with_aligned=False,each_separate=False):
"""
Pose error: MPJPE after rigid alignment (scale, rotation, and translation),
often referred to as "Protocol #2" in many papers.
"""
assert predicted.shape == target.shape
muX = np.mean(target, axis=1, keepdims=True)
muY = np.mean(predicted, axis=1, keepdims=True)
X0 = target - muX
Y0 = predicted - muY
normX = np.sqrt(np.sum(X0**2, axis=(1, 2), keepdims=True))
normY = np.sqrt(np.sum(Y0**2, axis=(1, 2), keepdims=True))
X0 /= (normX+1e-6)
Y0 /= (normY+1e-6)
H = np.matmul(X0.transpose(0, 2, 1), Y0).astype(np.float16).astype(np.float64)
U, s, Vt = np.linalg.svd(H)
V = Vt.transpose(0, 2, 1)
R = np.matmul(V, U.transpose(0, 2, 1))
# Avoid improper rotations (reflections), i.e. rotations with det(R) = -1
sign_detR = np.sign(np.expand_dims(np.linalg.det(R), axis=1))
V[:, :, -1] *= sign_detR
s[:, -1] *= sign_detR.flatten()
R = np.matmul(V, U.transpose(0, 2, 1)) # Rotation
tr = np.expand_dims(np.sum(s, axis=1, keepdims=True), axis=2)
a = tr * normX / normY # Scale
t = muX - a*np.matmul(muY, R) # Translation
# Perform rigid transformation on the input
predicted_aligned = a*np.matmul(predicted, R) + t
if each_separate:
return np.linalg.norm(predicted_aligned - target, axis=len(target.shape)-1)
error = np.mean(np.linalg.norm(predicted_aligned - target, axis=len(target.shape)-1))
if with_sRt and not with_aligned:
return error, (a,R,t)
if with_aligned:
return error,(a,R,t),predicted_aligned
# Return MPJPE
return error
def n_mpjpe(predicted, target):
"""
Normalized MPJPE (scale only), adapted from:
https://github.com/hrhodin/UnsupervisedGeometryAwareRepresentationLearning/blob/master/losses/poses.py
"""
assert predicted.shape == target.shape
norm_predicted = torch.mean(torch.sum(predicted**2, dim=3, keepdim=True), dim=2, keepdim=True)
norm_target = torch.mean(torch.sum(target*predicted, dim=3, keepdim=True), dim=2, keepdim=True)
scale = norm_target / norm_predicted
return mpjpe(scale * predicted, target)
def mean_velocity_error(predicted, target):
"""
Mean per-joint velocity error (i.e. mean Euclidean distance of the 1st derivative)
"""
assert predicted.shape == target.shape
velocity_predicted = np.diff(predicted, axis=0)
velocity_target = np.diff(target, axis=0)
return np.mean(np.linalg.norm(velocity_predicted - velocity_target, axis=len(target.shape)-1))
def test():
r1 = np.random.rand(3,14,3)
r2 = np.random.rand(3,14,3)
pmpjpe = p_mpjpe(r1, r2,with_sRt=False)
pmpjpe_torch = p_mpjpe_torch(torch.from_numpy(r1), torch.from_numpy(r2),with_sRt=False,full_torch=True)
print('pmpjpe: {}; {:.6f}; {:.6f}; {:.6f}'.format(np.abs(pmpjpe-pmpjpe_torch.numpy())<0.01,pmpjpe,pmpjpe_torch.numpy(), pmpjpe-pmpjpe_torch.numpy()))
if __name__ == '__main__':
test()
|
[
"torch.mean",
"numpy.sum",
"torch.cat",
"torch.det",
"numpy.linalg.svd",
"numpy.mean",
"numpy.diff",
"numpy.matmul",
"numpy.random.rand",
"numpy.linalg.det",
"torch.matmul",
"torch.sum",
"torch.from_numpy"
] |
[((921, 960), 'torch.mean', 'torch.mean', (['target'], {'dim': '(1)', 'keepdim': '(True)'}), '(target, dim=1, keepdim=True)\n', (931, 960), False, 'import torch\n'), ((971, 1013), 'torch.mean', 'torch.mean', (['predicted'], {'dim': '(1)', 'keepdim': '(True)'}), '(predicted, dim=1, keepdim=True)\n', (981, 1013), False, 'import torch\n'), ((3435, 3473), 'numpy.mean', 'np.mean', (['target'], {'axis': '(1)', 'keepdims': '(True)'}), '(target, axis=1, keepdims=True)\n', (3442, 3473), True, 'import numpy as np\n'), ((3484, 3525), 'numpy.mean', 'np.mean', (['predicted'], {'axis': '(1)', 'keepdims': '(True)'}), '(predicted, axis=1, keepdims=True)\n', (3491, 3525), True, 'import numpy as np\n'), ((3848, 3864), 'numpy.linalg.svd', 'np.linalg.svd', (['H'], {}), '(H)\n', (3861, 3864), True, 'import numpy as np\n'), ((5575, 5601), 'numpy.diff', 'np.diff', (['predicted'], {'axis': '(0)'}), '(predicted, axis=0)\n', (5582, 5601), True, 'import numpy as np\n'), ((5624, 5647), 'numpy.diff', 'np.diff', (['target'], {'axis': '(0)'}), '(target, axis=0)\n', (5631, 5647), True, 'import numpy as np\n'), ((5770, 5794), 'numpy.random.rand', 'np.random.rand', (['(3)', '(14)', '(3)'], {}), '(3, 14, 3)\n', (5784, 5794), True, 'import numpy as np\n'), ((5802, 5826), 'numpy.random.rand', 'np.random.rand', (['(3)', '(14)', '(3)'], {}), '(3, 14, 3)\n', (5816, 5826), True, 'import numpy as np\n'), ((1110, 1154), 'torch.sum', 'torch.sum', (['(X0 ** 2)'], {'dim': '(1, 2)', 'keepdim': '(True)'}), '(X0 ** 2, dim=(1, 2), keepdim=True)\n', (1119, 1154), False, 'import torch\n'), ((1177, 1221), 'torch.sum', 'torch.sum', (['(Y0 ** 2)'], {'dim': '(1, 2)', 'keepdim': '(True)'}), '(Y0 ** 2, dim=(1, 2), keepdim=True)\n', (1186, 1221), False, 'import torch\n'), ((1919, 1952), 'torch.sum', 'torch.sum', (['s'], {'dim': '(1)', 'keepdim': '(True)'}), '(s, dim=1, keepdim=True)\n', (1928, 1952), False, 'import torch\n'), ((3062, 3083), 'torch.cat', 'torch.cat', (['U_batch', '(0)'], {}), '(U_batch, 0)\n', (3071, 3083), False, 'import torch\n'), ((3083, 3104), 'torch.cat', 'torch.cat', (['s_batch', '(0)'], {}), '(s_batch, 0)\n', (3092, 3104), False, 'import torch\n'), ((3104, 3125), 'torch.cat', 'torch.cat', (['V_batch', '(0)'], {}), '(V_batch, 0)\n', (3113, 3125), False, 'import torch\n'), ((3595, 3638), 'numpy.sum', 'np.sum', (['(X0 ** 2)'], {'axis': '(1, 2)', 'keepdims': '(True)'}), '(X0 ** 2, axis=(1, 2), keepdims=True)\n', (3601, 3638), True, 'import numpy as np\n'), ((3658, 3701), 'numpy.sum', 'np.sum', (['(Y0 ** 2)'], {'axis': '(1, 2)', 'keepdims': '(True)'}), '(Y0 ** 2, axis=(1, 2), keepdims=True)\n', (3664, 3701), True, 'import numpy as np\n'), ((4227, 4259), 'numpy.sum', 'np.sum', (['s'], {'axis': '(1)', 'keepdims': '(True)'}), '(s, axis=1, keepdims=True)\n', (4233, 4259), True, 'import numpy as np\n'), ((5106, 5152), 'torch.sum', 'torch.sum', (['(predicted ** 2)'], {'dim': '(3)', 'keepdim': '(True)'}), '(predicted ** 2, dim=3, keepdim=True)\n', (5115, 5152), False, 'import torch\n'), ((5202, 5252), 'torch.sum', 'torch.sum', (['(target * predicted)'], {'dim': '(3)', 'keepdim': '(True)'}), '(target * predicted, dim=3, keepdim=True)\n', (5211, 5252), False, 'import torch\n'), ((5902, 5922), 'torch.from_numpy', 'torch.from_numpy', (['r1'], {}), '(r1)\n', (5918, 5922), False, 'import torch\n'), ((5924, 5944), 'torch.from_numpy', 'torch.from_numpy', (['r2'], {}), '(r2)\n', (5940, 5944), False, 'import torch\n'), ((1740, 1755), 'torch.det', 'torch.det', (['R[0]'], {}), '(R[0])\n', (1749, 1755), False, 'import torch\n'), ((2009, 2029), 
'torch.matmul', 'torch.matmul', (['muY', 'R'], {}), '(muY, R)\n', (2021, 2029), False, 'import torch\n'), ((2272, 2298), 'torch.matmul', 'torch.matmul', (['predicted', 'R'], {}), '(predicted, R)\n', (2284, 2298), False, 'import torch\n'), ((4056, 4072), 'numpy.linalg.det', 'np.linalg.det', (['R'], {}), '(R)\n', (4069, 4072), True, 'import numpy as np\n'), ((4321, 4338), 'numpy.matmul', 'np.matmul', (['muY', 'R'], {}), '(muY, R)\n', (4330, 4338), True, 'import numpy as np\n'), ((4428, 4451), 'numpy.matmul', 'np.matmul', (['predicted', 'R'], {}), '(predicted, R)\n', (4437, 4451), True, 'import numpy as np\n'), ((1508, 1527), 'torch.from_numpy', 'torch.from_numpy', (['U'], {}), '(U)\n', (1524, 1527), False, 'import torch\n'), ((1547, 1566), 'torch.from_numpy', 'torch.from_numpy', (['s'], {}), '(s)\n', (1563, 1566), False, 'import torch\n')]
|
import numpy as np
import pandas as pd
import random
#from data import Data
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import KFold
from sklearn import metrics
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
###-----construct a workable data set-----###
def import_x_data(p):
train_rs = []
#train_label = []
path = p
with open(path, 'r+') as f:
for l in f:
if l.strip() == "":
continue
vec = [0 for _ in range(219)]
tokens = l.split(' ')
#labels = tokens[0]
#train_label.append(labels)
for pair in tokens[1:]:
t = pair.split(':')
idx = int(t[0])
value = int(t[1])
vec[idx-1]=value
train_rs.append(vec)
x = np.asarray(train_rs)
#y = np.asarray(train_label)
return x
x_train0 = import_x_data("./training00.data")
x_train1 = import_x_data("./training01.data")
x_train2 = import_x_data("./training02.data")
x_train3 = import_x_data("./training03.data")
x_train4 = import_x_data("./training04.data")
print(x_train0)
#-----create train and testing set X-----#
train_set1 = np.append(x_train0, x_train1, axis = 0)
train_set2 = np.append(train_set1, x_train2, axis = 0)
train_set0 = np.append(train_set2, x_train3, axis = 0)
train_set3 = np.append(train_set1, x_train3, axis = 0)
train_set4 = np.append(x_train0, x_train2, axis = 0)
train_set5 = np.append(train_set4, x_train3, axis = 0)
train_set6 = np.append(x_train1, x_train2, axis = 0)
train_set7 = np.append(train_set6, x_train3, axis = 0)
#print(train_set1)
xtrain_k = np.append(train_set0, x_train4, axis = 0)
xfold4 = np.append(train_set2, x_train3, axis = 0)
xfold3 = np.append(train_set2, x_train4, axis = 0)
xfold2 = np.append(train_set3, x_train4, axis = 0)
xfold1 = np.append(train_set5, x_train4, axis = 0)
xfold0 = np.append(train_set7, x_train4, axis = 0)
print(xfold4.shape)
#-----extract labels from the data-----#
def import_y_data(p):
train_label = []
path = p
with open(path, 'r+') as f:
for l in f:
if l.strip() == "":
continue
vec = [0 for _ in range(17)]
tokens = l.split(' ')
labels = int(tokens[0])
train_label.append(labels)
y = np.asarray(train_label)
return y
y_train0 = import_y_data("./training00.data")
y_train1 = import_y_data("./training01.data")
y_train2 = import_y_data("./training02.data")
y_train3 = import_y_data("./training03.data")
y_train4 = import_y_data("./training04.data")
#-----create training and testing set Y-----#
train1 = np.append(y_train0, y_train1, axis = 0)
train2 = np.append(train1, y_train2, axis = 0)
train0 = np.append(train2, y_train3, axis = 0)
train3 = np.append(train1, y_train3, axis = 0)
train4 = np.append(y_train0, y_train2, axis = 0)
train5 = np.append(train4, y_train3, axis = 0)
train6 = np.append(y_train1, y_train2, axis = 0)
train7 = np.append(train6, y_train3, axis = 0)
yfold4 = np.append(train2, y_train3, axis = 0)
yfold3 = np.append(train2, y_train4, axis = 0)
yfold2 = np.append(train3, y_train4, axis = 0)
yfold1 = np.append(train5, y_train4, axis = 0)
yfold0 = np.append(train7, y_train4, axis = 0)
ytrain_k = np.append(train0, y_train4, axis = 0)
#print(ytrain_k.shape)
print(yfold4.shape)
###----SUPPORT VECTOR MACHINE-----###
def fitt(X,Y,gamma,c):
epoch = 20
w = np.ones(219)
b = 1
for t in range(1, epoch):
        # TODO: shuffle the training data each epoch
gt = gamma/(1+epoch)
for i, x in enumerate(X):
if (Y[i]*np.dot(X[i], w)+b) <= 1:
w = ((1-gt)*w)+(gt*(c*(Y[i]*X[i])))*100000
#print(w[0:3])
else:
w = (1-gt)*w
#print(w[0:5])
return w
def predict(X,Y,w):
tp = 0
fp = 0
fn = 0
b = 1
#print(len(X))
for i, x in enumerate(X):
classification = np.sign(np.dot(X[i],w)+b)
if classification > 0 and Y[i] > 0:
tp += 1
elif classification > 0 and Y[i] < 0:
fp += 1
if classification < 0 and Y[i] > 0:
fn += 1
else:
pass
return tp,fp,fn
#-----test for F1 statistic-----#
def F1(TP,FP,FN):
if TP+FP == 0:
p = 1
else:
p = TP/(TP+FP)
if TP+FN == 0:
r = 1
else:
r = TP/(TP+FN)
f1 = 2*(p*r)/(p+r)
return f1
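# Quick arithmetic check with made-up counts: TP=3, FP=1, FN=2 gives
# precision = 3/4 = 0.75, recall = 3/5 = 0.6 and F1 = 2*0.75*0.6/1.35 ~= 0.667,
# i.e. F1(3, 1, 2) -> 0.666...  Note that if both p and r come out as 0 this
# implementation divides by zero; the guards above only cover empty denominators.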
def p(TP,FP):
if TP+FP ==0:
p = 1
else:
p = TP/(TP+FP)
return p
def r(TP,FN):
if TP+FN == 0:
r = 1
else:
r = TP/(TP+FN)
return r
#-----average f1 score with: gamma=0.1, c=0.01-----#
t4 = fitt(xfold4,yfold4,0.1,0.01)
p_tst4 = predict(x_train4,y_train4,t4)
p_tra4 = predict(xfold4,yfold4,t4)
tf4 = F1(p_tst4[0],p_tst4[1],p_tst4[2])
trf4 = F1(p_tra4[0],p_tra4[1],p_tra4[2])
print(t4)
print(p_tst4)
print(p_tra4)
print(tf4)
print(trf4)
t3 = fitt(xfold3,yfold3,0.1,0.01)
p_tst3 = predict(x_train3,y_train3,t3)
p_tra3 = predict(xfold3,yfold3,t3)
tf3 = F1(p_tst3[0],p_tst3[1],p_tst3[2])
trf3 = F1(p_tra3[0],p_tra3[1],p_tra3[2])
print(t3)
print(p_tst3)
print(p_tra3)
print(tf3)
print(trf3)
t2 = fitt(xfold2,yfold2,0.1,0.01)
p_tst2 = predict(x_train2,y_train2,t2)
p_tra2 = predict(xfold2,yfold2,t2)
tf2 = F1(p_tst2[0],p_tst2[1],p_tst2[2])
trf2 = F1(p_tra2[0],p_tra2[1],p_tra2[2])
print(t2)
print(p_tst2)
print(p_tra2)
print(tf2)
print(trf2)
t1 = fitt(xfold1,yfold1,0.1,0.01)
p_tst1 = predict(x_train1,y_train1,t1)
p_tra1 = predict(xfold1,yfold1,t1)
tf1 = F1(p_tst1[0],p_tst1[1],p_tst1[2])
trf1 = F1(p_tra1[0],p_tra1[1],p_tra1[2])
print(t1)
print(p_tst1)
print(p_tra1)
print(tf1)
print(trf1)
t0 = fitt(xfold0,yfold0,0.1,0.01)
p_tst0 = predict(x_train0,y_train0,t0)
p_tra0 = predict(xfold0,yfold0,t0)
tf0 = F1(p_tst0[0],p_tst0[1],p_tst0[2])
trf0 = F1(p_tra0[0],p_tra0[1],p_tra0[2])
print(t0)
print(p_tst0)
print(p_tra0)
print(tf0)
print(trf0)
print('TESTavgf1(gamma=0.1,c=0.01):',(tf4+tf3+tf2+tf1+tf0)/5) #avg TEST F1#
print('TRAINavgf1(gamma=0.1,c=0.01):',(trf4+trf3+trf2+trf1+trf0)/5) #avg TRAIN F1#
### ###
#-----average f1 score with:: gamma=0.01, c=1-----#
t4 = fitt(xfold4,yfold4,0.01,1)
p_tst4 = predict(x_train4,y_train4,t4)
p_tra4 = predict(xfold4,yfold4,t4)
tf4 = F1(p_tst4[0],p_tst4[1],p_tst4[2])
trf4 = F1(p_tra4[0],p_tra4[1],p_tra4[2])
t3 = fitt(xfold3,yfold3,0.01,1)
p_tst3 = predict(x_train3,y_train3,t3)
p_tra3 = predict(xfold3,yfold3,t3)
tf3 = F1(p_tst3[0],p_tst3[1],p_tst3[2])
trf3 = F1(p_tra3[0],p_tra3[1],p_tra3[2])
t2 = fitt(xfold2,yfold2,0.01,1)
p_tst2 = predict(x_train2,y_train2,t2)
p_tra2 = predict(xfold2,yfold2,t2)
tf2 = F1(p_tst2[0],p_tst2[1],p_tst2[2])
trf2 = F1(p_tra2[0],p_tra2[1],p_tra2[2])
t1 = fitt(xfold1,yfold1,0.01,1)
p_tst1 = predict(x_train1,y_train1,t1)
p_tra1 = predict(xfold1,yfold1,t1)
tf1 = F1(p_tst1[0],p_tst1[1],p_tst1[2])
trf1 = F1(p_tra1[0],p_tra1[1],p_tra1[2])
t0 = fitt(xfold0,yfold0,0.01,1)
p_tst0 = predict(x_train0,y_train0,t0)
p_tra0 = predict(xfold0,yfold0,t0)
tf0 = F1(p_tst0[0],p_tst0[1],p_tst0[2])
trf0 = F1(p_tra0[0],p_tra0[1],p_tra0[2])
print('TESTavgf1(gamma=0.01,c=1):',(tf4+tf3+tf2+tf1+tf0)/5) #avg TEST F1
print('TRAINavgf1(gamma=0.01,c=1):',(trf4+trf3+trf2+trf1+trf0)/5) #avg TRAIN F1
### ###
#-----average f1 score with: gamma=0.1 c=0.00001-----#
t4 = fitt(xfold4,yfold4,0.1,0.00001)
p_tst4 = predict(x_train4,y_train4,t4)
p_tra4 = predict(xfold4,yfold4,t4)
tf4 = F1(p_tst4[0],p_tst4[1],p_tst4[2])
trf4 = F1(p_tra4[0],p_tra4[1],p_tra4[2])
print(p_tst4)
t3 = fitt(xfold3,yfold3,0.1,0.00001)
p_tst3 = predict(x_train3,y_train3,t3)
p_tra3 = predict(xfold3,yfold3,t3)
tf3 = F1(p_tst3[0],p_tst3[1],p_tst3[2])
trf3 = F1(p_tra3[0],p_tra3[1],p_tra3[2])
print(p_tst3)
t2 = fitt(xfold2,yfold2,0.1,0.00001)
p_tst2 = predict(x_train2,y_train2,t2)
p_tra2 = predict(xfold2,yfold2,t2)
tf2 = F1(p_tst2[0],p_tst2[1],p_tst2[2])
trf2 = F1(p_tra2[0],p_tra2[1],p_tra2[2])
print(p_tst2)
t1 = fitt(xfold1,yfold1,0.1,0.00001)
p_tst1 = predict(x_train1,y_train1,t1)
p_tra1 = predict(xfold1,yfold1,t1)
tf1 = F1(p_tst1[0],p_tst1[1],p_tst1[2])
trf1 = F1(p_tra1[0],p_tra1[1],p_tra1[2])
print(p_tst1)
t0 = fitt(xfold0,yfold0,0.1,0.00001)
p_tst0 = predict(x_train0,y_train0,t0)
p_tra0 = predict(xfold0,yfold0,t0)
tf0 = F1(p_tst0[0],p_tst0[1],p_tst0[2])
trf0 = F1(p_tra0[0],p_tra0[1],p_tra0[2])
print(p_tst0)
print('TESTavgf1(gamma=0.1,c=0.00001):',(tf4+tf3+tf2+tf1+tf0)/5) #avg TEST F1
print('TRAINavgf1(gamma=0.1,c=0.00001):',(trf4+trf3+trf2+trf1+trf0)/5) #avg TRAIN F1
### ###
#-----average f1 score with: gamma=0.001 c=10-----#
t4 = fitt(xfold4,yfold4,0.001,10)
p_tst4 = predict(x_train4,y_train4,t4)
p_tra4 = predict(xfold4,yfold4,t4)
tp4 = p(p_tst4[0],p_tst4[1])
tr4 = r(p_tst4[0],p_tst4[2])
trp4 = p(p_tra4[0],p_tra4[1])
trr4 = r(p_tra4[0],p_tra4[2])
tf4 = F1(p_tst4[0],p_tst4[1],p_tst4[2])
trf4 = F1(p_tra4[0],p_tra4[1],p_tra4[2])
print(p_tst4)
t3 = fitt(xfold3,yfold3,0.001,10)
p_tst3 = predict(x_train3,y_train3,t3)
p_tra3 = predict(xfold3,yfold3,t3)
tp3 = p(p_tst3[0],p_tst3[1])
tr3 = r(p_tst3[0],p_tst3[2])
trp3 = p(p_tra3[0],p_tra3[1])
trr3 = r(p_tra3[0],p_tra3[2])
tf3 = F1(p_tst3[0],p_tst3[1],p_tst3[2])
trf3 = F1(p_tra3[0],p_tra3[1],p_tra3[2])
print(p_tst3)
t2 = fitt(xfold2,yfold2,0.001,10)
p_tst2 = predict(x_train2,y_train2,t2)
p_tra2 = predict(xfold2,yfold2,t2)
tp2 = p(p_tst2[0],p_tst2[1])
tr2 = r(p_tst2[0],p_tst2[2])
trp2 = p(p_tra2[0],p_tra2[1])
trr2 = r(p_tra2[0],p_tra2[2])
tf2 = F1(p_tst2[0],p_tst2[1],p_tst2[2])
trf2 = F1(p_tra2[0],p_tra2[1],p_tra2[2])
print(p_tst2)
t1 = fitt(xfold1,yfold1,0.001,10)
p_tst1 = predict(x_train1,y_train1,t1)
p_tra1 = predict(xfold1,yfold1,t1)
tp1 = p(p_tst1[0],p_tst1[1])
tr1 = r(p_tst1[0],p_tst1[2])
trp1 = p(p_tra1[0],p_tra1[1])
trr1 = r(p_tra1[0],p_tra1[2])
tf1 = F1(p_tst1[0],p_tst1[1],p_tst1[2])
trf1 = F1(p_tra1[0],p_tra1[1],p_tra1[2])
print(p_tst1)
t0 = fitt(xfold0,yfold0,0.001,10)
p_tst0 = predict(x_train0,y_train0,t0)
p_tra0 = predict(xfold0,yfold0,t0)
tp0 = p(p_tst0[0],p_tst0[1])
tr0 = r(p_tst0[0],p_tst0[2])
trp0 = p(p_tra0[0],p_tra0[1])
trr0 = r(p_tra0[0],p_tra0[2])
tf0 = F1(p_tst0[0],p_tst0[1],p_tst0[2])
trf0 = F1(p_tra0[0],p_tra0[1],p_tra0[2])
print(p_tst0)
print('TESTavgp(gamma=0.001,c=10:',(tp4+tp3+tp2+tp1+tp0)/5)
print('TESTavgr(gamma=0.001,c=10:',(tr4+tr3+tr2+tr1+tr0)/5)
print('TRAINavgp(gamma=0.001,c=10:',(trp4+trp3+trp2+trp1+trp0)/5)
print('TRAINavgr(gamma=0.001,c=10:',(trr4+trr3+trr2+trr1+trr0)/5)
print('TESTavgf1(gamma=0.001,c=10):',(tf4+tf3+tf2+tf1+tf0)/5) #avg TEST F1
print('TRAINavgf1(gamma=0.001,c=10):',(trf4+trf3+trf2+trf1+trf0)/5) #avg TRAIN F1
print('### After many iterations my F1 score converged to roughly 36%. I contrasted my model with sklearn and found a big difference: sklearn produced an accuracy of 82%, which I believe indicates that there is a bug in my code that I have yet to find. My optimal hyperparameters turned out to be gamma=0.001, c=10, for a 39% average F1 on the training set.')
#-----report accuracy using sklearn-----#
print('please wait...thinking..')
model_svm = svm.SVC(C=1, gamma=0.01)
acc = cross_val_score(model_svm, xtrain_k, ytrain_k, cv=5)
print('sklearn acc',np.mean(acc))
###-----LOGISTIC REGRESSION-----###
def logit(X,Y,gamma,sigma,intercept=False):
w = np.ones(219)
b = 1
epoch = 20
for t in range(1, epoch):
for i, x in enumerate(X):
z = np.dot(X[i], w)
s = 1/(1+np.exp(-z))
gradient = np.dot(X[i], (s-Y[i]))+2*w/sigma
#print(gradient)
            w = w - gamma*gradient
return w
def estimate(X,Y,w):
tp = 0
fp = 0
fn = 0
#print(len(X))
for i, x in enumerate(X):
classification = np.sign(np.dot(X[i],w))
if classification > 0 and Y[i] > 0:
tp += 1
elif classification > 0 and Y[i] < 0:
fp += 1
if classification < 0 and Y[i] > 0:
fn += 1
else:
pass
return tp,fp,fn
#-----average f1 score with gamma=0.1, sigma=1-----#
l4 = logit(xfold4,yfold4,0.1,1)
tst_l4 = estimate(xfold4,yfold4,l4)
tra_l4 = estimate(x_train4,y_train4,l4)
tlf4 = F1(tst_l4[0],tst_l4[1],tst_l4[2])
trlf4 = F1(tra_l4[0],tra_l4[1],tra_l4[2])
print(l4)
print(tst_l4)
print(tra_l4)
print(tlf4)
print(trlf4)
l3 = logit(xfold3,yfold3,0.1,1)
tst_l3 = estimate(xfold3,yfold3,l3)
tra_l3 = estimate(x_train3,y_train3,l3)
tlf3 = F1(tst_l3[0],tst_l3[1],tst_l3[2])
trlf3 = F1(tra_l3[0],tra_l3[1],tra_l3[2])
print(l3)
print(tst_l3)
print(tra_l3)
print(tlf3)
print(trlf3)
l2 = logit(xfold2,yfold2,0.1,1)
tst_l2 = estimate(xfold2,yfold2,l2)
tra_l2 = estimate(x_train2,y_train2,l2)
tlf2 = F1(tst_l2[0],tst_l2[1],tst_l2[2])
trlf2 = F1(tra_l2[0],tra_l2[1],tra_l2[2])
print(l2)
print(tst_l2)
print(tra_l2)
print(tlf2)
print(trlf2)
l1 = logit(xfold1,yfold1,0.1,1)
tst_l1 = estimate(xfold1,yfold1,l1)
tra_l1 = estimate(x_train1,y_train1,l1)
tlf1 = F1(tst_l1[0],tst_l1[1],tst_l1[2])
trlf1 = F1(tra_l1[0],tra_l1[1],tra_l1[2])
print(l1)
print(tst_l1)
print(tra_l1)
print(tlf1)
print(trlf1)
l0 = logit(xfold0,yfold0,0.1,1)
tst_l0 = estimate(xfold0,yfold0,l0)
tra_l0 = estimate(x_train0,y_train0,l0)
tlf0 = F1(tst_l0[0],tst_l0[1],tst_l0[2])
trlf0 = F1(tra_l0[0],tra_l0[1],tra_l0[2])
print(l0)
print(tst_l0)
print(tra_l0)
print(tlf0)
print(trlf0)
print('TESTavgf1(gamma=0.1,c=1):',(tlf4+tlf3+tlf2+tlf1+tlf0)/5)
print('TRAINavgf1(gamma=0.1,c=1):',(trlf4+trlf3+trlf2+trlf1+trlf0)/5)
#-----average f1 score with gamma=0.01, sigma=0.1-----#
l4 = logit(xfold4,yfold4,0.01,0.1)
tst_l4 = estimate(xfold4,yfold4,l4)
tra_l4 = estimate(x_train4,y_train4,l4)
tlf4 = F1(tst_l4[0],tst_l4[1],tst_l4[2])
trlf4 = F1(tra_l4[0],tra_l4[1],tra_l4[2])
l3 = logit(xfold3,yfold3,0.01,0.1)
tst_l3 = estimate(xfold3,yfold3,l3)
tra_l3 = estimate(x_train3,y_train3,l3)
tlf3 = F1(tst_l3[0],tst_l3[1],tst_l3[2])
trlf3 = F1(tra_l3[0],tra_l3[1],tra_l3[2])
l2 = logit(xfold2,yfold2,0.01,0.1)
tst_l2 = estimate(xfold2,yfold2,l2)
tra_l2 = estimate(x_train2,y_train2,l2)
tlf2 = F1(tst_l2[0],tst_l2[1],tst_l2[2])
trlf2 = F1(tra_l2[0],tra_l2[1],tra_l2[2])
l1 = logit(xfold1,yfold1,0.01,0.1)
tst_l1 = estimate(xfold1,yfold1,l1)
tra_l1 = estimate(x_train1,y_train1,l1)
tlf1 = F1(tst_l1[0],tst_l1[1],tst_l1[2])
trlf1 = F1(tra_l1[0],tra_l1[1],tra_l1[2])
l0 = logit(xfold0,yfold0,0.01,0.1)
tst_l0 = estimate(xfold0,yfold0,l0)
tra_l0 = estimate(x_train0,y_train0,l0)
tlf0 = F1(tst_l0[0],tst_l0[1],tst_l0[2])
trlf0 = F1(tra_l0[0],tra_l0[1],tra_l0[2])
print('TESTavgf1(gamma=0.01,c=0.1):',(tlf4+tlf3+tlf2+tlf1+tlf0)/5)
print('TRAINavgf1(gamma=0.01,c=0.1):',(trlf4+trlf3+trlf2+trlf1+trlf0)/5)
#-----average f1 score with gamma=0.001, sigma=0.01-----#
l4 = logit(xfold4,yfold4,0.001,0.01)
tst_l4 = estimate(xfold4,yfold4,l4)
tra_l4 = estimate(x_train4,y_train4,l4)
tlf4 = F1(tst_l4[0],tst_l4[1],tst_l4[2])
trlf4 = F1(tra_l4[0],tra_l4[1],tra_l4[2])
l3 = logit(xfold3,yfold3,0.001,0.01)
tst_l3 = estimate(xfold3,yfold3,l3)
tra_l3 = estimate(x_train3,y_train3,l3)
tlf3 = F1(tst_l3[0],tst_l3[1],tst_l3[2])
trlf3 = F1(tra_l3[0],tra_l3[1],tra_l3[2])
l2 = logit(xfold2,yfold2,0.001,0.01)
tst_l2 = estimate(xfold2,yfold2,l2)
tra_l2 = estimate(x_train2,y_train2,l2)
tlf2 = F1(tst_l2[0],tst_l2[1],tst_l2[2])
trlf2 = F1(tra_l2[0],tra_l2[1],tra_l2[2])
l1 = logit(xfold1,yfold1,0.001,0.01)
tst_l1 = estimate(xfold1,yfold1,l1)
tra_l1 = estimate(x_train1,y_train1,l1)
tlf1 = F1(tst_l1[0],tst_l1[1],tst_l1[2])
trlf1 = F1(tra_l1[0],tra_l1[1],tra_l1[2])
l0 = logit(xfold0,yfold0,0.001,0.01)
tst_l0 = estimate(xfold0,yfold0,l0)
tra_l0 = estimate(x_train0,y_train0,l0)
tlf0 = F1(tst_l0[0],tst_l0[1],tst_l0[2])
trlf0 = F1(tra_l0[0],tra_l0[1],tra_l0[2])
print('TESTavgf1(gamma=0.001,c=0.01):',(tlf4+tlf3+tlf2+tlf1+tlf0)/5)
print('TRAINavgf1(gamma=0.001,c=0.01):',(trlf4+trlf3+trlf2+trlf1+trlf0)/5)
#-----average f1 score with gamma=0.001, sigma=0.001-----#
l4 = logit(xfold4,yfold4,1,1)
tst_l4 = estimate(xfold4,yfold4,l4)
tra_l4 = estimate(x_train4,y_train4,l4)
tp4 = p(tst_l4[0],tst_l4[1])
tr4 = r(tst_l4[0],tst_l4[2])
trp4 = p(tra_l4[0],tra_l4[1])
trr4 = p(tra_l4[0],tra_l4[2])
tlf4 = F1(tst_l4[0],tst_l4[1],tst_l4[2])
trlf4 = F1(tra_l4[0],tra_l4[1],tra_l4[2])
l3 = logit(xfold3,yfold3,1,1)
tst_l3 = estimate(xfold3,yfold3,l3)
tra_l3 = estimate(x_train3,y_train3,l3)
tp3 = p(tst_l3[0],tst_l3[1])
tr3 = r(tst_l3[0],tst_l3[2])
trp3 = p(tra_l3[0],tra_l3[1])
trr3 = p(tra_l3[0],tra_l3[2])
tlf3 = F1(tst_l3[0],tst_l3[1],tst_l3[2])
trlf3 = F1(tra_l3[0],tra_l3[1],tra_l3[2])
l2 = logit(xfold2,yfold2,1,1)
tst_l2 = estimate(xfold2,yfold2,l2)
tra_l2 = estimate(x_train2,y_train2,l2)
tp2 = p(tst_l2[0],tst_l2[1])
tr2 = r(tst_l2[0],tst_l2[2])
trp2 = p(tra_l2[0],tra_l2[1])
trr2 = p(tra_l2[0],tra_l2[2])
tlf2 = F1(tst_l2[0],tst_l2[1],tst_l2[2])
trlf2 = F1(tra_l2[0],tra_l2[1],tra_l2[2])
l1 = logit(xfold1,yfold1,1,1)
tst_l1 = estimate(xfold1,yfold1,l1)
tra_l1 = estimate(x_train1,y_train1,l1)
tp1 = p(tst_l1[0],tst_l1[1])
tr1 = r(tst_l1[0],tst_l1[2])
trp1 = p(tra_l1[0],tra_l1[1])
trr1 = p(tra_l1[0],tra_l1[2])
tlf1 = F1(tst_l1[0],tst_l1[1],tst_l1[2])
trlf1 = F1(tra_l1[0],tra_l1[1],tra_l1[2])
l0 = logit(xfold0,yfold0,1,1)
tst_l0 = estimate(xfold0,yfold0,l0)
tra_l0 = estimate(x_train0,y_train0,l0)
tp0 = p(tst_l0[0],tst_l0[1])
tr0 = r(tst_l0[0],tst_l0[2])
trp0 = p(tra_l0[0],tra_l0[1])
trr0 = p(tra_l0[0],tra_l0[2])
tlf0 = F1(tst_l0[0],tst_l0[1],tst_l0[2])
trlf0 = F1(tra_l0[0],tra_l0[1],tra_l0[2])
print('TESTavgp(gamma=1,c=1):',(tp4+tp3+tp2+tp1+tp0)/5)
print('TESTavgr(gamma=1,c=1):',(tr4+tr3+tr2+tr1+tr0)/5)
print('TRAINavgp(gamma=1,c=1):',(trp4+trp3+trp2+trp1+trp0)/5)
print('TRAINavgr(gamma=1,c=1):',(trr4+trr3+trr2+trr1+trr0)/5)
print('TESTavgf1(gamma=1,c=1):',(tlf4+tlf3+tlf2+tlf1+tlf0)/5)
print('TRAINavgf1(gamma=1,c=1):',(trlf4+trlf3+trlf2+trlf1+trlf0)/5)
#-----sklearn logistic regression accuracy-----#
model_logit = LogisticRegression(penalty='l2',C=0.1,solver='lbfgs')
accl = cross_val_score(model_logit, xtrain_k, ytrain_k, cv=5)
print('sklearn accuracy:',np.mean(accl))
###-----Naive Bayes-----###
#-----combining data-----#
y_fold4 = yfold4.reshape(16000,1)
y_fold3 = yfold3.reshape(16000,1)
y_fold2 = yfold2.reshape(16000,1)
y_fold1 = yfold1.reshape(16000,1)
y_fold0 = yfold0.reshape(16000,1)
print(y_fold4.shape)
print(xfold4.shape)
fold4 = np.hstack((y_fold4,xfold4))
fold3 = np.hstack((y_fold3,xfold3))
fold2 = np.hstack((y_fold2,xfold2))
fold1 = np.hstack((y_fold1,xfold1))
fold0 = np.hstack((y_fold0,xfold0))
print(fold4)
print(fold4.shape)
#-----object oriented programming-----#
'''
t0 = Data(fpath='training00.data')
t1 = Data(fpath='training01.data')
t2 = Data(fpath='training02.data')
t3 = Data(fpath='training03.data')
t4 = Data(fpath='training04.data')
t = Data(fpath='test.liblinear')
tr = Data(fpath='train.liblinear')
t0._load_data(fpath="training00.data")
t0._set_attributes_info(index_column_dict,data)
t0.get_row_subset(attribute_name,attribute_value)
'''
def tran_x(X):
return X.T
def p_label(X):
pos = 0
neg = 0
    for i in X[:,0]: #column 0 of the hstacked fold holds the class labels
if i == 1:
pos += 1
else:
neg += 1
print("Positive: %d" % pos)
print("Negative: %d" % neg)
ppos = pos/len(X)
pneg = neg/len(X)
return ppos, pneg
print(p_label(fold4))
def frequency(X,Y):
    #count, for each of the 219 binary features, how often it is active (==1) in the
    #positive class and in the negative class; these per-class counts are the raw
    #material for Naive Bayes likelihood estimates
    pos = np.zeros(219)
    neg = np.zeros(219)
    for i in range(len(Y)):
        if Y[i] == 1:
            pos += (X[i] == 1)
        else:
            neg += (X[i] == 1)
    return pos, neg
'''
print(pos)
elif Y[i] == -1:
#print('####')
neg = np.count_nonzero(X[i], axis=0)
else:
pass
return pos,neg
print(frequency(xfold4,yfold4))
#frequency(xfold4,yfold4)
'''
model_bayes = GaussianNB(var_smoothing=1.5)
accb = cross_val_score(model_bayes, xtrain_k, ytrain_k, cv =5)
print('sklearn accuracy:', np.mean(accb))
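# The features counted by frequency() above are 0/1 indicators, so a Bernoulli model is
# arguably a better fit than GaussianNB; a hedged sketch using the same folds
# (alpha is the Laplace smoothing parameter):
from sklearn.naive_bayes import BernoulliNB
model_bnb = BernoulliNB(alpha=1.0)
accb_bnb = cross_val_score(model_bnb, xtrain_k, ytrain_k, cv=5)
print('sklearn BernoulliNB accuracy:', np.mean(accb_bnb))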
###-----SVM Over Trees-----###
print('please wait....this will take a bit..')
model_rf = RandomForestClassifier(n_estimators=200, max_depth=10)
model_svm = svm.SVC(C=1, gamma=0.01, probability=True)
ensemble_svmot = VotingClassifier(estimators=[('dt',model_rf), ('svm',model_svm)],voting='soft')
accuracy = cross_val_score(ensemble_svmot, xtrain_k, ytrain_k, cv=5)
print('sklearn accuracy',np.mean(accuracy))
|
[
"sklearn.ensemble.RandomForestClassifier",
"sklearn.naive_bayes.GaussianNB",
"sklearn.model_selection.cross_val_score",
"numpy.asarray",
"numpy.ones",
"numpy.hstack",
"numpy.append",
"sklearn.linear_model.LogisticRegression",
"sklearn.ensemble.VotingClassifier",
"numpy.mean",
"numpy.exp",
"sklearn.svm.SVC",
"numpy.dot"
] |
[((1381, 1418), 'numpy.append', 'np.append', (['x_train0', 'x_train1'], {'axis': '(0)'}), '(x_train0, x_train1, axis=0)\n', (1390, 1418), True, 'import numpy as np\n'), ((1434, 1473), 'numpy.append', 'np.append', (['train_set1', 'x_train2'], {'axis': '(0)'}), '(train_set1, x_train2, axis=0)\n', (1443, 1473), True, 'import numpy as np\n'), ((1489, 1528), 'numpy.append', 'np.append', (['train_set2', 'x_train3'], {'axis': '(0)'}), '(train_set2, x_train3, axis=0)\n', (1498, 1528), True, 'import numpy as np\n'), ((1544, 1583), 'numpy.append', 'np.append', (['train_set1', 'x_train3'], {'axis': '(0)'}), '(train_set1, x_train3, axis=0)\n', (1553, 1583), True, 'import numpy as np\n'), ((1599, 1636), 'numpy.append', 'np.append', (['x_train0', 'x_train2'], {'axis': '(0)'}), '(x_train0, x_train2, axis=0)\n', (1608, 1636), True, 'import numpy as np\n'), ((1652, 1691), 'numpy.append', 'np.append', (['train_set4', 'x_train3'], {'axis': '(0)'}), '(train_set4, x_train3, axis=0)\n', (1661, 1691), True, 'import numpy as np\n'), ((1707, 1744), 'numpy.append', 'np.append', (['x_train1', 'x_train2'], {'axis': '(0)'}), '(x_train1, x_train2, axis=0)\n', (1716, 1744), True, 'import numpy as np\n'), ((1760, 1799), 'numpy.append', 'np.append', (['train_set6', 'x_train3'], {'axis': '(0)'}), '(train_set6, x_train3, axis=0)\n', (1769, 1799), True, 'import numpy as np\n'), ((1833, 1872), 'numpy.append', 'np.append', (['train_set0', 'x_train4'], {'axis': '(0)'}), '(train_set0, x_train4, axis=0)\n', (1842, 1872), True, 'import numpy as np\n'), ((1884, 1923), 'numpy.append', 'np.append', (['train_set2', 'x_train3'], {'axis': '(0)'}), '(train_set2, x_train3, axis=0)\n', (1893, 1923), True, 'import numpy as np\n'), ((1935, 1974), 'numpy.append', 'np.append', (['train_set2', 'x_train4'], {'axis': '(0)'}), '(train_set2, x_train4, axis=0)\n', (1944, 1974), True, 'import numpy as np\n'), ((1986, 2025), 'numpy.append', 'np.append', (['train_set3', 'x_train4'], {'axis': '(0)'}), '(train_set3, x_train4, axis=0)\n', (1995, 2025), True, 'import numpy as np\n'), ((2037, 2076), 'numpy.append', 'np.append', (['train_set5', 'x_train4'], {'axis': '(0)'}), '(train_set5, x_train4, axis=0)\n', (2046, 2076), True, 'import numpy as np\n'), ((2088, 2127), 'numpy.append', 'np.append', (['train_set7', 'x_train4'], {'axis': '(0)'}), '(train_set7, x_train4, axis=0)\n', (2097, 2127), True, 'import numpy as np\n'), ((2843, 2880), 'numpy.append', 'np.append', (['y_train0', 'y_train1'], {'axis': '(0)'}), '(y_train0, y_train1, axis=0)\n', (2852, 2880), True, 'import numpy as np\n'), ((2892, 2927), 'numpy.append', 'np.append', (['train1', 'y_train2'], {'axis': '(0)'}), '(train1, y_train2, axis=0)\n', (2901, 2927), True, 'import numpy as np\n'), ((2939, 2974), 'numpy.append', 'np.append', (['train2', 'y_train3'], {'axis': '(0)'}), '(train2, y_train3, axis=0)\n', (2948, 2974), True, 'import numpy as np\n'), ((2986, 3021), 'numpy.append', 'np.append', (['train1', 'y_train3'], {'axis': '(0)'}), '(train1, y_train3, axis=0)\n', (2995, 3021), True, 'import numpy as np\n'), ((3033, 3070), 'numpy.append', 'np.append', (['y_train0', 'y_train2'], {'axis': '(0)'}), '(y_train0, y_train2, axis=0)\n', (3042, 3070), True, 'import numpy as np\n'), ((3082, 3117), 'numpy.append', 'np.append', (['train4', 'y_train3'], {'axis': '(0)'}), '(train4, y_train3, axis=0)\n', (3091, 3117), True, 'import numpy as np\n'), ((3129, 3166), 'numpy.append', 'np.append', (['y_train1', 'y_train2'], {'axis': '(0)'}), '(y_train1, y_train2, axis=0)\n', (3138, 3166), True, 'import numpy as 
np\n'), ((3178, 3213), 'numpy.append', 'np.append', (['train6', 'y_train3'], {'axis': '(0)'}), '(train6, y_train3, axis=0)\n', (3187, 3213), True, 'import numpy as np\n'), ((3226, 3261), 'numpy.append', 'np.append', (['train2', 'y_train3'], {'axis': '(0)'}), '(train2, y_train3, axis=0)\n', (3235, 3261), True, 'import numpy as np\n'), ((3273, 3308), 'numpy.append', 'np.append', (['train2', 'y_train4'], {'axis': '(0)'}), '(train2, y_train4, axis=0)\n', (3282, 3308), True, 'import numpy as np\n'), ((3320, 3355), 'numpy.append', 'np.append', (['train3', 'y_train4'], {'axis': '(0)'}), '(train3, y_train4, axis=0)\n', (3329, 3355), True, 'import numpy as np\n'), ((3367, 3402), 'numpy.append', 'np.append', (['train5', 'y_train4'], {'axis': '(0)'}), '(train5, y_train4, axis=0)\n', (3376, 3402), True, 'import numpy as np\n'), ((3414, 3449), 'numpy.append', 'np.append', (['train7', 'y_train4'], {'axis': '(0)'}), '(train7, y_train4, axis=0)\n', (3423, 3449), True, 'import numpy as np\n'), ((3463, 3498), 'numpy.append', 'np.append', (['train0', 'y_train4'], {'axis': '(0)'}), '(train0, y_train4, axis=0)\n', (3472, 3498), True, 'import numpy as np\n'), ((11272, 11296), 'sklearn.svm.SVC', 'svm.SVC', ([], {'C': '(1)', 'gamma': '(0.01)'}), '(C=1, gamma=0.01)\n', (11279, 11296), False, 'from sklearn import svm\n'), ((11303, 11355), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model_svm', 'xtrain_k', 'ytrain_k'], {'cv': '(5)'}), '(model_svm, xtrain_k, ytrain_k, cv=5)\n', (11318, 11355), False, 'from sklearn.model_selection import cross_val_score\n'), ((18075, 18130), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': '"""l2"""', 'C': '(0.1)', 'solver': '"""lbfgs"""'}), "(penalty='l2', C=0.1, solver='lbfgs')\n", (18093, 18130), False, 'from sklearn.linear_model import LogisticRegression\n'), ((18136, 18190), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model_logit', 'xtrain_k', 'ytrain_k'], {'cv': '(5)'}), '(model_logit, xtrain_k, ytrain_k, cv=5)\n', (18151, 18190), False, 'from sklearn.model_selection import cross_val_score\n'), ((18510, 18538), 'numpy.hstack', 'np.hstack', (['(y_fold4, xfold4)'], {}), '((y_fold4, xfold4))\n', (18519, 18538), True, 'import numpy as np\n'), ((18546, 18574), 'numpy.hstack', 'np.hstack', (['(y_fold3, xfold3)'], {}), '((y_fold3, xfold3))\n', (18555, 18574), True, 'import numpy as np\n'), ((18582, 18610), 'numpy.hstack', 'np.hstack', (['(y_fold2, xfold2)'], {}), '((y_fold2, xfold2))\n', (18591, 18610), True, 'import numpy as np\n'), ((18618, 18646), 'numpy.hstack', 'np.hstack', (['(y_fold1, xfold1)'], {}), '((y_fold1, xfold1))\n', (18627, 18646), True, 'import numpy as np\n'), ((18654, 18682), 'numpy.hstack', 'np.hstack', (['(y_fold0, xfold0)'], {}), '((y_fold0, xfold0))\n', (18663, 18682), True, 'import numpy as np\n'), ((20011, 20040), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {'var_smoothing': '(1.5)'}), '(var_smoothing=1.5)\n', (20021, 20040), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((20048, 20102), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model_bayes', 'xtrain_k', 'ytrain_k'], {'cv': '(5)'}), '(model_bayes, xtrain_k, ytrain_k, cv=5)\n', (20063, 20102), False, 'from sklearn.model_selection import cross_val_score\n'), ((20239, 20293), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(200)', 'max_depth': '(10)'}), '(n_estimators=200, max_depth=10)\n', (20261, 20293), False, 'from sklearn.ensemble import 
RandomForestClassifier\n'), ((20306, 20348), 'sklearn.svm.SVC', 'svm.SVC', ([], {'C': '(1)', 'gamma': '(0.01)', 'probability': '(True)'}), '(C=1, gamma=0.01, probability=True)\n', (20313, 20348), False, 'from sklearn import svm\n'), ((20366, 20453), 'sklearn.ensemble.VotingClassifier', 'VotingClassifier', ([], {'estimators': "[('dt', model_rf), ('svm', model_svm)]", 'voting': '"""soft"""'}), "(estimators=[('dt', model_rf), ('svm', model_svm)], voting=\n 'soft')\n", (20382, 20453), False, 'from sklearn.ensemble import VotingClassifier\n'), ((20457, 20514), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['ensemble_svmot', 'xtrain_k', 'ytrain_k'], {'cv': '(5)'}), '(ensemble_svmot, xtrain_k, ytrain_k, cv=5)\n', (20472, 20514), False, 'from sklearn.model_selection import cross_val_score\n'), ((1008, 1028), 'numpy.asarray', 'np.asarray', (['train_rs'], {}), '(train_rs)\n', (1018, 1028), True, 'import numpy as np\n'), ((2517, 2540), 'numpy.asarray', 'np.asarray', (['train_label'], {}), '(train_label)\n', (2527, 2540), True, 'import numpy as np\n'), ((3646, 3658), 'numpy.ones', 'np.ones', (['(219)'], {}), '(219)\n', (3653, 3658), True, 'import numpy as np\n'), ((11376, 11388), 'numpy.mean', 'np.mean', (['acc'], {}), '(acc)\n', (11383, 11388), True, 'import numpy as np\n'), ((11484, 11496), 'numpy.ones', 'np.ones', (['(219)'], {}), '(219)\n', (11491, 11496), True, 'import numpy as np\n'), ((18217, 18230), 'numpy.mean', 'np.mean', (['accl'], {}), '(accl)\n', (18224, 18230), True, 'import numpy as np\n'), ((20131, 20144), 'numpy.mean', 'np.mean', (['accb'], {}), '(accb)\n', (20138, 20144), True, 'import numpy as np\n'), ((20540, 20557), 'numpy.mean', 'np.mean', (['accuracy'], {}), '(accuracy)\n', (20547, 20557), True, 'import numpy as np\n'), ((11602, 11617), 'numpy.dot', 'np.dot', (['X[i]', 'w'], {}), '(X[i], w)\n', (11608, 11617), True, 'import numpy as np\n'), ((11930, 11945), 'numpy.dot', 'np.dot', (['X[i]', 'w'], {}), '(X[i], w)\n', (11936, 11945), True, 'import numpy as np\n'), ((4167, 4182), 'numpy.dot', 'np.dot', (['X[i]', 'w'], {}), '(X[i], w)\n', (4173, 4182), True, 'import numpy as np\n'), ((11674, 11696), 'numpy.dot', 'np.dot', (['X[i]', '(s - Y[i])'], {}), '(X[i], s - Y[i])\n', (11680, 11696), True, 'import numpy as np\n'), ((11639, 11649), 'numpy.exp', 'np.exp', (['(-z)'], {}), '(-z)\n', (11645, 11649), True, 'import numpy as np\n'), ((3815, 3830), 'numpy.dot', 'np.dot', (['X[i]', 'w'], {}), '(X[i], w)\n', (3821, 3830), True, 'import numpy as np\n')]
|
#! /usr/bin/env python
import tensorflow as tf
import numpy as np
import re
import os
import time
import datetime
import gc
import sys
import shutil
from input_helpers import InputHelper
from siamese_network_semantic import SiameseLSTMw2v
from tensorflow.contrib import learn
import gzip
from random import random
# Parameters
# ==================================================
tf.flags.DEFINE_string("training_filepath", "data/train_snli.txt", "training file path (default: None)")
tf.flags.DEFINE_string("output_dirpath", None, "output directory path (default: None)")
tf.flags.DEFINE_float("y_scale", 5.0, "scale of y in training file (default: 5.0)")
tf.flags.DEFINE_integer("y_position", 0, "position of y in training file (default: 0)")
tf.flags.DEFINE_integer("x1_position", 0, "position of x1 in training file (default: 1)")
tf.flags.DEFINE_integer("x2_position", 0, "position of x2 in training file (default: 2)")
tf.flags.DEFINE_boolean("header", False, "if training file has a header (default: False)")
# Embedding parameters
tf.flags.DEFINE_string("word2vec_model", "wiki.simple.vec", "word2vec pre-trained embeddings file (default: None)")
tf.flags.DEFINE_string("word2vec_format", "text", "word2vec pre-trained embeddings file format (bin/text/textgz)(default: None)")
tf.flags.DEFINE_integer("embedding_dim", 300, "Dimensionality of character embedding (default: 300)")
# RNN stack parameters
tf.flags.DEFINE_boolean("tied", True, "Different side weights are tied / untied (default: True)")
tf.flags.DEFINE_float("side1_dropout", 1.0, "Dropout keep probability (default: 1.0)")
tf.flags.DEFINE_float("side2_dropout", 1.0, "Dropout keep probability (default: 1.0)")
tf.flags.DEFINE_list("side1_nodes", [50, 50, 50], "Number of nodes in layers for Side_1 (default:50,50,50)")
tf.flags.DEFINE_list("side2_nodes", [50, 50, 50], "Number of nodes in layers for Side_2 (default:50,50,50)")
# Training parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("num_epochs", 300, "Number of training epochs (default: 200)")
tf.flags.DEFINE_integer("max_iterations", 500000, "Maximum number of iterations")
tf.flags.DEFINE_integer("evaluate_every", 1000, "Evaluate model on dev set after this many steps (default: 100)")
tf.flags.DEFINE_integer("checkpoint_every", 1000, "Save model after this many steps (default: 100)")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
FLAGS(sys.argv)
print("EXECUTION PARAMETERS:")
for attr, flag in sorted(FLAGS.__flags.items()):
print("{} = {}".format(attr.upper(), flag.value))
if FLAGS.training_filepath==None:
print("Input File path is empty. use --training_filepath argument.")
exit()
max_document_length=15
inpH = InputHelper()
train_set, dev_set, vocab_processor, sum_no_of_batches = inpH.getDataSets(
FLAGS.training_filepath, FLAGS.y_position, FLAGS.x1_position, FLAGS.x2_position, FLAGS.header, max_document_length, 10, FLAGS.batch_size)
trainableEmbeddings=False
if FLAGS.word2vec_model==None:
trainableEmbeddings=True
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
"You are using word embedding based semantic similarity but "
"word2vec model path is empty. It is Recommended to use --word2vec_model argument. "
"Otherwise now the code is automatically trying to learn embedding values (may not help in accuracy)"
"\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n")
else:
inpH.loadW2V(FLAGS.word2vec_model, FLAGS.word2vec_format)
# Training
# ==================================================
with tf.Graph().as_default():
sess_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
sess = tf.Session(config=sess_conf)
with sess.as_default():
siameseModel = SiameseLSTMw2v(
sequence_length=max_document_length,
vocab_size=len(vocab_processor.vocabulary_),
embedding_size=FLAGS.embedding_dim,
batch_size=FLAGS.batch_size,
trainableEmbeddings=trainableEmbeddings,
tied=FLAGS.tied,
side1_nodes=FLAGS.side1_nodes,
side2_nodes=FLAGS.side2_nodes,
)
global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = tf.train.AdamOptimizer(1e-3)
grads_and_vars = optimizer.compute_gradients(siameseModel.loss)
train_op_set = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
# Keep track of gradient values and sparsity (optional)
grad_summaries = []
for g, v in grads_and_vars:
if g is not None:
grad_hist_summary = tf.summary.histogram("{}/grad/hist".format(v.name), g)
sparsity_summary = tf.summary.scalar("{}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g))
grad_summaries.append(grad_hist_summary)
grad_summaries.append(sparsity_summary)
grad_summaries_merged = tf.summary.merge(grad_summaries)
# Output directory for models and summaries
out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", str(int(time.time())))) \
if FLAGS.output_dirpath is None else \
os.path.abspath(FLAGS.output_dirpath)
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
print("Writing to %s." % out_dir)
# Summaries for loss pcc rho mse
loss_summary = tf.summary.scalar("loss", siameseModel.loss)
pcc_summary = tf.summary.scalar("pcc", siameseModel.pcc)
rho_summary = tf.summary.scalar("rho", siameseModel.rho)
mse_summary = tf.summary.scalar("mse", siameseModel.mse)
# Train Summaries
train_summary_op = tf.summary.merge([loss_summary, pcc_summary, rho_summary, mse_summary, grad_summaries_merged])
train_summary_dir = os.path.join(out_dir, "summaries", "train")
train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
# Dev summaries
dev_summary_op = tf.summary.merge([loss_summary, pcc_summary, rho_summary, mse_summary])
dev_summary_dir = os.path.join(out_dir, "summaries", "dev")
dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)
# Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it
checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
saver = tf.train.Saver(tf.global_variables(), max_to_keep=100)
# Write vocabulary
vocab_processor.save(os.path.join(checkpoint_dir, "vocab"))
# Initialize all variables
sess.run(tf.global_variables_initializer())
graph_def = tf.get_default_graph().as_graph_def()
graphpb_txt = str(graph_def)
with open(os.path.join(checkpoint_dir, "graphpb.txt"), 'w') as f:
f.write(graphpb_txt)
if FLAGS.word2vec_model :
# initial matrix with random uniform
initW = np.random.uniform(-0.25, 0.25, (len(vocab_processor.vocabulary_), FLAGS.embedding_dim))
#initW = np.zeros(shape=(len(vocab_processor.vocabulary_), FLAGS.embedding_dim))
# load any vectors from the word2vec
for w in vocab_processor.vocabulary_._mapping:
arr=[]
s = re.sub('[^0-9a-zA-Z]+', '', w)
if w in inpH.pre_emb:
arr=inpH.pre_emb[w]
elif w.lower() in inpH.pre_emb:
arr=inpH.pre_emb[w.lower()]
elif s in inpH.pre_emb:
arr=inpH.pre_emb[s]
elif s.isdigit():
arr=inpH.pre_emb["zero"]
if len(arr)>0:
idx = vocab_processor.vocabulary_.get(w)
initW[idx]=np.asarray(arr).astype(np.float32)
inpH.deletePreEmb()
gc.collect()
sess.run(siameseModel.W.assign(initW))
def train_step(x1_batch, x2_batch, y_batch, i):
random_value = random()
feed_dict = {
siameseModel.input_x1: x1_batch if random_value > 0.5 else x2_batch,
siameseModel.input_x2: x2_batch if random_value > 0.5 else x1_batch,
siameseModel.input_y_norm: map(lambda x: x / FLAGS.y_scale, y_batch),
siameseModel.side1_dropout: FLAGS.side1_dropout,
siameseModel.side2_dropout: FLAGS.side2_dropout,
}
_, step, loss, pcc, rho, mse, dist, summaries = sess.run([train_op_set, global_step, siameseModel.loss, siameseModel.pcc, siameseModel.rho, siameseModel.mse, siameseModel.distance, train_summary_op], feed_dict)
time_str = datetime.datetime.now().isoformat()
if i % 100 == 0:
print("TRAIN {}: step {}, loss {}, pcc: {}, rho: {}, mse: {}".format(time_str, step, loss, pcc, rho, mse * FLAGS.y_scale))
train_summary_writer.add_summary(summaries, step)
def dev_step(x1_batch, x2_batch, y_batch, i):
random_value = random()
feed_dict = {
siameseModel.input_x1: x1_batch if random_value > 0.5 else x2_batch,
siameseModel.input_x2: x2_batch if random_value > 0.5 else x1_batch,
siameseModel.input_y_norm: map(lambda x: x / FLAGS.y_scale, y_batch),
siameseModel.side1_dropout: 1.0,
siameseModel.side2_dropout: 1.0,
}
step, loss, pcc, rho, mse, summaries = sess.run([global_step, siameseModel.loss, siameseModel.pcc, siameseModel.rho, siameseModel.mse, dev_summary_op], feed_dict)
time_str = datetime.datetime.now().isoformat()
if i % 100 == 0:
print("DEV {}: step {}, loss {}, pcc {}, rho {}, mse: {}".format(time_str, step, loss, pcc, rho, mse * FLAGS.y_scale))
dev_summary_writer.add_summary(summaries, step)
return mse * FLAGS.y_scale
# Generate batches
batches = inpH.batch_iter(list(zip(train_set[0], train_set[1], train_set[2])), FLAGS.batch_size, FLAGS.num_epochs)
max_validation_mse=1e256
n_iterations = sum_no_of_batches * FLAGS.num_epochs
n_iterations = n_iterations if n_iterations < FLAGS.max_iterations else FLAGS.max_iterations
print('Total number of iterations %s.' % n_iterations)
for nn in xrange(n_iterations):
batch = batches.next()
if len(batch)<1:
continue
x1_batch, x2_batch, y_batch = zip(*batch)
if len(y_batch)<1:
continue
train_step(x1_batch, x2_batch, y_batch, nn)
step = tf.train.global_step(sess, global_step)
current_evaluation_total_mse = 0.0
if step % FLAGS.evaluate_every == 0:
dev_batches = inpH.batch_iter(list(zip(dev_set[0], dev_set[1], dev_set[2])), FLAGS.batch_size, 1)
i = 0
for db in dev_batches:
if len(db)<1:
continue
x1_dev_b, x2_dev_b, y_dev_b = zip(*db)
if len(y_dev_b)<1:
continue
current_evaluation_total_mse = current_evaluation_total_mse + dev_step(x1_dev_b, x2_dev_b, y_dev_b, i)
i = i + 1
if current_evaluation_total_mse <= max_validation_mse:
max_validation_mse = current_evaluation_total_mse
saver.save(sess, checkpoint_prefix, global_step=step)
tf.train.write_graph(sess.graph.as_graph_def(), checkpoint_prefix, "graph"+str(nn)+".pb", as_text=False)
print("Saved model {} with total_mse={} checkpoint to {}.".format(nn, max_validation_mse, checkpoint_prefix))
|
[
"tensorflow.nn.zero_fraction",
"tensorflow.ConfigProto",
"tensorflow.global_variables",
"tensorflow.Variable",
"gc.collect",
"shutil.rmtree",
"tensorflow.summary.merge",
"tensorflow.get_default_graph",
"os.path.join",
"os.path.abspath",
"os.path.exists",
"input_helpers.InputHelper",
"tensorflow.summary.FileWriter",
"re.sub",
"tensorflow.flags.DEFINE_list",
"tensorflow.flags.DEFINE_boolean",
"tensorflow.train.global_step",
"datetime.datetime.now",
"tensorflow.summary.scalar",
"tensorflow.global_variables_initializer",
"numpy.asarray",
"tensorflow.Session",
"random.random",
"tensorflow.Graph",
"tensorflow.flags.DEFINE_integer",
"tensorflow.flags.DEFINE_string",
"os.makedirs",
"tensorflow.flags.DEFINE_float",
"time.time",
"tensorflow.train.AdamOptimizer"
] |
[((381, 489), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""training_filepath"""', '"""data/train_snli.txt"""', '"""training file path (default: None)"""'], {}), "('training_filepath', 'data/train_snli.txt',\n 'training file path (default: None)')\n", (403, 489), True, 'import tensorflow as tf\n'), ((486, 577), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""output_dirpath"""', 'None', '"""output directory path (default: None)"""'], {}), "('output_dirpath', None,\n 'output directory path (default: None)')\n", (508, 577), True, 'import tensorflow as tf\n'), ((574, 661), 'tensorflow.flags.DEFINE_float', 'tf.flags.DEFINE_float', (['"""y_scale"""', '(5.0)', '"""scale of y in training file (default: 5.0)"""'], {}), "('y_scale', 5.0,\n 'scale of y in training file (default: 5.0)')\n", (595, 661), True, 'import tensorflow as tf\n'), ((658, 749), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""y_position"""', '(0)', '"""position of y in training file (default: 0)"""'], {}), "('y_position', 0,\n 'position of y in training file (default: 0)')\n", (681, 749), True, 'import tensorflow as tf\n'), ((746, 839), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""x1_position"""', '(0)', '"""position of x1 in training file (default: 1)"""'], {}), "('x1_position', 0,\n 'position of x1 in training file (default: 1)')\n", (769, 839), True, 'import tensorflow as tf\n'), ((836, 929), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""x2_position"""', '(0)', '"""position of x2 in training file (default: 2)"""'], {}), "('x2_position', 0,\n 'position of x2 in training file (default: 2)')\n", (859, 929), True, 'import tensorflow as tf\n'), ((926, 1020), 'tensorflow.flags.DEFINE_boolean', 'tf.flags.DEFINE_boolean', (['"""header"""', '(False)', '"""if training file has a header (default: False)"""'], {}), "('header', False,\n 'if training file has a header (default: False)')\n", (949, 1020), True, 'import tensorflow as tf\n'), ((1041, 1160), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""word2vec_model"""', '"""wiki.simple.vec"""', '"""word2vec pre-trained embeddings file (default: None)"""'], {}), "('word2vec_model', 'wiki.simple.vec',\n 'word2vec pre-trained embeddings file (default: None)')\n", (1063, 1160), True, 'import tensorflow as tf\n'), ((1157, 1295), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""word2vec_format"""', '"""text"""', '"""word2vec pre-trained embeddings file format (bin/text/textgz)(default: None)"""'], {}), "('word2vec_format', 'text',\n 'word2vec pre-trained embeddings file format (bin/text/textgz)(default: None)'\n )\n", (1179, 1295), True, 'import tensorflow as tf\n'), ((1287, 1392), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""embedding_dim"""', '(300)', '"""Dimensionality of character embedding (default: 300)"""'], {}), "('embedding_dim', 300,\n 'Dimensionality of character embedding (default: 300)')\n", (1310, 1392), True, 'import tensorflow as tf\n'), ((1413, 1514), 'tensorflow.flags.DEFINE_boolean', 'tf.flags.DEFINE_boolean', (['"""tied"""', '(True)', '"""Different side weights are tied / untied (default: True)"""'], {}), "('tied', True,\n 'Different side weights are tied / untied (default: True)')\n", (1436, 1514), True, 'import tensorflow as tf\n'), ((1511, 1601), 'tensorflow.flags.DEFINE_float', 'tf.flags.DEFINE_float', (['"""side1_dropout"""', '(1.0)', '"""Dropout keep probability (default: 1.0)"""'], {}), "('side1_dropout', 1.0,\n 
'Dropout keep probability (default: 1.0)')\n", (1532, 1601), True, 'import tensorflow as tf\n'), ((1598, 1688), 'tensorflow.flags.DEFINE_float', 'tf.flags.DEFINE_float', (['"""side2_dropout"""', '(1.0)', '"""Dropout keep probability (default: 1.0)"""'], {}), "('side2_dropout', 1.0,\n 'Dropout keep probability (default: 1.0)')\n", (1619, 1688), True, 'import tensorflow as tf\n'), ((1685, 1797), 'tensorflow.flags.DEFINE_list', 'tf.flags.DEFINE_list', (['"""side1_nodes"""', '[50, 50, 50]', '"""Number of nodes in layers for Side_1 (default:50,50,50)"""'], {}), "('side1_nodes', [50, 50, 50],\n 'Number of nodes in layers for Side_1 (default:50,50,50)')\n", (1705, 1797), True, 'import tensorflow as tf\n'), ((1794, 1906), 'tensorflow.flags.DEFINE_list', 'tf.flags.DEFINE_list', (['"""side2_nodes"""', '[50, 50, 50]', '"""Number of nodes in layers for Side_2 (default:50,50,50)"""'], {}), "('side2_nodes', [50, 50, 50],\n 'Number of nodes in layers for Side_2 (default:50,50,50)')\n", (1814, 1906), True, 'import tensorflow as tf\n'), ((1926, 1995), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""batch_size"""', '(64)', '"""Batch Size (default: 64)"""'], {}), "('batch_size', 64, 'Batch Size (default: 64)')\n", (1949, 1995), True, 'import tensorflow as tf\n'), ((1996, 2086), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""num_epochs"""', '(300)', '"""Number of training epochs (default: 200)"""'], {}), "('num_epochs', 300,\n 'Number of training epochs (default: 200)')\n", (2019, 2086), True, 'import tensorflow as tf\n'), ((2083, 2168), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""max_iterations"""', '(500000)', '"""Maximum number of iterations"""'], {}), "('max_iterations', 500000,\n 'Maximum number of iterations')\n", (2106, 2168), True, 'import tensorflow as tf\n'), ((2165, 2282), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""evaluate_every"""', '(1000)', '"""Evaluate model on dev set after this many steps (default: 100)"""'], {}), "('evaluate_every', 1000,\n 'Evaluate model on dev set after this many steps (default: 100)')\n", (2188, 2282), True, 'import tensorflow as tf\n'), ((2279, 2383), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""checkpoint_every"""', '(1000)', '"""Save model after this many steps (default: 100)"""'], {}), "('checkpoint_every', 1000,\n 'Save model after this many steps (default: 100)')\n", (2302, 2383), True, 'import tensorflow as tf\n'), ((2399, 2494), 'tensorflow.flags.DEFINE_boolean', 'tf.flags.DEFINE_boolean', (['"""allow_soft_placement"""', '(True)', '"""Allow device soft device placement"""'], {}), "('allow_soft_placement', True,\n 'Allow device soft device placement')\n", (2422, 2494), True, 'import tensorflow as tf\n'), ((2491, 2584), 'tensorflow.flags.DEFINE_boolean', 'tf.flags.DEFINE_boolean', (['"""log_device_placement"""', '(False)', '"""Log placement of ops on devices"""'], {}), "('log_device_placement', False,\n 'Log placement of ops on devices')\n", (2514, 2584), True, 'import tensorflow as tf\n'), ((2900, 2913), 'input_helpers.InputHelper', 'InputHelper', ([], {}), '()\n', (2911, 2913), False, 'from input_helpers import InputHelper\n'), ((3766, 3882), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': 'FLAGS.allow_soft_placement', 'log_device_placement': 'FLAGS.log_device_placement'}), '(allow_soft_placement=FLAGS.allow_soft_placement,\n log_device_placement=FLAGS.log_device_placement)\n', (3780, 3882), True, 'import tensorflow as 
tf\n'), ((3897, 3925), 'tensorflow.Session', 'tf.Session', ([], {'config': 'sess_conf'}), '(config=sess_conf)\n', (3907, 3925), True, 'import tensorflow as tf\n'), ((5017, 5049), 'tensorflow.summary.merge', 'tf.summary.merge', (['grad_summaries'], {}), '(grad_summaries)\n', (5033, 5049), True, 'import tensorflow as tf\n'), ((5294, 5317), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (5308, 5317), False, 'import os\n'), ((5435, 5479), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'siameseModel.loss'], {}), "('loss', siameseModel.loss)\n", (5452, 5479), True, 'import tensorflow as tf\n'), ((5496, 5538), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""pcc"""', 'siameseModel.pcc'], {}), "('pcc', siameseModel.pcc)\n", (5513, 5538), True, 'import tensorflow as tf\n'), ((5555, 5597), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""rho"""', 'siameseModel.rho'], {}), "('rho', siameseModel.rho)\n", (5572, 5597), True, 'import tensorflow as tf\n'), ((5614, 5656), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""mse"""', 'siameseModel.mse'], {}), "('mse', siameseModel.mse)\n", (5631, 5656), True, 'import tensorflow as tf\n'), ((5699, 5797), 'tensorflow.summary.merge', 'tf.summary.merge', (['[loss_summary, pcc_summary, rho_summary, mse_summary, grad_summaries_merged]'], {}), '([loss_summary, pcc_summary, rho_summary, mse_summary,\n grad_summaries_merged])\n', (5715, 5797), True, 'import tensorflow as tf\n'), ((5816, 5859), 'os.path.join', 'os.path.join', (['out_dir', '"""summaries"""', '"""train"""'], {}), "(out_dir, 'summaries', 'train')\n", (5828, 5859), False, 'import os\n'), ((5885, 5937), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['train_summary_dir', 'sess.graph'], {}), '(train_summary_dir, sess.graph)\n', (5906, 5937), True, 'import tensorflow as tf\n'), ((5976, 6047), 'tensorflow.summary.merge', 'tf.summary.merge', (['[loss_summary, pcc_summary, rho_summary, mse_summary]'], {}), '([loss_summary, pcc_summary, rho_summary, mse_summary])\n', (5992, 6047), True, 'import tensorflow as tf\n'), ((6068, 6109), 'os.path.join', 'os.path.join', (['out_dir', '"""summaries"""', '"""dev"""'], {}), "(out_dir, 'summaries', 'dev')\n", (6080, 6109), False, 'import os\n'), ((6133, 6183), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['dev_summary_dir', 'sess.graph'], {}), '(dev_summary_dir, sess.graph)\n', (6154, 6183), True, 'import tensorflow as tf\n'), ((6379, 6416), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""model"""'], {}), "(checkpoint_dir, 'model')\n", (6391, 6416), False, 'import os\n'), ((4332, 4383), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'name': '"""global_step"""', 'trainable': '(False)'}), "(0, name='global_step', trainable=False)\n", (4343, 4383), True, 'import tensorflow as tf\n'), ((4400, 4429), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(0.001)'], {}), '(0.001)\n', (4422, 4429), True, 'import tensorflow as tf\n'), ((5251, 5288), 'os.path.abspath', 'os.path.abspath', (['FLAGS.output_dirpath'], {}), '(FLAGS.output_dirpath)\n', (5266, 5288), False, 'import os\n'), ((5323, 5345), 'shutil.rmtree', 'shutil.rmtree', (['out_dir'], {}), '(out_dir)\n', (5336, 5345), False, 'import shutil\n'), ((6319, 6355), 'os.path.join', 'os.path.join', (['out_dir', '"""checkpoints"""'], {}), "(out_dir, 'checkpoints')\n", (6331, 6355), False, 'import os\n'), ((6426, 6456), 'os.path.exists', 'os.path.exists', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (6440, 6456), False, 'import 
os\n'), ((6462, 6489), 'os.makedirs', 'os.makedirs', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (6473, 6489), False, 'import os\n'), ((6515, 6536), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (6534, 6536), True, 'import tensorflow as tf\n'), ((6600, 6637), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""vocab"""'], {}), "(checkpoint_dir, 'vocab')\n", (6612, 6637), False, 'import os\n'), ((6680, 6713), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (6711, 6713), True, 'import tensorflow as tf\n'), ((7690, 7702), 'gc.collect', 'gc.collect', ([], {}), '()\n', (7700, 7702), False, 'import gc\n'), ((7816, 7824), 'random.random', 'random', ([], {}), '()\n', (7822, 7824), False, 'from random import random\n'), ((8724, 8732), 'random.random', 'random', ([], {}), '()\n', (8730, 8732), False, 'from random import random\n'), ((10127, 10166), 'tensorflow.train.global_step', 'tf.train.global_step', (['sess', 'global_step'], {}), '(sess, global_step)\n', (10147, 10166), True, 'import tensorflow as tf\n'), ((3726, 3736), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (3734, 3736), True, 'import tensorflow as tf\n'), ((6729, 6751), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (6749, 6751), True, 'import tensorflow as tf\n'), ((6810, 6853), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""graphpb.txt"""'], {}), "(checkpoint_dir, 'graphpb.txt')\n", (6822, 6853), False, 'import os\n'), ((7262, 7292), 're.sub', 're.sub', (['"""[^0-9a-zA-Z]+"""', '""""""', 'w'], {}), "('[^0-9a-zA-Z]+', '', w)\n", (7268, 7292), False, 'import re\n'), ((4874, 4896), 'tensorflow.nn.zero_fraction', 'tf.nn.zero_fraction', (['g'], {}), '(g)\n', (4893, 4896), True, 'import tensorflow as tf\n'), ((8416, 8439), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8437, 8439), False, 'import datetime\n'), ((9248, 9271), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9269, 9271), False, 'import datetime\n'), ((5170, 5181), 'time.time', 'time.time', ([], {}), '()\n', (5179, 5181), False, 'import time\n'), ((7627, 7642), 'numpy.asarray', 'np.asarray', (['arr'], {}), '(arr)\n', (7637, 7642), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 19 10:28:35 2021
@author: <NAME> -Spatial structure index value distribution of urban streetscape
"""
import pickle
from database import postSQL2gpd,gpd2postSQL
import pandas as pd
xian_epsg=32649 #Xi'an WGS84 / UTM zone 49N
wgs84_epsg=4326
poi_classificationName={
0:"delicacy",
1:"hotel",
2:"shopping",
3:"lifeService",
4:"beauty",
5:"spot",
6:"entertainment",
7:"sports",
8:"education",
9:"media",
10:"medicalTreatment",
11:"carService",
12:"trafficFacilities",
13:"finance",
14:"realEstate",
15:"corporation",
16:"government",
17:"entrance",
18:"naturalFeatures",
}
poi_classificationName_reverse={v:k for k,v in poi_classificationName.items()}
def street_poi_structure(poi,position,distance=300):
from tqdm import tqdm
import pickle,math
import pandas as pd
import numpy as np
import geopandas as gpd
# tqdm.pandas()
poi_num=len(poi_classificationName.keys())
feature_vector=np.zeros(poi_num)
poi_=poi.copy(deep=True)
pos_poi_dict={}
pos_poi_idxes_df=pd.DataFrame(columns=['geometry','frank_e','num'])
pos_poi_feature_vector_df=pd.DataFrame(columns=['geometry']+list(range(poi_num)))
# print(pos_poi_feature_vector)
for idx,row in tqdm(position.iterrows(),total=position.shape[0]):
        feature_vector=np.zeros(poi_num) #reset the POI histogram for each sampling position so counts do not carry over
        poi_['within']=poi_.geometry.apply(lambda pt: pt.within(row.geometry.buffer(distance)))
# print(poi_)
poi_selection_df=poi_[poi_['within']==True]
counts=poi_selection_df.level_0.value_counts().to_dict()
num=len(poi_selection_df)
counts_percent={k:v/num for k,v in counts.items()}
# print(counts_percent)
ve=0.0
for v in counts_percent.values():
if v!=0.:
ve-=v*math.log(v)
max_entropy=math.log(num)
frank_e=ve/max_entropy*100
# print(max_entropy,frank_e)
        for k,v in counts.items(): #tally the frequency (histogram) of each POI category
poi_name=k.split("_")[-1]
poi_idx=poi_classificationName_reverse[poi_name]
# print(poi_idx,v)
feature_vector[poi_idx]=v
# print(feature_vector)
pos_poi_dict.update({idx:{'fn_stem':row.fn_stem, 'fn_key':row.fn_key, 'fn_idx':row.fn_idx ,'counts':counts,'counts_percent':counts_percent,'feature_vector':feature_vector,'num':num,'frank_e':frank_e,'geometry':row.geometry}})
pos_poi_idxes_df=pos_poi_idxes_df.append({'fn_stem':row.fn_stem, 'fn_key':row.fn_key, 'fn_idx':row.fn_idx,'geometry':row.geometry,'frank_e':frank_e,'num':num},ignore_index=True)
feature_vector_dict={i:feature_vector[i] for i in range(len(feature_vector))}
feature_vector_dict.update({'geometry':row.geometry,'fn_stem':row.fn_stem, 'fn_key':row.fn_key, 'fn_idx':row.fn_idx,})
pos_poi_feature_vector_df=pos_poi_feature_vector_df.append(feature_vector_dict,ignore_index=True)
# if idx==3:break
pos_poi_idxes_gdf=gpd.GeoDataFrame(pos_poi_idxes_df,geometry=pos_poi_idxes_df.geometry,crs=position.crs)
pos_poi_idxes_gdf['num_diff']=pos_poi_idxes_gdf.num.diff()
pos_poi_feature_vector_gdf=gpd.GeoDataFrame(pos_poi_feature_vector_df,geometry=pos_poi_feature_vector_df.geometry,crs=position.crs)
with open('./processed data/pos_poi_dict.pkl','wb') as f:
pickle.dump(pos_poi_dict,f)
return pos_poi_idxes_gdf,pos_poi_feature_vector_gdf
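# Worked toy example of the evenness index computed in the loop above (numbers are
# illustrative, not from the data): counts = {'delicacy': 3, 'hotel': 1} gives num = 4,
# shares 0.75/0.25, ve = -(0.75*ln 0.75 + 0.25*ln 0.25) ~ 0.562, max_entropy = ln(4) ~ 1.386,
# so frank_e ~ 40.6. Note the code normalises by ln(total POI count); classic Shannon
# evenness normalises by ln(number of categories), so treat frank_e as a relative score.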
def poi_feature_clustering(feature_vector,fields,n_clusters=7,feature_analysis=True):
import pandas as pd
from sklearn.neighbors import NearestNeighbors
import numpy as np
from sklearn import cluster
from shapely.geometry import Point
import geopandas as gpd
import pyproj
from yellowbrick.cluster import KElbowVisualizer
from yellowbrick.features import Manifold
from sklearn.feature_selection import chi2, SelectKBest, f_classif
from sklearn import preprocessing
from sklearn.preprocessing import normalize
import matplotlib.pyplot as plt
pts_geometry=feature_vector[['geometry']]
pts_geometry[['x','y']]=pts_geometry.geometry.apply(lambda row:pd.Series([row.x,row.y]))
# print(pts_geometry)
pts_coordis=pts_geometry[['x','y']].to_numpy()
# print(pts_coordis)
nbrs=NearestNeighbors(n_neighbors=9, algorithm='ball_tree').fit(pts_coordis)
connectivity=nbrs.kneighbors_graph(pts_coordis)
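    # the k-nearest-neighbour graph is passed to AgglomerativeClustering below as a
    # connectivity constraint, so only spatially adjacent points can be merged and the
    # resulting clusters stay geographically contiguous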
# print(connectivity.toarray())
X_=feature_vector[fields].to_numpy()
X=normalize(X_,axis=0, norm='max')
clustering=cluster.AgglomerativeClustering(connectivity=connectivity,n_clusters=n_clusters).fit(X)
feature_vector['clustering']=clustering.labels_
#_________________________________________________________________________
if feature_analysis==True:
y=clustering.labels_
selector=SelectKBest(score_func=f_classif, k=len(fields)) #score_func=chi2
selector.fit(X,y)
dfscores = pd.DataFrame(selector.scores_)
dfpvalues=pd.DataFrame(selector.pvalues_)
dfcolumns = pd.DataFrame(fields)
featureScores = pd.concat([dfcolumns,dfscores,dfpvalues],axis=1)
featureScores.columns = ['Factor','Score','p_value'] #naming the dataframe columns
featureScores['Factor']=featureScores['Factor'].apply(lambda row:int(row))
featureScores['poi_name']=featureScores['Factor'].map(poi_classificationName)
featureScores=featureScores.sort_values(by=['Score'])
# print(type(featureScores['Factor'][0]))
print(featureScores)
# featureScores.to_excel('./graph/tl_poi_features scores.xlsx')
featureScores_=featureScores.set_index('Factor')
featureScores_.nlargest(len(fields),'Score').Score.plot(kind='barh',figsize=(30,20),fontsize=38)
featureScores_.Score.plot(kind='barh')
plt.show()
clustering_=cluster.AgglomerativeClustering(connectivity=connectivity,) #n_clusters=n_clusters
visualizer = KElbowVisualizer(clustering_, timings=False,size=(500, 500), k=(4,12)) #k=(4,12) metric='calinski_harabasz'
visualizer.fit(X) # Fit the data to the visualizer
# visualizer.show(outpath="./graph/tl_poi_clustering_KEIbow_.png") # Finalize and render the figure
return feature_vector
if __name__=="__main__":
# poi_gdf=postSQL2gpd(table_name='poi',geom_col='geometry',myusername='postgres',mypassword='<PASSWORD>',mydatabase='streetscape_GSV')
# poi_gdf=poi_gdf.to_crs(xian_epsg)
# tl_idxes_clustering_12_gdf=postSQL2gpd(table_name='tl_idxes_clustering_12',geom_col='geometry',myusername='postgres',mypassword='<PASSWORD>',mydatabase='streetscape_GSV')
# pos_poi_idxes_gdf,pos_poi_feature_vector_gdf=street_poi_structure(poi=poi_gdf,position=tl_idxes_clustering_12_gdf)
# gpd2postSQL(pos_poi_idxes_gdf,table_name='pos_poi_idxes',myusername='postgres',mypassword='<PASSWORD>',mydatabase='streetscape_GSV')
# gpd2postSQL(pos_poi_feature_vector_gdf,table_name='pos_poi_feature_vector',myusername='postgres',mypassword='<PASSWORD>',mydatabase='streetscape_GSV')
# with open('./processed data/pos_poi_dict.pkl','rb') as f:
# pos_poi_dict=pickle.load(f)
pos_poi_feature_vector_gdf=postSQL2gpd(table_name='pos_poi_feature_vector',geom_col='geometry',myusername='postgres',mypassword='<PASSWORD>',mydatabase='streetscape_GSV')
fields=[ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10','11', '12', '13', '14', '15', '16', '17', '18']
n_clusters=12 #12
feature_vector=poi_feature_clustering(pos_poi_feature_vector_gdf,fields,n_clusters=n_clusters,feature_analysis=True)
# gpd2postSQL(feature_vector,table_name='pos_poi_feature_vector_{}'.format(n_clusters),myusername='postgres',mypassword='<PASSWORD>',mydatabase='streetscape_GSV')
|
[
"pandas.DataFrame",
"pickle.dump",
"matplotlib.pyplot.show",
"database.postSQL2gpd",
"numpy.zeros",
"geopandas.GeoDataFrame",
"sklearn.neighbors.NearestNeighbors",
"sklearn.preprocessing.normalize",
"pandas.Series",
"sklearn.cluster.AgglomerativeClustering",
"math.log",
"pandas.concat",
"yellowbrick.cluster.KElbowVisualizer"
] |
[((1137, 1154), 'numpy.zeros', 'np.zeros', (['poi_num'], {}), '(poi_num)\n', (1145, 1154), True, 'import numpy as np\n'), ((1230, 1282), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['geometry', 'frank_e', 'num']"}), "(columns=['geometry', 'frank_e', 'num'])\n", (1242, 1282), True, 'import pandas as pd\n'), ((3142, 3235), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (['pos_poi_idxes_df'], {'geometry': 'pos_poi_idxes_df.geometry', 'crs': 'position.crs'}), '(pos_poi_idxes_df, geometry=pos_poi_idxes_df.geometry, crs=\n position.crs)\n', (3158, 3235), True, 'import geopandas as gpd\n'), ((3326, 3437), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (['pos_poi_feature_vector_df'], {'geometry': 'pos_poi_feature_vector_df.geometry', 'crs': 'position.crs'}), '(pos_poi_feature_vector_df, geometry=\n pos_poi_feature_vector_df.geometry, crs=position.crs)\n', (3342, 3437), True, 'import geopandas as gpd\n'), ((4675, 4708), 'sklearn.preprocessing.normalize', 'normalize', (['X_'], {'axis': '(0)', 'norm': '"""max"""'}), "(X_, axis=0, norm='max')\n", (4684, 4708), False, 'from sklearn.preprocessing import normalize\n'), ((7480, 7636), 'database.postSQL2gpd', 'postSQL2gpd', ([], {'table_name': '"""pos_poi_feature_vector"""', 'geom_col': '"""geometry"""', 'myusername': '"""postgres"""', 'mypassword': '"""<PASSWORD>"""', 'mydatabase': '"""streetscape_GSV"""'}), "(table_name='pos_poi_feature_vector', geom_col='geometry',\n myusername='postgres', mypassword='<PASSWORD>', mydatabase=\n 'streetscape_GSV')\n", (7491, 7636), False, 'from database import postSQL2gpd, gpd2postSQL\n'), ((1975, 1988), 'math.log', 'math.log', (['num'], {}), '(num)\n', (1983, 1988), False, 'import pickle, math\n'), ((3504, 3532), 'pickle.dump', 'pickle.dump', (['pos_poi_dict', 'f'], {}), '(pos_poi_dict, f)\n', (3515, 3532), False, 'import pickle, math\n'), ((5153, 5183), 'pandas.DataFrame', 'pd.DataFrame', (['selector.scores_'], {}), '(selector.scores_)\n', (5165, 5183), True, 'import pandas as pd\n'), ((5202, 5233), 'pandas.DataFrame', 'pd.DataFrame', (['selector.pvalues_'], {}), '(selector.pvalues_)\n', (5214, 5233), True, 'import pandas as pd\n'), ((5254, 5274), 'pandas.DataFrame', 'pd.DataFrame', (['fields'], {}), '(fields)\n', (5266, 5274), True, 'import pandas as pd\n'), ((5301, 5352), 'pandas.concat', 'pd.concat', (['[dfcolumns, dfscores, dfpvalues]'], {'axis': '(1)'}), '([dfcolumns, dfscores, dfpvalues], axis=1)\n', (5310, 5352), True, 'import pandas as pd\n'), ((6055, 6065), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6063, 6065), True, 'import matplotlib.pyplot as plt\n'), ((6099, 6157), 'sklearn.cluster.AgglomerativeClustering', 'cluster.AgglomerativeClustering', ([], {'connectivity': 'connectivity'}), '(connectivity=connectivity)\n', (6130, 6157), False, 'from sklearn import cluster\n'), ((6203, 6275), 'yellowbrick.cluster.KElbowVisualizer', 'KElbowVisualizer', (['clustering_'], {'timings': '(False)', 'size': '(500, 500)', 'k': '(4, 12)'}), '(clustering_, timings=False, size=(500, 500), k=(4, 12))\n', (6219, 6275), False, 'from yellowbrick.cluster import KElbowVisualizer\n'), ((4322, 4347), 'pandas.Series', 'pd.Series', (['[row.x, row.y]'], {}), '([row.x, row.y])\n', (4331, 4347), True, 'import pandas as pd\n'), ((4464, 4518), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': '(9)', 'algorithm': '"""ball_tree"""'}), "(n_neighbors=9, algorithm='ball_tree')\n", (4480, 4518), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((4728, 4814), 
'sklearn.cluster.AgglomerativeClustering', 'cluster.AgglomerativeClustering', ([], {'connectivity': 'connectivity', 'n_clusters': 'n_clusters'}), '(connectivity=connectivity, n_clusters=\n n_clusters)\n', (4759, 4814), False, 'from sklearn import cluster\n'), ((1943, 1954), 'math.log', 'math.log', (['v'], {}), '(v)\n', (1951, 1954), False, 'import pickle, math\n')]
|
#!/usr/bin/env python
# Copyright 2019-2021 <NAME>
#
# This file is part of WarpX.
#
# License: BSD-3-Clause-LBNL
# This script tests the particle scraping for the embedded boundary in RZ.
# Particles are initialized between r=0.15 and r=0.2
# having a negative radial velocity.
# A cylindrical embedded surface is placed at r=0.1.
# Upon reaching the surface, particles should be removed.
# At the end of the simulation, i.e., at time step 37,
# there should be 512 particles left.
# In addition, the test checks the boundary scraping diagnostic
# by making sure that all removed particles are properly recorded.
# Possible errors: 0
# tolerance: 0
# Possible running time: < 1 s
import os
import sys
import numpy as np
from openpmd_viewer import OpenPMDTimeSeries
import yt
sys.path.insert(1, '../../../../warpx/Regression/Checksum/')
import checksumAPI
tolerance = 0
fn = sys.argv[1]
ds = yt.load( fn )
ad = ds.all_data()
x = ad['electron', 'particle_position_x'].v
error = len(x)-512
print('error = ', error)
print('tolerance = ', tolerance)
assert(error==tolerance)
# Check that all the removed particles are properly recorded
# by making sure that, at each iteration, the sum of the number of
# remaining particles and scraped particles is equal to the
# original number of particles
ts_full = OpenPMDTimeSeries('./diags/diag2/')
ts_scraping = OpenPMDTimeSeries('./diags/diag3/')
def n_remaining_particles( iteration ):
w, = ts_full.get_particle(['w'], iteration=iteration)
return len(w)
def n_scraped_particles( iteration ):
    timestamp, = ts_scraping.get_particle( ['timestamp'] )
return (timestamp <= iteration).sum()
n_remaining = np.array([ n_remaining_particles(iteration) for iteration in ts_full.iterations ])
n_scraped = np.array([ n_scraped_particles(iteration) for iteration in ts_full.iterations ])
n_total = n_remaining[0]
assert np.all( n_scraped+n_remaining == n_total)
# Checksum test
test_name = os.path.split(os.getcwd())[1]
checksumAPI.evaluate_checksum(test_name, fn, do_particles=False)
|
[
"openpmd_viewer.OpenPMDTimeSeries",
"os.getcwd",
"sys.path.insert",
"checksumAPI.evaluate_checksum",
"yt.load",
"numpy.all"
] |
[((782, 842), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../../../../warpx/Regression/Checksum/"""'], {}), "(1, '../../../../warpx/Regression/Checksum/')\n", (797, 842), False, 'import sys\n'), ((900, 911), 'yt.load', 'yt.load', (['fn'], {}), '(fn)\n', (907, 911), False, 'import yt\n'), ((1310, 1345), 'openpmd_viewer.OpenPMDTimeSeries', 'OpenPMDTimeSeries', (['"""./diags/diag2/"""'], {}), "('./diags/diag2/')\n", (1327, 1345), False, 'from openpmd_viewer import OpenPMDTimeSeries\n'), ((1360, 1395), 'openpmd_viewer.OpenPMDTimeSeries', 'OpenPMDTimeSeries', (['"""./diags/diag3/"""'], {}), "('./diags/diag3/')\n", (1377, 1395), False, 'from openpmd_viewer import OpenPMDTimeSeries\n'), ((1873, 1915), 'numpy.all', 'np.all', (['(n_scraped + n_remaining == n_total)'], {}), '(n_scraped + n_remaining == n_total)\n', (1879, 1915), True, 'import numpy as np\n'), ((1974, 2038), 'checksumAPI.evaluate_checksum', 'checksumAPI.evaluate_checksum', (['test_name', 'fn'], {'do_particles': '(False)'}), '(test_name, fn, do_particles=False)\n', (2003, 2038), False, 'import checksumAPI\n'), ((1958, 1969), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1967, 1969), False, 'import os\n')]
|
from __future__ import division, print_function
import numpy as np
from scipy import signal as sig
from matplotlib import pyplot as plt
import seaborn as sns
"""https://stackoverflow.com/questions/56551114/fully-monotone-interpolation-in-python """
# see also
# https://en.wikipedia.org/wiki/Monotone-spline aka I-spline
# https://scikit-learn.org/stable/modules/isotonic.html
# denis 2 March 2020
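# A hedged sketch of the scikit-learn alternative referenced above: IsotonicRegression fits a
# monotone (step-wise) curve by isotonic regression, rather than the smoothed-gradient fit below.
#   from sklearn.isotonic import IsotonicRegression
#   y_iso = IsotonicRegression(increasing=True).fit_transform(np.arange(len(y)), y)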
def butter_filtfilt( x, Wn=0.5, axis=0 ):
""" butter( 2, Wn ), filtfilt
axis 0 each col, -1 each row
"""
b, a = sig.butter( N=2, Wn=Wn )
return sig.filtfilt( b, a, x, axis=axis, method="gust" )
# twice, forward backward
def ints( x ):
return x.round().astype(int)
def minavmax( x ):
return "min av max %.3g %.3g %.3g" % (
x.min(), x.mean(), x.max() )
def pvec( x ):
n = len(x) // 25 * 25
return "%s \n%s \n" % (
minavmax( x ),
ints( x[ - n : ]) .reshape( -1, 25 ))
#...............................................................................
def monofit( y, Wn=0.1 ):
""" monotone-increasing curve fit """
y = np.asarray(y).squeeze()
# print( "\n{ monofit: y %d %s Wn %.3g " % (
# len(y), minavmax( y ), Wn ))
ygrad = np.gradient( y )
# print( "grad y:", pvec( ygrad ))
# lowpass filter --
gradsmooth = butter_filtfilt( ygrad, Wn=Wn )
# print( "gradsmooth:", pvec( gradsmooth ))
ge0 = np.fmax( gradsmooth, 0 )
ymono = np.cumsum( ge0 ) # integrate, sensitive to first few
ymono += (y - ymono).mean()
err = y - ymono
# print( "y - ymono:", pvec( err ))
errstr = "average |y - monofit|: %.2g" % np.abs( err ).mean()
# print( errstr )
# print( "} \n" )
return ymono, err, errstr
#...............................................................................
if __name__ == "__main__":
import sys
# np.set_printoptions( threshold=20, edgeitems=15, linewidth=120,
# formatter = dict( float = lambda x: "%.2g" % x ))
# float arrays %.2g
# print( 80 * "=" )
thispy = sys.argv[0]
infile = sys.argv[1] if len(sys.argv) > 1 \
else "so-mono.txt"
Wn = 0.1
params = "%s %s Wn %g " % (thispy, infile, Wn)
# print( params )
y = np.array([0.1109157119023644, 0.20187393816931934, 0.14466318670239758,
0.16535159414166822, 0.05452708697483864, 0.2153046237959556,
0.2200300476272603, 0.21012762463269324, 0.15947100322395022,
0.2819691842129948, 0.15567770052985092, 0.24850595803020692,
0.1329341593280457, 0.15595107081606913, 0.3232021121832229,
0.23707961921686588, 0.2415887076540357, 0.32363506549779797,
0.3584089204036798, 0.29232772580068433, 0.22145994836140775,
0.22797587985241133, 0.2717787840603025, 0.3245255944762287,
0.29301098282789195, 0.32417076823344143, 0.3450906550996232,
0.34272097408024904, 0.3868714875012437, 0.41876692320045755,
0.3544198724867363, 0.33073960954801895, 0.3921033666371904,
0.33349050060172974, 0.3608862044547096, 0.37375822841635425,
0.5396399750708429, 0.4209201143798284, 0.42004773793166883,
0.5217725632679073, 0.5911731474218788, 0.43389609315065386,
0.4287288396176006, 0.43007525393257007, 0.5687062142675405,
0.6030811498722173, 0.5292225577714743, 0.47710974351051355,
0.6182720730381119, 0.6241033581931327, 0.6236788197617511,
0.6643161356364049, 0.5577616524049582, 0.6888440258481371,
0.6867893120660341, 0.6685257606057502, 0.599481675493677,
0.7309075091448749, 0.7644365338580481, 0.6176797601816733,
0.6751467827192018, 0.6452178017908761, 0.6684778262246701,
0.7003380077556168, 0.667035916425416, 0.8434451759113093,
0.8419343615815968, 0.8657695361433773, 0.7392487161484605,
0.8773282098364621, 0.8265679895117846, 0.7246599961191632,
0.7251899061730714, 0.9271640780410231, 0.9180581424305536,
0.8099033021701689, 0.8268585329594615, 0.8519967080830176,
0.8711231413093845, 0.8689802343798663, 0.8299523829217353,
1.0057741699770046, 0.8538130788729608, 0.9662784297225102,
1.023419780920539, 0.913146849759822, 0.9900885996579213,
0.8740638988529978, 0.8900285618419457, 0.9065474574434158,
1.0749522597307315, 1.0319120938258166, 1.0051369663172995,
0.9893558841613622, 1.051384986916457, 1.0327996870915341,
1.0945543972861898, 0.9716604944496021, 1.1490370559566179,
1.1379231481207432, 1.6836433783615088, 1.8162068766097395,
2.072155286917785, 2.0395966998366, 2.191064589600466,
2.1581974932543617, 2.163403843819597, 2.133441151300847,
2.1726053994136922, 2.1157865673629526, 2.2249636455682866,
2.2313062166802147, 2.1731708496472764, 2.315203950110816,
2.1601242661726827, 2.174940281421225, 2.2653635413275945,
2.337227057574145, 2.3645767548381618, 2.3084919291392527,
2.314014515926446, 2.25166717296155, 2.2621157708115778,
2.2644578546265586, 2.313504860292943, 2.398969190357051,
2.309443951779675, 2.278946047410807, 2.4080802287121146,
2.353652872018618, 2.35527529074088, 2.4233001060410784,
2.428767198055608, 2.35677123091093, 2.497135132404064,
2.3978099128437282, 2.3970802609341972, 2.4967434818740024,
2.511209192435555, 2.541001050440798, 2.5760248002036525,
2.5960512284192245, 2.4778408861721037, 2.5757724103530046,
2.631148267999664, 2.538327346218921, 2.4878734713248507,
2.6133797275761066, 2.6282561527857395, 2.6150327104952447,
3.102757164382848, 3.3318503012160905, 3.3907776288198193,
3.6065313558941936, 3.601180295875859, 3.560491539319038,
3.650095006265445, 3.574812155815713, 3.686227315374108,
3.6338261415040867, 3.5661194785086288, 3.5747332336054645,
3.560674343726918, 3.5678550481603635, 3.5342848534390967,
3.4929538312485913, 3.564544653619436, 3.6861775399566126,
3.6390300636595216, 3.6656336332413666, 3.5731185631923945,
3.5965520044069854, 3.537434489989021, 3.5590937423870144,
3.5331656424410083, 3.640652819618705, 3.5971240740252126,
3.641793843012055, 3.6064014089254295, 3.530378938786505,
3.613631139461306, 3.519542268056021, 3.5416251524576,
3.524789618934195, 3.5519951806099512, 3.6435695455293975,
3.6825670484650863, 3.5993379768209217, 3.628367553897596,
3.633290480934276, 3.5772841681579535, 3.602326323397947,
3.518180278272883, 3.531054006706696, 3.5566645495066167,
3.5410992153240985, 3.630762839301216, 3.5924649123201053,
3.646230633817883, 3.568290612034935, 3.638356129262967,
3.566083243271712, 3.6064978645771797, 3.4942864293427633,
3.595438454812999, 3.681726879126678, 3.6501308156903463,
3.5490717955938593, 3.598535359345363, 3.6328331698421654,
3.595159538698094, 3.556715819008055, 3.6292942886764554,
3.6362895697392856, 3.5965220100874093, 3.6103542985016266,
3.5715010140382493, 3.658769915445062, 3.5939686395400416,
3.4974461928859917, 3.5232691556732267, 3.6145687814416614,
3.5682054018341005, 3.648937250575395, 3.4912089018613384,
3.522426560340423, 3.6757968409374637, 3.651348691084845,
3.5395070091675973, 3.5306275536360383, 3.6153498246329883,
3.599762785949876, 3.5351931286962333, 3.6488316987683054,
3.5198301490992963, 3.5696570079786687, 3.561553836008927,
3.5659475947331423, 3.553147100256108, 3.5475591872743664,
3.6097226797553317, 3.6849600324757934, 3.5264731043844413,
3.506658609738451, 3.5535775980874114, 3.5487291053913554,
3.570651383823912, 3.552993371839188, 3.5054297764661846,
3.5723024888238792])
ymono, err, errstr = monofit( y, Wn=Wn )
if 1:
sns.set_style("whitegrid")
fig, ax = plt.subplots( figsize=[10, 5] )
plt.subplots_adjust( left=.05, right=.99, bottom=.05, top=.90 )
fig.suptitle(
"Easy monotone curve fit: np.gradient | lowpass filter | clip < 0 | integrate \n"
+ errstr, multialignment="left" )
ax.plot( ymono, color="orangered" )
j = np.where( ymono < y )[0]
xax = np.arange( len(y) )
plt.vlines( xax[j], ymono[j], y[j], color="blue", lw=1 )
j = np.where( ymono > y )[0]
plt.vlines( xax[j], y[j], ymono[j], color="blue", lw=1 )
# png = thispy.replace( ".py", ".png" )
# print( "writing", png )
# plt.savefig( png )
plt.show()
|
[
"seaborn.set_style",
"numpy.fmax",
"matplotlib.pyplot.show",
"numpy.abs",
"scipy.signal.filtfilt",
"numpy.asarray",
"matplotlib.pyplot.vlines",
"numpy.cumsum",
"numpy.where",
"numpy.array",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.subplots",
"scipy.signal.butter",
"numpy.gradient"
] |
[((533, 555), 'scipy.signal.butter', 'sig.butter', ([], {'N': '(2)', 'Wn': 'Wn'}), '(N=2, Wn=Wn)\n', (543, 555), True, 'from scipy import signal as sig\n'), ((569, 616), 'scipy.signal.filtfilt', 'sig.filtfilt', (['b', 'a', 'x'], {'axis': 'axis', 'method': '"""gust"""'}), "(b, a, x, axis=axis, method='gust')\n", (581, 616), True, 'from scipy import signal as sig\n'), ((1224, 1238), 'numpy.gradient', 'np.gradient', (['y'], {}), '(y)\n', (1235, 1238), True, 'import numpy as np\n'), ((1417, 1439), 'numpy.fmax', 'np.fmax', (['gradsmooth', '(0)'], {}), '(gradsmooth, 0)\n', (1424, 1439), True, 'import numpy as np\n'), ((1455, 1469), 'numpy.cumsum', 'np.cumsum', (['ge0'], {}), '(ge0)\n', (1464, 1469), True, 'import numpy as np\n'), ((2247, 7621), 'numpy.array', 'np.array', (['[0.1109157119023644, 0.20187393816931934, 0.14466318670239758, \n 0.16535159414166822, 0.05452708697483864, 0.2153046237959556, \n 0.2200300476272603, 0.21012762463269324, 0.15947100322395022, \n 0.2819691842129948, 0.15567770052985092, 0.24850595803020692, \n 0.1329341593280457, 0.15595107081606913, 0.3232021121832229, \n 0.23707961921686588, 0.2415887076540357, 0.32363506549779797, \n 0.3584089204036798, 0.29232772580068433, 0.22145994836140775, \n 0.22797587985241133, 0.2717787840603025, 0.3245255944762287, \n 0.29301098282789195, 0.32417076823344143, 0.3450906550996232, \n 0.34272097408024904, 0.3868714875012437, 0.41876692320045755, \n 0.3544198724867363, 0.33073960954801895, 0.3921033666371904, \n 0.33349050060172974, 0.3608862044547096, 0.37375822841635425, \n 0.5396399750708429, 0.4209201143798284, 0.42004773793166883, \n 0.5217725632679073, 0.5911731474218788, 0.43389609315065386, \n 0.4287288396176006, 0.43007525393257007, 0.5687062142675405, \n 0.6030811498722173, 0.5292225577714743, 0.47710974351051355, \n 0.6182720730381119, 0.6241033581931327, 0.6236788197617511, \n 0.6643161356364049, 0.5577616524049582, 0.6888440258481371, \n 0.6867893120660341, 0.6685257606057502, 0.599481675493677, \n 0.7309075091448749, 0.7644365338580481, 0.6176797601816733, \n 0.6751467827192018, 0.6452178017908761, 0.6684778262246701, \n 0.7003380077556168, 0.667035916425416, 0.8434451759113093, \n 0.8419343615815968, 0.8657695361433773, 0.7392487161484605, \n 0.8773282098364621, 0.8265679895117846, 0.7246599961191632, \n 0.7251899061730714, 0.9271640780410231, 0.9180581424305536, \n 0.8099033021701689, 0.8268585329594615, 0.8519967080830176, \n 0.8711231413093845, 0.8689802343798663, 0.8299523829217353, \n 1.0057741699770046, 0.8538130788729608, 0.9662784297225102, \n 1.023419780920539, 0.913146849759822, 0.9900885996579213, \n 0.8740638988529978, 0.8900285618419457, 0.9065474574434158, \n 1.0749522597307315, 1.0319120938258166, 1.0051369663172995, \n 0.9893558841613622, 1.051384986916457, 1.0327996870915341, \n 1.0945543972861898, 0.9716604944496021, 1.1490370559566179, \n 1.1379231481207432, 1.6836433783615088, 1.8162068766097395, \n 2.072155286917785, 2.0395966998366, 2.191064589600466, \n 2.1581974932543617, 2.163403843819597, 2.133441151300847, \n 2.1726053994136922, 2.1157865673629526, 2.2249636455682866, \n 2.2313062166802147, 2.1731708496472764, 2.315203950110816, \n 2.1601242661726827, 2.174940281421225, 2.2653635413275945, \n 2.337227057574145, 2.3645767548381618, 2.3084919291392527, \n 2.314014515926446, 2.25166717296155, 2.2621157708115778, \n 2.2644578546265586, 2.313504860292943, 2.398969190357051, \n 2.309443951779675, 2.278946047410807, 2.4080802287121146, \n 2.353652872018618, 2.35527529074088, 2.4233001060410784, \n 
2.428767198055608, 2.35677123091093, 2.497135132404064, \n 2.3978099128437282, 2.3970802609341972, 2.4967434818740024, \n 2.511209192435555, 2.541001050440798, 2.5760248002036525, \n 2.5960512284192245, 2.4778408861721037, 2.5757724103530046, \n 2.631148267999664, 2.538327346218921, 2.4878734713248507, \n 2.6133797275761066, 2.6282561527857395, 2.6150327104952447, \n 3.102757164382848, 3.3318503012160905, 3.3907776288198193, \n 3.6065313558941936, 3.601180295875859, 3.560491539319038, \n 3.650095006265445, 3.574812155815713, 3.686227315374108, \n 3.6338261415040867, 3.5661194785086288, 3.5747332336054645, \n 3.560674343726918, 3.5678550481603635, 3.5342848534390967, \n 3.4929538312485913, 3.564544653619436, 3.6861775399566126, \n 3.6390300636595216, 3.6656336332413666, 3.5731185631923945, \n 3.5965520044069854, 3.537434489989021, 3.5590937423870144, \n 3.5331656424410083, 3.640652819618705, 3.5971240740252126, \n 3.641793843012055, 3.6064014089254295, 3.530378938786505, \n 3.613631139461306, 3.519542268056021, 3.5416251524576, \n 3.524789618934195, 3.5519951806099512, 3.6435695455293975, \n 3.6825670484650863, 3.5993379768209217, 3.628367553897596, \n 3.633290480934276, 3.5772841681579535, 3.602326323397947, \n 3.518180278272883, 3.531054006706696, 3.5566645495066167, \n 3.5410992153240985, 3.630762839301216, 3.5924649123201053, \n 3.646230633817883, 3.568290612034935, 3.638356129262967, \n 3.566083243271712, 3.6064978645771797, 3.4942864293427633, \n 3.595438454812999, 3.681726879126678, 3.6501308156903463, \n 3.5490717955938593, 3.598535359345363, 3.6328331698421654, \n 3.595159538698094, 3.556715819008055, 3.6292942886764554, \n 3.6362895697392856, 3.5965220100874093, 3.6103542985016266, \n 3.5715010140382493, 3.658769915445062, 3.5939686395400416, \n 3.4974461928859917, 3.5232691556732267, 3.6145687814416614, \n 3.5682054018341005, 3.648937250575395, 3.4912089018613384, \n 3.522426560340423, 3.6757968409374637, 3.651348691084845, \n 3.5395070091675973, 3.5306275536360383, 3.6153498246329883, \n 3.599762785949876, 3.5351931286962333, 3.6488316987683054, \n 3.5198301490992963, 3.5696570079786687, 3.561553836008927, \n 3.5659475947331423, 3.553147100256108, 3.5475591872743664, \n 3.6097226797553317, 3.6849600324757934, 3.5264731043844413, \n 3.506658609738451, 3.5535775980874114, 3.5487291053913554, \n 3.570651383823912, 3.552993371839188, 3.5054297764661846, \n 3.5723024888238792]'], {}), '([0.1109157119023644, 0.20187393816931934, 0.14466318670239758, \n 0.16535159414166822, 0.05452708697483864, 0.2153046237959556, \n 0.2200300476272603, 0.21012762463269324, 0.15947100322395022, \n 0.2819691842129948, 0.15567770052985092, 0.24850595803020692, \n 0.1329341593280457, 0.15595107081606913, 0.3232021121832229, \n 0.23707961921686588, 0.2415887076540357, 0.32363506549779797, \n 0.3584089204036798, 0.29232772580068433, 0.22145994836140775, \n 0.22797587985241133, 0.2717787840603025, 0.3245255944762287, \n 0.29301098282789195, 0.32417076823344143, 0.3450906550996232, \n 0.34272097408024904, 0.3868714875012437, 0.41876692320045755, \n 0.3544198724867363, 0.33073960954801895, 0.3921033666371904, \n 0.33349050060172974, 0.3608862044547096, 0.37375822841635425, \n 0.5396399750708429, 0.4209201143798284, 0.42004773793166883, \n 0.5217725632679073, 0.5911731474218788, 0.43389609315065386, \n 0.4287288396176006, 0.43007525393257007, 0.5687062142675405, \n 0.6030811498722173, 0.5292225577714743, 0.47710974351051355, \n 0.6182720730381119, 0.6241033581931327, 0.6236788197617511, \n 0.6643161356364049, 
0.5577616524049582, 0.6888440258481371, \n 0.6867893120660341, 0.6685257606057502, 0.599481675493677, \n 0.7309075091448749, 0.7644365338580481, 0.6176797601816733, \n 0.6751467827192018, 0.6452178017908761, 0.6684778262246701, \n 0.7003380077556168, 0.667035916425416, 0.8434451759113093, \n 0.8419343615815968, 0.8657695361433773, 0.7392487161484605, \n 0.8773282098364621, 0.8265679895117846, 0.7246599961191632, \n 0.7251899061730714, 0.9271640780410231, 0.9180581424305536, \n 0.8099033021701689, 0.8268585329594615, 0.8519967080830176, \n 0.8711231413093845, 0.8689802343798663, 0.8299523829217353, \n 1.0057741699770046, 0.8538130788729608, 0.9662784297225102, \n 1.023419780920539, 0.913146849759822, 0.9900885996579213, \n 0.8740638988529978, 0.8900285618419457, 0.9065474574434158, \n 1.0749522597307315, 1.0319120938258166, 1.0051369663172995, \n 0.9893558841613622, 1.051384986916457, 1.0327996870915341, \n 1.0945543972861898, 0.9716604944496021, 1.1490370559566179, \n 1.1379231481207432, 1.6836433783615088, 1.8162068766097395, \n 2.072155286917785, 2.0395966998366, 2.191064589600466, \n 2.1581974932543617, 2.163403843819597, 2.133441151300847, \n 2.1726053994136922, 2.1157865673629526, 2.2249636455682866, \n 2.2313062166802147, 2.1731708496472764, 2.315203950110816, \n 2.1601242661726827, 2.174940281421225, 2.2653635413275945, \n 2.337227057574145, 2.3645767548381618, 2.3084919291392527, \n 2.314014515926446, 2.25166717296155, 2.2621157708115778, \n 2.2644578546265586, 2.313504860292943, 2.398969190357051, \n 2.309443951779675, 2.278946047410807, 2.4080802287121146, \n 2.353652872018618, 2.35527529074088, 2.4233001060410784, \n 2.428767198055608, 2.35677123091093, 2.497135132404064, \n 2.3978099128437282, 2.3970802609341972, 2.4967434818740024, \n 2.511209192435555, 2.541001050440798, 2.5760248002036525, \n 2.5960512284192245, 2.4778408861721037, 2.5757724103530046, \n 2.631148267999664, 2.538327346218921, 2.4878734713248507, \n 2.6133797275761066, 2.6282561527857395, 2.6150327104952447, \n 3.102757164382848, 3.3318503012160905, 3.3907776288198193, \n 3.6065313558941936, 3.601180295875859, 3.560491539319038, \n 3.650095006265445, 3.574812155815713, 3.686227315374108, \n 3.6338261415040867, 3.5661194785086288, 3.5747332336054645, \n 3.560674343726918, 3.5678550481603635, 3.5342848534390967, \n 3.4929538312485913, 3.564544653619436, 3.6861775399566126, \n 3.6390300636595216, 3.6656336332413666, 3.5731185631923945, \n 3.5965520044069854, 3.537434489989021, 3.5590937423870144, \n 3.5331656424410083, 3.640652819618705, 3.5971240740252126, \n 3.641793843012055, 3.6064014089254295, 3.530378938786505, \n 3.613631139461306, 3.519542268056021, 3.5416251524576, \n 3.524789618934195, 3.5519951806099512, 3.6435695455293975, \n 3.6825670484650863, 3.5993379768209217, 3.628367553897596, \n 3.633290480934276, 3.5772841681579535, 3.602326323397947, \n 3.518180278272883, 3.531054006706696, 3.5566645495066167, \n 3.5410992153240985, 3.630762839301216, 3.5924649123201053, \n 3.646230633817883, 3.568290612034935, 3.638356129262967, \n 3.566083243271712, 3.6064978645771797, 3.4942864293427633, \n 3.595438454812999, 3.681726879126678, 3.6501308156903463, \n 3.5490717955938593, 3.598535359345363, 3.6328331698421654, \n 3.595159538698094, 3.556715819008055, 3.6292942886764554, \n 3.6362895697392856, 3.5965220100874093, 3.6103542985016266, \n 3.5715010140382493, 3.658769915445062, 3.5939686395400416, \n 3.4974461928859917, 3.5232691556732267, 3.6145687814416614, \n 3.5682054018341005, 3.648937250575395, 
3.4912089018613384, \n 3.522426560340423, 3.6757968409374637, 3.651348691084845, \n 3.5395070091675973, 3.5306275536360383, 3.6153498246329883, \n 3.599762785949876, 3.5351931286962333, 3.6488316987683054, \n 3.5198301490992963, 3.5696570079786687, 3.561553836008927, \n 3.5659475947331423, 3.553147100256108, 3.5475591872743664, \n 3.6097226797553317, 3.6849600324757934, 3.5264731043844413, \n 3.506658609738451, 3.5535775980874114, 3.5487291053913554, \n 3.570651383823912, 3.552993371839188, 3.5054297764661846, \n 3.5723024888238792])\n', (2255, 7621), True, 'import numpy as np\n'), ((8019, 8045), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (8032, 8045), True, 'import seaborn as sns\n'), ((8064, 8093), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '[10, 5]'}), '(figsize=[10, 5])\n', (8076, 8093), True, 'from matplotlib import pyplot as plt\n'), ((8104, 8168), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.05)', 'right': '(0.99)', 'bottom': '(0.05)', 'top': '(0.9)'}), '(left=0.05, right=0.99, bottom=0.05, top=0.9)\n', (8123, 8168), True, 'from matplotlib import pyplot as plt\n'), ((8442, 8496), 'matplotlib.pyplot.vlines', 'plt.vlines', (['xax[j]', 'ymono[j]', 'y[j]'], {'color': '"""blue"""', 'lw': '(1)'}), "(xax[j], ymono[j], y[j], color='blue', lw=1)\n", (8452, 8496), True, 'from matplotlib import pyplot as plt\n'), ((8544, 8598), 'matplotlib.pyplot.vlines', 'plt.vlines', (['xax[j]', 'y[j]', 'ymono[j]'], {'color': '"""blue"""', 'lw': '(1)'}), "(xax[j], y[j], ymono[j], color='blue', lw=1)\n", (8554, 8598), True, 'from matplotlib import pyplot as plt\n'), ((8721, 8731), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8729, 8731), True, 'from matplotlib import pyplot as plt\n'), ((1099, 1112), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (1109, 1112), True, 'import numpy as np\n'), ((8375, 8394), 'numpy.where', 'np.where', (['(ymono < y)'], {}), '(ymono < y)\n', (8383, 8394), True, 'import numpy as np\n'), ((8511, 8530), 'numpy.where', 'np.where', (['(ymono > y)'], {}), '(ymono > y)\n', (8519, 8530), True, 'import numpy as np\n'), ((1647, 1658), 'numpy.abs', 'np.abs', (['err'], {}), '(err)\n', (1653, 1658), True, 'import numpy as np\n')]
|
import numpy
from matplotlib import pyplot
import chaospy
pyplot.rc("figure", figsize=[3, 2])
COLOR1 = "steelblue"
COLOR2 = "slategray"
def save(name):
pyplot.axis("off")
pyplot.savefig(
f"./{name}.png",
bbox_inches="tight",
transparent=True,
)
pyplot.clf()
def make_distribution():
t = numpy.linspace(-1, 1, 100)
dist = chaospy.Normal(0, 0.5)
pyplot.fill_between(t, 0, dist.pdf(t), alpha=0.3, color=COLOR1)
pyplot.plot(t, dist.pdf(t), COLOR1, lw=4)
pyplot.fill_between(t, 0, dist.cdf(t), alpha=0.3, color=COLOR2)
pyplot.plot(t, dist.cdf(t), COLOR2, lw=4)
save("distribution")
def make_polynomial():
q0 = chaospy.variable()
poly = 1.2*q0*(q0-1.8)*(q0+1.8)
t = numpy.linspace(-2, 2, 100)
t0 = numpy.linspace(-2, 0, 100)
pyplot.fill_between(t0, 0, poly(t0), alpha=0.3, color=COLOR1)
pyplot.plot(t0, poly(t0), COLOR1, lw=4)
t0 = numpy.linspace(0, 2, 100)
pyplot.fill_between(t0, poly(t0), 0, alpha=0.3, color=COLOR2)
pyplot.plot(t0, poly(t0), COLOR2, lw=4)
save("polynomial")
def make_sampling():
dist = chaospy.Iid(chaospy.Uniform(0, 1), 2)
samples = dist.sample(20, rule="sobol")
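# low-discrepancy Sobol samples; even- and odd-indexed columns are drawn in the two colors below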
size = 80
pyplot.scatter(*samples[:, ::2], s=size, lw=3, color="w", edgecolors=COLOR1)
pyplot.scatter(*samples[:, ::2], s=size, color=COLOR1, alpha=0.6)
pyplot.scatter(*samples[:, 1::2], s=size, lw=3, color="w", edgecolor=COLOR2)
pyplot.scatter(*samples[:, 1::2], s=size, color=COLOR2, alpha=0.6)
save("sampling")
def make_quadrature():
dist = chaospy.Iid(chaospy.Uniform(0, 1), 2)
nodes, weights = chaospy.generate_quadrature(2, dist, growth=True, rule="fejer", sparse=True)
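# the sparse (Smolyak) Fejer rule can yield negative weights; those nodes are drawn separately below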
size = (weights*500).astype(int)
indices = weights < 0
pyplot.scatter(*nodes[:, indices], s=-size[indices], lw=3, color="w", edgecolors=COLOR2)
pyplot.scatter(*nodes[:, indices], s=-size[indices], color=COLOR2, alpha=0.6)
pyplot.scatter(*nodes[:, ~indices], s=size[~indices], lw=3, color="w", edgecolor=COLOR1)
pyplot.scatter(*nodes[:, ~indices], s=size[~indices], color=COLOR1, alpha=0.6)
save("quadrature")
def make_orthogonality():
t = numpy.linspace(-2, 2, 200)
q0 = chaospy.variable()
poly1 = (q0-1.2)*(q0+1.2)
poly2 = -(q0-1.2)*(q0+1.2)
t0 = numpy.linspace(-2, -1.2)
pyplot.fill_between(t0, poly1(t0), poly2(t0), color=COLOR1, alpha=0.3)
t0 = numpy.linspace(1.2, 2)
pyplot.fill_between(t0, poly1(t0), poly2(t0), color=COLOR1, alpha=0.3)
pyplot.plot(t, poly1(t), COLOR1, lw=4)
t0 = numpy.linspace(-1.2, 1.2)
pyplot.fill_between(t, poly1(t), poly2(t), color=COLOR2, alpha=0.3)
pyplot.plot(t, poly2(t), COLOR2, lw=4)
save("orthogonality")
def make_recurrence():
dist = chaospy.Iid(chaospy.Uniform(0, 1), 2)
samples1 = numpy.array([[0, 1, 2, 3], [0, 1, 2, 3]])
samples2 = numpy.array([[0, 0, 0, 1, 1, 2], [1, 2, 3, 2, 3, 3]])
size = 100
pyplot.plot([.16, .84], [2, 2], COLOR2, lw=4)
pyplot.plot([.16, .84], [3, 3], COLOR2, lw=4)
pyplot.plot([1.16, 1.84], [3, 3], COLOR2, lw=4)
pyplot.scatter(*samples1, s=size, lw=3, color="w", edgecolors=COLOR1)
pyplot.scatter(*samples1, s=size, color=COLOR1, alpha=0.6)
pyplot.scatter(*samples2, s=size, lw=3, color="w", edgecolor=COLOR2)
pyplot.scatter(*samples2, s=size, color=COLOR2, alpha=0.6)
save("recurrence")
def make_descriptive():
numpy.random.seed(1234)
dist1 = chaospy.Normal(0, 1)
samples1 = dist1.sample(40)
dist2 = chaospy.Exponential()
samples2 = dist2.sample(20)
x = y = numpy.linspace(0, 2*numpy.pi, 200)
x, y = numpy.cos(x), numpy.sin(y)
pyplot.pie([0.5], colors=[COLOR1], radius=1, normalize=False,
center=(-0.3, 0.3), startangle=45,
wedgeprops={"width": 0.5, "alpha": 0.5, "lw": 4})
pyplot.plot(x-0.3, y+0.3, COLOR1, lw=4)
pyplot.plot(x/2-0.3, y/2+0.3, COLOR1, lw=4)
pyplot.bar([0, 0.6], [0.5, 1], bottom=[-0.6, -0.6],
width=0.5, yerr=[0.2, 0.3], color=COLOR2)
save("descriptive")
if __name__ == "__main__":
make_distribution()
make_polynomial()
make_sampling()
make_quadrature()
make_orthogonality()
make_recurrence()
make_descriptive()
|
[
"numpy.random.seed",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.scatter",
"chaospy.Exponential",
"matplotlib.pyplot.bar",
"chaospy.Uniform",
"matplotlib.pyplot.axis",
"chaospy.variable",
"numpy.sin",
"numpy.array",
"matplotlib.pyplot.rc",
"numpy.linspace",
"chaospy.generate_quadrature",
"matplotlib.pyplot.pie",
"numpy.cos",
"chaospy.Normal",
"matplotlib.pyplot.savefig"
] |
[((59, 94), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""figure"""'], {'figsize': '[3, 2]'}), "('figure', figsize=[3, 2])\n", (68, 94), False, 'from matplotlib import pyplot\n'), ((160, 178), 'matplotlib.pyplot.axis', 'pyplot.axis', (['"""off"""'], {}), "('off')\n", (171, 178), False, 'from matplotlib import pyplot\n'), ((183, 253), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['f"""./{name}.png"""'], {'bbox_inches': '"""tight"""', 'transparent': '(True)'}), "(f'./{name}.png', bbox_inches='tight', transparent=True)\n", (197, 253), False, 'from matplotlib import pyplot\n'), ((289, 301), 'matplotlib.pyplot.clf', 'pyplot.clf', ([], {}), '()\n', (299, 301), False, 'from matplotlib import pyplot\n'), ((338, 364), 'numpy.linspace', 'numpy.linspace', (['(-1)', '(1)', '(100)'], {}), '(-1, 1, 100)\n', (352, 364), False, 'import numpy\n'), ((376, 398), 'chaospy.Normal', 'chaospy.Normal', (['(0)', '(0.5)'], {}), '(0, 0.5)\n', (390, 398), False, 'import chaospy\n'), ((689, 707), 'chaospy.variable', 'chaospy.variable', ([], {}), '()\n', (705, 707), False, 'import chaospy\n'), ((752, 778), 'numpy.linspace', 'numpy.linspace', (['(-2)', '(2)', '(100)'], {}), '(-2, 2, 100)\n', (766, 778), False, 'import numpy\n'), ((789, 815), 'numpy.linspace', 'numpy.linspace', (['(-2)', '(0)', '(100)'], {}), '(-2, 0, 100)\n', (803, 815), False, 'import numpy\n'), ((936, 961), 'numpy.linspace', 'numpy.linspace', (['(0)', '(2)', '(100)'], {}), '(0, 2, 100)\n', (950, 961), False, 'import numpy\n'), ((1231, 1307), 'matplotlib.pyplot.scatter', 'pyplot.scatter', (['*samples[:, ::2]'], {'s': 'size', 'lw': '(3)', 'color': '"""w"""', 'edgecolors': 'COLOR1'}), "(*samples[:, ::2], s=size, lw=3, color='w', edgecolors=COLOR1)\n", (1245, 1307), False, 'from matplotlib import pyplot\n'), ((1312, 1377), 'matplotlib.pyplot.scatter', 'pyplot.scatter', (['*samples[:, ::2]'], {'s': 'size', 'color': 'COLOR1', 'alpha': '(0.6)'}), '(*samples[:, ::2], s=size, color=COLOR1, alpha=0.6)\n', (1326, 1377), False, 'from matplotlib import pyplot\n'), ((1382, 1458), 'matplotlib.pyplot.scatter', 'pyplot.scatter', (['*samples[:, 1::2]'], {'s': 'size', 'lw': '(3)', 'color': '"""w"""', 'edgecolor': 'COLOR2'}), "(*samples[:, 1::2], s=size, lw=3, color='w', edgecolor=COLOR2)\n", (1396, 1458), False, 'from matplotlib import pyplot\n'), ((1463, 1529), 'matplotlib.pyplot.scatter', 'pyplot.scatter', (['*samples[:, 1::2]'], {'s': 'size', 'color': 'COLOR2', 'alpha': '(0.6)'}), '(*samples[:, 1::2], s=size, color=COLOR2, alpha=0.6)\n', (1477, 1529), False, 'from matplotlib import pyplot\n'), ((1648, 1724), 'chaospy.generate_quadrature', 'chaospy.generate_quadrature', (['(2)', 'dist'], {'growth': '(True)', 'rule': '"""fejer"""', 'sparse': '(True)'}), "(2, dist, growth=True, rule='fejer', sparse=True)\n", (1675, 1724), False, 'import chaospy\n'), ((1793, 1885), 'matplotlib.pyplot.scatter', 'pyplot.scatter', (['*nodes[:, indices]'], {'s': '(-size[indices])', 'lw': '(3)', 'color': '"""w"""', 'edgecolors': 'COLOR2'}), "(*nodes[:, indices], s=-size[indices], lw=3, color='w',\n edgecolors=COLOR2)\n", (1807, 1885), False, 'from matplotlib import pyplot\n'), ((1886, 1963), 'matplotlib.pyplot.scatter', 'pyplot.scatter', (['*nodes[:, indices]'], {'s': '(-size[indices])', 'color': 'COLOR2', 'alpha': '(0.6)'}), '(*nodes[:, indices], s=-size[indices], color=COLOR2, alpha=0.6)\n', (1900, 1963), False, 'from matplotlib import pyplot\n'), ((1968, 2060), 'matplotlib.pyplot.scatter', 'pyplot.scatter', (['*nodes[:, ~indices]'], {'s': 'size[~indices]', 'lw': '(3)', 'color': '"""w"""', 
'edgecolor': 'COLOR1'}), "(*nodes[:, ~indices], s=size[~indices], lw=3, color='w',\n edgecolor=COLOR1)\n", (1982, 2060), False, 'from matplotlib import pyplot\n'), ((2061, 2139), 'matplotlib.pyplot.scatter', 'pyplot.scatter', (['*nodes[:, ~indices]'], {'s': 'size[~indices]', 'color': 'COLOR1', 'alpha': '(0.6)'}), '(*nodes[:, ~indices], s=size[~indices], color=COLOR1, alpha=0.6)\n', (2075, 2139), False, 'from matplotlib import pyplot\n'), ((2201, 2227), 'numpy.linspace', 'numpy.linspace', (['(-2)', '(2)', '(200)'], {}), '(-2, 2, 200)\n', (2215, 2227), False, 'import numpy\n'), ((2237, 2255), 'chaospy.variable', 'chaospy.variable', ([], {}), '()\n', (2253, 2255), False, 'import chaospy\n'), ((2327, 2351), 'numpy.linspace', 'numpy.linspace', (['(-2)', '(-1.2)'], {}), '(-2, -1.2)\n', (2341, 2351), False, 'import numpy\n'), ((2436, 2458), 'numpy.linspace', 'numpy.linspace', (['(1.2)', '(2)'], {}), '(1.2, 2)\n', (2450, 2458), False, 'import numpy\n'), ((2586, 2611), 'numpy.linspace', 'numpy.linspace', (['(-1.2)', '(1.2)'], {}), '(-1.2, 1.2)\n', (2600, 2611), False, 'import numpy\n'), ((2843, 2884), 'numpy.array', 'numpy.array', (['[[0, 1, 2, 3], [0, 1, 2, 3]]'], {}), '([[0, 1, 2, 3], [0, 1, 2, 3]])\n', (2854, 2884), False, 'import numpy\n'), ((2900, 2953), 'numpy.array', 'numpy.array', (['[[0, 0, 0, 1, 1, 2], [1, 2, 3, 2, 3, 3]]'], {}), '([[0, 0, 0, 1, 1, 2], [1, 2, 3, 2, 3, 3]])\n', (2911, 2953), False, 'import numpy\n'), ((2974, 3021), 'matplotlib.pyplot.plot', 'pyplot.plot', (['[0.16, 0.84]', '[2, 2]', 'COLOR2'], {'lw': '(4)'}), '([0.16, 0.84], [2, 2], COLOR2, lw=4)\n', (2985, 3021), False, 'from matplotlib import pyplot\n'), ((3024, 3071), 'matplotlib.pyplot.plot', 'pyplot.plot', (['[0.16, 0.84]', '[3, 3]', 'COLOR2'], {'lw': '(4)'}), '([0.16, 0.84], [3, 3], COLOR2, lw=4)\n', (3035, 3071), False, 'from matplotlib import pyplot\n'), ((3074, 3121), 'matplotlib.pyplot.plot', 'pyplot.plot', (['[1.16, 1.84]', '[3, 3]', 'COLOR2'], {'lw': '(4)'}), '([1.16, 1.84], [3, 3], COLOR2, lw=4)\n', (3085, 3121), False, 'from matplotlib import pyplot\n'), ((3126, 3195), 'matplotlib.pyplot.scatter', 'pyplot.scatter', (['*samples1'], {'s': 'size', 'lw': '(3)', 'color': '"""w"""', 'edgecolors': 'COLOR1'}), "(*samples1, s=size, lw=3, color='w', edgecolors=COLOR1)\n", (3140, 3195), False, 'from matplotlib import pyplot\n'), ((3200, 3258), 'matplotlib.pyplot.scatter', 'pyplot.scatter', (['*samples1'], {'s': 'size', 'color': 'COLOR1', 'alpha': '(0.6)'}), '(*samples1, s=size, color=COLOR1, alpha=0.6)\n', (3214, 3258), False, 'from matplotlib import pyplot\n'), ((3263, 3331), 'matplotlib.pyplot.scatter', 'pyplot.scatter', (['*samples2'], {'s': 'size', 'lw': '(3)', 'color': '"""w"""', 'edgecolor': 'COLOR2'}), "(*samples2, s=size, lw=3, color='w', edgecolor=COLOR2)\n", (3277, 3331), False, 'from matplotlib import pyplot\n'), ((3336, 3394), 'matplotlib.pyplot.scatter', 'pyplot.scatter', (['*samples2'], {'s': 'size', 'color': 'COLOR2', 'alpha': '(0.6)'}), '(*samples2, s=size, color=COLOR2, alpha=0.6)\n', (3350, 3394), False, 'from matplotlib import pyplot\n'), ((3450, 3473), 'numpy.random.seed', 'numpy.random.seed', (['(1234)'], {}), '(1234)\n', (3467, 3473), False, 'import numpy\n'), ((3486, 3506), 'chaospy.Normal', 'chaospy.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (3500, 3506), False, 'import chaospy\n'), ((3551, 3572), 'chaospy.Exponential', 'chaospy.Exponential', ([], {}), '()\n', (3570, 3572), False, 'import chaospy\n'), ((3618, 3654), 'numpy.linspace', 'numpy.linspace', (['(0)', '(2 * numpy.pi)', '(200)'], {}), 
'(0, 2 * numpy.pi, 200)\n', (3632, 3654), False, 'import numpy\n'), ((3696, 3846), 'matplotlib.pyplot.pie', 'pyplot.pie', (['[0.5]'], {'colors': '[COLOR1]', 'radius': '(1)', 'normalize': '(False)', 'center': '(-0.3, 0.3)', 'startangle': '(45)', 'wedgeprops': "{'width': 0.5, 'alpha': 0.5, 'lw': 4}"}), "([0.5], colors=[COLOR1], radius=1, normalize=False, center=(-0.3,\n 0.3), startangle=45, wedgeprops={'width': 0.5, 'alpha': 0.5, 'lw': 4})\n", (3706, 3846), False, 'from matplotlib import pyplot\n'), ((3877, 3920), 'matplotlib.pyplot.plot', 'pyplot.plot', (['(x - 0.3)', '(y + 0.3)', 'COLOR1'], {'lw': '(4)'}), '(x - 0.3, y + 0.3, COLOR1, lw=4)\n', (3888, 3920), False, 'from matplotlib import pyplot\n'), ((3921, 3972), 'matplotlib.pyplot.plot', 'pyplot.plot', (['(x / 2 - 0.3)', '(y / 2 + 0.3)', 'COLOR1'], {'lw': '(4)'}), '(x / 2 - 0.3, y / 2 + 0.3, COLOR1, lw=4)\n', (3932, 3972), False, 'from matplotlib import pyplot\n'), ((3970, 4068), 'matplotlib.pyplot.bar', 'pyplot.bar', (['[0, 0.6]', '[0.5, 1]'], {'bottom': '[-0.6, -0.6]', 'width': '(0.5)', 'yerr': '[0.2, 0.3]', 'color': 'COLOR2'}), '([0, 0.6], [0.5, 1], bottom=[-0.6, -0.6], width=0.5, yerr=[0.2, \n 0.3], color=COLOR2)\n', (3980, 4068), False, 'from matplotlib import pyplot\n'), ((1142, 1163), 'chaospy.Uniform', 'chaospy.Uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1157, 1163), False, 'import chaospy\n'), ((1600, 1621), 'chaospy.Uniform', 'chaospy.Uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1615, 1621), False, 'import chaospy\n'), ((2802, 2823), 'chaospy.Uniform', 'chaospy.Uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2817, 2823), False, 'import chaospy\n'), ((3664, 3676), 'numpy.cos', 'numpy.cos', (['x'], {}), '(x)\n', (3673, 3676), False, 'import numpy\n'), ((3678, 3690), 'numpy.sin', 'numpy.sin', (['y'], {}), '(y)\n', (3687, 3690), False, 'import numpy\n')]
|
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v2 as tf
from tf_agents.specs import tensor_spec
from tf_agents.policies import tf_policy
from typing import Any, Callable, Iterable, Optional, Sequence, Tuple, Union
import dice_rl.data.dataset as dataset_lib
import dice_rl.estimators.estimator as estimator_lib
def _is_categorical_spec(spec):
return (tensor_spec.is_discrete(spec) and tensor_spec.is_bounded(spec) and
spec.shape == [] and spec.minimum == 0)
class TabularQLearning(object):
"""Approximate the density ratio using exact matrix solves."""
def __init__(self,
dataset_spec,
gamma: Union[float, tf.Tensor],
reward_fn: Optional[Callable] = None,
solve_for_state_action_value: bool = True,
num_qvalues: Optional[int] = None,
bootstrap: bool = True,
perturbation_scale: Union[float, tf.Tensor] = 1.0,
default_reward_value: Union[float, tf.Tensor] = 0.0,
limit_episodes: Optional[int] = None):
"""Initializes the solver.
Args:
dataset_spec: The spec of the dataset that will be given.
gamma: The discount factor to use.
reward_fn: A function that takes in an EnvStep and returns the reward for
that step. If not specified, defaults to just EnvStep.reward.
solve_for_state_action_value: Whether to solve for Q-values (default) or
V-values, i.e., state-values.
num_qvalues: If specified, maintains an ensemble of Q-values for
confidence bound estimation.
bootstrap: Whether to bootstrap the dataset.
perturbation_scale: Scale of reward perturbation.
default_reward_value: Value to use for reward of unseen state-actions.
limit_episodes: How many episodes to take from the dataset. Defaults to
None (take all episodes).
"""
self._dataset_spec = dataset_spec
self._gamma = gamma
if reward_fn is None:
reward_fn = lambda env_step: env_step.reward
self._reward_fn = reward_fn
self._num_qvalues = num_qvalues
self._bootstrap = bootstrap
self._perturbation_scale = np.array(perturbation_scale)
if len(np.shape(self._perturbation_scale)) < 1:
self._perturbation_scale = np.reshape(self._perturbation_scale, [-1])
self._num_perturbations = len(self._perturbation_scale)
self._default_reward_value = default_reward_value
self._limit_episodes = limit_episodes
self._solve_for_state_action_value = solve_for_state_action_value
if (not self._solve_for_state_action_value and
not self._dataset_spec.has_log_probability()):
raise ValueError('Dataset must contain log-probability when '
'solve_for_state_action_value is False.')
# Get number of states/actions.
observation_spec = self._dataset_spec.observation
action_spec = self._dataset_spec.action
if not _is_categorical_spec(observation_spec):
raise ValueError('Observation spec must be discrete and bounded.')
self._num_states = observation_spec.maximum + 1
if not _is_categorical_spec(action_spec):
raise ValueError('Action spec must be discrete and bounded.')
self._num_actions = action_spec.maximum + 1
self._dimension = (
self._num_states * self._num_actions
if self._solve_for_state_action_value else self._num_states)
self._dimension += 1 # Add 1 for terminal absorbing state.
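# Table entries are indexed by state * num_actions + action (or just state); see _get_index.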
self._point_qvalues = np.zeros([self._dimension])
if self._num_qvalues is not None:
self._ensemble_qvalues = np.zeros([self._num_qvalues, self._dimension])
def _get_index(self, state, action):
if self._solve_for_state_action_value:
return state * self._num_actions + action
else:
return state
def solve(self,
dataset: dataset_lib.OffpolicyDataset,
target_policy: tf_policy.TFPolicy,
regularizer: float = 1e-8):
"""Solves for Q-values and then approximates target policy value.
Args:
dataset: The dataset to sample experience from.
target_policy: The policy whose value we want to estimate.
regularizer: A small constant to add before dividing.
Returns:
Estimated average per-step reward of the target policy.
"""
num_estimates = 1 + (self._num_qvalues or 0)
transition_matrix = np.zeros(
[self._dimension, self._dimension, num_estimates])
reward_vector = np.zeros(
[self._dimension, num_estimates, self._num_perturbations])
total_weights = np.zeros([self._dimension, num_estimates])
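# Empirical transition counts, rewards, and visit weights are accumulated from the dataset below.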
episodes, valid_steps = dataset.get_all_episodes(limit=self._limit_episodes)
#all_rewards = self._reward_fn(episodes)
#reward_std = np.ma.MaskedArray(all_rewards, valid_steps).std()
tfagents_episodes = dataset_lib.convert_to_tfagents_timestep(episodes)
sample_weights = np.array(valid_steps, dtype=np.int64)
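# Optionally bootstrap-resample the step weights so each ensemble member sees a perturbed dataset.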
if not self._bootstrap or self._num_qvalues is None:
sample_weights = (
sample_weights[:, :, None] * np.ones([1, 1, num_estimates]))
else:
probs = np.reshape(sample_weights, [-1]) / np.sum(sample_weights)
weights = np.random.multinomial(
np.sum(sample_weights), probs,
size=self._num_qvalues).astype(np.float32)
weights = np.reshape(
np.transpose(weights),
list(np.shape(sample_weights)) + [self._num_qvalues])
sample_weights = np.concatenate([sample_weights[:, :, None], weights],
axis=-1)
for episode_num in range(tf.shape(valid_steps)[0]):
# Precompute probabilities for this episode.
this_episode = tf.nest.map_structure(lambda t: t[episode_num], episodes)
this_tfagents_episode = dataset_lib.convert_to_tfagents_timestep(
this_episode)
episode_target_log_probabilities = target_policy.distribution(
this_tfagents_episode).action.log_prob(this_episode.action)
episode_target_probs = target_policy.distribution(
this_tfagents_episode).action.probs_parameter()
for step_num in range(tf.shape(valid_steps)[1] - 1):
this_step = tf.nest.map_structure(lambda t: t[episode_num, step_num],
episodes)
next_step = tf.nest.map_structure(
lambda t: t[episode_num, step_num + 1], episodes)
this_tfagents_step = dataset_lib.convert_to_tfagents_timestep(this_step)
next_tfagents_step = dataset_lib.convert_to_tfagents_timestep(next_step)
this_weights = sample_weights[episode_num, step_num, :]
if this_step.is_last() or not valid_steps[episode_num, step_num]:
continue
weight = this_weights
this_index = self._get_index(this_step.observation, this_step.action)
reward_vector[this_index, :, :] += np.expand_dims(
self._reward_fn(this_step) * weight, -1)
if self._num_qvalues is not None:
random_noise = np.random.binomial(this_weights[1:].astype('int64'),
0.5)
reward_vector[this_index, 1:, :] += (
self._perturbation_scale[None, :] *
(2 * random_noise - this_weights[1:])[:, None])
total_weights[this_index] += weight
policy_ratio = 1.0
if not self._solve_for_state_action_value:
policy_ratio = tf.exp(episode_target_log_probabilities[step_num] -
this_step.get_log_probability())
# Need to weight next nu by importance weight.
next_weight = (
weight if self._solve_for_state_action_value else policy_ratio *
weight)
if next_step.is_absorbing():
next_index = -1 # Absorbing state.
transition_matrix[this_index, next_index] += next_weight
else:
next_probs = episode_target_probs[step_num + 1]
for next_action, next_prob in enumerate(next_probs):
next_index = self._get_index(next_step.observation, next_action)
transition_matrix[this_index, next_index] += next_prob * next_weight
print('Done processing data.')
transition_matrix /= (regularizer + total_weights)[:, None, :]
reward_vector /= (regularizer + total_weights)[:, :, None]
reward_vector[np.where(np.equal(total_weights,
0.0))] = self._default_reward_value
reward_vector[-1, :, :] = 0.0 # Terminal absorbing state has 0 reward.
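# Solve the tabular Bellman system (I - gamma * P) q = r exactly for the Q-values.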
self._point_qvalues = np.linalg.solve(
np.eye(self._dimension) - self._gamma * transition_matrix[:, :, 0],
reward_vector[:, 0])
if self._num_qvalues is not None:
self._ensemble_qvalues = np.linalg.solve(
(np.eye(self._dimension) -
self._gamma * np.transpose(transition_matrix, [2, 0, 1])),
np.transpose(reward_vector, [1, 0, 2]))
return self.estimate_average_reward(dataset, target_policy)
def estimate_average_reward(self, dataset: dataset_lib.OffpolicyDataset,
target_policy: tf_policy.TFPolicy):
"""Estimates value (average per-step reward) of policy.
Args:
dataset: The dataset to sample experience from.
target_policy: The policy whose value we want to estimate.
Returns:
Estimated average per-step reward of the target policy.
"""
def reward_fn(env_step, valid_steps, qvalues=self._point_qvalues):
"""Computes average initial Q-values of episodes."""
# env_step is an episode, and we just want the first step.
if tf.rank(valid_steps) == 1:
first_step = tf.nest.map_structure(lambda t: t[0, ...], env_step)
else:
first_step = tf.nest.map_structure(lambda t: t[:, 0, ...], env_step)
if self._solve_for_state_action_value:
indices = self._get_index(first_step.observation[:, None],
np.arange(self._num_actions)[None, :])
initial_qvalues = tf.cast(tf.gather(qvalues, indices), tf.float32)
tfagents_first_step = dataset_lib.convert_to_tfagents_timestep(
first_step)
initial_target_probs = target_policy.distribution(
tfagents_first_step).action.probs_parameter()
value = tf.reduce_sum(initial_qvalues * initial_target_probs, axis=-1)
else:
indices = self._get_index(first_step.observation, first_step.action)
value = tf.cast(tf.gather(qvalues, indices), tf.float32)
return value
def weight_fn(env_step, valid_steps):
return tf.ones([tf.shape(valid_steps)[0]], dtype=tf.float32)
if self._num_qvalues is None:
return (1 - self._gamma) * estimator_lib.get_fullbatch_average(
dataset,
limit=None,
by_steps=False,
truncate_episode_at=1,
reward_fn=reward_fn,
weight_fn=weight_fn)
else:
estimates = []
for i in range(self._num_qvalues):
estimates.append([])
for j in range(self._num_perturbations):
estimates[-1].append(
(1 - self._gamma) * estimator_lib.get_fullbatch_average(
dataset,
limit=None,
by_steps=False,
truncate_episode_at=1,
reward_fn=lambda *args: reward_fn(
*args, qvalues=self._ensemble_qvalues[i, :, j]),
weight_fn=weight_fn))
return np.array(estimates)
|
[
"tensorflow.compat.v2.nest.map_structure",
"numpy.sum",
"numpy.ones",
"numpy.shape",
"tensorflow.compat.v2.rank",
"numpy.arange",
"dice_rl.data.dataset.convert_to_tfagents_timestep",
"numpy.transpose",
"tensorflow.compat.v2.shape",
"numpy.equal",
"numpy.reshape",
"tf_agents.specs.tensor_spec.is_discrete",
"tensorflow.compat.v2.gather",
"tensorflow.compat.v2.reduce_sum",
"numpy.concatenate",
"dice_rl.estimators.estimator.get_fullbatch_average",
"numpy.zeros",
"tf_agents.specs.tensor_spec.is_bounded",
"numpy.array",
"numpy.eye"
] |
[((1038, 1067), 'tf_agents.specs.tensor_spec.is_discrete', 'tensor_spec.is_discrete', (['spec'], {}), '(spec)\n', (1061, 1067), False, 'from tf_agents.specs import tensor_spec\n'), ((1072, 1100), 'tf_agents.specs.tensor_spec.is_bounded', 'tensor_spec.is_bounded', (['spec'], {}), '(spec)\n', (1094, 1100), False, 'from tf_agents.specs import tensor_spec\n'), ((2826, 2854), 'numpy.array', 'np.array', (['perturbation_scale'], {}), '(perturbation_scale)\n', (2834, 2854), True, 'import numpy as np\n'), ((4151, 4178), 'numpy.zeros', 'np.zeros', (['[self._dimension]'], {}), '([self._dimension])\n', (4159, 4178), True, 'import numpy as np\n'), ((5027, 5086), 'numpy.zeros', 'np.zeros', (['[self._dimension, self._dimension, num_estimates]'], {}), '([self._dimension, self._dimension, num_estimates])\n', (5035, 5086), True, 'import numpy as np\n'), ((5116, 5183), 'numpy.zeros', 'np.zeros', (['[self._dimension, num_estimates, self._num_perturbations]'], {}), '([self._dimension, num_estimates, self._num_perturbations])\n', (5124, 5183), True, 'import numpy as np\n'), ((5213, 5255), 'numpy.zeros', 'np.zeros', (['[self._dimension, num_estimates]'], {}), '([self._dimension, num_estimates])\n', (5221, 5255), True, 'import numpy as np\n'), ((5475, 5525), 'dice_rl.data.dataset.convert_to_tfagents_timestep', 'dataset_lib.convert_to_tfagents_timestep', (['episodes'], {}), '(episodes)\n', (5515, 5525), True, 'import dice_rl.data.dataset as dataset_lib\n'), ((5548, 5585), 'numpy.array', 'np.array', (['valid_steps'], {'dtype': 'np.int64'}), '(valid_steps, dtype=np.int64)\n', (5556, 5585), True, 'import numpy as np\n'), ((2940, 2982), 'numpy.reshape', 'np.reshape', (['self._perturbation_scale', '[-1]'], {}), '(self._perturbation_scale, [-1])\n', (2950, 2982), True, 'import numpy as np\n'), ((4248, 4294), 'numpy.zeros', 'np.zeros', (['[self._num_qvalues, self._dimension]'], {}), '([self._num_qvalues, self._dimension])\n', (4256, 4294), True, 'import numpy as np\n'), ((6102, 6164), 'numpy.concatenate', 'np.concatenate', (['[sample_weights[:, :, None], weights]'], {'axis': '(-1)'}), '([sample_weights[:, :, None], weights], axis=-1)\n', (6116, 6164), True, 'import numpy as np\n'), ((6331, 6388), 'tensorflow.compat.v2.nest.map_structure', 'tf.nest.map_structure', (['(lambda t: t[episode_num])', 'episodes'], {}), '(lambda t: t[episode_num], episodes)\n', (6352, 6388), True, 'import tensorflow.compat.v2 as tf\n'), ((6419, 6473), 'dice_rl.data.dataset.convert_to_tfagents_timestep', 'dataset_lib.convert_to_tfagents_timestep', (['this_episode'], {}), '(this_episode)\n', (6459, 6473), True, 'import dice_rl.data.dataset as dataset_lib\n'), ((12090, 12109), 'numpy.array', 'np.array', (['estimates'], {}), '(estimates)\n', (12098, 12109), True, 'import numpy as np\n'), ((2866, 2900), 'numpy.shape', 'np.shape', (['self._perturbation_scale'], {}), '(self._perturbation_scale)\n', (2874, 2900), True, 'import numpy as np\n'), ((5707, 5737), 'numpy.ones', 'np.ones', (['[1, 1, num_estimates]'], {}), '([1, 1, num_estimates])\n', (5714, 5737), True, 'import numpy as np\n'), ((5763, 5795), 'numpy.reshape', 'np.reshape', (['sample_weights', '[-1]'], {}), '(sample_weights, [-1])\n', (5773, 5795), True, 'import numpy as np\n'), ((5798, 5820), 'numpy.sum', 'np.sum', (['sample_weights'], {}), '(sample_weights)\n', (5804, 5820), True, 'import numpy as np\n'), ((5992, 6013), 'numpy.transpose', 'np.transpose', (['weights'], {}), '(weights)\n', (6004, 6013), True, 'import numpy as np\n'), ((6233, 6254), 'tensorflow.compat.v2.shape', 'tf.shape', 
(['valid_steps'], {}), '(valid_steps)\n', (6241, 6254), True, 'import tensorflow.compat.v2 as tf\n'), ((6819, 6886), 'tensorflow.compat.v2.nest.map_structure', 'tf.nest.map_structure', (['(lambda t: t[episode_num, step_num])', 'episodes'], {}), '(lambda t: t[episode_num, step_num], episodes)\n', (6840, 6886), True, 'import tensorflow.compat.v2 as tf\n'), ((6949, 7020), 'tensorflow.compat.v2.nest.map_structure', 'tf.nest.map_structure', (['(lambda t: t[episode_num, step_num + 1])', 'episodes'], {}), '(lambda t: t[episode_num, step_num + 1], episodes)\n', (6970, 7020), True, 'import tensorflow.compat.v2 as tf\n'), ((7063, 7114), 'dice_rl.data.dataset.convert_to_tfagents_timestep', 'dataset_lib.convert_to_tfagents_timestep', (['this_step'], {}), '(this_step)\n', (7103, 7114), True, 'import dice_rl.data.dataset as dataset_lib\n'), ((7144, 7195), 'dice_rl.data.dataset.convert_to_tfagents_timestep', 'dataset_lib.convert_to_tfagents_timestep', (['next_step'], {}), '(next_step)\n', (7184, 7195), True, 'import dice_rl.data.dataset as dataset_lib\n'), ((8983, 9011), 'numpy.equal', 'np.equal', (['total_weights', '(0.0)'], {}), '(total_weights, 0.0)\n', (8991, 9011), True, 'import numpy as np\n'), ((9207, 9230), 'numpy.eye', 'np.eye', (['self._dimension'], {}), '(self._dimension)\n', (9213, 9230), True, 'import numpy as np\n'), ((9507, 9545), 'numpy.transpose', 'np.transpose', (['reward_vector', '[1, 0, 2]'], {}), '(reward_vector, [1, 0, 2])\n', (9519, 9545), True, 'import numpy as np\n'), ((10233, 10253), 'tensorflow.compat.v2.rank', 'tf.rank', (['valid_steps'], {}), '(valid_steps)\n', (10240, 10253), True, 'import tensorflow.compat.v2 as tf\n'), ((10281, 10333), 'tensorflow.compat.v2.nest.map_structure', 'tf.nest.map_structure', (['(lambda t: t[0, ...])', 'env_step'], {}), '(lambda t: t[0, ...], env_step)\n', (10302, 10333), True, 'import tensorflow.compat.v2 as tf\n'), ((10367, 10422), 'tensorflow.compat.v2.nest.map_structure', 'tf.nest.map_structure', (['(lambda t: t[:, 0, ...])', 'env_step'], {}), '(lambda t: t[:, 0, ...], env_step)\n', (10388, 10422), True, 'import tensorflow.compat.v2 as tf\n'), ((10715, 10767), 'dice_rl.data.dataset.convert_to_tfagents_timestep', 'dataset_lib.convert_to_tfagents_timestep', (['first_step'], {}), '(first_step)\n', (10755, 10767), True, 'import dice_rl.data.dataset as dataset_lib\n'), ((10914, 10976), 'tensorflow.compat.v2.reduce_sum', 'tf.reduce_sum', (['(initial_qvalues * initial_target_probs)'], {'axis': '(-1)'}), '(initial_qvalues * initial_target_probs, axis=-1)\n', (10927, 10976), True, 'import tensorflow.compat.v2 as tf\n'), ((11329, 11470), 'dice_rl.estimators.estimator.get_fullbatch_average', 'estimator_lib.get_fullbatch_average', (['dataset'], {'limit': 'None', 'by_steps': '(False)', 'truncate_episode_at': '(1)', 'reward_fn': 'reward_fn', 'weight_fn': 'weight_fn'}), '(dataset, limit=None, by_steps=False,\n truncate_episode_at=1, reward_fn=reward_fn, weight_fn=weight_fn)\n', (11364, 11470), True, 'import dice_rl.estimators.estimator as estimator_lib\n'), ((9401, 9424), 'numpy.eye', 'np.eye', (['self._dimension'], {}), '(self._dimension)\n', (9407, 9424), True, 'import numpy as np\n'), ((10643, 10670), 'tensorflow.compat.v2.gather', 'tf.gather', (['qvalues', 'indices'], {}), '(qvalues, indices)\n', (10652, 10670), True, 'import tensorflow.compat.v2 as tf\n'), ((11090, 11117), 'tensorflow.compat.v2.gather', 'tf.gather', (['qvalues', 'indices'], {}), '(qvalues, indices)\n', (11099, 11117), True, 'import tensorflow.compat.v2 as tf\n'), ((5870, 5892), 
'numpy.sum', 'np.sum', (['sample_weights'], {}), '(sample_weights)\n', (5876, 5892), True, 'import numpy as np\n'), ((6030, 6054), 'numpy.shape', 'np.shape', (['sample_weights'], {}), '(sample_weights)\n', (6038, 6054), True, 'import numpy as np\n'), ((6768, 6789), 'tensorflow.compat.v2.shape', 'tf.shape', (['valid_steps'], {}), '(valid_steps)\n', (6776, 6789), True, 'import tensorflow.compat.v2 as tf\n'), ((9452, 9494), 'numpy.transpose', 'np.transpose', (['transition_matrix', '[2, 0, 1]'], {}), '(transition_matrix, [2, 0, 1])\n', (9464, 9494), True, 'import numpy as np\n'), ((10570, 10598), 'numpy.arange', 'np.arange', (['self._num_actions'], {}), '(self._num_actions)\n', (10579, 10598), True, 'import numpy as np\n'), ((11216, 11237), 'tensorflow.compat.v2.shape', 'tf.shape', (['valid_steps'], {}), '(valid_steps)\n', (11224, 11237), True, 'import tensorflow.compat.v2 as tf\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 17 10:49:15 2016
@author: yxl
"""
from imagepy import IPy
import numpy as np
from imagepy.ui.canvasframe import CanvasFrame
from imagepy.core.manager import ImageManager, WindowsManager
from imagepy.core.engine import Simple
from skimage import color
class SplitRGB(Simple):
title = 'Split RGB Channels'
note = ['rgb']
para = {'copy':False, 'destory':True}
view = [(bool, 'copy', 'Copy data from view'),
(bool, 'destory', 'Destroy current image')]
#process
def run(self, ips, imgs, para = None):
r,g,b = [],[],[]
for i,n in zip(imgs,list(range(ips.get_nslices()))):
for c,ci in zip((r,g,b),(0,1,2)):
if self.para['copy']:c.append(i[:,:,ci].copy())
else: c.append(i[:,:,ci])
self.progress(i, n)
for im, tl in zip([r,g,b],['red','green','blue']):
IPy.show_img(im, ips.title+'-'+tl)
if self.para['destory']:
ImageManager.close(ips.title)
class ToRGB(Simple):
title = 'RGB to RGB'
note = ['all']
#parameter
para = {'red':'','green':'','blue':'','destory':True}
def load(self, ips):
r, g, b = self.titles()[1:]
self.view = [('img', r, 'red', ''),
('img', g, 'green', ''),
('img', b, 'blue', ''),
(bool, 'destory', 'destory')]
return True
def titles(self): return 'RGB-Merge', 'red', 'green', 'blue'
def trans(self, img1, img2, img3):
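# stack the three 8-bit channel images into a single H x W x 3 RGB image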
return np.array([img1.T, img2.T, img3.T], dtype=np.uint8).T
def run(self, ips, imgs, para = None):
idx = ['red','green','blue']
print(para)
imr,img,imb = [ImageManager.get(para[i]) for i in idx]
sr,sg,sb = [i.get_nslices() for i in [imr,img,imb]]
if imr.imgtype!='8-bit' or img.imgtype!='8-bit' or imb.imgtype!='8-bit' or \
imr.size!=img.size or img.size!=imb.size or sr!=sg or sg!=sb:
IPy.alert('three images must be 8-bit image, with the same size and slices!')
return
rgbs = []
w,h = imr.size
for i in range(sr):
self.progress(i,sr)
rgbs.append(self.trans(imr.imgs[i], img.imgs[i], imb.imgs[i]))
IPy.show_img(rgbs, self.titles()[0])
if self.para['destory']:
for title in [para[i] for i in idx]:
WindowsManager.get(title).close()
class RGB2(Simple):
title = 'RGB To RGB'
note = ['rgb']
#process
def titles(self): return 'Red', 'Green', 'Blue'
def trans(self, img):
return img
def run(self, ips, imgs, para = None):
nr, ng, nb = [],[],[]
for i in range(ips.get_nslices()):
nrgb = self.trans(imgs[i])
nr.append(nrgb[:,:,0])
ng.append(nrgb[:,:,1])
nb.append(nrgb[:,:,2])
self.progress(i, len(imgs))
for im, tl in zip([nr, ng, nb], self.titles()):
IPy.show_img(im, ips.title+'-'+tl)
class MergeRGB(ToRGB):
title = 'Merge RGB Channels'
# ============= RGB - HSV ============
class RGB2HSV(RGB2):
title = 'RGB To HSV'
def titles(self):
return 'Hue', 'Saturation', 'Value'
def trans(self, img):
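# skimage returns HSV channels in [0, 1]; rescale to the 8-bit range for display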
rst = color.rgb2hsv(img)
rst *= 255
print('============', rst.min(), rst.max())
return rst.astype(np.uint8)
class HSV2RGB(ToRGB):
title = 'HSV To RGB'
#process
def titles(self):
return 'HSV2RGB-Merge', 'H', 'S', 'V'
def trans(self, img1, img2, img3):
rst = np.array((img1.T, img2.T, img3.T), dtype=np.float64)
rst /= 255.0
rst = color.hsv2rgb(rst.T)
rst *= 255
return rst.astype(np.uint8)
# ============= RGB - CIE ============
class RGB2CIE(RGB2):
title = 'RGB To CIERGB'
#process
def titles(self):
return 'Red', 'Green', 'Blue'
def trans(self, img):
rst = color.rgb2rgbcie(img)
np.maximum(rst, 0, out=rst)
print('============', rst.min(axis=(0,1)), rst.max(axis=(0,1)))
rst *= 255/50*255
return rst.astype(np.uint8)
class CIE2RGB(ToRGB):
title = 'CIERGB To RGB'
#process
def titles(self):
return 'CIE2RGB-Merge', 'R', 'G', 'B'
def trans(self, img1, img2, img3):
rst = np.maximum((img1.T, img2.T, img3.T), 0, dtype=np.float64)
rst /= 255/50*255
rst = color.rgbcie2rgb(rst.T)
rst *= 255
return (rst).astype(np.uint8)
# ============= RGB - LUV ============
class RGB2LUV(RGB2):
title = 'RGB To LUV'
#process
def titles(self):
return 'Luminance', 'UColor', 'VColor'
def trans(self, img):
rst = color.rgb2luv(img)+128
#print('============', rst.min(), rst.max())
return rst.astype(np.uint8)
class LUV2RGB(ToRGB):
title = 'LUV To RGB'
#process
def titles(self):
return 'LUV2RGB-Merge', 'L', 'U', 'V'
def trans(self, img1, img2, img3):
rst = np.array((img1.T, img2.T, img3.T), dtype=np.float64)
rst -= 128
rst = color.luv2rgb(rst.T)
rst *= 255
return (rst).astype(np.uint8)
# ============= RGB - Lab ============
class RGB2Lab(RGB2):
title = 'RGB To Lab'
#process
def titles(self):
return 'Luminance', 'AColor', 'BColor'
def trans(self, img):
rst = color.rgb2lab(img)
print('============', rst.min(), rst.max())
rst+=100; rst*=(255/200.0)
return (rst).astype(np.uint8)
class Lab2RGB(ToRGB):
title = 'Lab To RGB'
#process
def titles(self):
return 'Lab2RGB-Merge', 'L', 'A', 'B'
def trans(self, img1, img2, img3):
rst = np.array((img1.T, img2.T, img3.T), dtype=np.float64)
rst *= (200/255.0); rst -= 100
rst = color.lab2rgb(rst.T)
rst *= 255
return (rst).astype(np.uint8)
class RGB2Gray(Simple):
title = 'RGB To Gray'
note = ['rgb']
def run(self, ips, imgs, para = None):
gray = []
for i in range(ips.get_nslices()):
gray.append(color.rgb2gray(imgs[i])*255)
self.progress(i, len(imgs))
IPy.show_img(gray, ips.title+'-Gray')
# ============= RGB - XYZ ============
class RGB2XYZ(RGB2):
title = 'RGB To XYZ'
#process
def titles(self):
return 'X', 'Y', 'Z'
def trans(self, img):
rst = color.rgb2xyz(img)
print('============', rst.min(), rst.max())
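        # rgb2xyz output is roughly in [0, 1]; the x200 factor below is an ad-hoc scale into the uint8 range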
return (rst*(200)).astype(np.uint8)
class XYZ2RGB(ToRGB):
title = 'XYZ To RGB'
#process
def titles(self):
return 'XYZ2RGB-Merge', 'X', 'Y', 'Z'
def trans(self, img1, img2, img3):
rst = color.xyz2rgb(np.array((img1.T, img2.T, img3.T)).T/200.0)*255
#print('============', rst.min(), rst.max())
return rst.astype(np.uint8)
plgs = [RGB2Gray, '-', SplitRGB, MergeRGB, '-', RGB2HSV, HSV2RGB, '-', RGB2CIE, CIE2RGB, '-', RGB2LUV, LUV2RGB, '-', RGB2Lab, Lab2RGB, '-', RGB2XYZ, XYZ2RGB]
|
[
"skimage.color.rgbcie2rgb",
"numpy.maximum",
"skimage.color.hsv2rgb",
"skimage.color.rgb2gray",
"skimage.color.rgb2luv",
"skimage.color.rgb2hsv",
"imagepy.core.manager.ImageManager.close",
"imagepy.IPy.alert",
"skimage.color.lab2rgb",
"skimage.color.rgb2rgbcie",
"skimage.color.rgb2xyz",
"numpy.array",
"skimage.color.luv2rgb",
"imagepy.core.manager.ImageManager.get",
"imagepy.core.manager.WindowsManager.get",
"imagepy.IPy.show_img",
"skimage.color.rgb2lab"
] |
[((3325, 3343), 'skimage.color.rgb2hsv', 'color.rgb2hsv', (['img'], {}), '(img)\n', (3338, 3343), False, 'from skimage import color\n'), ((3635, 3687), 'numpy.array', 'np.array', (['(img1.T, img2.T, img3.T)'], {'dtype': 'np.float64'}), '((img1.T, img2.T, img3.T), dtype=np.float64)\n', (3643, 3687), True, 'import numpy as np\n'), ((3723, 3743), 'skimage.color.hsv2rgb', 'color.hsv2rgb', (['rst.T'], {}), '(rst.T)\n', (3736, 3743), False, 'from skimage import color\n'), ((4002, 4023), 'skimage.color.rgb2rgbcie', 'color.rgb2rgbcie', (['img'], {}), '(img)\n', (4018, 4023), False, 'from skimage import color\n'), ((4032, 4059), 'numpy.maximum', 'np.maximum', (['rst', '(0)'], {'out': 'rst'}), '(rst, 0, out=rst)\n', (4042, 4059), True, 'import numpy as np\n'), ((4381, 4438), 'numpy.maximum', 'np.maximum', (['(img1.T, img2.T, img3.T)', '(0)'], {'dtype': 'np.float64'}), '((img1.T, img2.T, img3.T), 0, dtype=np.float64)\n', (4391, 4438), True, 'import numpy as np\n'), ((4479, 4502), 'skimage.color.rgbcie2rgb', 'color.rgbcie2rgb', (['rst.T'], {}), '(rst.T)\n', (4495, 4502), False, 'from skimage import color\n'), ((5066, 5118), 'numpy.array', 'np.array', (['(img1.T, img2.T, img3.T)'], {'dtype': 'np.float64'}), '((img1.T, img2.T, img3.T), dtype=np.float64)\n', (5074, 5118), True, 'import numpy as np\n'), ((5152, 5172), 'skimage.color.luv2rgb', 'color.luv2rgb', (['rst.T'], {}), '(rst.T)\n', (5165, 5172), False, 'from skimage import color\n'), ((5440, 5458), 'skimage.color.rgb2lab', 'color.rgb2lab', (['img'], {}), '(img)\n', (5453, 5458), False, 'from skimage import color\n'), ((5768, 5820), 'numpy.array', 'np.array', (['(img1.T, img2.T, img3.T)'], {'dtype': 'np.float64'}), '((img1.T, img2.T, img3.T), dtype=np.float64)\n', (5776, 5820), True, 'import numpy as np\n'), ((5874, 5894), 'skimage.color.lab2rgb', 'color.lab2rgb', (['rst.T'], {}), '(rst.T)\n', (5887, 5894), False, 'from skimage import color\n'), ((6228, 6267), 'imagepy.IPy.show_img', 'IPy.show_img', (['gray', "(ips.title + '-Gray')"], {}), "(gray, ips.title + '-Gray')\n", (6240, 6267), False, 'from imagepy import IPy\n'), ((6458, 6476), 'skimage.color.rgb2xyz', 'color.rgb2xyz', (['img'], {}), '(img)\n', (6471, 6476), False, 'from skimage import color\n'), ((926, 964), 'imagepy.IPy.show_img', 'IPy.show_img', (['im', "(ips.title + '-' + tl)"], {}), "(im, ips.title + '-' + tl)\n", (938, 964), False, 'from imagepy import IPy\n'), ((1006, 1035), 'imagepy.core.manager.ImageManager.close', 'ImageManager.close', (['ips.title'], {}), '(ips.title)\n', (1024, 1035), False, 'from imagepy.core.manager import ImageManager, WindowsManager\n'), ((1570, 1620), 'numpy.array', 'np.array', (['[img1.T, img2.T, img3.T]'], {'dtype': 'np.uint8'}), '([img1.T, img2.T, img3.T], dtype=np.uint8)\n', (1578, 1620), True, 'import numpy as np\n'), ((1751, 1776), 'imagepy.core.manager.ImageManager.get', 'ImageManager.get', (['para[i]'], {}), '(para[i])\n', (1767, 1776), False, 'from imagepy.core.manager import ImageManager, WindowsManager\n'), ((2031, 2109), 'imagepy.IPy.alert', 'IPy.alert', (['"""three images must be 8-bit image, with the same size and slices!"""'], {}), "('three images must be 8-bit image, with the same size and slices!')\n", (2040, 2109), False, 'from imagepy import IPy\n'), ((3040, 3078), 'imagepy.IPy.show_img', 'IPy.show_img', (['im', "(ips.title + '-' + tl)"], {}), "(im, ips.title + '-' + tl)\n", (3052, 3078), False, 'from imagepy import IPy\n'), ((4770, 4788), 'skimage.color.rgb2luv', 'color.rgb2luv', (['img'], {}), '(img)\n', (4783, 4788), False, 'from 
skimage import color\n'), ((6151, 6174), 'skimage.color.rgb2gray', 'color.rgb2gray', (['imgs[i]'], {}), '(imgs[i])\n', (6165, 6174), False, 'from skimage import color\n'), ((2461, 2486), 'imagepy.core.manager.WindowsManager.get', 'WindowsManager.get', (['title'], {}), '(title)\n', (2479, 2486), False, 'from imagepy.core.manager import ImageManager, WindowsManager\n'), ((6771, 6805), 'numpy.array', 'np.array', (['(img1.T, img2.T, img3.T)'], {}), '((img1.T, img2.T, img3.T))\n', (6779, 6805), True, 'import numpy as np\n')]
|
"""The main script for training the model."""
from arima_model import ARIMA
import torch
import numpy as np
import plotly.graph_objects as go
trainSize = 14
sampleData = torch.tensor(np.load('data.npy'))
sampleSize = len(sampleData)
trainData = sampleData[:trainSize]
predictionModel = ARIMA(p=0, d=1, q=1)
predictionModel.fit(trainData, epochs=100, learningRate=0.01)
testData = sampleData[trainSize:]
inference = torch.zeros(sampleSize)
inference[0] = trainData[-2]
inference[1] = trainData[-1]
errors = torch.tensor(np.random.normal(
loc=0, scale=1, size=sampleSize), dtype=torch.float32)
with torch.no_grad():
for i in range(len(testData) - 2):
inference[i+2] = predictionModel.forward(
inference[0:i+2], errors[0:i+2])
fig = go.Figure()
fig.add_trace(go.Scatter(x=torch.arange(sampleSize), y=sampleData,
mode='lines',
name='sampleData'))
fig.add_trace(go.Scatter(x=torch.arange(len(testData))+trainSize,
y=inference.detach().numpy(),
mode='lines+markers',
name='predicted'))
fig.show()
|
[
"numpy.load",
"plotly.graph_objects.Figure",
"torch.arange",
"numpy.random.normal",
"torch.zeros",
"torch.no_grad",
"arima_model.ARIMA"
] |
[((290, 310), 'arima_model.ARIMA', 'ARIMA', ([], {'p': '(0)', 'd': '(1)', 'q': '(1)'}), '(p=0, d=1, q=1)\n', (295, 310), False, 'from arima_model import ARIMA\n'), ((420, 443), 'torch.zeros', 'torch.zeros', (['sampleSize'], {}), '(sampleSize)\n', (431, 443), False, 'import torch\n'), ((764, 775), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (773, 775), True, 'import plotly.graph_objects as go\n'), ((186, 205), 'numpy.load', 'np.load', (['"""data.npy"""'], {}), "('data.npy')\n", (193, 205), True, 'import numpy as np\n'), ((524, 573), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': '(1)', 'size': 'sampleSize'}), '(loc=0, scale=1, size=sampleSize)\n', (540, 573), True, 'import numpy as np\n'), ((606, 621), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (619, 621), False, 'import torch\n'), ((803, 827), 'torch.arange', 'torch.arange', (['sampleSize'], {}), '(sampleSize)\n', (815, 827), False, 'import torch\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import math
import numpy as np
import pdb
from functools import partial
from opts import parser
args = parser.parse_args()
from ops.rstg import *
__all__ = [
'ResNet', 'resnet10', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnet200',
]
def conv3x3x3(in_planes, out_planes, stride=1):
# 3x3x3 convolution with padding
return nn.Conv3d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False)
def downsample_basic_block(x, planes, stride):
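    # "type A" shortcut: average-pool the input, then zero-pad the missing output channels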
out = F.avg_pool3d(x, kernel_size=1, stride=stride)
zero_pads = torch.Tensor(
out.size(0), planes - out.size(1), out.size(2), out.size(3),
out.size(4)).zero_()
if isinstance(out.data, torch.cuda.FloatTensor):
zero_pads = zero_pads.cuda()
out = Variable(torch.cat([out.data, zero_pads], dim=1))
return out
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm3d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3x3(planes, planes)
self.bn2 = nn.BatchNorm3d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
conv_op = None
offset_groups = 1
def __init__(self, dim_in, dim_out, stride, dim_inner, group=1, use_temp_conv=1, temp_stride=1, dcn=False,
shortcut_type='B'):
super(Bottleneck, self).__init__()
# 1 x 1 layer
self.with_dcn = dcn
self.conv1 = self.Conv3dBN(dim_in, dim_inner, (1 + use_temp_conv * 2, 1, 1), (temp_stride, 1, 1),
(use_temp_conv, 0, 0))
self.relu = nn.ReLU(inplace=True)
# 3 x 3 layer
self.conv2 = self.Conv3dBN(dim_inner, dim_inner, (1, 3, 3), (1, stride, stride), (0, 1, 1))
# 1 x 1 layer
self.conv3 = self.Conv3dBN(dim_inner, dim_out, (1, 1, 1), (1, 1, 1), (0, 0, 0))
self.shortcut_type = shortcut_type
self.dim_in = dim_in
self.dim_out = dim_out
self.temp_stride = temp_stride
self.stride = stride
# nn.Conv3d(dim_in, dim_out, (1,1,1),(temp_stride,stride,stride),(0,0,0))
if self.shortcut_type == 'B':
if self.dim_in == self.dim_out and self.temp_stride == 1 and self.stride == 1: # or (self.dim_in == self.dim_out and self.dim_in == 64 and self.stride ==1):
pass
else:
# pass
self.shortcut = self.Conv3dBN(dim_in, dim_out, (1, 1, 1), (temp_stride, stride, stride), (0, 0, 0))
# nn.Conv3d(dim_in,dim_inner,kernel_size=(1+use_temp_conv*2,1,1),stride = (temp_stride,1,1),padding = )
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.relu(out)
out = self.conv2(out)
out = self.relu(out)
out = self.conv3(out)
if self.dim_in == self.dim_out and self.temp_stride == 1 and self.stride == 1:
pass
else:
residual = self.shortcut(residual)
out += residual
out = self.relu(out)
return out
def Conv3dBN(self, dim_in, dim_out, kernels, strides, pads, group=1):
if self.with_dcn and kernels[0] > 1:
# use deformable conv
return nn.Sequential(
self.conv_op(dim_in, dim_out, kernel_size=kernels, stride=strides, padding=pads, bias=False,
offset_groups=self.offset_groups),
nn.BatchNorm3d(dim_out)
)
else:
return nn.Sequential(
nn.Conv3d(dim_in, dim_out, kernel_size=kernels, stride=strides, padding=pads, bias=False),
nn.BatchNorm3d(dim_out)
)
class ResNet(nn.Module):
def __init__(self,
block,
layers,
use_temp_convs_set,
temp_strides_set,
sample_size,
sample_duration,
shortcut_type='B',
num_classes=400,
stage_with_dcn=(False, False, False, False),
extract_features=False,
loss_type='softmax'):
super(ResNet, self).__init__()
self.extract_features = extract_features
self.stage_with_dcn = stage_with_dcn
self.group = 1
self.width_per_group = 64
self.dim_inner = self.group * self.width_per_group
# self.shortcut_type = shortcut_type
self.conv1 = nn.Conv3d(
3,
64,
kernel_size=(1 + use_temp_convs_set[0][0] * 2, 7, 7),
stride=(temp_strides_set[0][0], 2, 2),
padding=(use_temp_convs_set[0][0], 3, 3),
bias=False)
self.bn1 = nn.BatchNorm3d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool1 = nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 0, 0))
with_dcn = True if self.stage_with_dcn[0] else False
self.layer1 = self._make_layer(block, 64, 256, shortcut_type, stride=1, num_blocks=layers[0],
dim_inner=self.dim_inner, group=self.group, use_temp_convs=use_temp_convs_set[1],
temp_strides=temp_strides_set[1], dcn=with_dcn)
self.maxpool2 = nn.MaxPool3d(kernel_size=(2, 1, 1), stride=(2, 1, 1), padding=(0, 0, 0))
with_dcn = True if self.stage_with_dcn[1] else False
self.layer2 = self._make_layer(block, 256, 512, shortcut_type, stride=2, num_blocks=layers[1],
dim_inner=self.dim_inner * 2, group=self.group,
use_temp_convs=use_temp_convs_set[2], temp_strides=temp_strides_set[2],
dcn=with_dcn)
with_dcn = True if self.stage_with_dcn[2] else False
self.layer3 = self._make_layer(block, 512, 1024, shortcut_type, stride=2, num_blocks=layers[2],
dim_inner=self.dim_inner * 4, group=self.group,
use_temp_convs=use_temp_convs_set[3], temp_strides=temp_strides_set[3],
dcn=with_dcn)
with_dcn = True if self.stage_with_dcn[3] else False
self.layer4 = self._make_layer(block, 1024, 2048, shortcut_type, stride=1, num_blocks=layers[3],
dim_inner=self.dim_inner * 8, group=self.group,
use_temp_convs=use_temp_convs_set[4], temp_strides=temp_strides_set[4],
dcn=with_dcn)
last_duration = int(math.ceil(sample_duration / 2)) # int(math.ceil(sample_duration / 8))
last_size = int(math.ceil(sample_size / 16))
# self.avgpool = nn.AvgPool3d((last_duration, last_size, last_size), stride=1) #nn.AdaptiveAvgPool3d((1, 1, 1)) #
self.avgpool = nn.AdaptiveAvgPool3d((1, 1, 1))
self.dropout = torch.nn.Dropout(p=0.5)
self.classifier = nn.Linear(2048, num_classes)
for m in self.modules():
# if isinstance(m, nn.Conv3d):
# m.weight = nn.init.kaiming_normal_(m.weight, mode='fan_out')
# elif isinstance(m,nn.Linear):
# m.weight = nn.init.kaiming_normal(m.weight, mode='fan_out')
# elif
if isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, dim_in, dim_out, shortcut_type, stride, num_blocks, dim_inner=None, group=None,
use_temp_convs=None, temp_strides=None, dcn=False):
if use_temp_convs is None:
use_temp_convs = np.zeros(num_blocks).astype(int)
if temp_strides is None:
temp_strides = np.ones(num_blocks).astype(int)
if len(use_temp_convs) < num_blocks:
for _ in range(num_blocks - len(use_temp_convs)):
use_temp_convs.append(0)
temp_strides.append(1)
layers = []
for idx in range(num_blocks):
block_stride = 2 if (idx == 0 and stride == 2) else 1
layers.append(
block(dim_in, dim_out, block_stride, dim_inner, group, use_temp_convs[idx], temp_strides[idx], dcn))
dim_in = dim_out
return nn.Sequential(*layers)
def forward_single(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool1(x)
x = self.layer1(x)
x = self.maxpool2(x)
x = self.layer2(x)
x = self.layer3(x)
features = self.layer4(x)
x = self.avgpool(features)
y = x
# x = x.view(x.size(0), -1)
# x = self.dropout(x)
# y = self.classifier(x)
if self.extract_features:
return y, features
else:
return y
def forward_multi(self, x):
clip_preds = []
# import ipdb;ipdb.set_trace()
for clip_idx in range(x.shape[1]): # B, 10, 3, 3, 32, 224, 224
spatial_crops = []
for crop_idx in range(x.shape[2]):
clip = x[:, clip_idx, crop_idx]
clip = self.forward_single(clip)
spatial_crops.append(clip)
spatial_crops = torch.stack(spatial_crops, 1).mean(1) # (B, 400)
clip_preds.append(spatial_crops)
clip_preds = torch.stack(clip_preds, 1).mean(1) # (B, 400)
return clip_preds
def forward(self, x):
# pdb.set_trace()
# x: BT x 3 x H x W -> B x T x 3 x H x W
# pdb.set_trace()
x = x.view([args.batch_size, args.num_segments, x.shape[-3], x.shape[-2], x.shape[-1]])
x = x.permute([0,2,1,3,4])
# 5D tensor == single clip
if x.dim() == 5:
pred = self.forward_single(x)
# 7D tensor == 3 crops/10 clips
elif x.dim() == 7:
pred = self.forward_multi(x)
# loss_dict = {}
# if 'label' in batch:
# loss = F.cross_entropy(pred, batch['label'], reduction='none')
# loss_dict = {'clf': loss}
return pred
def get_fine_tuning_parameters(model, ft_begin_index):
if ft_begin_index == 0:
return model.parameters()
ft_module_names = []
for i in range(ft_begin_index, 5):
ft_module_names.append('layer{}'.format(i))
ft_module_names.append('fc')
# import ipdb;ipdb.set_trace()
parameters = []
for k, v in model.named_parameters():
for ft_module in ft_module_names:
if ft_module in k:
parameters.append({'params': v})
break
else:
parameters.append({'params': v, 'lr': 0.0})
return parameters
def obtain_arc(arc_type):
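    # use_temp_convs_* encode the temporal kernel per block (kernel size = 1 + 2 * flag, i.e. 0 -> 1, 1 -> 3, 2 -> 5);
    # temp_strides_* give the matching temporal strides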
# c2d, ResNet50
if arc_type == 1:
use_temp_convs_1 = [0]
temp_strides_1 = [2]
use_temp_convs_2 = [0, 0, 0]
temp_strides_2 = [1, 1, 1]
use_temp_convs_3 = [0, 0, 0, 0]
temp_strides_3 = [1, 1, 1, 1]
use_temp_convs_4 = [0, ] * 6
temp_strides_4 = [1, ] * 6
use_temp_convs_5 = [0, 0, 0]
temp_strides_5 = [1, 1, 1]
# i3d, ResNet50
if arc_type == 2:
use_temp_convs_1 = [2]
temp_strides_1 = [1]
use_temp_convs_2 = [1, 1, 1]
temp_strides_2 = [1, 1, 1]
use_temp_convs_3 = [1, 0, 1, 0]
temp_strides_3 = [1, 1, 1, 1]
use_temp_convs_4 = [1, 0, 1, 0, 1, 0]
temp_strides_4 = [1, 1, 1, 1, 1, 1]
use_temp_convs_5 = [0, 1, 0]
temp_strides_5 = [1, 1, 1]
# c2d, ResNet101
if arc_type == 3:
use_temp_convs_1 = [0]
temp_strides_1 = [2]
use_temp_convs_2 = [0, 0, 0]
temp_strides_2 = [1, 1, 1]
use_temp_convs_3 = [0, 0, 0, 0]
temp_strides_3 = [1, 1, 1, 1]
use_temp_convs_4 = [0, ] * 23
temp_strides_4 = [1, ] * 23
use_temp_convs_5 = [0, 0, 0]
temp_strides_5 = [1, 1, 1]
# i3d, ResNet101
if arc_type == 4:
use_temp_convs_1 = [2]
temp_strides_1 = [2]
use_temp_convs_2 = [1, 1, 1]
temp_strides_2 = [1, 1, 1]
use_temp_convs_3 = [1, 0, 1, 0]
temp_strides_3 = [1, 1, 1, 1]
use_temp_convs_4 = []
for i in range(23):
if i % 2 == 0:
use_temp_convs_4.append(1)
else:
use_temp_convs_4.append(0)
temp_strides_4 = [1, ] * 23
use_temp_convs_5 = [0, 1, 0]
temp_strides_5 = [1, 1, 1]
use_temp_convs_set = [use_temp_convs_1, use_temp_convs_2, use_temp_convs_3, use_temp_convs_4, use_temp_convs_5]
temp_strides_set = [temp_strides_1, temp_strides_2, temp_strides_3, temp_strides_4, temp_strides_5]
return use_temp_convs_set, temp_strides_set
def resnet10(**kwargs):
"""Constructs a ResNet-18 model.
"""
use_temp_convs_set = []
temp_strides_set = []
model = ResNet(BasicBlock, [1, 1, 1, 1], use_temp_convs_set, temp_strides_set, **kwargs)
return model
def resnet18(**kwargs):
"""Constructs a ResNet-18 model.
"""
use_temp_convs_set = []
temp_strides_set = []
model = ResNet(BasicBlock, [2, 2, 2, 2], use_temp_convs_set, temp_strides_set, **kwargs)
return model
def resnet34(**kwargs):
"""Constructs a ResNet-34 model.
"""
use_temp_convs_set = []
temp_strides_set = []
model = ResNet(BasicBlock, [3, 4, 6, 3], use_temp_convs_set, temp_strides_set, **kwargs)
return model
def resnet50(extract_features, **kwargs):
"""Constructs a ResNet-50 model.
"""
use_temp_convs_set, temp_strides_set = obtain_arc(2)
model = ResNet(Bottleneck, [3, 4, 6, 3], use_temp_convs_set, temp_strides_set,
extract_features=extract_features, **kwargs)
return model
def resnet101(**kwargs):
"""Constructs a ResNet-101 model.
"""
use_temp_convs_set, temp_strides_set = obtain_arc(4)
model = ResNet(Bottleneck, [3, 4, 23, 3], use_temp_convs_set, temp_strides_set, **kwargs)
return model
def resnet152(**kwargs):
"""Constructs a ResNet-101 model.
"""
use_temp_convs_set = []
temp_strides_set = []
model = ResNet(Bottleneck, [3, 8, 36, 3], use_temp_convs_set, temp_strides_set, **kwargs)
return model
def resnet200(**kwargs):
"""Constructs a ResNet-101 model.
"""
use_temp_convs_set = []
temp_strides_set = []
model = ResNet(Bottleneck, [3, 24, 36, 3], use_temp_convs_set, temp_strides_set, **kwargs)
return model
def Net(num_classes, extract_features=False, loss_type='softmax',
weights=None, freeze_all_but_cls=False):
net = globals()['resnet' + str(50)](
num_classes=num_classes,
sample_size=50,
sample_duration=32,
extract_features=extract_features,
loss_type=loss_type,
)
if weights is not None:
kinetics_weights = torch.load(weights)['state_dict']
print("Found weights in {}.".format(weights))
cls_name = 'fc'
else:
kinetics_weights = torch.load('kinetics-res50.pth')
cls_name = 'fc'
print('\n Restoring Kintetics \n')
new_weights = {}
for k, v in kinetics_weights.items():
if not k.startswith('module.' + cls_name):
new_weights[k.replace('module.', '')] = v
else:
print(f"!!! Smt wrong with restore {k}")
net.load_state_dict(new_weights, strict=False)
if freeze_all_but_cls:
for name, par in net.named_parameters():
if not name.startswith('classifier'):
par.requires_grad = False
return net
|
[
"torch.nn.Dropout",
"torch.nn.BatchNorm3d",
"torch.nn.ReLU",
"torch.nn.AdaptiveAvgPool3d",
"torch.stack",
"torch.nn.Sequential",
"torch.nn.Conv3d",
"math.ceil",
"torch.load",
"numpy.zeros",
"torch.cat",
"numpy.ones",
"torch.nn.functional.avg_pool3d",
"torch.nn.Linear",
"opts.parser.parse_args",
"torch.nn.MaxPool3d"
] |
[((208, 227), 'opts.parser.parse_args', 'parser.parse_args', ([], {}), '()\n', (225, 227), False, 'from opts import parser\n'), ((469, 558), 'torch.nn.Conv3d', 'nn.Conv3d', (['in_planes', 'out_planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)', 'bias': '(False)'}), '(in_planes, out_planes, kernel_size=3, stride=stride, padding=1,\n bias=False)\n', (478, 558), True, 'import torch.nn as nn\n'), ((663, 708), 'torch.nn.functional.avg_pool3d', 'F.avg_pool3d', (['x'], {'kernel_size': '(1)', 'stride': 'stride'}), '(x, kernel_size=1, stride=stride)\n', (675, 708), True, 'import torch.nn.functional as F\n'), ((947, 986), 'torch.cat', 'torch.cat', (['[out.data, zero_pads]'], {'dim': '(1)'}), '([out.data, zero_pads], dim=1)\n', (956, 986), False, 'import torch\n'), ((1242, 1264), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['planes'], {}), '(planes)\n', (1256, 1264), True, 'import torch.nn as nn\n'), ((1285, 1306), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1292, 1306), True, 'import torch.nn as nn\n'), ((1373, 1395), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['planes'], {}), '(planes)\n', (1387, 1395), True, 'import torch.nn as nn\n'), ((2310, 2331), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2317, 2331), True, 'import torch.nn as nn\n'), ((5136, 5308), 'torch.nn.Conv3d', 'nn.Conv3d', (['(3)', '(64)'], {'kernel_size': '(1 + use_temp_convs_set[0][0] * 2, 7, 7)', 'stride': '(temp_strides_set[0][0], 2, 2)', 'padding': '(use_temp_convs_set[0][0], 3, 3)', 'bias': '(False)'}), '(3, 64, kernel_size=(1 + use_temp_convs_set[0][0] * 2, 7, 7),\n stride=(temp_strides_set[0][0], 2, 2), padding=(use_temp_convs_set[0][0\n ], 3, 3), bias=False)\n', (5145, 5308), True, 'import torch.nn as nn\n'), ((5392, 5410), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['(64)'], {}), '(64)\n', (5406, 5410), True, 'import torch.nn as nn\n'), ((5431, 5452), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (5438, 5452), True, 'import torch.nn as nn\n'), ((5477, 5549), 'torch.nn.MaxPool3d', 'nn.MaxPool3d', ([], {'kernel_size': '(1, 3, 3)', 'stride': '(1, 2, 2)', 'padding': '(0, 0, 0)'}), '(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 0, 0))\n', (5489, 5549), True, 'import torch.nn as nn\n'), ((5945, 6017), 'torch.nn.MaxPool3d', 'nn.MaxPool3d', ([], {'kernel_size': '(2, 1, 1)', 'stride': '(2, 1, 1)', 'padding': '(0, 0, 0)'}), '(kernel_size=(2, 1, 1), stride=(2, 1, 1), padding=(0, 0, 0))\n', (5957, 6017), True, 'import torch.nn as nn\n'), ((7563, 7594), 'torch.nn.AdaptiveAvgPool3d', 'nn.AdaptiveAvgPool3d', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (7583, 7594), True, 'import torch.nn as nn\n'), ((7618, 7641), 'torch.nn.Dropout', 'torch.nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (7634, 7641), False, 'import torch\n'), ((7668, 7696), 'torch.nn.Linear', 'nn.Linear', (['(2048)', 'num_classes'], {}), '(2048, num_classes)\n', (7677, 7696), True, 'import torch.nn as nn\n'), ((8990, 9012), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (9003, 9012), True, 'import torch.nn as nn\n'), ((15741, 15773), 'torch.load', 'torch.load', (['"""kinetics-res50.pth"""'], {}), "('kinetics-res50.pth')\n", (15751, 15773), False, 'import torch\n'), ((7294, 7324), 'math.ceil', 'math.ceil', (['(sample_duration / 2)'], {}), '(sample_duration / 2)\n', (7303, 7324), False, 'import math\n'), ((7389, 7416), 'math.ceil', 'math.ceil', (['(sample_size / 16)'], {}), '(sample_size / 16)\n', (7398, 7416), False, 'import 
math\n'), ((15592, 15611), 'torch.load', 'torch.load', (['weights'], {}), '(weights)\n', (15602, 15611), False, 'import torch\n'), ((4126, 4149), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['dim_out'], {}), '(dim_out)\n', (4140, 4149), True, 'import torch.nn as nn\n'), ((4228, 4322), 'torch.nn.Conv3d', 'nn.Conv3d', (['dim_in', 'dim_out'], {'kernel_size': 'kernels', 'stride': 'strides', 'padding': 'pads', 'bias': '(False)'}), '(dim_in, dim_out, kernel_size=kernels, stride=strides, padding=\n pads, bias=False)\n', (4237, 4322), True, 'import torch.nn as nn\n'), ((4335, 4358), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['dim_out'], {}), '(dim_out)\n', (4349, 4358), True, 'import torch.nn as nn\n'), ((10079, 10105), 'torch.stack', 'torch.stack', (['clip_preds', '(1)'], {}), '(clip_preds, 1)\n', (10090, 10105), False, 'import torch\n'), ((8365, 8385), 'numpy.zeros', 'np.zeros', (['num_blocks'], {}), '(num_blocks)\n', (8373, 8385), True, 'import numpy as np\n'), ((8458, 8477), 'numpy.ones', 'np.ones', (['num_blocks'], {}), '(num_blocks)\n', (8465, 8477), True, 'import numpy as np\n'), ((9963, 9992), 'torch.stack', 'torch.stack', (['spatial_crops', '(1)'], {}), '(spatial_crops, 1)\n', (9974, 9992), False, 'import torch\n')]
|
import json
from os.path import join as pjoin
import ctl
import nibabel as nib
import numpy as np
from utils import DATA_DIRS
NUM_VIEWS = 360
SDD = 1000.
SID = 750.
NUM_DET_PIXELS = 1024
DET_PIXEL_DIM = 1.
def create_fdk(filename: str):
nib_volume = nib.load(pjoin(DATA_DIRS['datasets'], filename))
nib_shape = nib_volume.header.get_data_shape()
nib_dims = tuple([float(f) for f in nib_volume.header['pixdim'][1:4]])
nib_volume = nib_volume.get_fdata()
print(nib_dims)
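    # assemble the CT system: flat-panel detector, tubular gantry with the given SDD/SID, and an X-ray tube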
system = ctl.CTSystem()
system.add_component(ctl.FlatPanelDetector(
(NUM_DET_PIXELS, NUM_DET_PIXELS),
(DET_PIXEL_DIM, DET_PIXEL_DIM),
))
system.add_component(ctl.TubularGantry(SDD, SID))
system.add_component(ctl.XrayTube())
setup = ctl.AcquisitionSetup(system, NUM_VIEWS)
setup.apply_preparation_protocol(ctl.protocols.AxialScanTrajectory())
ctl_volume = ctl.VoxelVolumeF.from_numpy(nib_volume.transpose())
ctl_volume.set_voxel_size(nib_dims)
projector = ctl.ocl.RayCasterProjector()
projections = projector.configure_and_project(setup, ctl_volume)
rec = ctl.ocl.FDKReconstructor()
reco = ctl.VoxelVolumeF(nib_shape, nib_dims)
reco.fill(0)
rec.configure_and_reconstruct_to(setup, projections, reco)
img = nib.Nifti1Image(reco, np.eye(4))
nib.save(img, f'fdk{NUM_VIEWS}/{filename}')
def main():
with open('train_valid.json', 'r') as json_file:
json_dict = json.load(json_file)
dataset_files = json_dict['train_files'] \
+ json_dict['valid_files'] \
+ json_dict['test_files']
for filename in dataset_files:
print(filename)
create_fdk(filename)
if __name__ == "__main__":
main()
|
[
"ctl.AcquisitionSetup",
"json.load",
"ctl.VoxelVolumeF",
"ctl.XrayTube",
"ctl.FlatPanelDetector",
"ctl.TubularGantry",
"nibabel.save",
"ctl.CTSystem",
"ctl.ocl.RayCasterProjector",
"ctl.ocl.FDKReconstructor",
"numpy.eye",
"os.path.join",
"ctl.protocols.AxialScanTrajectory"
] |
[((509, 523), 'ctl.CTSystem', 'ctl.CTSystem', ([], {}), '()\n', (521, 523), False, 'import ctl\n'), ((769, 808), 'ctl.AcquisitionSetup', 'ctl.AcquisitionSetup', (['system', 'NUM_VIEWS'], {}), '(system, NUM_VIEWS)\n', (789, 808), False, 'import ctl\n'), ((1010, 1038), 'ctl.ocl.RayCasterProjector', 'ctl.ocl.RayCasterProjector', ([], {}), '()\n', (1036, 1038), False, 'import ctl\n'), ((1119, 1145), 'ctl.ocl.FDKReconstructor', 'ctl.ocl.FDKReconstructor', ([], {}), '()\n', (1143, 1145), False, 'import ctl\n'), ((1157, 1194), 'ctl.VoxelVolumeF', 'ctl.VoxelVolumeF', (['nib_shape', 'nib_dims'], {}), '(nib_shape, nib_dims)\n', (1173, 1194), False, 'import ctl\n'), ((1323, 1366), 'nibabel.save', 'nib.save', (['img', 'f"""fdk{NUM_VIEWS}/{filename}"""'], {}), "(img, f'fdk{NUM_VIEWS}/{filename}')\n", (1331, 1366), True, 'import nibabel as nib\n'), ((269, 307), 'os.path.join', 'pjoin', (["DATA_DIRS['datasets']", 'filename'], {}), "(DATA_DIRS['datasets'], filename)\n", (274, 307), True, 'from os.path import join as pjoin\n'), ((549, 640), 'ctl.FlatPanelDetector', 'ctl.FlatPanelDetector', (['(NUM_DET_PIXELS, NUM_DET_PIXELS)', '(DET_PIXEL_DIM, DET_PIXEL_DIM)'], {}), '((NUM_DET_PIXELS, NUM_DET_PIXELS), (DET_PIXEL_DIM,\n DET_PIXEL_DIM))\n', (570, 640), False, 'import ctl\n'), ((686, 713), 'ctl.TubularGantry', 'ctl.TubularGantry', (['SDD', 'SID'], {}), '(SDD, SID)\n', (703, 713), False, 'import ctl\n'), ((740, 754), 'ctl.XrayTube', 'ctl.XrayTube', ([], {}), '()\n', (752, 754), False, 'import ctl\n'), ((846, 881), 'ctl.protocols.AxialScanTrajectory', 'ctl.protocols.AxialScanTrajectory', ([], {}), '()\n', (879, 881), False, 'import ctl\n'), ((1308, 1317), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1314, 1317), True, 'import numpy as np\n'), ((1454, 1474), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (1463, 1474), False, 'import json\n')]
|