code (string, lengths 31 to 1.05M) | apis (list) | extract_api (string, lengths 97 to 1.91M)
---|---|---
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
===================
prospect.viewer.cds
===================
Class containing all bokeh's ColumnDataSource objects needed in viewer.py
"""
import numpy as np
from pkg_resources import resource_filename
import bokeh.plotting as bk
from bokeh.models import ColumnDataSource
_specutils_imported = True
try:
from specutils import Spectrum1D, SpectrumList
except ImportError:
_specutils_imported = False
from ..coaddcam import coaddcam_prospect
from ..utilities import supported_desitarget_masks, vi_file_fields
def _airtovac(w):
"""Convert air wavelengths to vacuum wavelengths. Don't convert less than 2000 Å.
Parameters
----------
w : :class:`float`
Wavelength [Å] of the line in air.
Returns
-------
:class:`float`
Wavelength [Å] of the line in vacuum.
"""
if w < 2000.0:
return w
vac = w
for iter in range(2):
sigma2 = (1.0e4/vac)*(1.0e4/vac)
fact = 1.0 + 5.792105e-2/(238.0185 - sigma2) + 1.67917e-3/(57.362 - sigma2)
vac = w*fact
return vac
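# Editorial usage sketch (values are approximate and not from the original module):
# the conversion is a no-op below the 2000 Å cutoff and applies two fixed-point
# iterations of the refraction formula above it, e.g.
#   _airtovac(1500.0)  # -> 1500.0 (unchanged, below the cutoff)
#   _airtovac(6562.8)  # -> ~6564.6 (H-alpha shifts by roughly +1.8 Å)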
class ViewerCDS(object):
"""
Encapsulates Bokeh ColumnDataSource objects to be passed to js callback functions.
"""
def __init__(self):
self.cds_spectra = None
self.cds_median_spectra = None
self.cds_coaddcam_spec = None
self.cds_model = None
self.cds_model_2ndfit = None
self.cds_othermodel = None
self.cds_metadata = None
def load_spectra(self, spectra, with_noise=True):
""" Creates column data source for observed spectra """
self.cds_spectra = list()
is_desispec = False
if _specutils_imported and isinstance(spectra, SpectrumList):
s = spectra
bands = spectra.bands
elif _specutils_imported and isinstance(spectra, Spectrum1D):
s = [spectra]
bands = ['coadd']
else : # Assume desispec Spectra obj
is_desispec = True
s = spectra
bands = spectra.bands
for j, band in enumerate(bands):
input_wave = s.wave[band] if is_desispec else s[j].spectral_axis.value
input_nspec = spectra.num_spectra() if is_desispec else s[j].flux.shape[0]
cdsdata = dict(
origwave = input_wave.copy(),
plotwave = input_wave.copy(),
)
for i in range(input_nspec):
key = 'origflux'+str(i)
input_flux = spectra.flux[band][i] if is_desispec else s[j].flux.value[i, :]
cdsdata[key] = input_flux.copy()
if with_noise :
key = 'orignoise'+str(i)
input_ivar = spectra.ivar[band][i] if is_desispec else s[j].uncertainty.array[i, :]
noise = np.zeros(len(input_ivar))
w, = np.where( (input_ivar > 0) )
noise[w] = 1/np.sqrt(input_ivar[w])
cdsdata[key] = noise
cdsdata['plotflux'] = cdsdata['origflux0']
if with_noise :
cdsdata['plotnoise'] = cdsdata['orignoise0']
self.cds_spectra.append( ColumnDataSource(cdsdata, name=band) )
def compute_median_spectra(self, spectra):
""" Stores the median value for each spectrum into CDS.
Simple concatenation of all values from different bands.
"""
cdsdata = dict(median=[])
for i in range(spectra.num_spectra()):
flux_array = np.concatenate( tuple([spectra.flux[band][i] for band in spectra.bands]) )
w, = np.where( ~np.isnan(flux_array) )
if len(w)==0 :
cdsdata['median'].append(1)
else :
cdsdata['median'].append(np.median(flux_array[w]))
self.cds_median_spectra = ColumnDataSource(cdsdata)
def init_coaddcam_spec(self, spectra, with_noise=True):
""" Creates column data source for camera-coadded observed spectra
Do NOT store all coadded spectra in the CDS object, to keep html file sizes down:
only the first spectrum is coadded here; the others are coadded later in javascript
"""
coadd_wave, coadd_flux, coadd_ivar = coaddcam_prospect(spectra)
cds_coaddcam_data = dict(
origwave = coadd_wave.copy(),
plotwave = coadd_wave.copy(),
plotflux = coadd_flux[0,:].copy(),
plotnoise = np.ones(len(coadd_wave))
)
if with_noise :
w, = np.where( (coadd_ivar[0,:] > 0) )
cds_coaddcam_data['plotnoise'][w] = 1/np.sqrt(coadd_ivar[0,:][w])
self.cds_coaddcam_spec = ColumnDataSource(cds_coaddcam_data)
def init_model(self, model, second_fit=False):
""" Creates a CDS for model spectrum """
mwave, mflux = model
cdsdata = dict(
origwave = mwave.copy(),
plotwave = mwave.copy(),
plotflux = np.zeros(len(mwave)),
)
for i in range(len(mflux)):
key = 'origflux'+str(i)
cdsdata[key] = mflux[i]
cdsdata['plotflux'] = cdsdata['origflux0']
if second_fit:
self.cds_model_2ndfit = ColumnDataSource(cdsdata)
else:
self.cds_model = ColumnDataSource(cdsdata)
def init_othermodel(self, zcatalog):
""" Initialize CDS for the 'other model' curve, from the best fit """
self.cds_othermodel = ColumnDataSource({
'plotwave' : self.cds_model.data['plotwave'],
'origwave' : self.cds_model.data['origwave'],
'origflux' : self.cds_model.data['origflux0'],
'plotflux' : self.cds_model.data['origflux0'],
'zref' : zcatalog['Z'][0]+np.zeros(len(self.cds_model.data['origflux0'])) # Track z reference in model
})
def load_metadata(self, spectra, mask_type=None, zcatalog=None, survey='DESI'):
""" Creates column data source for target-related metadata,
from fibermap, zcatalog and VI files
"""
if survey == 'DESI':
nspec = spectra.num_spectra()
# Optional metadata:
fibermap_keys = ['HPXPIXEL', 'MORPHTYPE', 'CAMERA',
'COADD_NUMEXP', 'COADD_EXPTIME',
'COADD_NUMNIGHT', 'COADD_NUMTILE']
# Optional metadata, will check matching FIRST/LAST/NUM keys in fibermap:
special_fm_keys = ['FIBER', 'NIGHT', 'EXPID', 'TILEID']
# Mandatory keys if zcatalog is set:
self.zcat_keys = ['Z', 'SPECTYPE', 'SUBTYPE', 'ZERR', 'ZWARN', 'DELTACHI2']
# Mandatory metadata:
self.phot_bands = ['G','R','Z', 'W1', 'W2']
supported_masks = supported_desitarget_masks
# Galactic extinction coefficients:
# - Wise bands from https://github.com/dstndstn/tractor/blob/master/tractor/sfd.py
# - Other bands from desiutil.dust (updated coefficients Apr 2021,
# matching https://desi.lbl.gov/trac/wiki/ImagingStandardBandpass)
R_extinction = {'W1':0.184, 'W2':0.113, 'W3':0.0241, 'W4':0.00910,
'G_N':3.258, 'R_N':2.176, 'Z_N':1.199,
'G_S':3.212, 'R_S':2.164, 'Z_S':1.211}
elif survey == 'SDSS':
nspec = spectra.flux.shape[0]
# Mandatory keys if zcatalog is set:
self.zcat_keys = ['Z', 'CLASS', 'SUBCLASS', 'Z_ERR', 'ZWARNING', 'RCHI2DIFF']
# Mandatory metadata:
self.phot_bands = ['u', 'g', 'r', 'i', 'z']
supported_masks = ['PRIMTARGET', 'SECTARGET',
'BOSS_TARGET1', 'BOSS_TARGET2',
'ANCILLARY_TARGET1', 'ANCILLARY_TARGET2',
'EBOSS_TARGET0', 'EBOSS_TARGET1', 'EBOSS_TARGET2']
else:
raise ValueError('Wrong survey')
self.cds_metadata = ColumnDataSource()
#- Generic metadata
if survey == 'DESI':
#- Special case for targetids: No int64 in js !!
self.cds_metadata.add([str(x) for x in spectra.fibermap['TARGETID']], name='TARGETID')
#- "Special" keys: check for FIRST/LAST/NUM
for fm_key in special_fm_keys:
use_first_last_num = False
if all([ (x+fm_key in spectra.fibermap.keys()) for x in ['FIRST_','LAST_','NUM_'] ]):
if np.any(spectra.fibermap['NUM_'+fm_key] > 1) : # if NUM==1, use fm_key only
use_first_last_num = True
self.cds_metadata.add(spectra.fibermap['FIRST_'+fm_key], name='FIRST_'+fm_key)
self.cds_metadata.add(spectra.fibermap['LAST_'+fm_key], name='LAST_'+fm_key)
self.cds_metadata.add(spectra.fibermap['NUM_'+fm_key], name='NUM_'+fm_key)
if (not use_first_last_num) and fm_key in spectra.fibermap.keys():
# Do not load placeholder metadata:
if not (np.all(spectra.fibermap[fm_key]==0) or np.all(spectra.fibermap[fm_key]==-1)):
self.cds_metadata.add(spectra.fibermap[fm_key], name=fm_key)
#- "Normal" keys
for fm_key in fibermap_keys:
# Arbitrary choice:
if fm_key == 'COADD_NUMEXP' and 'NUM_EXPID' in self.cds_metadata.data.keys():
continue
if fm_key == 'COADD_NUMNIGHT' and 'NUM_NIGHT' in self.cds_metadata.data.keys():
continue
if fm_key == 'COADD_NUMTILE' and 'NUM_TILEID' in self.cds_metadata.data.keys():
continue
if fm_key in spectra.fibermap.keys():
if not (np.all(spectra.fibermap[fm_key]==0) or np.all(spectra.fibermap[fm_key]==-1)):
self.cds_metadata.add(spectra.fibermap[fm_key], name=fm_key)
elif survey == 'SDSS':
#- Set 'TARGETID' name to OBJID for convenience
self.cds_metadata.add([str(x.tolist()) for x in spectra.meta['plugmap']['OBJID']], name='TARGETID')
#- Photometry
for i, bandname in enumerate(self.phot_bands) :
if survey == 'SDSS':
mag = spectra.meta['plugmap']['MAG'][:, i]
else :
mag = np.zeros(nspec)
flux = spectra.fibermap['FLUX_'+bandname]
extinction = np.ones(len(flux))
if ('MW_TRANSMISSION_'+bandname) in spectra.fibermap.keys():
extinction = spectra.fibermap['MW_TRANSMISSION_'+bandname]
elif ('EBV' in spectra.fibermap.keys()) and (bandname.upper() in ['W1','W2','W3','W4']):
extinction = 10**(- R_extinction[bandname.upper()] * spectra.fibermap['EBV'])
elif all(x in spectra.fibermap.keys() for x in ['EBV','PHOTSYS']) and (bandname.upper() in ['G','R','Z']):
for photsys in ['N', 'S']:
wphot, = np.where(spectra.fibermap['PHOTSYS'] == photsys)
a_band = R_extinction[bandname.upper()+"_"+photsys] * spectra.fibermap['EBV'][wphot]
extinction[wphot] = 10**(-a_band / 2.5)
w, = np.where( (flux>0) & (extinction>0) )
mag[w] = -2.5*np.log10(flux[w]/extinction[w])+22.5
self.cds_metadata.add(mag, name='mag_'+bandname)
#- Targeting masks
if mask_type is not None:
if survey == 'DESI':
if mask_type not in spectra.fibermap.keys():
mask_candidates = [x for x in spectra.fibermap.keys() if '_TARGET' in x]
raise ValueError(mask_type+" is not in spectra.fibermap.\n Hints of available masks: "+(' '.join(mask_candidates)))
mask_used = supported_masks[mask_type]
target_bits = spectra.fibermap[mask_type]
target_info = [ ' '.join(mask_used.names(x)) for x in target_bits ]
elif survey == 'SDSS':
assert mask_type in supported_masks
target_info = [ mask_type + ' (DUMMY)' for x in spectra.meta['plugmap'] ] # placeholder
self.cds_metadata.add(target_info, name='Targeting masks')
#- Software versions
#- TODO : get template version (from zcatalog...)
if survey == 'SDSS':
spec_version = 'SDSS'
else :
spec_version = '0'
for xx,yy in spectra.meta.items() :
if yy=="desispec" : spec_version = spectra.meta[xx.replace('NAM','VER')]
self.cds_metadata.add([spec_version for i in range(nspec)], name='spec_version')
redrock_version = ["-1" for i in range(nspec)]
if zcatalog is not None:
if 'RRVER' in zcatalog.keys(): redrock_version = zcatalog['RRVER'].data
self.cds_metadata.add(redrock_version, name='redrock_version')
self.cds_metadata.add(np.zeros(nspec)-1, name='template_version')
#- Redshift fit
if zcatalog is not None:
for zcat_key in self.zcat_keys:
if 'TYPE' in zcat_key or 'CLASS' in zcat_key:
data = zcatalog[zcat_key].astype('U{0:d}'.format(zcatalog[zcat_key].dtype.itemsize))
else :
data = zcatalog[zcat_key]
self.cds_metadata.add(data, name=zcat_key)
#- VI information
default_vi_info = [ (x[1],x[3]) for x in vi_file_fields if x[0][0:3]=="VI_" ]
for vi_key, vi_value in default_vi_info:
self.cds_metadata.add([vi_value for i in range(nspec)], name=vi_key)
def load_spectral_lines(self, z=0):
line_data = dict(
restwave = [],
plotwave = [],
name = [],
longname = [],
plotname = [],
emission = [],
major = [],
#y = []
)
for line_category in ('emission', 'absorption'):
# encoding=utf-8 is needed to read greek letters
line_array = np.genfromtxt(resource_filename('prospect', "data/{0}_lines.txt".format(line_category)),
delimiter=",",
dtype=[("name", "|U20"),
("longname", "|U20"),
("wavelength", float),
("vacuum", bool),
("major", bool)],
encoding='utf-8')
vacuum_wavelengths = line_array['wavelength']
w, = np.where(line_array['vacuum']==False)
vacuum_wavelengths[w] = np.array([_airtovac(wave) for wave in line_array['wavelength'][w]])
line_data['restwave'].extend(vacuum_wavelengths)
line_data['plotwave'].extend(vacuum_wavelengths * (1+z))
line_data['name'].extend(line_array['name'])
line_data['longname'].extend(line_array['longname'])
line_data['plotname'].extend(line_array['name'])
emission_flag = True if line_category=='emission' else False
line_data['emission'].extend([emission_flag for row in line_array])
line_data['major'].extend(line_array['major'])
self.cds_spectral_lines = ColumnDataSource(line_data)
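# Editorial usage sketch (assumed workflow; the names `spectra`, `zcat` and the
# 'DESI_TARGET' mask are placeholders, not from the original module):
#   cds = ViewerCDS()
#   cds.load_spectra(spectra, with_noise=True)
#   cds.compute_median_spectra(spectra)
#   cds.init_coaddcam_spec(spectra, with_noise=True)
#   cds.load_metadata(spectra, mask_type='DESI_TARGET', zcatalog=zcat, survey='DESI')
#   cds.load_spectral_lines(z=zcat['Z'][0])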
|
[
"numpy.median",
"numpy.sqrt",
"numpy.log10",
"numpy.where",
"numpy.any",
"numpy.zeros",
"bokeh.models.ColumnDataSource",
"numpy.isnan",
"numpy.all"
] |
[((3960, 3985), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['cdsdata'], {}), '(cdsdata)\n', (3976, 3985), False, 'from bokeh.models import ColumnDataSource\n'), ((4801, 4836), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['cds_coaddcam_data'], {}), '(cds_coaddcam_data)\n', (4817, 4836), False, 'from bokeh.models import ColumnDataSource\n'), ((8134, 8152), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', ([], {}), '()\n', (8150, 8152), False, 'from bokeh.models import ColumnDataSource\n'), ((15624, 15651), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['line_data'], {}), '(line_data)\n', (15640, 15651), False, 'from bokeh.models import ColumnDataSource\n'), ((4656, 4686), 'numpy.where', 'np.where', (['(coadd_ivar[0, :] > 0)'], {}), '(coadd_ivar[0, :] > 0)\n', (4664, 4686), True, 'import numpy as np\n'), ((5356, 5381), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['cdsdata'], {}), '(cdsdata)\n', (5372, 5381), False, 'from bokeh.models import ColumnDataSource\n'), ((5425, 5450), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['cdsdata'], {}), '(cdsdata)\n', (5441, 5450), False, 'from bokeh.models import ColumnDataSource\n'), ((14922, 14961), 'numpy.where', 'np.where', (["(line_array['vacuum'] == False)"], {}), "(line_array['vacuum'] == False)\n", (14930, 14961), True, 'import numpy as np\n'), ((3283, 3319), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['cdsdata'], {'name': 'band'}), '(cdsdata, name=band)\n', (3299, 3319), False, 'from bokeh.models import ColumnDataSource\n'), ((4740, 4768), 'numpy.sqrt', 'np.sqrt', (['coadd_ivar[0, :][w]'], {}), '(coadd_ivar[0, :][w])\n', (4747, 4768), True, 'import numpy as np\n'), ((10551, 10566), 'numpy.zeros', 'np.zeros', (['nspec'], {}), '(nspec)\n', (10559, 10566), True, 'import numpy as np\n'), ((11478, 11517), 'numpy.where', 'np.where', (['((flux > 0) & (extinction > 0))'], {}), '((flux > 0) & (extinction > 0))\n', (11486, 11517), True, 'import numpy as np\n'), ((13184, 13199), 'numpy.zeros', 'np.zeros', (['nspec'], {}), '(nspec)\n', (13192, 13199), True, 'import numpy as np\n'), ((2975, 2999), 'numpy.where', 'np.where', (['(input_ivar > 0)'], {}), '(input_ivar > 0)\n', (2983, 2999), True, 'import numpy as np\n'), ((3745, 3765), 'numpy.isnan', 'np.isnan', (['flux_array'], {}), '(flux_array)\n', (3753, 3765), True, 'import numpy as np\n'), ((3899, 3923), 'numpy.median', 'np.median', (['flux_array[w]'], {}), '(flux_array[w])\n', (3908, 3923), True, 'import numpy as np\n'), ((8646, 8691), 'numpy.any', 'np.any', (["(spectra.fibermap['NUM_' + fm_key] > 1)"], {}), "(spectra.fibermap['NUM_' + fm_key] > 1)\n", (8652, 8691), True, 'import numpy as np\n'), ((3037, 3059), 'numpy.sqrt', 'np.sqrt', (['input_ivar[w]'], {}), '(input_ivar[w])\n', (3044, 3059), True, 'import numpy as np\n'), ((11546, 11579), 'numpy.log10', 'np.log10', (['(flux[w] / extinction[w])'], {}), '(flux[w] / extinction[w])\n', (11554, 11579), True, 'import numpy as np\n'), ((9241, 9278), 'numpy.all', 'np.all', (['(spectra.fibermap[fm_key] == 0)'], {}), '(spectra.fibermap[fm_key] == 0)\n', (9247, 9278), True, 'import numpy as np\n'), ((9280, 9318), 'numpy.all', 'np.all', (['(spectra.fibermap[fm_key] == -1)'], {}), '(spectra.fibermap[fm_key] == -1)\n', (9286, 9318), True, 'import numpy as np\n'), ((9965, 10002), 'numpy.all', 'np.all', (['(spectra.fibermap[fm_key] == 0)'], {}), '(spectra.fibermap[fm_key] == 0)\n', (9971, 10002), True, 'import numpy as np\n'), ((10004, 10042), 'numpy.all', 'np.all', (['(spectra.fibermap[fm_key] == -1)'], 
{}), '(spectra.fibermap[fm_key] == -1)\n', (10010, 10042), True, 'import numpy as np\n'), ((11235, 11283), 'numpy.where', 'np.where', (["(spectra.fibermap['PHOTSYS'] == photsys)"], {}), "(spectra.fibermap['PHOTSYS'] == photsys)\n", (11243, 11283), True, 'import numpy as np\n')]
|
import logging
import time
import numpy as np
from eda import ma_data, tx_data
from sir_fitting_us import seir_experiment, make_csv_from_tx_traj
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.info("Fitting model.")
# initial values taken from previous fit, used to seed MH sampler efficiently.
x0 = np.array([ 0.393, -2.586, -3.241, -5.874, -24.999])
# ma_traj = seir_experiment(ma_data, x0, iterations=10000)
tx_traj = seir_experiment(tx_data, x0, iterations=10000)
# mean_ll = np.mean([ll for (x, ll) in ma_traj])
mean_ll = np.mean([ll for (x, ll) in tx_traj])
logger.info("Model fitting finished with mean log-likelihood: {}".format(mean_ll))
if mean_ll < -2000:
raise AssertionError(
"""Mean log-likelihood {} less than threshold of
-2000. This is probably an error.""".format(mean_ll)
)
underscored_time = time.ctime().replace(" ", "_")
fname = "ma_seir_output_{}.csv".format(underscored_time)
make_csv_from_tx_traj(tx_traj, tx_data, fname)
|
[
"logging.getLogger",
"numpy.mean",
"time.ctime",
"sir_fitting_us.make_csv_from_tx_traj",
"numpy.array",
"sir_fitting_us.seir_experiment"
] |
[((157, 184), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (174, 184), False, 'import logging\n'), ((331, 381), 'numpy.array', 'np.array', (['[0.393, -2.586, -3.241, -5.874, -24.999]'], {}), '([0.393, -2.586, -3.241, -5.874, -24.999])\n', (339, 381), True, 'import numpy as np\n'), ((456, 502), 'sir_fitting_us.seir_experiment', 'seir_experiment', (['tx_data', 'x0'], {'iterations': '(10000)'}), '(tx_data, x0, iterations=10000)\n', (471, 502), False, 'from sir_fitting_us import seir_experiment, make_csv_from_tx_traj\n'), ((563, 597), 'numpy.mean', 'np.mean', (['[ll for x, ll in tx_traj]'], {}), '([ll for x, ll in tx_traj])\n', (570, 597), True, 'import numpy as np\n'), ((961, 1007), 'sir_fitting_us.make_csv_from_tx_traj', 'make_csv_from_tx_traj', (['tx_traj', 'tx_data', 'fname'], {}), '(tx_traj, tx_data, fname)\n', (982, 1007), False, 'from sir_fitting_us import seir_experiment, make_csv_from_tx_traj\n'), ((873, 885), 'time.ctime', 'time.ctime', ([], {}), '()\n', (883, 885), False, 'import time\n')]
|
"""
@file
@brief Test for :epkg:`numba`.
"""
import numpy
import numba
@numba.jit(nopython=True, parallel=True)
def logistic_regression(Y, X, w, iterations):
"Fits a logistic regression."
for _ in range(iterations):
w -= numpy.dot(((1.0 / (1.0 + numpy.exp(-Y * numpy.dot(X, w))) - 1.0) * Y), X)
return w
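# Editorial note: the update above is full-batch gradient descent with unit step
# on the logistic loss for labels Y in {-1, +1}:
#   grad = sum_i (sigmoid(y_i * x_i . w) - 1) * y_i * x_i
# numba compiles the loop in nopython mode, so it runs without interpreter overhead.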
def check_numba():
"""
Runs a sample with :epkg:`numba`.
"""
Y = numpy.random.rand(10).astype(numpy.double)
X = numpy.random.rand(10, 2).astype(numpy.double)
w = numpy.random.rand(2).astype(numpy.double)
return logistic_regression(Y, X, w, 2)
|
[
"numpy.dot",
"numba.jit",
"numpy.random.rand"
] |
[((76, 115), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)', 'parallel': '(True)'}), '(nopython=True, parallel=True)\n', (85, 115), False, 'import numba\n'), ((411, 432), 'numpy.random.rand', 'numpy.random.rand', (['(10)'], {}), '(10)\n', (428, 432), False, 'import numpy\n'), ((462, 486), 'numpy.random.rand', 'numpy.random.rand', (['(10)', '(2)'], {}), '(10, 2)\n', (479, 486), False, 'import numpy\n'), ((516, 536), 'numpy.random.rand', 'numpy.random.rand', (['(2)'], {}), '(2)\n', (533, 536), False, 'import numpy\n'), ((281, 296), 'numpy.dot', 'numpy.dot', (['X', 'w'], {}), '(X, w)\n', (290, 296), False, 'import numpy\n')]
|
import numpy as np
from plots import plots_for_predictions as pp
from utilss import distinct_colours as dc
import matplotlib.pyplot as plt
c = dc.get_distinct(4)
path = '/Users/luisals/Documents/deep_halos_files/mass_range_13.4/random_20sims_200k/lr5e-5/'
p1 = np.load(path + "seed_20/predicted_sim_6_epoch_09.npy")
t1 = np.load(path + "seed_20/true_sim_6_epoch_09.npy")
p_big = np.load("/Users/luisals/Projects/DLhalos/bigbox/raw/predicted_sim_L200_N1024_genetIC3_epoch_10.npy")
t_big = np.load("/Users/luisals/Projects/DLhalos/bigbox/raw/true_sim_L200_N1024_genetIC3_epoch_10.npy")
path_av = "/Users/luisals/Documents/deep_halos_files/mass_range_13.4/random_20sims_200k/averaged_boxes/log_alpha_-4.3/"
p_av = np.load(path_av + "predicted_sim_6_epoch_32.npy")
t_av = np.load(path_av + "true_sim_6_epoch_32.npy")
p_av_big = np.load("/Users/luisals/Projects/DLhalos/bigbox/avg/predicted_sim_L200_N1024_genetIC3_epoch_18.npy")
t_av_big = np.load("/Users/luisals/Projects/DLhalos/bigbox/avg/true_sim_L200_N1024_genetIC3_epoch_18.npy")
# Raw-density case
f1, a, m = pp.plot_histogram_predictions(p1, t1, radius_bins=False, particle_ids=None, errorbars=False,
label=r"$L_\mathrm{box}=50 \, \mathrm{Mpc} \,/ \,h$", color="C0")
f11, a1, m1 = pp.plot_histogram_predictions(p_big, t_big, radius_bins=False, particle_ids=None, errorbars=False, fig=f1,
axes=a, color="C1", label=r"$L_\mathrm{box}=200 \, \mathrm{Mpc} \,/ \,h$")
a1[0].set_ylabel(r"$n_{\mathrm{particles}}$", fontsize=16)
[a.set_xlabel(r"$\log(M_{\mathrm{predicted}}/M_{\mathrm{true}})$", fontsize=16) for a in a1]
plt.savefig("/Users/lls/Documents/Papers/dlhalos_paper/small_vs_large_box.pdf")
# Averaged-density case
f1, a, m = pp.plot_histogram_predictions(p_av, t_av, radius_bins=False, particle_ids=None, errorbars=False,
label=r"$L_\mathrm{box}=50 \, \mathrm{Mpc} \,/ \,h$", color="C0")
f11, a1, m1 = pp.plot_histogram_predictions(p_av_big, t_av_big, radius_bins=False, particle_ids=None, errorbars=False, fig=f1,
axes=a, color="C1", label=r"$L_\mathrm{box}=200 \, \mathrm{Mpc} \,/ \,h$")
a1[0].set_ylabel(r"$n_{\mathrm{particles}}$", fontsize=16)
[a.set_xlabel(r"$\log(M_{\mathrm{predicted}}/M_{\mathrm{true}})$", fontsize=16) for a in a1]
plt.savefig("/Users/luisals/Documents/Papers/dlhalos_paper/averaged_small_vs_large_box.pdf")
# Raw vs averaged density case
f1, a, m = pp.plot_histogram_predictions(p_big, t_big, radius_bins=False, particle_ids=None, errorbars=False,
label="Raw density", color="C0")
f11, a1, m1 = pp.plot_histogram_predictions(p_av_big, t_av_big, radius_bins=False, particle_ids=None, errorbars=False, fig=f1,
axes=a, color="C1", label="Averaged density")
a1[0].set_ylabel(r"$n_{\mathrm{particles}}$", fontsize=16)
[a.set_xlabel(r"$\log(M_{\mathrm{predicted}}/M_{\mathrm{true}})$", fontsize=16) for a in a1]
plt.savefig("/Users/luisals/Documents/Papers/dlhalos_paper/raw_vs_averaged_large_box.pdf")
|
[
"plots.plots_for_predictions.plot_histogram_predictions",
"numpy.load",
"matplotlib.pyplot.savefig",
"utilss.distinct_colours.get_distinct"
] |
[((144, 162), 'utilss.distinct_colours.get_distinct', 'dc.get_distinct', (['(4)'], {}), '(4)\n', (159, 162), True, 'from utilss import distinct_colours as dc\n'), ((263, 317), 'numpy.load', 'np.load', (["(path + 'seed_20/predicted_sim_6_epoch_09.npy')"], {}), "(path + 'seed_20/predicted_sim_6_epoch_09.npy')\n", (270, 317), True, 'import numpy as np\n'), ((323, 372), 'numpy.load', 'np.load', (["(path + 'seed_20/true_sim_6_epoch_09.npy')"], {}), "(path + 'seed_20/true_sim_6_epoch_09.npy')\n", (330, 372), True, 'import numpy as np\n'), ((382, 492), 'numpy.load', 'np.load', (['"""/Users/luisals/Projects/DLhalos/bigbox/raw/predicted_sim_L200_N1024_genetIC3_epoch_10.npy"""'], {}), "(\n '/Users/luisals/Projects/DLhalos/bigbox/raw/predicted_sim_L200_N1024_genetIC3_epoch_10.npy'\n )\n", (389, 492), True, 'import numpy as np\n'), ((491, 596), 'numpy.load', 'np.load', (['"""/Users/luisals/Projects/DLhalos/bigbox/raw/true_sim_L200_N1024_genetIC3_epoch_10.npy"""'], {}), "(\n '/Users/luisals/Projects/DLhalos/bigbox/raw/true_sim_L200_N1024_genetIC3_epoch_10.npy'\n )\n", (498, 596), True, 'import numpy as np\n'), ((716, 765), 'numpy.load', 'np.load', (["(path_av + 'predicted_sim_6_epoch_32.npy')"], {}), "(path_av + 'predicted_sim_6_epoch_32.npy')\n", (723, 765), True, 'import numpy as np\n'), ((773, 817), 'numpy.load', 'np.load', (["(path_av + 'true_sim_6_epoch_32.npy')"], {}), "(path_av + 'true_sim_6_epoch_32.npy')\n", (780, 817), True, 'import numpy as np\n'), ((830, 940), 'numpy.load', 'np.load', (['"""/Users/luisals/Projects/DLhalos/bigbox/avg/predicted_sim_L200_N1024_genetIC3_epoch_18.npy"""'], {}), "(\n '/Users/luisals/Projects/DLhalos/bigbox/avg/predicted_sim_L200_N1024_genetIC3_epoch_18.npy'\n )\n", (837, 940), True, 'import numpy as np\n'), ((942, 1047), 'numpy.load', 'np.load', (['"""/Users/luisals/Projects/DLhalos/bigbox/avg/true_sim_L200_N1024_genetIC3_epoch_18.npy"""'], {}), "(\n '/Users/luisals/Projects/DLhalos/bigbox/avg/true_sim_L200_N1024_genetIC3_epoch_18.npy'\n )\n", (949, 1047), True, 'import numpy as np\n'), ((1069, 1240), 'plots.plots_for_predictions.plot_histogram_predictions', 'pp.plot_histogram_predictions', (['p1', 't1'], {'radius_bins': '(False)', 'particle_ids': 'None', 'errorbars': '(False)', 'label': '"""$L_\\\\mathrm{box}=50 \\\\, \\\\mathrm{Mpc} \\\\,/ \\\\,h$"""', 'color': '"""C0"""'}), "(p1, t1, radius_bins=False, particle_ids=None,\n errorbars=False, label=\n '$L_\\\\mathrm{box}=50 \\\\, \\\\mathrm{Mpc} \\\\,/ \\\\,h$', color='C0')\n", (1098, 1240), True, 'from plots import plots_for_predictions as pp\n'), ((1283, 1478), 'plots.plots_for_predictions.plot_histogram_predictions', 'pp.plot_histogram_predictions', (['p_big', 't_big'], {'radius_bins': '(False)', 'particle_ids': 'None', 'errorbars': '(False)', 'fig': 'f1', 'axes': 'a', 'color': '"""C1"""', 'label': '"""$L_\\\\mathrm{box}=200 \\\\, \\\\mathrm{Mpc} \\\\,/ \\\\,h$"""'}), "(p_big, t_big, radius_bins=False, particle_ids\n =None, errorbars=False, fig=f1, axes=a, color='C1', label=\n '$L_\\\\mathrm{box}=200 \\\\, \\\\mathrm{Mpc} \\\\,/ \\\\,h$')\n", (1312, 1478), True, 'from plots import plots_for_predictions as pp\n'), ((1661, 1740), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""/Users/lls/Documents/Papers/dlhalos_paper/small_vs_large_box.pdf"""'], {}), "('/Users/lls/Documents/Papers/dlhalos_paper/small_vs_large_box.pdf')\n", (1672, 1740), True, 'import matplotlib.pyplot as plt\n'), ((1779, 1955), 'plots.plots_for_predictions.plot_histogram_predictions', 'pp.plot_histogram_predictions', (['p_av', 't_av'], 
{'radius_bins': '(False)', 'particle_ids': 'None', 'errorbars': '(False)', 'label': '"""$L_\\\\mathrm{box}=50 \\\\, \\\\mathrm{Mpc} \\\\,/ \\\\,h$"""', 'color': '"""C0"""'}), "(p_av, t_av, radius_bins=False, particle_ids=\n None, errorbars=False, label=\n '$L_\\\\mathrm{box}=50 \\\\, \\\\mathrm{Mpc} \\\\,/ \\\\,h$', color='C0')\n", (1808, 1955), True, 'from plots import plots_for_predictions as pp\n'), ((1997, 2197), 'plots.plots_for_predictions.plot_histogram_predictions', 'pp.plot_histogram_predictions', (['p_av_big', 't_av_big'], {'radius_bins': '(False)', 'particle_ids': 'None', 'errorbars': '(False)', 'fig': 'f1', 'axes': 'a', 'color': '"""C1"""', 'label': '"""$L_\\\\mathrm{box}=200 \\\\, \\\\mathrm{Mpc} \\\\,/ \\\\,h$"""'}), "(p_av_big, t_av_big, radius_bins=False,\n particle_ids=None, errorbars=False, fig=f1, axes=a, color='C1', label=\n '$L_\\\\mathrm{box}=200 \\\\, \\\\mathrm{Mpc} \\\\,/ \\\\,h$')\n", (2026, 2197), True, 'from plots import plots_for_predictions as pp\n'), ((2381, 2483), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""/Users/luisals/Documents/Papers/dlhalos_paper/averaged_small_vs_large_box.pdf"""'], {}), "(\n '/Users/luisals/Documents/Papers/dlhalos_paper/averaged_small_vs_large_box.pdf'\n )\n", (2392, 2483), True, 'import matplotlib.pyplot as plt\n'), ((2511, 2647), 'plots.plots_for_predictions.plot_histogram_predictions', 'pp.plot_histogram_predictions', (['p_big', 't_big'], {'radius_bins': '(False)', 'particle_ids': 'None', 'errorbars': '(False)', 'label': '"""Raw density"""', 'color': '"""C0"""'}), "(p_big, t_big, radius_bins=False, particle_ids\n =None, errorbars=False, label='Raw density', color='C0')\n", (2540, 2647), True, 'from plots import plots_for_predictions as pp\n'), ((2698, 2865), 'plots.plots_for_predictions.plot_histogram_predictions', 'pp.plot_histogram_predictions', (['p_av_big', 't_av_big'], {'radius_bins': '(False)', 'particle_ids': 'None', 'errorbars': '(False)', 'fig': 'f1', 'axes': 'a', 'color': '"""C1"""', 'label': '"""Averaged density"""'}), "(p_av_big, t_av_big, radius_bins=False,\n particle_ids=None, errorbars=False, fig=f1, axes=a, color='C1', label=\n 'Averaged density')\n", (2727, 2865), True, 'from plots import plots_for_predictions as pp\n'), ((3053, 3153), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""/Users/luisals/Documents/Papers/dlhalos_paper/raw_vs_averaged_large_box.pdf"""'], {}), "(\n '/Users/luisals/Documents/Papers/dlhalos_paper/raw_vs_averaged_large_box.pdf'\n )\n", (3064, 3153), True, 'import matplotlib.pyplot as plt\n')]
|
"""
---OK---
"""
from collections import OrderedDict
import copy
import numpy as np
from crystalpy.examples.Values import Interval
class PlotData1D(object):
"""
Represents a 1D plot. The graph data together with related information.
"""
def __init__(self, title, title_x_axis, title_y_axis):
"""
Constructor.
:param title: Plot title.
:param title_x_axis: X axis' title.
:param title_y_axis: Y axis' title.
"""
# Set titles.
self.title = title
self.title_x_axis = title_x_axis
self.title_y_axis = title_y_axis
# Initialize X and Y ranges.
self.x_min = None
self.x_max = None
self.y_min = None
self.y_max = None
# Initialize X and Y data.
self.x = None
self.y = None
# Initialize plot information to empty ordered dictionary.
self._plot_info = OrderedDict()
def set_x_min(self, x_min):
"""
Sets x range minimum.
:param x_min: X range minimum.
"""
self.x_min = x_min
def set_x_max(self, x_max):
"""
Sets X range maximum.
:param x_max: X range maximum.
"""
self.x_max = x_max
def set_y_min(self, y_min):
"""
Sets Y range minimum.
:param y_min: Y range minimum.
"""
self.y_min = y_min
def set_y_max(self, y_max):
"""
Sets Y range maximum.
:param y_max: Y range maximum.
"""
self.y_max = y_max
def set_x(self, x):
"""
Sets X data.
:param x: x data.
"""
self.x = x
def set_y(self, y):
"""
Sets Y data.
:param y: y data.
"""
self.y = y
def _set_interval_to_zero(self, indices, lower=True, upper=True):
"""
Sets the y's to zero in certain intervals of x's (extrema included).
:param indices: pair with the two extrema of the x interval.
:param lower: if True include the lower end of the interval.
:param upper: if True include the upper end of the interval.
"""
try:
inf_index = indices.inf
sup_index = indices.sup
# adjust the indices according to the lower and upper parameters.
if not lower:
inf_index += 1
if not upper:
sup_index -= 1
# in the index range defined by inf_index and sup_index, set the y's to zero.
for i in range(inf_index, sup_index + 1):
self.y[i] = 0
except TypeError:
print("\nERROR: could not set the values to zero in the specified intervals.\n")
def _unwrap_interval(self, indices, deg, lower=True, upper=True):
"""
Unwraps the y data vector in a certain interval.
:param indices: indices determining the interval to unwrap.
:param deg: True if values are in degrees. False if radians.
:param lower: if True include the lower end of the interval.
:param upper: if True include the upper end of the interval.
"""
inf_index = indices.inf
sup_index = indices.sup
# adjust the indices according to the lower and upper parameters.
if not lower:
inf_index += 1
if not upper:
sup_index -= 1
# numpy.unwrap works on data in radians, so if the data is in degrees, it needs to be converted.
if deg:
self.y = np.deg2rad(self.y)
# cut out the part to unwrap and then stitch it back on.
temp = self.y[inf_index:sup_index + 1]
self.y[inf_index:sup_index + 1] = np.unwrap(temp)
# convert back to degrees.
self.y = np.rad2deg(self.y)
return
# cut out the part to unwrap and then stitch it back on.
temp = self.y[inf_index:sup_index + 1]
self.y[inf_index:sup_index + 1] = np.unwrap(temp)
def _optimize_interval(self, indices, phase_limits):
"""
Takes an interval and restricts it so that the extrema match the points where the phase
becomes bigger(smaller) than some upper(lower) limit.
:param indices: indices corresponding to the interval to be optimized.
:param phase_limits: the limits of the phase to be used for the optimization, [min, max].
:return: indices of the optimized interval.
"""
inf = indices.inf
sup = indices.sup
# check the intervals.
if (self.y[inf] > phase_limits[1] or
self.y[inf] < phase_limits[0]):
print("\nERROR in PlotData1D._optimize_interval: First value in the interval exceeds limitations.")
return indices
if (self.y[sup] > phase_limits[1] or
self.y[sup] < phase_limits[0]):
print("\nERROR in PlotData1D._optimize_interval: Last value in the interval exceeds limitations.")
return indices
# starting from the lower end.
i = inf # counter initialization.
while phase_limits[0] < self.y[i] < phase_limits[1]:
i += 1
# if the conditions are not satisfied for index i:
new_inf = i - 1
# starting from the upper end.
i = sup # counter initialization.
while phase_limits[0] < self.y[i] < phase_limits[1]:
i -= 1
# if the conditions are not satisfied for index i:
new_sup = i + 1
new_indices = Interval(new_inf, new_sup)
# check that the inf is smaller than (or equal to) the sup.
if not new_indices.check_extrema():
print("\nERROR in PlotData1D._optimize_interval: The phase might be undersampled.")
return indices
return new_indices
def smart_unwrap(self, intervals, intervals_number, phase_limits, deg):
"""
Unwraps data correctly by avoiding discontinuities.
:param intervals: list of pairs. Each element is a pair with the two extrema of the x interval.
:param phase_limits: min and max tolerable values for the phase plot, [min, max].
:param intervals_number: number of intervals to set to zero.
:param deg: True if values are in degrees. False if radians.
"""
if intervals_number == 0:
if deg:
self.y = np.deg2rad(self.y) # unwrap works with radians.
self.y = np.unwrap(self.y)
self.y = np.rad2deg(self.y) # convert back to degrees.
return
self.y = np.unwrap(self.y)
return
# transform self.x into a numpy.ndarray object.
x = np.asarray(self.x)
# careful! only works with monotonic sequences.
temp_index = x.argmin()
for interval in intervals:
inf = interval.inf
sup = interval.sup
# find the indices of the y array corresponding to inf and sup.
inf_index = abs(x - inf).argmin()
sup_index = abs(x - sup).argmin()
# optimize the interval.
indices = Interval(inf_index, sup_index)
new_indices = self._optimize_interval(indices, phase_limits)
# unwrap the data before the interval.
indices_to_unwrap = Interval(temp_index, new_indices.inf)
self._unwrap_interval(indices_to_unwrap, deg, lower=True, upper=False)
# set the interval to zero.
indices_to_set = new_indices
self._set_interval_to_zero(indices_to_set, lower=True, upper=False)
temp_index = new_indices.sup
# careful! only works with monotonic sequences.
indices_to_unwrap = Interval(temp_index, x.argmax())
self._unwrap_interval(indices_to_unwrap, deg, lower=True, upper=True)
def add_xy_point(self, x_point, y_point):
"""
Adds an x-y point.
:param x_point: x coordinate.
:param y_point: y coordinate.
"""
self.x.append(x_point)
self.y.append(y_point)
def add_plot_info(self, name, info):
"""
Adds a plot info.
:param name: Name of the info.
:param info: The info.
"""
self._plot_info[name] = info
def plot_info(self):
"""
Returns the plot info copy.
:return: The plot info.
"""
return copy.deepcopy(self._plot_info)
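# Editorial usage sketch (energies, wrapped_phase_deg, e_min and e_max are
# hypothetical arrays/values, not from the original module):
#   plot = PlotData1D("phase", "energy [eV]", "phase [deg]")
#   plot.set_x(energies)                # must be monotonic for smart_unwrap
#   plot.set_y(wrapped_phase_deg)
#   plot.smart_unwrap(intervals=[Interval(e_min, e_max)], intervals_number=1,
#                     phase_limits=[-180, 180], deg=True)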
|
[
"collections.OrderedDict",
"numpy.unwrap",
"numpy.asarray",
"numpy.deg2rad",
"copy.deepcopy",
"crystalpy.examples.Values.Interval",
"numpy.rad2deg"
] |
[((926, 939), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (937, 939), False, 'from collections import OrderedDict\n'), ((3999, 4014), 'numpy.unwrap', 'np.unwrap', (['temp'], {}), '(temp)\n', (4008, 4014), True, 'import numpy as np\n'), ((5549, 5575), 'crystalpy.examples.Values.Interval', 'Interval', (['new_inf', 'new_sup'], {}), '(new_inf, new_sup)\n', (5557, 5575), False, 'from crystalpy.examples.Values import Interval\n'), ((6727, 6745), 'numpy.asarray', 'np.asarray', (['self.x'], {}), '(self.x)\n', (6737, 6745), True, 'import numpy as np\n'), ((8455, 8485), 'copy.deepcopy', 'copy.deepcopy', (['self._plot_info'], {}), '(self._plot_info)\n', (8468, 8485), False, 'import copy\n'), ((3543, 3561), 'numpy.deg2rad', 'np.deg2rad', (['self.y'], {}), '(self.y)\n', (3553, 3561), True, 'import numpy as np\n'), ((3729, 3744), 'numpy.unwrap', 'np.unwrap', (['temp'], {}), '(temp)\n', (3738, 3744), True, 'import numpy as np\n'), ((3806, 3824), 'numpy.rad2deg', 'np.rad2deg', (['self.y'], {}), '(self.y)\n', (3816, 3824), True, 'import numpy as np\n'), ((6621, 6638), 'numpy.unwrap', 'np.unwrap', (['self.y'], {}), '(self.y)\n', (6630, 6638), True, 'import numpy as np\n'), ((7162, 7192), 'crystalpy.examples.Values.Interval', 'Interval', (['inf_index', 'sup_index'], {}), '(inf_index, sup_index)\n', (7170, 7192), False, 'from crystalpy.examples.Values import Interval\n'), ((7350, 7387), 'crystalpy.examples.Values.Interval', 'Interval', (['temp_index', 'new_indices.inf'], {}), '(temp_index, new_indices.inf)\n', (7358, 7387), False, 'from crystalpy.examples.Values import Interval\n'), ((6412, 6430), 'numpy.deg2rad', 'np.deg2rad', (['self.y'], {}), '(self.y)\n', (6422, 6430), True, 'import numpy as np\n'), ((6486, 6503), 'numpy.unwrap', 'np.unwrap', (['self.y'], {}), '(self.y)\n', (6495, 6503), True, 'import numpy as np\n'), ((6529, 6547), 'numpy.rad2deg', 'np.rad2deg', (['self.y'], {}), '(self.y)\n', (6539, 6547), True, 'import numpy as np\n')]
|
from hytra.pluginsystem import transition_feature_vector_construction_plugin
import numpy as np
from compiler.ast import flatten
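# Editorial note: the compiler.ast module only exists on Python 2; a Python 3 port
# would need a local replacement such as (an assumption, not part of the original
# plugin; a shallow flatten is enough for the 1-D/2-D feature arrays used below):
#   def flatten(nested):
#       return [x for sub in nested for x in (sub if isinstance(sub, list) else [sub])]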
class TransitionFeaturesSubtraction(
transition_feature_vector_construction_plugin.TransitionFeatureVectorConstructionPlugin
):
"""
Computes the subtraction of features in the feature vector
"""
def constructFeatureVector(
self, featureDictObjectA, featureDictObjectB, selectedFeatures
):
assert "Global<Maximum >" not in selectedFeatures
assert "Global<Minimum >" not in selectedFeatures
assert "Histogram" not in selectedFeatures
assert "Polygon" not in selectedFeatures
features = []
for key in selectedFeatures:
if key == "RegionCenter":
continue
else:
if (
not isinstance(featureDictObjectA[key], np.ndarray)
or featureDictObjectA[key].size == 1
):
features.append(
float(featureDictObjectA[key]) - float(featureDictObjectB[key])
)
else:
features.extend(
flatten(
(
featureDictObjectA[key].astype("float32")
- featureDictObjectB[key].astype("float32")
).tolist()
)
)
# there should be no nans or infs
assert np.all(np.isfinite(np.array(features)))
return features
def getFeatureNames(self, featureDictObjectA, featureDictObjectB, selectedFeatures):
assert "Global<Maximum >" not in selectedFeatures
assert "Global<Minimum >" not in selectedFeatures
assert "Histogram" not in selectedFeatures
assert "Polygon" not in selectedFeatures
featuresNames = []
for key in selectedFeatures:
if key == "RegionCenter":
continue
else:
if (
not isinstance(featureDictObjectA[key], np.ndarray)
or featureDictObjectA[key].size == 1
):
featuresNames.append("A[{key}]-B[{key}]".format(key=key))
else:
featuresNames.extend(
[
"A[{key}][{i}]-B[{key}][{i}]".format(key=key, i=i)
for i in range(
len(
(
featureDictObjectA[key]
- featureDictObjectB[key]
).tolist()
)
)
]
)
return featuresNames
|
[
"numpy.array"
] |
[((1564, 1582), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (1572, 1582), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021. Distributed under the terms of the MIT License.
from phonopy.interface.calculator import read_crystal_structure
from phonopy.structure.atoms import PhonopyAtoms
from vise.util.phonopy.phonopy_input import structure_to_phonopy_atoms
import numpy as np
def assert_same_phonopy_atoms(actual: PhonopyAtoms,
expected: PhonopyAtoms):
assert (actual.get_cell() == expected.get_cell()).all()
assert (actual.get_scaled_positions()
== expected.get_scaled_positions()).all()
assert actual.symbols == expected.symbols
def test_phonopy_atoms_behavior(sc_structure, tmpdir):
print(tmpdir)
tmpdir.chdir()
# actual = structure_to_phonopy_atoms(sc_structure)
sc_structure.to(fmt="poscar", filename="POSCAR")
a, _ = read_crystal_structure("POSCAR")
b = PhonopyAtoms(atoms=a)
print(type(a.get_cell()))
print(a.get_atomic_numbers())
assert_same_phonopy_atoms(a, b)
def test_structure_to_phonopy_atoms(sc_structure):
actual = structure_to_phonopy_atoms(sc_structure)
expected = PhonopyAtoms(symbols=["H"],
cell=np.array([[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]]),
scaled_positions=np.array([[0.0, 0.0, 0.0]]))
assert_same_phonopy_atoms(actual, expected)
#
# def test_make_phonopy_input(mc_structure, mc_structure_conv):
# actual = make_phonopy_input(unitcell=mc_structure,
# supercell_matrix=np.eye(3).tolist(),
# conventional_base=True)
# supercell_matrix = [[ 1., 1., 0.],
# [-1., 1., 0.],
# [ 0., 0., 1.]]
# supercell = mc_structure * supercell_matrix
# expected = PhonopyInput(unitcell=mc_structure,
# supercell=supercell,
# supercell_matrix=supercell_matrix)
# assert actual == expected
#
#
# def test_make_phonopy_input_default(mc_structure, mc_structure_conv):
# actual = make_phonopy_input(unitcell=mc_structure)
# supercell_matrix = [[ 2., 2., 0.],
# [-2., 2., 0.],
# [ 0., 0., 2.]]
# supercell = mc_structure * supercell_matrix
# expected = PhonopyInput(unitcell=mc_structure,
# supercell=supercell,
# supercell_matrix=supercell_matrix)
# assert actual == expected
#
#
# def test_make_phonopy_input_default_hexa():
# structure = Structure(Lattice.hexagonal(1.0, 2.0), species=["H"],
# coords=[[0.0]*3])
# actual = make_phonopy_input(unitcell=structure)
# supercell_matrix = [[2, -1, 0], [2, 1, 0], [0, 0, 2]]
# supercell = structure * supercell_matrix
# expected = PhonopyInput(unitcell=structure,
# supercell=supercell,
# supercell_matrix=supercell_matrix)
# assert actual == expected
|
[
"numpy.array",
"phonopy.interface.calculator.read_crystal_structure",
"phonopy.structure.atoms.PhonopyAtoms",
"vise.util.phonopy.phonopy_input.structure_to_phonopy_atoms"
] |
[((822, 854), 'phonopy.interface.calculator.read_crystal_structure', 'read_crystal_structure', (['"""POSCAR"""'], {}), "('POSCAR')\n", (844, 854), False, 'from phonopy.interface.calculator import read_crystal_structure\n'), ((863, 884), 'phonopy.structure.atoms.PhonopyAtoms', 'PhonopyAtoms', ([], {'atoms': 'a'}), '(atoms=a)\n', (875, 884), False, 'from phonopy.structure.atoms import PhonopyAtoms\n'), ((1051, 1091), 'vise.util.phonopy.phonopy_input.structure_to_phonopy_atoms', 'structure_to_phonopy_atoms', (['sc_structure'], {}), '(sc_structure)\n', (1077, 1091), False, 'from vise.util.phonopy.phonopy_input import structure_to_phonopy_atoms\n'), ((1168, 1229), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])\n', (1176, 1229), True, 'import numpy as np\n'), ((1362, 1389), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0]])\n', (1370, 1389), True, 'import numpy as np\n')]
|
import numpy as np
import math
import logging
from termcolor import colored
# Check a matrix for: negative eigenvalues, asymmetry and negative diagonal values
def positive_definite(M,epsilon = 0.000001,verbose=False):
# Symmetrization
Mt = np.transpose(M)
M = (M + Mt)/2
eigenvalues = np.linalg.eigvals(M)
for i in range(len(eigenvalues)):
if eigenvalues[i] <= epsilon:
if verbose:
logging.error("Negative eigenvalues")
return 0
for i in range(M.shape[0]):
if M[i][i] < 0:
if verbose:
logging.error("Negative value in diagonal")
return 0
return 1
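# Editorial usage sketch (hypothetical matrices, not part of the original module):
#   A = np.array([[2.0, -1.0], [-1.0, 2.0]])  # eigenvalues 1 and 3  -> returns 1
#   B = np.array([[1.0, 2.0], [2.0, 1.0]])    # eigenvalues 3 and -1 -> returns 0
#   positive_definite(A), positive_definite(B)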
|
[
"numpy.linalg.eigvals",
"numpy.transpose",
"logging.error"
] |
[((249, 264), 'numpy.transpose', 'np.transpose', (['M'], {}), '(M)\n', (261, 264), True, 'import numpy as np\n'), ((302, 322), 'numpy.linalg.eigvals', 'np.linalg.eigvals', (['M'], {}), '(M)\n', (319, 322), True, 'import numpy as np\n'), ((439, 476), 'logging.error', 'logging.error', (['"""Negative eigenvalues"""'], {}), "('Negative eigenvalues')\n", (452, 476), False, 'import logging\n'), ((594, 637), 'logging.error', 'logging.error', (['"""Negative value in diagonal"""'], {}), "('Negative value in diagonal')\n", (607, 637), False, 'import logging\n')]
|
import warnings
from typing import Callable, List, Optional, Union
import mpmath
import numpy as np
import paramak
import sympy as sp
from paramak import RotateMixedShape, diff_between_angles
from paramak.parametric_components.tokamak_plasma_plasmaboundaries import \
PlasmaBoundaries
from scipy.interpolate import interp1d
class BlanketFP(RotateMixedShape):
"""A blanket volume created from plasma parameters.
Args:
thickness (float or [float] or callable or [(float), (float)]):
the thickness of the blanket (cm). If the thickness is a float then
this produces a blanket of constant thickness. If the thickness is
a tuple of floats, blanket thickness will vary linearly between the
two values. If thickness is callable, then the blanket thickness
will be a function of poloidal angle (in degrees). If thickness is
a list of two lists (thicknesses and angles) then these will be
used together with linear interpolation.
start_angle: the angle in degrees to start the blanket, measured anti
clockwise from 3 o'clock.
stop_angle: the angle in degrees to stop the blanket, measured anti
clockwise from 3 o'clock.
plasma: If not None, the parameters of the plasma Object will be used.
minor_radius: the minor radius of the plasma (cm).
major_radius: the major radius of the plasma (cm).
triangularity: the triangularity of the plasma.
elongation: the elongation of the plasma.
vertical_displacement: the vertical_displacement of the plasma (cm).
offset_from_plasma: the distance between the plasma and the blanket
(cm). If float, constant offset. If list of floats, offset will
vary linearly between the values. If callable, offset will be a
function of poloidal angle (in degrees). If a list of two lists
(angles and offsets) then these will be used together with linear
interpolation.
num_points: number of points that will describe the shape.
"""
def __init__(self,
thickness,
start_angle: float,
stop_angle: float,
plasma: Optional[Union[paramak.Plasma,
paramak.PlasmaBoundaries,
paramak.PlasmaFromPoints]] = None,
minor_radius: Optional[float] = 150.0,
major_radius: Optional[float] = 450.0,
triangularity: Optional[float] = 0.55,
elongation: Optional[float] = 2.0,
vertical_displacement: Optional[float] = 0.0,
offset_from_plasma: Optional[float] = 0.0,
num_points: Optional[int] = 50,
**kwargs):
super().__init__(**kwargs)
self.thickness = thickness
self.start_angle, self.stop_angle = None, None
self.start_angle = start_angle
self.stop_angle = stop_angle
self.plasma = plasma
self.vertical_displacement = vertical_displacement
if plasma is None:
self.minor_radius = minor_radius
self.major_radius = major_radius
self.triangularity = triangularity
self.elongation = elongation
else: # if plasma object is given, use its parameters
self.minor_radius = plasma.minor_radius
self.major_radius = plasma.major_radius
self.triangularity = plasma.triangularity
self.elongation = plasma.elongation
self.offset_from_plasma = offset_from_plasma
self.num_points = num_points
@property
def start_angle(self):
return self._start_angle
@start_angle.setter
def start_angle(self, value):
self._start_angle = value
@property
def stop_angle(self):
return self._stop_angle
@stop_angle.setter
def stop_angle(self, value):
self._stop_angle = value
@property
def minor_radius(self):
return self._minor_radius
@minor_radius.setter
def minor_radius(self, minor_radius):
self._minor_radius = minor_radius
@property
def thickness(self):
return self._thickness
@thickness.setter
def thickness(self, thickness):
self._thickness = thickness
@property
def inner_points(self):
self.find_points()
return self._inner_points
@inner_points.setter
def inner_points(self, value):
self._inner_points = value
@property
def outer_points(self):
self.find_points()
return self._outer_points
@outer_points.setter
def outer_points(self, value):
self._outer_points = value
def make_callable(self, attribute):
"""This function transforms an attribute (thickness or offset) into a
callable function of theta
"""
# if the attribute is a list, create a interpolated object of the
# values
if isinstance(attribute, (tuple, list)):
if isinstance(attribute[0], (tuple, list)) and \
isinstance(attribute[1], (tuple, list)) and \
len(attribute) == 2:
# attribute is a list of 2 lists
if len(attribute[0]) != len(attribute[1]):
raise ValueError('The length of angles list must equal \
the length of values list')
list_of_angles = np.array(attribute[0])
offset_values = attribute[1]
else:
# no list of angles is given
offset_values = attribute
list_of_angles = np.linspace(
self.start_angle,
self.stop_angle,
len(offset_values),
endpoint=True)
interpolated_values = interp1d(list_of_angles, offset_values)
def fun(theta):
if callable(attribute):
return attribute(theta)
elif isinstance(attribute, (tuple, list)):
return interpolated_values(theta)
else:
return attribute
return fun
def find_points(self, angles=None):
self._overlapping_shape = False
# create array of angles theta
if angles is None:
thetas = np.linspace(
self.start_angle,
self.stop_angle,
num=self.num_points,
endpoint=True,
)
else:
thetas = angles
# create inner points
inner_offset = self.make_callable(self.offset_from_plasma)
inner_points = self.create_offset_points(thetas, inner_offset)
inner_points[-1][2] = "straight"
self.inner_points = inner_points
# create outer points
thickness = self.make_callable(self.thickness)
def outer_offset(theta):
return inner_offset(theta) + thickness(theta)
outer_points = self.create_offset_points(np.flip(thetas), outer_offset)
outer_points[-1][2] = "straight"
self.outer_points = outer_points
# assemble
points = inner_points + outer_points
if self._overlapping_shape:
msg = ("BlanketFP: Some points with negative R coordinate have "
"been ignored.")
warnings.warn(msg)
self.points = points
return points
def create_offset_points(self, thetas, offset):
"""generates a list of points following parametric equations with an
offset
Args:
thetas (np.array): the angles in degrees.
offset (callable): offset value (cm). offset=0 will follow the
parametric equations.
Returns:
list: list of points [[R1, Z1, connection1], [R2, Z2, connection2],
...]
"""
# create sympy objects and derivatives
theta_sp = sp.Symbol("theta")
R_sp, Z_sp = self.distribution(theta_sp, pkg=sp)
R_derivative = sp.diff(R_sp, theta_sp)
Z_derivative = sp.diff(Z_sp, theta_sp)
points = []
for theta in thetas:
# get local value of derivatives
val_R_derivative = float(R_derivative.subs("theta", theta))
val_Z_derivative = float(Z_derivative.subs("theta", theta))
# get normal vector components
nx = val_Z_derivative
ny = -val_R_derivative
# normalise normal vector
normal_vector_norm = (nx ** 2 + ny ** 2) ** 0.5
nx /= normal_vector_norm
ny /= normal_vector_norm
# calculate outer points
val_R_outer = self.distribution(theta)[0] + offset(theta) * nx
val_Z_outer = self.distribution(theta)[1] + offset(theta) * ny
if float(val_R_outer) > 0:
points.append(
[float(val_R_outer), float(val_Z_outer), "spline"])
else:
self._overlapping_shape = True
return points
def distribution(self, theta, pkg=np):
"""Plasma distribution theta in degrees
Args:
theta (float or np.array or sp.Symbol): the angle(s) in degrees.
pkg (module, optional): Module to use in the function. If sp, as
sympy object will be returned. If np, a np.array or a float
will be returned. Defaults to np.
Returns:
(float, float) or (sympy.Add, sympy.Mul) or
(numpy.array, numpy.array): The R and Z coordinates of the
point with angle theta
"""
if pkg == np:
theta = np.radians(theta)
else:
theta = mpmath.radians(theta)
R = self.major_radius + self.minor_radius * pkg.cos(
theta + self.triangularity * pkg.sin(theta)
)
Z = (
self.elongation * self.minor_radius * pkg.sin(theta)
+ self.vertical_displacement
)
return R, Z
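# Editorial usage sketch (parameter values are illustrative only, not from the
# original module); thickness here uses the two-lists form [angles, values]
# described in the class docstring, so the blanket thickness is interpolated in
# poloidal angle:
#   blanket = BlanketFP(
#       thickness=[[0, 90, 180], [50, 100, 50]],  # angles (deg), thicknesses (cm)
#       offset_from_plasma=30,
#       start_angle=0,
#       stop_angle=180,
#       minor_radius=150,
#       major_radius=450,
#       triangularity=0.55,
#       elongation=2.0,
#   )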
|
[
"numpy.radians",
"numpy.flip",
"sympy.Symbol",
"scipy.interpolate.interp1d",
"numpy.array",
"numpy.linspace",
"mpmath.radians",
"sympy.diff",
"warnings.warn"
] |
[((8042, 8060), 'sympy.Symbol', 'sp.Symbol', (['"""theta"""'], {}), "('theta')\n", (8051, 8060), True, 'import sympy as sp\n'), ((8142, 8165), 'sympy.diff', 'sp.diff', (['R_sp', 'theta_sp'], {}), '(R_sp, theta_sp)\n', (8149, 8165), True, 'import sympy as sp\n'), ((8189, 8212), 'sympy.diff', 'sp.diff', (['Z_sp', 'theta_sp'], {}), '(Z_sp, theta_sp)\n', (8196, 8212), True, 'import sympy as sp\n'), ((5945, 5984), 'scipy.interpolate.interp1d', 'interp1d', (['list_of_angles', 'offset_values'], {}), '(list_of_angles, offset_values)\n', (5953, 5984), False, 'from scipy.interpolate import interp1d\n'), ((6429, 6515), 'numpy.linspace', 'np.linspace', (['self.start_angle', 'self.stop_angle'], {'num': 'self.num_points', 'endpoint': '(True)'}), '(self.start_angle, self.stop_angle, num=self.num_points,\n endpoint=True)\n', (6440, 6515), True, 'import numpy as np\n'), ((7112, 7127), 'numpy.flip', 'np.flip', (['thetas'], {}), '(thetas)\n', (7119, 7127), True, 'import numpy as np\n'), ((7451, 7469), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (7464, 7469), False, 'import warnings\n'), ((9784, 9801), 'numpy.radians', 'np.radians', (['theta'], {}), '(theta)\n', (9794, 9801), True, 'import numpy as np\n'), ((9836, 9857), 'mpmath.radians', 'mpmath.radians', (['theta'], {}), '(theta)\n', (9850, 9857), False, 'import mpmath\n'), ((5542, 5564), 'numpy.array', 'np.array', (['attribute[0]'], {}), '(attribute[0])\n', (5550, 5564), True, 'import numpy as np\n')]
|
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import os
import math
from utils import logger
use_cuda = torch.cuda.is_available()
# utility
def to_var(x, dtype=None):
if type(x) is np.ndarray:
x = torch.from_numpy(x)
elif type(x) is list:
x = torch.from_numpy(np.array(x, dtype=dtype))
if use_cuda:
x = x.cuda()
return Variable(x)
# optimization
# reference: http://pytorch.org/docs/master/_modules/torch/optim/lr_scheduler.html#ReduceLROnPlateau
def adjusting_learning_rate(optimizer, factor=.5, min_lr=0.00001):
for i, param_group in enumerate(optimizer.param_groups):
old_lr = float(param_group['lr'])
new_lr = max(old_lr*factor, min_lr)
param_group['lr'] = new_lr
logger.info('adjusting learning rate from %.6f to %.6f' % (old_lr, new_lr))
def lr_annealing_function(step, start=0, end=1, r=0.9999, type="exp"):
if type == "exp":
lr = start - (start - end) * (1 - math.pow(r, step))
else:
print("not available %s annealing" % type)
return lr
def update_lr(optimizer, new_lr):
old_lr = optimizer.param_groups[0]['lr']
# logger.info("adjusting learning rate from %.6f to %.6f" % (old_lr, new_lr))
for i, param_group in enumerate(optimizer.param_groups):
param_group['lr'] = new_lr
def transformer_learning_rate(optimizer, model_dim, step_num, warmup_steps=4000):
for i, param_group in enumerate(optimizer.param_groups):
new_lr = model_dim**(-0.5) * min(step_num**(-0.5), step_num*warmup_steps**(-1.5))
old_lr = float(param_group['lr'])
# new_lr = max(old_lr*factor, min_lr)
param_group['lr'] = new_lr
logger.info('adjusting learning rate from %.6f to %.6f' % (old_lr, new_lr))
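# Editorial note: the schedule above is the "Noam" learning-rate schedule from
# "Attention Is All You Need":
#   lr = model_dim**-0.5 * min(step**-0.5, step * warmup_steps**-1.5)
# i.e. a linear warm-up for warmup_steps followed by a 1/sqrt(step) decay.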
# model save and loading
def load_model(asset_path, model, optimizer, restore_epoch=0):
if os.path.isfile(os.path.join(asset_path, 'model', 'checkpoint_%d.pth.tar' % restore_epoch)):
checkpoint = torch.load(os.path.join(asset_path, 'model', 'checkpoint_%d.pth.tar' % restore_epoch))
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
current_step = checkpoint['current_step']
logger.info("restore model with %d epoch" % restore_epoch)
else:
logger.info("no checkpoint with %d epoch" % restore_epoch)
current_step = 0
return model, optimizer, current_step
# class weighted_BCELoss(Module):
# def __init__(self, mode):
# self.mode = mode
#
# def forward(self, input, target, weight=10):
# if not (input.size() == target.size()):
# raise ValueError("Target and input must have the same size. target size ({}) "
# "!= input size ({})".format(target.size(), input.size()))
# loss_matrix = - (torch.mul(target, input.log()) + torch.mul(1 - target, (1 - input).log()))
# one_matrix = Variable(torch.ones(input.size()))
# if use_cuda:
# one_matrix = one_matrix.cuda()
# if self.mode == 'one':
# weight_matrix = (weight - 1) * target + one_matrix
# elif self.mode == 'pitch':
#
# weighted_loss_matrix = torch.mul(loss_matrix, weight_matrix)
# return torch.mean(weighted_loss_matrix)
# loss
def weighted_binary_cross_entropy(output, target, weights=None, eps=1e-12):
if weights is not None:
assert len(weights) == 2
loss = weights[1] * (target * torch.log(output + eps)) + \
weights[0] * ((1 - target) * torch.log(1 - output + eps))
else:
loss = target * torch.log(output + eps) + (1 - target) * torch.log(1 - output + eps)
return torch.neg(torch.mean(loss))
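# Editorial note: weights[1] scales the positive-class term and weights[0] the
# negative-class term of the binary cross-entropy; with weights=None this reduces
# to standard BCE, and eps only guards against log(0).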
def kl_divergence(mu, sig, num_latent_group=0, freebits_ratio=2., p_mu=None, p_sigma=None, eps=1e-8):
# calculate kl divergence between two normal distribution
# mu, sig, p_mu, p_sigma: batch_size * latent_size
batch_size = mu.size(0)
latent_size = mu.size(1)
mu_square = mu * mu
sig_square = sig * sig
if p_mu is None:
kl = 0.5 * (mu_square + sig_square - torch.log(sig_square + eps) - 1)
else:
p_sig_square = p_sigma * p_sigma
p_mu_diff_square = (mu - p_mu) * (mu - p_mu)
kl = (sig_square + p_mu_diff_square)/(2*p_sig_square)
kl += torch.log(p_sigma/sig + eps)
kl -= 0.5
if num_latent_group == 0:
kl = torch.sum(kl) / batch_size
else:
group_size = latent_size // num_latent_group
kl = kl.mean(0) # mean along batch dimension
kl = kl.view(-1, group_size).sum(1) # summation along group dimension
kl = torch.clamp(kl, min=freebits_ratio) # clipping kl value
kl = kl.sum()
return kl
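# --- Hedged note (added for clarity; not part of the original module) ---
# For diagonal Gaussians q = N(mu, sig^2) and p = N(p_mu, p_sigma^2), the
# per-dimension term above is the closed-form KL divergence
#   KL(q || p) = log(p_sigma/sig) + (sig^2 + (mu - p_mu)^2) / (2 * p_sigma^2) - 1/2,
# which reduces to 0.5 * (mu^2 + sig^2 - log(sig^2) - 1) when p = N(0, 1).
# The clamp with min=freebits_ratio is the usual "free bits" trick: latent
# groups whose KL is already below the threshold contribute a constant, so the
# optimizer gains nothing from collapsing them further.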
def vae_loss(target, prediction, mu, sig,
num_latent_group=0, freebits_ratio=2., kl_ratio=1., p_mu=None, p_sigma=None):
rec_loss = F.binary_cross_entropy(prediction, target)
kl_loss = kl_divergence(mu, sig, num_latent_group, freebits_ratio, p_mu, p_sigma)
total_loss = rec_loss + kl_ratio * kl_loss
return total_loss, rec_loss, kl_loss
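# --- Hedged note (added for clarity; not part of the original module) ---
# vae_loss() is the usual beta-VAE style objective,
#   L = BCE(prediction, target) + kl_ratio * KL(q(z|x) || p(z)),
# with kl_ratio playing the role of beta and the free-bits clamp applied
# inside kl_divergence() above.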
|
[
"torch.log",
"torch.mean",
"math.pow",
"torch.nn.functional.binary_cross_entropy",
"os.path.join",
"torch.from_numpy",
"numpy.array",
"torch.cuda.is_available",
"utils.logger.info",
"torch.sum",
"torch.autograd.Variable",
"torch.clamp"
] |
[((160, 185), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (183, 185), False, 'import torch\n'), ((419, 430), 'torch.autograd.Variable', 'Variable', (['x'], {}), '(x)\n', (427, 430), False, 'from torch.autograd import Variable\n'), ((4962, 5004), 'torch.nn.functional.binary_cross_entropy', 'F.binary_cross_entropy', (['prediction', 'target'], {}), '(prediction, target)\n', (4984, 5004), True, 'import torch.nn.functional as F\n'), ((267, 286), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (283, 286), False, 'import torch\n'), ((806, 881), 'utils.logger.info', 'logger.info', (["('adjusting learning rate from %.6f to %.6f' % (old_lr, new_lr))"], {}), "('adjusting learning rate from %.6f to %.6f' % (old_lr, new_lr))\n", (817, 881), False, 'from utils import logger\n'), ((1739, 1815), 'utils.logger.info', 'logger.info', (["('adjusting learning rate from %.6f to %.6f' % (old_lr, new_lr))"], {}), "('adjusting learning rate from %.6f to %.6f' % (old_lr, new_lr))\n", (1750, 1815), False, 'from utils import logger\n'), ((1928, 2002), 'os.path.join', 'os.path.join', (['asset_path', '"""model"""', "('checkpoint_%d.pth.tar' % restore_epoch)"], {}), "(asset_path, 'model', 'checkpoint_%d.pth.tar' % restore_epoch)\n", (1940, 2002), False, 'import os\n'), ((2281, 2339), 'utils.logger.info', 'logger.info', (["('restore model with %d epoch' % restore_epoch)"], {}), "('restore model with %d epoch' % restore_epoch)\n", (2292, 2339), False, 'from utils import logger\n'), ((2358, 2416), 'utils.logger.info', 'logger.info', (["('no checkpoint with %d epoch' % restore_epoch)"], {}), "('no checkpoint with %d epoch' % restore_epoch)\n", (2369, 2416), False, 'from utils import logger\n'), ((3762, 3778), 'torch.mean', 'torch.mean', (['loss'], {}), '(loss)\n', (3772, 3778), False, 'import torch\n'), ((4390, 4420), 'torch.log', 'torch.log', (['(p_sigma / sig + eps)'], {}), '(p_sigma / sig + eps)\n', (4399, 4420), False, 'import torch\n'), ((4717, 4752), 'torch.clamp', 'torch.clamp', (['kl'], {'min': 'freebits_ratio'}), '(kl, min=freebits_ratio)\n', (4728, 4752), False, 'import torch\n'), ((2037, 2111), 'os.path.join', 'os.path.join', (['asset_path', '"""model"""', "('checkpoint_%d.pth.tar' % restore_epoch)"], {}), "(asset_path, 'model', 'checkpoint_%d.pth.tar' % restore_epoch)\n", (2049, 2111), False, 'import os\n'), ((4481, 4494), 'torch.sum', 'torch.sum', (['kl'], {}), '(kl)\n', (4490, 4494), False, 'import torch\n'), ((342, 366), 'numpy.array', 'np.array', (['x'], {'dtype': 'dtype'}), '(x, dtype=dtype)\n', (350, 366), True, 'import numpy as np\n'), ((3671, 3694), 'torch.log', 'torch.log', (['(output + eps)'], {}), '(output + eps)\n', (3680, 3694), False, 'import torch\n'), ((3712, 3739), 'torch.log', 'torch.log', (['(1 - output + eps)'], {}), '(1 - output + eps)\n', (3721, 3739), False, 'import torch\n'), ((1019, 1036), 'math.pow', 'math.pow', (['r', 'step'], {}), '(r, step)\n', (1027, 1036), False, 'import math\n'), ((3535, 3558), 'torch.log', 'torch.log', (['(output + eps)'], {}), '(output + eps)\n', (3544, 3558), False, 'import torch\n'), ((3608, 3635), 'torch.log', 'torch.log', (['(1 - output + eps)'], {}), '(1 - output + eps)\n', (3617, 3635), False, 'import torch\n'), ((4177, 4204), 'torch.log', 'torch.log', (['(sig_square + eps)'], {}), '(sig_square + eps)\n', (4186, 4204), False, 'import torch\n')]
|
"""
Explore raw composites based on indices from predicted testing data,
showing all the different OHC levels for OBSERVATIONS
Author : <NAME>
Date : 21 September 2021
Version : 2 (mostly for testing)
"""
### Import packages
import sys
import matplotlib.pyplot as plt
import numpy as np
import calc_Utilities as UT
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import palettable.cubehelix as cm
import cmocean as cmocean
import calc_dataFunctions as df
import calc_Stats as dSS
from netCDF4 import Dataset
### Plotting defaults
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
###############################################################################
###############################################################################
###############################################################################
### Data preliminaries
modelGCMs = ['CESM2le']
dataset_obs = 'ERA5'
allDataLabels = modelGCMs
monthlychoiceq = ['annual']
variables = ['T2M']
vari_predict = ['SST','OHC100','OHC300','OHC700']
reg_name = 'SMILEGlobe'
level = 'surface'
###############################################################################
###############################################################################
randomalso = False
timeper = 'hiatus'
shuffletype = 'GAUSS'
###############################################################################
###############################################################################
land_only = False
ocean_only = False
###############################################################################
###############################################################################
baseline = np.arange(1951,1980+1,1)
###############################################################################
###############################################################################
window = 0
if window == 0:
rm_standard_dev = False
ravel_modelens = False
ravelmodeltime = False
else:
rm_standard_dev = True
ravelmodeltime = False
ravel_modelens = True
yearsall = np.arange(1979+window,2099+1,1)
yearsobs = np.arange(1979+window,2020+1,1)
###############################################################################
###############################################################################
numOfEns = 40
lentime = len(yearsall)
###############################################################################
###############################################################################
lat_bounds,lon_bounds = UT.regions(reg_name)
###############################################################################
###############################################################################
ravelyearsbinary = False
ravelbinary = False
lensalso = True
###############################################################################
###############################################################################
### Remove ensemble mean
rm_ensemble_mean = True
###############################################################################
###############################################################################
### Accuracy for composites
accurate = True
if accurate == True:
typemodel = 'correcthiatus_obs'
elif accurate == False:
typemodel = 'extrahiatus_obs'
elif accurate == 'WRONG':
typemodel = 'wronghiatus_obs'
elif accurate == 'HIATUS':
typemodel = 'allhiatus_obs'
###############################################################################
###############################################################################
### Call functions
trendlength = 10
AGWstart = 1990
years_newmodel = np.arange(AGWstart,yearsall[-1]-8,1)
years_newobs = np.arange(AGWstart,yearsobs[-1]-8,1)
vv = 0
mo = 0
variq = variables[vv]
monthlychoice = monthlychoiceq[mo]
directoryfigure = '/Users/zlabe/Desktop/GmstTrendPrediction/ANN_v2/Obs/'
saveData = monthlychoice + '_' + variq + '_' + reg_name + '_' + dataset_obs
print('*Filename == < %s >' % saveData)
###############################################################################
###############################################################################
### Function to read in predictor variables (SST/OHC)
def read_primary_dataset(variq,dataset,monthlychoice,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,timeper,lat_bounds=lat_bounds,lon_bounds=lon_bounds):
data,lats,lons = df.readFiles(variq,dataset,monthlychoice,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,timeper)
datar,lats,lons = df.getRegion(data,lats,lons,lat_bounds,lon_bounds)
print('\nOur dataset: ',dataset,' is shaped',data.shape)
return datar,lats,lons
def read_obs_dataset(variq,dataset_obs,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,lat_bounds=lat_bounds,lon_bounds=lon_bounds):
data_obs,lats_obs,lons_obs = df.readFiles(variq,dataset_obs,monthlychoice,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,timeper)
data_obs,lats_obs,lons_obs = df.getRegion(data_obs,lats_obs,lons_obs,lat_bounds,lon_bounds)
print('our OBS dataset: ',dataset_obs,' is shaped',data_obs.shape)
return data_obs,lats_obs,lons_obs
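### Hedged helper sketch (added for clarity; not part of the original script).
### Both composite loops below inline the same per-grid-point standardization,
### z = (x - mean_t) / std_t over the observational years; the equivalent
### helper would look like this (the name standardize_obs is illustrative):
def standardize_obs(models_var, yearsobs, lats, lons):
    ravel = models_var.squeeze().reshape(yearsobs.shape[0], lats.shape[0]*lons.shape[0])
    z = (ravel - np.nanmean(ravel, axis=0)) / np.nanstd(ravel, axis=0)
    return z.reshape(yearsobs.shape[0], lats.shape[0], lons.shape[0])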
###############################################################################
###############################################################################
### Loop through to read all the variables
ohcHIATUS = np.empty((len(vari_predict),92,144))
for vvv in range(len(vari_predict)):
### Function to read in predictor variables (SST/OHC)
models_var = []
for i in range(len(modelGCMs)):
if vari_predict[vvv][:3] == 'OHC':
obs_predict = 'OHC'
else:
obs_predict = 'ERA5'
obsq_var,lats,lons = read_obs_dataset(vari_predict[vvv],obs_predict,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,lat_bounds=lat_bounds,lon_bounds=lon_bounds)
### Save predictor
models_var.append(obsq_var)
models_var = np.asarray(models_var).squeeze()
### Remove ensemble mean
if rm_ensemble_mean == True:
models_var = dSS.remove_trend_obs(models_var,'surface')
print('\n*Removed observational linear trend*')
### Standardize
models_varravel = models_var.squeeze().reshape(yearsobs.shape[0],lats.shape[0]*lons.shape[0])
meanvar = np.nanmean(models_varravel,axis=0)
stdvar = np.nanstd(models_varravel,axis=0)
modelsstd_varravel = (models_varravel-meanvar)/stdvar
models_var = modelsstd_varravel.reshape(yearsobs.shape[0],lats.shape[0],lons.shape[0])
### Slice for number of years
yearsq_m = np.where((yearsobs >= AGWstart))[0]
models_slice = models_var[yearsq_m,:,:]
if rm_ensemble_mean == False:
variq = 'T2M'
fac = 0.7
random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt',unpack=True))
random_network_seed = 87750
hidden = [20,20]
n_epochs = 500
batch_size = 128
lr_here = 0.001
ridgePenalty = 0.05
actFun = 'relu'
fractWeight = 0.5
elif rm_ensemble_mean == True:
variq = 'T2M'
fac = 0.7
random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt',unpack=True))
random_network_seed = 87750
hidden = [30,30]
n_epochs = 500
batch_size = 128
lr_here = 0.001
ridgePenalty = 0.5
actFun = 'relu'
fractWeight = 0.5
else:
print(ValueError('SOMETHING IS WRONG WITH DATA PROCESSING!'))
sys.exit()
### Naming conventions for files
directorymodel = '/Users/zlabe/Documents/Research/GmstTrendPrediction/SavedModels/'
savename = 'ANNv2_'+'OHC100'+'_hiatus_' + actFun + '_L2_'+ str(ridgePenalty)+ '_LR_' + str(lr_here)+ '_Batch'+ str(batch_size)+ '_Iters' + str(n_epochs) + '_' + str(len(hidden)) + 'x' + str(hidden[0]) + '_SegSeed' + str(random_segment_seed) + '_NetSeed'+ str(random_network_seed)
if(rm_ensemble_mean==True):
savename = savename + '_EnsembleMeanRemoved'
### Directories to save files
directorydata = '/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/'
###############################################################################
###############################################################################
###############################################################################
### Read in data for testing predictions and actual hiatuses
actual_test = np.genfromtxt(directorydata + 'obsActualLabels_' + savename + '.txt')
predict_test = np.genfromtxt(directorydata + 'obsLabels_' + savename+ '.txt')
### Reshape arrays for [ensemble,year]
act_re = actual_test
pre_re = predict_test
### Slice ensembles for testing data
ohcready = models_slice[:,:,:].squeeze()
### Pick all hiatuses
if accurate == True: ### correct predictions
ohc_allenscomp = []
for yr in range(ohcready.shape[0]):
if (pre_re[yr]) == 1 and (act_re[yr] == 1):
ohc_allenscomp.append(ohcready[yr,:,:])
elif accurate == False: ### picks all hiatus predictions
ohc_allenscomp = []
for yr in range(ohcready.shape[0]):
if pre_re[yr] == 1:
ohc_allenscomp.append(ohcready[yr,:,:])
elif accurate == 'WRONG': ### picks hiatus but is wrong
ohc_allenscomp = []
for yr in range(ohcready.shape[0]):
if (pre_re[yr]) == 1 and (act_re[yr] == 0):
ohc_allenscomp.append(ohcready[yr,:,:])
elif accurate == 'HIATUS': ### accurate climate change
ohc_allenscomp = []
for yr in range(ohcready.shape[0]):
if (act_re[yr] == 1):
ohc_allenscomp.append(ohcready[yr,:,:])
else:
print(ValueError('SOMETHING IS WRONG WITH ACCURACY COMPOSITES!'))
sys.exit()
### Composite across all years to get hiatuses
ohcHIATUS[vvv,:,:] = np.nanmean(np.asarray(ohc_allenscomp),axis=0)
###############################################################################
###############################################################################
### Loop through to read all the variables
lag1 = 3
lag2 = 7
lag = lag2-lag1
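### Note (added for clarity): with lag1 = 3 and lag2 = 7 the composites below
### average ohcready[yr+3:yr+7], i.e. years yr+3 through yr+6 after each
### detected onset year (a 4-year window starting 3 years after onset).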
ohcHIATUSlag = np.empty((len(vari_predict),92,144))
for vvv in range(len(vari_predict)):
### Function to read in predictor variables (SST/OHC)
models_var = []
for i in range(len(modelGCMs)):
if vari_predict[vvv][:3] == 'OHC':
obs_predict = 'OHC'
else:
obs_predict = 'ERA5'
obsq_var,lats,lons = read_obs_dataset(vari_predict[vvv],obs_predict,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,lat_bounds=lat_bounds,lon_bounds=lon_bounds)
### Save predictor
models_var.append(obsq_var)
models_var = np.asarray(models_var).squeeze()
### Remove ensemble mean
if rm_ensemble_mean == True:
models_var = dSS.remove_trend_obs(models_var,'surface')
print('\n*Removed observational linear trend*')
### Standardize
models_varravel = models_var.squeeze().reshape(yearsobs.shape[0],lats.shape[0]*lons.shape[0])
meanvar = np.nanmean(models_varravel,axis=0)
stdvar = np.nanstd(models_varravel,axis=0)
modelsstd_varravel = (models_varravel-meanvar)/stdvar
models_var = modelsstd_varravel.reshape(yearsobs.shape[0],lats.shape[0],lons.shape[0])
### Slice for number of years
yearsq_m = np.where((yearsobs >= AGWstart))[0]
models_slice = models_var[yearsq_m,:,:]
if rm_ensemble_mean == False:
variq = 'T2M'
fac = 0.7
random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt',unpack=True))
random_network_seed = 87750
hidden = [20,20]
n_epochs = 500
batch_size = 128
lr_here = 0.001
ridgePenalty = 0.05
actFun = 'relu'
fractWeight = 0.5
elif rm_ensemble_mean == True:
variq = 'T2M'
fac = 0.7
random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt',unpack=True))
random_network_seed = 87750
hidden = [30,30]
n_epochs = 500
batch_size = 128
lr_here = 0.001
ridgePenalty = 0.5
actFun = 'relu'
fractWeight = 0.5
else:
print(ValueError('SOMETHING IS WRONG WITH DATA PROCESSING!'))
sys.exit()
### Naming conventions for files
directorymodel = '/Users/zlabe/Documents/Research/GmstTrendPrediction/SavedModels/'
savename = 'ANNv2_'+'OHC100'+'_hiatus_' + actFun + '_L2_'+ str(ridgePenalty)+ '_LR_' + str(lr_here)+ '_Batch'+ str(batch_size)+ '_Iters' + str(n_epochs) + '_' + str(len(hidden)) + 'x' + str(hidden[0]) + '_SegSeed' + str(random_segment_seed) + '_NetSeed'+ str(random_network_seed)
if(rm_ensemble_mean==True):
savename = savename + '_EnsembleMeanRemoved'
### Directories to save files
directorydata = '/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/'
###############################################################################
###############################################################################
###############################################################################
### Read in data for testing predictions and actual hiatuses
actual_test = np.genfromtxt(directorydata + 'obsActualLabels_' + savename + '.txt')
predict_test = np.genfromtxt(directorydata + 'obsLabels_' + savename+ '.txt')
### Reshape arrays for [ensemble,year]
act_re = actual_test
pre_re = predict_test
### Slice ensembles for testing data
ohcready = models_slice[:,:,:].squeeze()
### Pick all hiatuses
if accurate == True: ### correct predictions
ohc_allenscomp = []
for yr in range(ohcready.shape[0]):
if (pre_re[yr]) == 1 and (act_re[yr] == 1):
ohc_allenscomp.append(np.nanmean(ohcready[yr+lag1:yr+lag2,:,:],axis=0))
elif accurate == False: ### picks all hiatus predictions
ohc_allenscomp = []
for yr in range(ohcready.shape[0]):
if pre_re[yr] == 1:
ohc_allenscomp.append(np.nanmean(ohcready[yr+lag1:yr+lag2,:,:],axis=0))
elif accurate == 'WRONG': ### picks hiatus but is wrong
ohc_allenscomp = []
for yr in range(ohcready.shape[0]):
if (pre_re[yr]) == 1 and (act_re[yr] == 0):
ohc_allenscomp.append(np.nanmean(ohcready[yr+lag1:yr+lag2,:,:],axis=0))
elif accurate == 'HIATUS': ### accurate climate change
ohc_allenscomp = []
for yr in range(ohcready.shape[0]):
if (act_re[yr] == 1):
ohc_allenscomp.append(np.nanmean(ohcready[yr+lag1:yr+lag2,:,:],axis=0))
else:
print(ValueError('SOMETHING IS WRONG WITH ACCURACY COMPOSITES!'))
sys.exit()
### Composite across all years to get hiatuses
ohcHIATUSlag[vvv,:,:] = np.nanmean(np.asarray(ohc_allenscomp),axis=0)
### Composite all for plotting
ohc_allcomp = np.append(ohcHIATUS,ohcHIATUSlag,axis=0)
###############################################################################
###############################################################################
### Plot subplot of observations
letters = ["a","b","c","d","e","f","g","h","i","j","k","l","m","n"]
plotloc = [1,3,5,7,2,4,6,8]
if rm_ensemble_mean == False:
limit = np.arange(-1.5,1.51,0.02)
barlim = np.round(np.arange(-1.5,1.6,0.5),2)
elif rm_ensemble_mean == True:
limit = np.arange(-1.5,1.6,0.02)
barlim = np.round(np.arange(-1.5,1.6,0.5),2)
cmap = cmocean.cm.balance
label = r'\textbf{[ HIATUS COMPOSITE ]}'
fig = plt.figure(figsize=(8,10))
###############################################################################
for ppp in range(ohc_allcomp.shape[0]):
ax1 = plt.subplot(ohc_allcomp.shape[0]//2,2,plotloc[ppp])
m = Basemap(projection='robin',lon_0=-180,resolution='l',area_thresh=10000)
m.drawcoastlines(color='darkgrey',linewidth=0.27)
### Variable
varn = ohc_allcomp[ppp]
if ppp == 0:
lons = np.where(lons >180,lons-360,lons)
x, y = np.meshgrid(lons,lats)
circle = m.drawmapboundary(fill_color='dimgrey',color='dimgray',
linewidth=0.7)
circle.set_clip_on(False)
cs1 = m.contourf(x,y,varn,limit,extend='both',latlon=True)
cs1.set_cmap(cmap)
m.fillcontinents(color='dimgrey',lake_color='dimgrey')
ax1.annotate(r'\textbf{[%s]}' % letters[ppp],xy=(0,0),xytext=(0.95,0.93),
textcoords='axes fraction',color='k',fontsize=10,
rotation=0,ha='center',va='center')
if ppp < 4:
ax1.annotate(r'\textbf{%s}' % vari_predict[ppp],xy=(0,0),xytext=(-0.08,0.5),
textcoords='axes fraction',color='dimgrey',fontsize=20,
rotation=90,ha='center',va='center')
if ppp == 0:
plt.title(r'\textbf{Onset}',fontsize=15,color='k')
if ppp == 4:
plt.title(r'\textbf{%s-Year Composite}' % lag,fontsize=15,color='k')
###############################################################################
cbar_ax1 = fig.add_axes([0.38,0.05,0.3,0.02])
cbar1 = fig.colorbar(cs1,cax=cbar_ax1,orientation='horizontal',
extend='both',extendfrac=0.07,drawedges=False)
cbar1.set_label(label,fontsize=6,color='dimgrey',labelpad=1.4)
cbar1.set_ticks(barlim)
cbar1.set_ticklabels(list(map(str,barlim)))
cbar1.ax.tick_params(axis='x', size=.01,labelsize=4)
cbar1.outline.set_edgecolor('dimgrey')
plt.tight_layout()
plt.subplots_adjust(bottom=0.08,wspace=0.01)
if rm_ensemble_mean == True:
plt.savefig(directoryfigure + 'RawCompositesHiatus_OBSERVATIONS_OHClevels-lag%s_v2_AccH-%s_AccR-%s_rmENSEMBLEmean.png' % (lag,accurate,accurate),dpi=300)
else:
plt.savefig(directoryfigure + 'RawCompositesHiatus_OBSERVATIONS_OHClevels-lag%s_v2_AccH-%s_AccR-%s.png' % (lag,accurate,accurate),dpi=300)
|
[
"calc_dataFunctions.getRegion",
"numpy.nanmean",
"sys.exit",
"numpy.genfromtxt",
"numpy.arange",
"numpy.where",
"numpy.asarray",
"numpy.meshgrid",
"calc_Utilities.regions",
"numpy.nanstd",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.title",
"calc_Stats.remove_trend_obs",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.rc",
"numpy.append",
"calc_dataFunctions.readFiles",
"matplotlib.pyplot.figure",
"mpl_toolkits.basemap.Basemap",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplot"
] |
[((566, 593), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (572, 593), True, 'import matplotlib.pyplot as plt\n'), ((593, 666), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {}), "('font', **{'family': 'sans-serif', 'sans-serif': ['Avant Garde']})\n", (599, 666), True, 'import matplotlib.pyplot as plt\n'), ((1727, 1755), 'numpy.arange', 'np.arange', (['(1951)', '(1980 + 1)', '(1)'], {}), '(1951, 1980 + 1, 1)\n', (1736, 1755), True, 'import numpy as np\n'), ((2118, 2155), 'numpy.arange', 'np.arange', (['(1979 + window)', '(2099 + 1)', '(1)'], {}), '(1979 + window, 2099 + 1, 1)\n', (2127, 2155), True, 'import numpy as np\n'), ((2161, 2198), 'numpy.arange', 'np.arange', (['(1979 + window)', '(2020 + 1)', '(1)'], {}), '(1979 + window, 2020 + 1, 1)\n', (2170, 2198), True, 'import numpy as np\n'), ((2575, 2595), 'calc_Utilities.regions', 'UT.regions', (['reg_name'], {}), '(reg_name)\n', (2585, 2595), True, 'import calc_Utilities as UT\n'), ((3694, 3734), 'numpy.arange', 'np.arange', (['AGWstart', '(yearsall[-1] - 8)', '(1)'], {}), '(AGWstart, yearsall[-1] - 8, 1)\n', (3703, 3734), True, 'import numpy as np\n'), ((3746, 3786), 'numpy.arange', 'np.arange', (['AGWstart', '(yearsobs[-1] - 8)', '(1)'], {}), '(AGWstart, yearsobs[-1] - 8, 1)\n', (3755, 3786), True, 'import numpy as np\n'), ((15455, 15497), 'numpy.append', 'np.append', (['ohcHIATUS', 'ohcHIATUSlag'], {'axis': '(0)'}), '(ohcHIATUS, ohcHIATUSlag, axis=0)\n', (15464, 15497), True, 'import numpy as np\n'), ((16109, 16136), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 10)'}), '(figsize=(8, 10))\n', (16119, 16136), True, 'import matplotlib.pyplot as plt\n'), ((18042, 18060), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (18058, 18060), True, 'import matplotlib.pyplot as plt\n'), ((18061, 18106), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.08)', 'wspace': '(0.01)'}), '(bottom=0.08, wspace=0.01)\n', (18080, 18106), True, 'import matplotlib.pyplot as plt\n'), ((4458, 4590), 'calc_dataFunctions.readFiles', 'df.readFiles', (['variq', 'dataset', 'monthlychoice', 'numOfEns', 'lensalso', 'randomalso', 'ravelyearsbinary', 'ravelbinary', 'shuffletype', 'timeper'], {}), '(variq, dataset, monthlychoice, numOfEns, lensalso, randomalso,\n ravelyearsbinary, ravelbinary, shuffletype, timeper)\n', (4470, 4590), True, 'import calc_dataFunctions as df\n'), ((4600, 4654), 'calc_dataFunctions.getRegion', 'df.getRegion', (['data', 'lats', 'lons', 'lat_bounds', 'lon_bounds'], {}), '(data, lats, lons, lat_bounds, lon_bounds)\n', (4612, 4654), True, 'import calc_dataFunctions as df\n'), ((4930, 5066), 'calc_dataFunctions.readFiles', 'df.readFiles', (['variq', 'dataset_obs', 'monthlychoice', 'numOfEns', 'lensalso', 'randomalso', 'ravelyearsbinary', 'ravelbinary', 'shuffletype', 'timeper'], {}), '(variq, dataset_obs, monthlychoice, numOfEns, lensalso,\n randomalso, ravelyearsbinary, ravelbinary, shuffletype, timeper)\n', (4942, 5066), True, 'import calc_dataFunctions as df\n'), ((5087, 5153), 'calc_dataFunctions.getRegion', 'df.getRegion', (['data_obs', 'lats_obs', 'lons_obs', 'lat_bounds', 'lon_bounds'], {}), '(data_obs, lats_obs, lons_obs, lat_bounds, lon_bounds)\n', (5099, 5153), True, 'import calc_dataFunctions as df\n'), ((6427, 6462), 'numpy.nanmean', 'np.nanmean', (['models_varravel'], {'axis': '(0)'}), '(models_varravel, axis=0)\n', (6437, 6462), True, 'import numpy as np\n'), ((6475, 6509), 'numpy.nanstd', 'np.nanstd', 
(['models_varravel'], {'axis': '(0)'}), '(models_varravel, axis=0)\n', (6484, 6509), True, 'import numpy as np\n'), ((8722, 8791), 'numpy.genfromtxt', 'np.genfromtxt', (["(directorydata + 'obsActualLabels_' + savename + '.txt')"], {}), "(directorydata + 'obsActualLabels_' + savename + '.txt')\n", (8735, 8791), True, 'import numpy as np\n'), ((8811, 8874), 'numpy.genfromtxt', 'np.genfromtxt', (["(directorydata + 'obsLabels_' + savename + '.txt')"], {}), "(directorydata + 'obsLabels_' + savename + '.txt')\n", (8824, 8874), True, 'import numpy as np\n'), ((11457, 11492), 'numpy.nanmean', 'np.nanmean', (['models_varravel'], {'axis': '(0)'}), '(models_varravel, axis=0)\n', (11467, 11492), True, 'import numpy as np\n'), ((11505, 11539), 'numpy.nanstd', 'np.nanstd', (['models_varravel'], {'axis': '(0)'}), '(models_varravel, axis=0)\n', (11514, 11539), True, 'import numpy as np\n'), ((13752, 13821), 'numpy.genfromtxt', 'np.genfromtxt', (["(directorydata + 'obsActualLabels_' + savename + '.txt')"], {}), "(directorydata + 'obsActualLabels_' + savename + '.txt')\n", (13765, 13821), True, 'import numpy as np\n'), ((13841, 13904), 'numpy.genfromtxt', 'np.genfromtxt', (["(directorydata + 'obsLabels_' + savename + '.txt')"], {}), "(directorydata + 'obsLabels_' + savename + '.txt')\n", (13854, 13904), True, 'import numpy as np\n'), ((15843, 15870), 'numpy.arange', 'np.arange', (['(-1.5)', '(1.51)', '(0.02)'], {}), '(-1.5, 1.51, 0.02)\n', (15852, 15870), True, 'import numpy as np\n'), ((16266, 16321), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(ohc_allcomp.shape[0] // 2)', '(2)', 'plotloc[ppp]'], {}), '(ohc_allcomp.shape[0] // 2, 2, plotloc[ppp])\n', (16277, 16321), True, 'import matplotlib.pyplot as plt\n'), ((16326, 16400), 'mpl_toolkits.basemap.Basemap', 'Basemap', ([], {'projection': '"""robin"""', 'lon_0': '(-180)', 'resolution': '"""l"""', 'area_thresh': '(10000)'}), "(projection='robin', lon_0=-180, resolution='l', area_thresh=10000)\n", (16333, 16400), False, 'from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid\n'), ((18139, 18305), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(directoryfigure + \n 'RawCompositesHiatus_OBSERVATIONS_OHClevels-lag%s_v2_AccH-%s_AccR-%s_rmENSEMBLEmean.png'\n % (lag, accurate, accurate))"], {'dpi': '(300)'}), "(directoryfigure + \n 'RawCompositesHiatus_OBSERVATIONS_OHClevels-lag%s_v2_AccH-%s_AccR-%s_rmENSEMBLEmean.png'\n % (lag, accurate, accurate), dpi=300)\n", (18150, 18305), True, 'import matplotlib.pyplot as plt\n'), ((18303, 18453), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(directoryfigure + \n 'RawCompositesHiatus_OBSERVATIONS_OHClevels-lag%s_v2_AccH-%s_AccR-%s.png' %\n (lag, accurate, accurate))"], {'dpi': '(300)'}), "(directoryfigure + \n 'RawCompositesHiatus_OBSERVATIONS_OHClevels-lag%s_v2_AccH-%s_AccR-%s.png' %\n (lag, accurate, accurate), dpi=300)\n", (18314, 18453), True, 'import matplotlib.pyplot as plt\n'), ((6191, 6234), 'calc_Stats.remove_trend_obs', 'dSS.remove_trend_obs', (['models_var', '"""surface"""'], {}), "(models_var, 'surface')\n", (6211, 6234), True, 'import calc_Stats as dSS\n'), ((6712, 6742), 'numpy.where', 'np.where', (['(yearsobs >= AGWstart)'], {}), '(yearsobs >= AGWstart)\n', (6720, 6742), True, 'import numpy as np\n'), ((10213, 10239), 'numpy.asarray', 'np.asarray', (['ohc_allenscomp'], {}), '(ohc_allenscomp)\n', (10223, 10239), True, 'import numpy as np\n'), ((11221, 11264), 'calc_Stats.remove_trend_obs', 'dSS.remove_trend_obs', (['models_var', '"""surface"""'], {}), "(models_var, 'surface')\n", (11241, 11264), 
True, 'import calc_Stats as dSS\n'), ((11742, 11772), 'numpy.where', 'np.where', (['(yearsobs >= AGWstart)'], {}), '(yearsobs >= AGWstart)\n', (11750, 11772), True, 'import numpy as np\n'), ((15374, 15400), 'numpy.asarray', 'np.asarray', (['ohc_allenscomp'], {}), '(ohc_allenscomp)\n', (15384, 15400), True, 'import numpy as np\n'), ((15891, 15916), 'numpy.arange', 'np.arange', (['(-1.5)', '(1.6)', '(0.5)'], {}), '(-1.5, 1.6, 0.5)\n', (15900, 15916), True, 'import numpy as np\n'), ((15961, 15987), 'numpy.arange', 'np.arange', (['(-1.5)', '(1.6)', '(0.02)'], {}), '(-1.5, 1.6, 0.02)\n', (15970, 15987), True, 'import numpy as np\n'), ((16538, 16576), 'numpy.where', 'np.where', (['(lons > 180)', '(lons - 360)', 'lons'], {}), '(lons > 180, lons - 360, lons)\n', (16546, 16576), True, 'import numpy as np\n'), ((16587, 16610), 'numpy.meshgrid', 'np.meshgrid', (['lons', 'lats'], {}), '(lons, lats)\n', (16598, 16610), True, 'import numpy as np\n'), ((17381, 17433), 'matplotlib.pyplot.title', 'plt.title', (['"""\\\\textbf{Onset}"""'], {'fontsize': '(15)', 'color': '"""k"""'}), "('\\\\textbf{Onset}', fontsize=15, color='k')\n", (17390, 17433), True, 'import matplotlib.pyplot as plt\n'), ((17458, 17528), 'matplotlib.pyplot.title', 'plt.title', (["('\\\\textbf{%s-Year Composite}' % lag)"], {'fontsize': '(15)', 'color': '"""k"""'}), "('\\\\textbf{%s-Year Composite}' % lag, fontsize=15, color='k')\n", (17467, 17528), True, 'import matplotlib.pyplot as plt\n'), ((6070, 6092), 'numpy.asarray', 'np.asarray', (['models_var'], {}), '(models_var)\n', (6080, 6092), True, 'import numpy as np\n'), ((6905, 7025), 'numpy.genfromtxt', 'np.genfromtxt', (['"""/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt"""'], {'unpack': '(True)'}), "(\n '/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt'\n , unpack=True)\n", (6918, 7025), True, 'import numpy as np\n'), ((7745, 7755), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7753, 7755), False, 'import sys\n'), ((11100, 11122), 'numpy.asarray', 'np.asarray', (['models_var'], {}), '(models_var)\n', (11110, 11122), True, 'import numpy as np\n'), ((11935, 12055), 'numpy.genfromtxt', 'np.genfromtxt', (['"""/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt"""'], {'unpack': '(True)'}), "(\n '/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt'\n , unpack=True)\n", (11948, 12055), True, 'import numpy as np\n'), ((12775, 12785), 'sys.exit', 'sys.exit', ([], {}), '()\n', (12783, 12785), False, 'import sys\n'), ((16008, 16033), 'numpy.arange', 'np.arange', (['(-1.5)', '(1.6)', '(0.5)'], {}), '(-1.5, 1.6, 0.5)\n', (16017, 16033), True, 'import numpy as np\n'), ((7336, 7456), 'numpy.genfromtxt', 'np.genfromtxt', (['"""/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt"""'], {'unpack': '(True)'}), "(\n '/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt'\n , unpack=True)\n", (7349, 7456), True, 'import numpy as np\n'), ((12366, 12486), 'numpy.genfromtxt', 'np.genfromtxt', (['"""/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt"""'], {'unpack': '(True)'}), "(\n '/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt'\n , unpack=True)\n", (12379, 12486), True, 'import numpy as np\n'), ((10106, 10116), 'sys.exit', 'sys.exit', ([], {}), '()\n', (10114, 10116), False, 'import sys\n'), ((14340, 14395), 'numpy.nanmean', 'np.nanmean', (['ohcready[yr + lag1:yr + lag2, 
:, :]'], {'axis': '(0)'}), '(ohcready[yr + lag1:yr + lag2, :, :], axis=0)\n', (14350, 14395), True, 'import numpy as np\n'), ((15264, 15274), 'sys.exit', 'sys.exit', ([], {}), '()\n', (15272, 15274), False, 'import sys\n'), ((14593, 14648), 'numpy.nanmean', 'np.nanmean', (['ohcready[yr + lag1:yr + lag2, :, :]'], {'axis': '(0)'}), '(ohcready[yr + lag1:yr + lag2, :, :], axis=0)\n', (14603, 14648), True, 'import numpy as np\n'), ((14869, 14924), 'numpy.nanmean', 'np.nanmean', (['ohcready[yr + lag1:yr + lag2, :, :]'], {'axis': '(0)'}), '(ohcready[yr + lag1:yr + lag2, :, :], axis=0)\n', (14879, 14924), True, 'import numpy as np\n'), ((15122, 15177), 'numpy.nanmean', 'np.nanmean', (['ohcready[yr + lag1:yr + lag2, :, :]'], {'axis': '(0)'}), '(ohcready[yr + lag1:yr + lag2, :, :], axis=0)\n', (15132, 15177), True, 'import numpy as np\n')]
|
import torch
import numpy as np
import torch.nn.functional as F
from torch.nn.utils.clip_grad import clip_grad_norm_
from mpi_utils.mpi_utils import sync_grads
def update_entropy(alpha, log_alpha, target_entropy, log_pi, alpha_optim, cfg):
if cfg.automatic_entropy_tuning:
alpha_loss = -(log_alpha * (log_pi + target_entropy).detach()).mean()
alpha_optim.zero_grad()
alpha_loss.backward()
alpha_optim.step()
alpha = log_alpha.exp()
alpha_tlogs = alpha.clone()
else:
alpha_loss = torch.tensor(0.)
alpha_tlogs = torch.tensor(alpha)
return alpha_loss, alpha_tlogs
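# --- Hedged note (added for clarity; not part of the original module) ---
# With automatic entropy tuning, the temperature alpha is trained on the SAC
# objective J(alpha) = E_t[ -log_alpha * (log_pi_t + target_entropy).detach() ],
# so alpha grows when the policy entropy (-E[log_pi]) falls below
# target_entropy and shrinks otherwise.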
def update_flat(actor_network, critic_network, critic_target_network, policy_optim, critic_optim, alpha, log_alpha,
target_entropy, alpha_optim, obs_norm, ag_norm, g_norm, obs_next_norm, actions, rewards, cfg):
inputs_norm = np.concatenate([obs_norm, ag_norm, g_norm], axis=1)
inputs_next_norm = np.concatenate([obs_next_norm, ag_norm, g_norm], axis=1)
inputs_norm_tensor = torch.tensor(inputs_norm, dtype=torch.float32)
inputs_next_norm_tensor = torch.tensor(inputs_next_norm, dtype=torch.float32)
actions_tensor = torch.tensor(actions, dtype=torch.float32)
r_tensor = torch.tensor(rewards, dtype=torch.float32).reshape(rewards.shape[0], 1)
if cfg.cuda:
inputs_norm_tensor = inputs_norm_tensor.cuda()
inputs_next_norm_tensor = inputs_next_norm_tensor.cuda()
actions_tensor = actions_tensor.cuda()
r_tensor = r_tensor.cuda()
with torch.no_grad():
actions_next, log_pi_next, _ = actor_network.sample(inputs_next_norm_tensor)
qf_next_target = critic_target_network(inputs_next_norm_tensor, actions_next)
min_qf_next_target = torch.min(qf_next_target, dim=0).values - alpha * log_pi_next
next_q_value = r_tensor + cfg.gamma * min_qf_next_target
# the q loss
qf = critic_network(inputs_norm_tensor, actions_tensor)
qf_loss = torch.stack([F.mse_loss(_qf, next_q_value) for _qf in qf]).mean()
# the actor loss
pi, log_pi, _ = actor_network.sample(inputs_norm_tensor)
qf_pi = critic_network(inputs_norm_tensor, pi)
min_qf_pi = torch.min(qf_pi, dim=0).values
policy_loss = ((alpha * log_pi) - min_qf_pi).mean()
# update actor network
policy_optim.zero_grad()
policy_loss.backward()
sync_grads(actor_network)
policy_optim.step()
# update the critic_network
critic_optim.zero_grad()
qf_loss.backward()
if cfg.clip_grad_norm:
clip_grad_norm_(critic_network.parameters(), cfg.max_norm)
sync_grads(critic_network)
critic_optim.step()
alpha_loss, alpha_tlogs = update_entropy(alpha, log_alpha, target_entropy, log_pi, alpha_optim, cfg)
train_metrics = dict(q_loss=qf_loss.item(),
next_q=next_q_value.mean().item(),
policy_loss=policy_loss.item(),
alpha_loss=alpha_loss.item(),
alpha_tlogs=alpha_tlogs.item())
for idx, (_qf, _qtarget) in enumerate(zip(qf, qf_next_target)):
train_metrics[f'q_{idx}'] = _qf.mean().item()
train_metrics[f'q_target_{idx}'] = _qtarget.mean().item()
return train_metrics
def update_language(actor_network, critic_network, critic_target_network, policy_optim, critic_optim, alpha, log_alpha,
target_entropy, alpha_optim, obs_norm, instruction, obs_next_norm, actions, rewards, cfg):
inputs_norm = obs_norm
inputs_next_norm = obs_next_norm
inputs_norm_tensor = torch.tensor(inputs_norm, dtype=torch.float32)
inputs_next_norm_tensor = torch.tensor(inputs_next_norm, dtype=torch.float32)
actions_tensor = torch.tensor(actions, dtype=torch.float32)
r_tensor = torch.tensor(rewards, dtype=torch.float32).reshape(rewards.shape[0], 1)
instruction_tensor = torch.tensor(instruction, dtype=torch.long)
if cfg.cuda:
inputs_norm_tensor = inputs_norm_tensor.cuda()
inputs_next_norm_tensor = inputs_next_norm_tensor.cuda()
actions_tensor = actions_tensor.cuda()
r_tensor = r_tensor.cuda()
instruction_tensor = instruction_tensor.cuda()
with torch.no_grad():
actions_next, log_pi_next, _ = actor_network.sample(inputs_next_norm_tensor, instruction_tensor)
qf_next_target = critic_target_network(inputs_next_norm_tensor, actions_next, instruction_tensor)
min_qf_next_target = torch.min(qf_next_target, dim=0).values - alpha * log_pi_next
next_q_value = r_tensor + cfg.gamma * min_qf_next_target
# the q loss
qf = critic_network(inputs_norm_tensor, actions_tensor, instruction_tensor)
qf_loss = torch.stack([F.mse_loss(_qf, next_q_value) for _qf in qf]).mean()
# the actor loss
pi, log_pi, _ = actor_network.sample(inputs_norm_tensor, instruction_tensor)
qf_pi = critic_network(inputs_norm_tensor, pi, instruction_tensor)
min_qf_pi = torch.min(qf_pi, dim=0).values
policy_loss = ((alpha * log_pi) - min_qf_pi).mean()
# update actor network
policy_optim.zero_grad()
policy_loss.backward()
sync_grads(actor_network)
policy_optim.step()
# update the critic_network
critic_optim.zero_grad()
qf_loss.backward()
if cfg.clip_grad_norm:
clip_grad_norm_(critic_network.parameters(), cfg.max_norm)
sync_grads(critic_network)
critic_optim.step()
alpha_loss, alpha_tlogs = update_entropy(alpha, log_alpha, target_entropy, log_pi, alpha_optim, cfg)
train_metrics = dict(q_loss=qf_loss.item(),
next_q=next_q_value.mean().item(),
policy_loss=policy_loss.item(),
alpha_loss=alpha_loss.item(),
alpha_tlogs=alpha_tlogs.item())
for idx, (_qf, _qtarget) in enumerate(zip(qf, qf_next_target)):
train_metrics[f'q_{idx}'] = _qf.mean().item()
train_metrics[f'q_target_{idx}'] = _qtarget.mean().item()
return train_metrics
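# --- Hedged usage sketch (illustrative; not part of the original module) ---
# Both update functions assume the caller keeps critic_target_network in step
# with critic_network, typically via a soft (polyak) update after each call:
def soft_update(target_network, source_network, polyak=0.95):
    for target_param, param in zip(target_network.parameters(), source_network.parameters()):
        target_param.data.copy_(polyak * target_param.data + (1.0 - polyak) * param.data)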
|
[
"torch.nn.functional.mse_loss",
"torch.min",
"torch.tensor",
"numpy.concatenate",
"mpi_utils.mpi_utils.sync_grads",
"torch.no_grad"
] |
[((889, 940), 'numpy.concatenate', 'np.concatenate', (['[obs_norm, ag_norm, g_norm]'], {'axis': '(1)'}), '([obs_norm, ag_norm, g_norm], axis=1)\n', (903, 940), True, 'import numpy as np\n'), ((964, 1020), 'numpy.concatenate', 'np.concatenate', (['[obs_next_norm, ag_norm, g_norm]'], {'axis': '(1)'}), '([obs_next_norm, ag_norm, g_norm], axis=1)\n', (978, 1020), True, 'import numpy as np\n'), ((1047, 1093), 'torch.tensor', 'torch.tensor', (['inputs_norm'], {'dtype': 'torch.float32'}), '(inputs_norm, dtype=torch.float32)\n', (1059, 1093), False, 'import torch\n'), ((1124, 1175), 'torch.tensor', 'torch.tensor', (['inputs_next_norm'], {'dtype': 'torch.float32'}), '(inputs_next_norm, dtype=torch.float32)\n', (1136, 1175), False, 'import torch\n'), ((1197, 1239), 'torch.tensor', 'torch.tensor', (['actions'], {'dtype': 'torch.float32'}), '(actions, dtype=torch.float32)\n', (1209, 1239), False, 'import torch\n'), ((2383, 2408), 'mpi_utils.mpi_utils.sync_grads', 'sync_grads', (['actor_network'], {}), '(actor_network)\n', (2393, 2408), False, 'from mpi_utils.mpi_utils import sync_grads\n'), ((2616, 2642), 'mpi_utils.mpi_utils.sync_grads', 'sync_grads', (['critic_network'], {}), '(critic_network)\n', (2626, 2642), False, 'from mpi_utils.mpi_utils import sync_grads\n'), ((3588, 3634), 'torch.tensor', 'torch.tensor', (['inputs_norm'], {'dtype': 'torch.float32'}), '(inputs_norm, dtype=torch.float32)\n', (3600, 3634), False, 'import torch\n'), ((3665, 3716), 'torch.tensor', 'torch.tensor', (['inputs_next_norm'], {'dtype': 'torch.float32'}), '(inputs_next_norm, dtype=torch.float32)\n', (3677, 3716), False, 'import torch\n'), ((3738, 3780), 'torch.tensor', 'torch.tensor', (['actions'], {'dtype': 'torch.float32'}), '(actions, dtype=torch.float32)\n', (3750, 3780), False, 'import torch\n'), ((3893, 3936), 'torch.tensor', 'torch.tensor', (['instruction'], {'dtype': 'torch.long'}), '(instruction, dtype=torch.long)\n', (3905, 3936), False, 'import torch\n'), ((5149, 5174), 'mpi_utils.mpi_utils.sync_grads', 'sync_grads', (['actor_network'], {}), '(actor_network)\n', (5159, 5174), False, 'from mpi_utils.mpi_utils import sync_grads\n'), ((5382, 5408), 'mpi_utils.mpi_utils.sync_grads', 'sync_grads', (['critic_network'], {}), '(critic_network)\n', (5392, 5408), False, 'from mpi_utils.mpi_utils import sync_grads\n'), ((547, 564), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (559, 564), False, 'import torch\n'), ((586, 605), 'torch.tensor', 'torch.tensor', (['alpha'], {}), '(alpha)\n', (598, 605), False, 'import torch\n'), ((1557, 1572), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1570, 1572), False, 'import torch\n'), ((2208, 2231), 'torch.min', 'torch.min', (['qf_pi'], {'dim': '(0)'}), '(qf_pi, dim=0)\n', (2217, 2231), False, 'import torch\n'), ((4222, 4237), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4235, 4237), False, 'import torch\n'), ((4974, 4997), 'torch.min', 'torch.min', (['qf_pi'], {'dim': '(0)'}), '(qf_pi, dim=0)\n', (4983, 4997), False, 'import torch\n'), ((1255, 1297), 'torch.tensor', 'torch.tensor', (['rewards'], {'dtype': 'torch.float32'}), '(rewards, dtype=torch.float32)\n', (1267, 1297), False, 'import torch\n'), ((3796, 3838), 'torch.tensor', 'torch.tensor', (['rewards'], {'dtype': 'torch.float32'}), '(rewards, dtype=torch.float32)\n', (3808, 3838), False, 'import torch\n'), ((1774, 1806), 'torch.min', 'torch.min', (['qf_next_target'], {'dim': '(0)'}), '(qf_next_target, dim=0)\n', (1783, 1806), False, 'import torch\n'), ((4479, 4511), 'torch.min', 'torch.min', 
(['qf_next_target'], {'dim': '(0)'}), '(qf_next_target, dim=0)\n', (4488, 4511), False, 'import torch\n'), ((2006, 2035), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['_qf', 'next_q_value'], {}), '(_qf, next_q_value)\n', (2016, 2035), True, 'import torch.nn.functional as F\n'), ((4731, 4760), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['_qf', 'next_q_value'], {}), '(_qf, next_q_value)\n', (4741, 4760), True, 'import torch.nn.functional as F\n')]
|
##--------------------------------Main file------------------------------------
##
## Copyright (C) 2020 by <NAME> (<EMAIL>)
## June, 2020
## <EMAIL>
##-----------------------------------------------------------------------------
# Multiple random variables
# Two databases are considered; they contain the following:
# 1. ****** A record of the relative frequency of two joint random
#           variables, in table form: xy.csv
# 2. ****** Pairs (x, y) and their associated probability: xyp.csv
# Recall that a random variable is a deterministic function.
#### **************** Algorithm **************** ####
#******************************************************
#                 IMPORTING PACKAGES
#******************************************************
# Note that the commented-out imports below are not required, but they were
# used at some point while developing this assignment, so they are kept in
# the code as comments for reference.
# from __future__ import division
# from pylab import *
# from sklearn import *
# from sklearn.preprocessing import PolynomialFeatures
# import math
# import decimal
# import pandas as pd
# from scipy.stats import norm
# from scipy.stats import rayleigh
# import csv
import pandas as pd
from collections import OrderedDict
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from mpl_toolkits.mplot3d import axes3d
from numpy import *
import numpy as np
from matplotlib import cm
import scipy.stats as stats
from scipy.optimize import curve_fit
#******************************************************
#                 DEFINITIONS
#******************************************************
def distribucion_normal(va, mu, sigma):
dist_normal = 1/(np.sqrt(2*np.pi*sigma**2)) * np.exp(-(va-mu)**2/(2*sigma**2))
return dist_normal
def densidad_conjunta(va0, va1, mu0, sigma0, mu1, sigma1):
	# Joint density of two independent normals: f(x, y) = f_X(x) * f_Y(y)
	val_conjunto = (1/(np.sqrt(2*np.pi*sigma0**2)) * np.exp(-(va0-mu0)**2/(2*sigma0**2))) * (1/(np.sqrt(2*np.pi*sigma1**2)) * np.exp(-(va1-mu1)**2/(2*sigma1**2)))
	return val_conjunto
def ajuste_curva(marginal, par1, par2, distri_norm, graph_label_dis, distri_x_name_img, func_graph_label, function_va_img):
va = np.linspace(par1,par2,len(marginal))
plt.bar(va, marginal, label= graph_label_dis)
plt.legend()
plt.savefig("/Users/belindabrown/Desktop/VA_multiples/results/" + distri_x_name_img + ".png")
parametros_va, _ = curve_fit(distri_norm, va, marginal)
mu, sigma = parametros_va[0], parametros_va[1]
print("\n\nMu " + distri_x_name_img + " = ", mu)
print("Sigma " + distri_x_name_img + " = ", sigma)
va_function = stats.norm(mu,sigma)
curva_ajustada = np.linspace(va_function.ppf(0.01), va_function.ppf(0.99), 100)
plt.plot(curva_ajustada,va_function.pdf(curva_ajustada),label=func_graph_label)
plt.legend()
plt.savefig("/Users/belindabrown/Desktop/VA_multiples/results/" + function_va_img+".png")
	# Clear the plotting area
plt.cla()
return curva_ajustada, mu, sigma
def valor_esperado(marginal, lim_inferior, lim_superior, de_quien_v_valor_esperado):
	# Build the (inclusive) domain [lim_inferior, lim_superior] of the variable
	dominio = []
	valor_esperado_marginal = 0
	for k in range(lim_inferior, lim_superior + 1):
		dominio.append(k)
	dominio = list(OrderedDict.fromkeys(dominio))
	print("\n\nEl dominio es de: ", dominio)
	# E[X] = sum_i x_i * P(X = x_i) over the marginal PMF
	for i in range(0, len(marginal)):
		valor_esperado_marginal = valor_esperado_marginal + dominio[i]*marginal[i]
	print("\n" + de_quien_v_valor_esperado + " tiene un valor de: ", valor_esperado_marginal)
	return valor_esperado_marginal
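# --- Hedged note (added for clarity; not part of the original script) ---
# valor_esperado() implements E[X] = sum_i x_i * P(X = x_i) over the marginal
# PMF; for example, a fair six-sided die gives
#   sum(k * (1/6) for k in range(1, 7)) == 3.5.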
def grafica_en2d(mu_va, sigma_va, par1_modelo, nombre2d):
va_funcion_distri = stats.norm(mu_va,sigma_va)
curve = np.linspace(va_funcion_distri.ppf(0.01), va_funcion_distri.ppf(0.99), par1_modelo)
plt.plot(curve,va_funcion_distri.pdf(curve),label=nombre2d)
plt.legend()
plt.savefig("/Users/belindabrown/Desktop/VA_multiples/results/" + nombre2d+".png")
	# Clear the plotting area
plt.cla()
return
def grafica_en3d(VA0_modelo, VA1_modelo, VA0, VA1, nombre):
Z = []
for i in VA0:
XY = []
for j in VA1:
XY.append(i*j)
Z.append(XY)
fig = plt.figure()
eje_x= plt.axes(projection='3d')
VA0,VA1 = np.meshgrid(VA0_modelo,VA1_modelo)
eje_x.plot_surface(VA0,VA1,np.array(Z),cmap=cm.coolwarm)
plt.savefig("/Users/belindabrown/Desktop/VA_multiples/results/" + nombre+".png")
return
#******************************************************
#                 READING VALUES
#                 FROM THE CSV FILES
#******************************************************
data = pd.read_csv("/Users/belindabrown/Desktop/VA_multiples/data_base/xy.csv", index_col=0)
data_xyp = pd.read_csv("/Users/belindabrown/Desktop/VA_multiples/data_base/xyp.csv")
#******************************************************
#                 BEST-FIT CURVES FOR THE
#                 MARGINAL DENSITY
#                 FUNCTIONS OF X & Y
#******************************************************
# The marginal values of both X and Y are required.
# Summing across all columns (axis=1) gives the marginal probability of X.
marg_value_x = [n for n in data.sum(axis=1, numeric_only=True)]
# Summing across all rows (axis=0) gives the marginal probability of Y.
marg_value_y = [n for n in data.sum(axis=0, numeric_only=True)]
print("\nValor marginal de X: ", marg_value_x)
print("\nValor marginal de Y: ", marg_value_y)
x_curva_modelo, x_mu, x_sigma = ajuste_curva(marg_value_x, 5, 15, distribucion_normal, "Datos que pertenencen a X","Datos_de_X", "Modelos de X(x)", "Modelado_X(x)")
y_curva_modelo, y_mu, y_sigma = ajuste_curva(marg_value_y, 5, 25, distribucion_normal, "Datos que pertenencen a Y","Datos_de_Y", "Modelos de Y(y)", "Modelado_Y(y)")
#******************************************************
#                 JOINT DENSITY
#                 FUNCTION OF
#                 X & Y
#******************************************************
probabi_conjuntaX = distribucion_normal(x_curva_modelo,x_mu,x_sigma)
probabi_conjuntaY = distribucion_normal(y_curva_modelo,y_mu,y_sigma)
#******************************************************
#     CORRELATION AND COVARIANCE VALUES,
#     PEARSON CORRELATION COEFFICIENT
#     AND THEIR MEANING
#******************************************************
###### OBTAINED FROM XY.CSV
# The marginals computed above are needed to obtain the expected values
# E[X] and E[Y] of X and Y.
# The range passed below is [x0, x1], i.e. both limits are included.
e_x = valor_esperado(marg_value_x,5,15, "X")
e_y = valor_esperado(marg_value_y,5,25, "Y")
multi_valor_esperados = e_x*e_y
# Compute E[X]*E[Y]
print("\n\nEl valor de E[X]E[Y] es de: ", multi_valor_esperados)
###### OBTAINED FROM XYP.CSV
# The first row of the file holds the labels x, y, p.
todos_mu_sum = data_xyp.x * data_xyp.y * data_xyp.p
# Summing x*y*p over all pairs gives the correlation E[XY].
correlacion = todos_mu_sum.sum()
# As seen in class, the covariance is the correlation minus the product of
# the expected values: Cov(X, Y) = E[XY] - E[X]E[Y].
covarianza = correlacion - multi_valor_esperados
# The Pearson coefficient is the covariance divided by the product of the
# standard deviations fitted above (about 3.2994 for X and 6.0269 for Y in
# the runs reported here), so the fitted sigmas are reused directly.
coef_pearson = covarianza/(x_sigma*y_sigma)
print("\nEl resultado de la correlación es de: ", correlacion)
print("\nEl resultado de la covarianza es de: ",covarianza)
print("\nDe acuerdo a los datos obtenidos y considerando todo sus decimales se tiene que el coeficiente de Pearson es de: ", coef_pearson)
#******************************************************
#     2D PLOTS OF THE MARGINAL
#     DENSITY FUNCTIONS
#     &
#     3D PLOT OF THE JOINT
#     DENSITY FUNCTION
#******************************************************
# Although the values are rounded for display, the full parameters already
# computed above are used for the model plots.
distribucion_de_x = grafica_en2d(x_mu, x_sigma, 100,"Distribucion_de_X")
distribucion_de_y = grafica_en2d(y_mu, y_sigma, 100,"Distribucion_de_Y")
dis_cojun3d = grafica_en3d(x_curva_modelo, y_curva_modelo, probabi_conjuntaX, probabi_conjuntaY, "Distribucion_en_3D")
|
[
"scipy.optimize.curve_fit",
"matplotlib.pyplot.savefig",
"collections.OrderedDict.fromkeys",
"pandas.read_csv",
"numpy.sqrt",
"scipy.stats.norm",
"numpy.exp",
"numpy.array",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axes",
"numpy.meshgrid",
"matplotlib.pyplot.cla",
"matplotlib.pyplot.legend"
] |
[((4558, 4647), 'pandas.read_csv', 'pd.read_csv', (['"""/Users/belindabrown/Desktop/VA_multiples/data_base/xy.csv"""'], {'index_col': '(0)'}), "('/Users/belindabrown/Desktop/VA_multiples/data_base/xy.csv',\n index_col=0)\n", (4569, 4647), True, 'import pandas as pd\n'), ((4655, 4728), 'pandas.read_csv', 'pd.read_csv', (['"""/Users/belindabrown/Desktop/VA_multiples/data_base/xyp.csv"""'], {}), "('/Users/belindabrown/Desktop/VA_multiples/data_base/xyp.csv')\n", (4666, 4728), True, 'import pandas as pd\n'), ((2280, 2324), 'matplotlib.pyplot.bar', 'plt.bar', (['va', 'marginal'], {'label': 'graph_label_dis'}), '(va, marginal, label=graph_label_dis)\n', (2287, 2324), True, 'import matplotlib.pyplot as plt\n'), ((2327, 2339), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2337, 2339), True, 'import matplotlib.pyplot as plt\n'), ((2341, 2438), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('/Users/belindabrown/Desktop/VA_multiples/results/' + distri_x_name_img +\n '.png')"], {}), "('/Users/belindabrown/Desktop/VA_multiples/results/' +\n distri_x_name_img + '.png')\n", (2352, 2438), True, 'import matplotlib.pyplot as plt\n'), ((2457, 2493), 'scipy.optimize.curve_fit', 'curve_fit', (['distri_norm', 'va', 'marginal'], {}), '(distri_norm, va, marginal)\n', (2466, 2493), False, 'from scipy.optimize import curve_fit\n'), ((2663, 2684), 'scipy.stats.norm', 'stats.norm', (['mu', 'sigma'], {}), '(mu, sigma)\n', (2673, 2684), True, 'import scipy.stats as stats\n'), ((2847, 2859), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2857, 2859), True, 'import matplotlib.pyplot as plt\n'), ((2861, 2956), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('/Users/belindabrown/Desktop/VA_multiples/results/' + function_va_img + '.png'\n )"], {}), "('/Users/belindabrown/Desktop/VA_multiples/results/' +\n function_va_img + '.png')\n", (2872, 2956), True, 'import matplotlib.pyplot as plt\n'), ((3007, 3016), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (3014, 3016), True, 'import matplotlib.pyplot as plt\n'), ((3642, 3669), 'scipy.stats.norm', 'stats.norm', (['mu_va', 'sigma_va'], {}), '(mu_va, sigma_va)\n', (3652, 3669), True, 'import scipy.stats as stats\n'), ((3823, 3835), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3833, 3835), True, 'import matplotlib.pyplot as plt\n'), ((3837, 3925), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('/Users/belindabrown/Desktop/VA_multiples/results/' + nombre2d + '.png')"], {}), "('/Users/belindabrown/Desktop/VA_multiples/results/' + nombre2d +\n '.png')\n", (3848, 3925), True, 'import matplotlib.pyplot as plt\n'), ((3976, 3985), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (3983, 3985), True, 'import matplotlib.pyplot as plt\n'), ((4144, 4156), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4154, 4156), True, 'import matplotlib.pyplot as plt\n'), ((4165, 4190), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (4173, 4190), True, 'import matplotlib.pyplot as plt\n'), ((4202, 4237), 'numpy.meshgrid', 'np.meshgrid', (['VA0_modelo', 'VA1_modelo'], {}), '(VA0_modelo, VA1_modelo)\n', (4213, 4237), True, 'import numpy as np\n'), ((4296, 4382), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('/Users/belindabrown/Desktop/VA_multiples/results/' + nombre + '.png')"], {}), "('/Users/belindabrown/Desktop/VA_multiples/results/' + nombre +\n '.png')\n", (4307, 4382), True, 'import matplotlib.pyplot as plt\n'), ((1821, 1863), 'numpy.exp', 'np.exp', (['(-(va - mu) ** 2 / (2 * sigma 
** 2))'], {}), '(-(va - mu) ** 2 / (2 * sigma ** 2))\n', (1827, 1863), True, 'import numpy as np\n'), ((3252, 3281), 'collections.OrderedDict.fromkeys', 'OrderedDict.fromkeys', (['dominio'], {}), '(dominio)\n', (3272, 3281), False, 'from collections import OrderedDict\n'), ((4265, 4276), 'numpy.array', 'np.array', (['Z'], {}), '(Z)\n', (4273, 4276), True, 'import numpy as np\n'), ((1792, 1823), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * sigma ** 2)'], {}), '(2 * np.pi * sigma ** 2)\n', (1799, 1823), True, 'import numpy as np\n'), ((1949, 1981), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * sigma0 ** 2)'], {}), '(2 * np.pi * sigma0 ** 2)\n', (1956, 1981), True, 'import numpy as np\n'), ((1979, 2024), 'numpy.exp', 'np.exp', (['(-(va0 - mu0) ** 2 / (2 * sigma0 ** 2))'], {}), '(-(va0 - mu0) ** 2 / (2 * sigma0 ** 2))\n', (1985, 2024), True, 'import numpy as np\n'), ((2051, 2096), 'numpy.exp', 'np.exp', (['(-(va1 - mu1) ** 2 / (2 * sigma1 ** 2))'], {}), '(-(va1 - mu1) ** 2 / (2 * sigma1 ** 2))\n', (2057, 2096), True, 'import numpy as np\n'), ((2021, 2053), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * sigma1 ** 2)'], {}), '(2 * np.pi * sigma1 ** 2)\n', (2028, 2053), True, 'import numpy as np\n')]
|
# -*- coding:UTF-8 -*-
import pandas as pd
from minepy import MINE
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.ensemble import ExtraTreesClassifier
import xgboost as xgb
import operator
from sklearn.utils import shuffle
from Common.ModelCommon import ModelCV
from sklearn import svm
import numpy as np
class NAClass(object):
def __init__(self):
pass
    # Get the list of features that contain NA values
def GetNAFeatures(self, df):
return df.columns[df.isnull().sum() != 0].tolist()
    # Show the missing-value features sorted from most to fewest NAs
def ShowNAInfo(self, df, NAlist):
NA_count = df[NAlist].isnull().sum().sort_values(ascending=False)
NAInfo = pd.DataFrame({'NA_count': NA_count, 'NA_percent': NA_count/df.shape[0]})
print(NAInfo)
    # Generic interface for handling features with missing values; strategy selects the fill policy
def HandleNA(self, df, NAfeaturesList, strategy='mean'):
if strategy == 'mean':
for feature in NAfeaturesList:
if df[feature].dtypes == 'object':
raise ValueError('Nonnumeric feature!')
df[feature].fillna(df[feature].mean(), inplace=True)
elif strategy == 'mode':
for feature in NAfeaturesList:
df[feature].fillna(df[feature].mode()[0], inplace=True)
elif strategy == 'drop':
df.drop(NAfeaturesList, axis=1, inplace=True)
else:
for feature in NAfeaturesList:
if (df[feature].dtypes == 'object' and type(strategy) != str) or (
df[feature].dtypes != 'object' and type(strategy) == str):
raise ValueError('Mismatched type!')
df[feature].fillna(strategy, inplace=True)
def checkNA(self, df):
return df.isnull().sum().max()
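# Split columns by dtype: object columns are treated as categorical, the rest as numerical.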
def CategoricalList(df):
return [attr for attr in df.columns if df.dtypes[attr] == 'object']
def NumericalList(df):
return [attr for attr in df.columns if df.dtypes[attr] != 'object']
def GetTargetDf(df, target):
targetdf = pd.DataFrame(df[target].value_counts())
targetdf['Percent'] = targetdf[target]/df.shape[0]
return targetdf
def GetZeroDf(df):
zerodf = pd.DataFrame(df[df == 0].count())
zerodf['Percent'] = zerodf[0]/df.shape[0]
zerodf.rename(columns={0: 'Count'}, inplace=True)
return zerodf
def GetValueCountDf(df):
valueCountList = []
for feat in df.columns:
valueCountList.append(df[feat].value_counts().shape[0])
valueCountDf = pd.DataFrame({'feat': df.columns, 'valueCount': valueCountList})
return valueCountDf
def GetZeroColumns(df):
zeros = df[df != 0].count()
return zeros[zeros == 0].index
def mic(x, y):
m = MINE()
m.compute_score(x, y)
return m.mic()
def featShow(train_data, feat):
plt.scatter(range(train_data.shape[0]), train_data[feat].values, s=20)
plt.xlabel('index')
plt.ylabel(feat)
plt.show()
def TypeShow(train_data):
dtype_df = train_data.dtypes.reset_index()
dtype_df.columns = ["Count", "Column Type"]
print(dtype_df.groupby("Column Type").aggregate('count').reset_index())
# Get feature importances from a tree ensemble
def TreeImportanceShow(train_data):
x = train_data[train_data.columns[:-1]]
y = train_data['TARGET']
clf = ExtraTreesClassifier()
clf.fit(x, y.astype('int'))
imptdf = pd.DataFrame({'feat': x.columns, 'importance': clf.feature_importances_})
imptdf_sort = imptdf.sort_values(by='importance', ascending=False)
# print("decision tree importance:\n", imptdf_sort)
sns.barplot(data=imptdf_sort, x='feat', y='importance')
plt.xticks(rotation='vertical')
# plt.show()
return imptdf_sort
def xgbImportanceShow(train_data):
x = train_data[train_data.columns[:-1]]
y = train_data['TARGET']
dtrain = xgb.DMatrix(x, y)
xgb_params = {"objective": "binary:logistic", "eta": 0.01, "max_depth": 8, "seed": 42, "silent": 1}
model = xgb.train(xgb_params, dtrain, num_boost_round=100)
impt = model.get_fscore()
impt = sorted(impt.items(), key=operator.itemgetter(1))
imptdf = pd.DataFrame(impt, columns=['feature', 'fscore'])
imptdf_sort = imptdf.sort_values(by='fscore', ascending=False)
# print("xgb importance:\n", imptdf_sort)
imptdf_sort.to_csv('../tmp/xgb_importance.csv', index=False)
xgb.plot_importance(model, max_num_features=400, height=0.8)
# plt.show()
return imptdf_sort
def valueCountsShow(train_data, featlist):
for feat in featlist:
print(train_data[feat].value_counts())
# rate: after undersampling, the number of class-0 samples equals rate times the number of class-1 samples
def underSampling(train, rate):
idx_0 = train[train['TARGET'] == 0].index
idx_1 = train[train['TARGET'] == 1].index
len_1 = len(train.loc[idx_1])
undersample_idx_0 = shuffle(idx_0, random_state=37, n_samples=int(len_1*rate))
idx_list = list(undersample_idx_0) + list(idx_1)
train = train.loc[idx_list].reset_index(drop=True)
return train
# repeat: how many times the class-1 samples are duplicated
def overSampling(train, repeat):
idx_1 = train[train['TARGET'] == 1].index
i = 0
while i < repeat:
train = pd.concat([train, train.iloc[idx_1, :]], axis=0).reset_index(drop=True)
i += 1
return train
# Use the CV score on train_data as the selection criterion; note that different sampling rates yield different final sample counts, which may bias the comparison
def getBestUnSamplingRate(train, ratelist):
bestscore = 0
bestrate = 0
for rate in ratelist:
svc = svm.LinearSVC()
train_data = underSampling(train, rate)
score = ModelCV(svc, 'svm', train_data, 5)
print("rate :%f, score:%f" % (rate, score))
if score > bestscore:
bestscore = score
bestrate = rate
print("best rate :%f, best score:%f" % (bestrate, bestscore))
return bestrate
def corr_heatmap(train, v):
correlations = train[v].corr()
# Create color map ranging between two colors
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(correlations, cmap=cmap, vmax=1.0, center=0, fmt='.2f',
square=True, linewidths=.5, annot=True, cbar_kws={"shrink": .75})
plt.show()
def typeShow(train_data):
print(train_data.dtypes.value_counts())
def getTypeMap(train_data):
typeMap = {}
typeMap['int64'] = train_data.dtypes[train_data.dtypes == 'int64'].index
typeMap['float64'] = train_data.dtypes[train_data.dtypes == 'float64'].index
return typeMap
# If iswhole is True, df is the full dataset and TARGET must be dropped before computing correlations; if False, the columns are already filtered and exclude TARGET
def getHighCorrList(df, thres, iswhole):
if iswhole:
x = df.iloc[:, :-1]
else:
x = df
corr = x.corr()
index = corr.index[np.where(corr > thres)[0]]
columns = corr.columns[np.where(corr > thres)[1]]
highCorrList = [[index[i], columns[i]] for i in range(len(index)) if index[i] != columns[i]]
uniqList = [[0, 0]]
for i in range(len(highCorrList)):
uniqCount = 0
for j in range(len(uniqList)):
if highCorrList[i][0] == uniqList[j][1] and highCorrList[i][1] == uniqList[j][0]:
uniqCount += 1
if uniqCount == 0:
uniqList.append(highCorrList[i])
del uniqList[0]
return uniqList
def getDropHighCorrList(highList):
dropList = []
for item in highList:
        if item[0] in dropList or item[1] in dropList:
            continue
        dropList.append(item[1])
return dropList
def getUinqueCorrDf(train, threshold):
cor_mat = train.corr()
important_corrs = (cor_mat[abs(cor_mat) > threshold][cor_mat != 1.0]).unstack().dropna().to_dict()
unique_important_corrs = pd.DataFrame(
list(set([(tuple(sorted(key)), important_corrs[key]) for key in important_corrs])),
columns=['attribute pair', 'correlation'])
    unique_important_corrs = unique_important_corrs.iloc[abs(unique_important_corrs['correlation']).argsort()[::-1]]
return unique_important_corrs
|
[
"xgboost.DMatrix",
"sklearn.ensemble.ExtraTreesClassifier",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xticks",
"xgboost.train",
"matplotlib.pyplot.xlabel",
"xgboost.plot_importance",
"seaborn.diverging_palette",
"sklearn.svm.LinearSVC",
"seaborn.heatmap",
"operator.itemgetter",
"numpy.where",
"Common.ModelCommon.ModelCV",
"pandas.DataFrame",
"minepy.MINE",
"seaborn.barplot",
"pandas.concat",
"matplotlib.pyplot.show"
] |
[((2446, 2510), 'pandas.DataFrame', 'pd.DataFrame', (["{'feat': df.columns, 'valueCount': valueCountList}"], {}), "({'feat': df.columns, 'valueCount': valueCountList})\n", (2458, 2510), True, 'import pandas as pd\n'), ((2653, 2659), 'minepy.MINE', 'MINE', ([], {}), '()\n', (2657, 2659), False, 'from minepy import MINE\n'), ((2818, 2837), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""index"""'], {}), "('index')\n", (2828, 2837), True, 'import matplotlib.pyplot as plt\n'), ((2842, 2858), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['feat'], {}), '(feat)\n', (2852, 2858), True, 'import matplotlib.pyplot as plt\n'), ((2863, 2873), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2871, 2873), True, 'import matplotlib.pyplot as plt\n'), ((3209, 3231), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {}), '()\n', (3229, 3231), False, 'from sklearn.ensemble import ExtraTreesClassifier\n'), ((3277, 3350), 'pandas.DataFrame', 'pd.DataFrame', (["{'feat': x.columns, 'importance': clf.feature_importances_}"], {}), "({'feat': x.columns, 'importance': clf.feature_importances_})\n", (3289, 3350), True, 'import pandas as pd\n'), ((3482, 3537), 'seaborn.barplot', 'sns.barplot', ([], {'data': 'imptdf_sort', 'x': '"""feat"""', 'y': '"""importance"""'}), "(data=imptdf_sort, x='feat', y='importance')\n", (3493, 3537), True, 'import seaborn as sns\n'), ((3542, 3573), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '"""vertical"""'}), "(rotation='vertical')\n", (3552, 3573), True, 'import matplotlib.pyplot as plt\n'), ((3737, 3754), 'xgboost.DMatrix', 'xgb.DMatrix', (['x', 'y'], {}), '(x, y)\n', (3748, 3754), True, 'import xgboost as xgb\n'), ((3871, 3921), 'xgboost.train', 'xgb.train', (['xgb_params', 'dtrain'], {'num_boost_round': '(100)'}), '(xgb_params, dtrain, num_boost_round=100)\n', (3880, 3921), True, 'import xgboost as xgb\n'), ((4025, 4074), 'pandas.DataFrame', 'pd.DataFrame', (['impt'], {'columns': "['feature', 'fscore']"}), "(impt, columns=['feature', 'fscore'])\n", (4037, 4074), True, 'import pandas as pd\n'), ((4257, 4317), 'xgboost.plot_importance', 'xgb.plot_importance', (['model'], {'max_num_features': '(400)', 'height': '(0.8)'}), '(model, max_num_features=400, height=0.8)\n', (4276, 4317), True, 'import xgboost as xgb\n'), ((5781, 5825), 'seaborn.diverging_palette', 'sns.diverging_palette', (['(220)', '(10)'], {'as_cmap': '(True)'}), '(220, 10, as_cmap=True)\n', (5802, 5825), True, 'import seaborn as sns\n'), ((5830, 5970), 'seaborn.heatmap', 'sns.heatmap', (['correlations'], {'cmap': 'cmap', 'vmax': '(1.0)', 'center': '(0)', 'fmt': '""".2f"""', 'square': '(True)', 'linewidths': '(0.5)', 'annot': '(True)', 'cbar_kws': "{'shrink': 0.75}"}), "(correlations, cmap=cmap, vmax=1.0, center=0, fmt='.2f', square=\n True, linewidths=0.5, annot=True, cbar_kws={'shrink': 0.75})\n", (5841, 5970), True, 'import seaborn as sns\n'), ((5984, 5994), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5992, 5994), True, 'import matplotlib.pyplot as plt\n'), ((651, 725), 'pandas.DataFrame', 'pd.DataFrame', (["{'NA_count': NA_count, 'NA_percent': NA_count / df.shape[0]}"], {}), "({'NA_count': NA_count, 'NA_percent': NA_count / df.shape[0]})\n", (663, 725), True, 'import pandas as pd\n'), ((5314, 5329), 'sklearn.svm.LinearSVC', 'svm.LinearSVC', ([], {}), '()\n', (5327, 5329), False, 'from sklearn import svm\n'), ((5394, 5428), 'Common.ModelCommon.ModelCV', 'ModelCV', (['svc', '"""svm"""', 'train_data', '(5)'], {}), "(svc, 'svm', train_data, 5)\n", (5401, 5428), False, 
'from Common.ModelCommon import ModelCV\n'), ((3988, 4010), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (4007, 4010), False, 'import operator\n'), ((6517, 6539), 'numpy.where', 'np.where', (['(corr > thres)'], {}), '(corr > thres)\n', (6525, 6539), True, 'import numpy as np\n'), ((6571, 6593), 'numpy.where', 'np.where', (['(corr > thres)'], {}), '(corr > thres)\n', (6579, 6593), True, 'import numpy as np\n'), ((5020, 5068), 'pandas.concat', 'pd.concat', (['[train, train.iloc[idx_1, :]]'], {'axis': '(0)'}), '([train, train.iloc[idx_1, :]], axis=0)\n', (5029, 5068), True, 'import pandas as pd\n')]
|
import mss
import numpy as np
from PIL import Image
from config import BOARD_HEIGHT, BOARD_WIDTH
CELL_SIZE = 22
BOARD_X = 14
BOARD_Y = 111
COLOR_CODES = {
(0, 0, 255): 1,
(0, 123, 0): 2,
(255, 0, 0): 3,
(0, 0, 123): 4,
(123, 0, 0): 5,
(0, 123, 123): 6,
(0, 0, 0): 7,
(123, 123, 123): 8,
(189, 189, 189): 0 #unopened/opened blank
}
def get_cell_type(cell) -> int:
cell_type = COLOR_CODES[cell.getpixel((15, 16))]
#cell_type=COLOR_CODES[cell.getpixel((13,14))]
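    # Distinguish an unopened cell from an opened blank cell by checking an edge pixel.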
if cell_type == 0 and cell.getpixel((1, 16)) != (255, 255, 255):
cell_type = -1
return cell_type
def get_board_array() -> np.ndarray:
with mss.mss() as sct:
screenshot = sct.grab(sct.monitors[0])
img = Image.frombytes('RGB', screenshot.size, screenshot.bgra, 'raw', 'BGRX')
#board=img.crop((384,111,1044,463))
board = img.crop((BOARD_X, BOARD_Y, BOARD_X + CELL_SIZE * BOARD_WIDTH, BOARD_Y + CELL_SIZE * BOARD_HEIGHT))
width, height = board.size
cell_imgs = [
board.crop((i, j, i + CELL_SIZE, j + CELL_SIZE)) for j in range(0, height, CELL_SIZE) for i in range(0, width, CELL_SIZE)
]
cells = np.fromiter((get_cell_type(cell) for cell in cell_imgs), dtype=np.int8)
grid = np.reshape(cells, (BOARD_HEIGHT, BOARD_WIDTH))
    #surround grid with -1 (so the cell surroundings can be computed with no errors)
return np.concatenate(
(
np.full((1, BOARD_WIDTH + 2), -1, dtype=np.int8), #top row of -1
np.insert(grid, (0, BOARD_WIDTH), -1, axis=1), #fill sides with -1
np.full((1, BOARD_WIDTH + 2), -1, dtype=np.int8) #bottom row of -1
)
)
|
[
"numpy.insert",
"numpy.reshape",
"mss.mss",
"numpy.full",
"PIL.Image.frombytes"
] |
[((1163, 1209), 'numpy.reshape', 'np.reshape', (['cells', '(BOARD_HEIGHT, BOARD_WIDTH)'], {}), '(cells, (BOARD_HEIGHT, BOARD_WIDTH))\n', (1173, 1209), True, 'import numpy as np\n'), ((617, 626), 'mss.mss', 'mss.mss', ([], {}), '()\n', (624, 626), False, 'import mss\n'), ((684, 755), 'PIL.Image.frombytes', 'Image.frombytes', (['"""RGB"""', 'screenshot.size', 'screenshot.bgra', '"""raw"""', '"""BGRX"""'], {}), "('RGB', screenshot.size, screenshot.bgra, 'raw', 'BGRX')\n", (699, 755), False, 'from PIL import Image\n'), ((1312, 1360), 'numpy.full', 'np.full', (['(1, BOARD_WIDTH + 2)', '(-1)'], {'dtype': 'np.int8'}), '((1, BOARD_WIDTH + 2), -1, dtype=np.int8)\n', (1319, 1360), True, 'import numpy as np\n'), ((1380, 1425), 'numpy.insert', 'np.insert', (['grid', '(0, BOARD_WIDTH)', '(-1)'], {'axis': '(1)'}), '(grid, (0, BOARD_WIDTH), -1, axis=1)\n', (1389, 1425), True, 'import numpy as np\n'), ((1450, 1498), 'numpy.full', 'np.full', (['(1, BOARD_WIDTH + 2)', '(-1)'], {'dtype': 'np.int8'}), '((1, BOARD_WIDTH + 2), -1, dtype=np.int8)\n', (1457, 1498), True, 'import numpy as np\n')]
|
import numpy
N,M,P = map(int,input().split())
p_cols1 =numpy.array([input().split() for _ in range(N)],int)
p_cols1.shape = (N,P)
p_cols2 =numpy.array([input().split() for _ in range(M)],int)
p_cols2.shape = (M,P)
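# Stack the two integer arrays vertically; the result has shape (N+M, P).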
concatenated = numpy.concatenate((p_cols1, p_cols2), axis = 0)
print(concatenated)
|
[
"numpy.concatenate"
] |
[((232, 277), 'numpy.concatenate', 'numpy.concatenate', (['(p_cols1, p_cols2)'], {'axis': '(0)'}), '((p_cols1, p_cols2), axis=0)\n', (249, 277), False, 'import numpy\n')]
|
# GCT634 (2018) HW1
#
# Mar-18-2018: initial version
#
# <NAME>
#
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
data_path = './dataset/'
mfcc_path = './mfcc/'
MFCC_DIM = 20
def mean_mfcc(dataset='train'):
f = open(data_path + dataset + '_list.txt','r')
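    # One mean-MFCC column per clip: 1100 columns for the training list, 300 otherwise.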
if dataset == 'train':
mfcc_mat = np.zeros(shape=(MFCC_DIM, 1100))
else:
mfcc_mat = np.zeros(shape=(MFCC_DIM, 300))
i = 0
for file_name in f:
# load mfcc file
file_name = file_name.rstrip('\n')
file_name = file_name.replace('.wav','.npy')
mfcc_file = mfcc_path + file_name
mfcc = np.load(mfcc_file)
        # mean pooling over time frames
        mfcc_mat[:, i] = np.mean(mfcc, axis=1)
i = i + 1
f.close()
return mfcc_mat
if __name__ == '__main__':
train_data = mean_mfcc('train')
valid_data = mean_mfcc('valid')
plt.figure(1)
plt.subplot(2,1,1)
plt.imshow(train_data, interpolation='nearest', origin='lower', aspect='auto')
plt.colorbar(format='%+2.0f dB')
plt.subplot(2,1,2)
plt.imshow(valid_data, interpolation='nearest', origin='lower', aspect='auto')
plt.colorbar(format='%+2.0f dB')
plt.show()
|
[
"matplotlib.pyplot.imshow",
"numpy.mean",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.load",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] |
[((941, 954), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (951, 954), True, 'import matplotlib.pyplot as plt\n'), ((959, 979), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (970, 979), True, 'import matplotlib.pyplot as plt\n'), ((982, 1060), 'matplotlib.pyplot.imshow', 'plt.imshow', (['train_data'], {'interpolation': '"""nearest"""', 'origin': '"""lower"""', 'aspect': '"""auto"""'}), "(train_data, interpolation='nearest', origin='lower', aspect='auto')\n", (992, 1060), True, 'import matplotlib.pyplot as plt\n'), ((1065, 1097), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'format': '"""%+2.0f dB"""'}), "(format='%+2.0f dB')\n", (1077, 1097), True, 'import matplotlib.pyplot as plt\n'), ((1103, 1123), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (1114, 1123), True, 'import matplotlib.pyplot as plt\n'), ((1126, 1204), 'matplotlib.pyplot.imshow', 'plt.imshow', (['valid_data'], {'interpolation': '"""nearest"""', 'origin': '"""lower"""', 'aspect': '"""auto"""'}), "(valid_data, interpolation='nearest', origin='lower', aspect='auto')\n", (1136, 1204), True, 'import matplotlib.pyplot as plt\n'), ((1209, 1241), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'format': '"""%+2.0f dB"""'}), "(format='%+2.0f dB')\n", (1221, 1241), True, 'import matplotlib.pyplot as plt\n'), ((1247, 1257), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1255, 1257), True, 'import matplotlib.pyplot as plt\n'), ((341, 373), 'numpy.zeros', 'np.zeros', ([], {'shape': '(MFCC_DIM, 1100)'}), '(shape=(MFCC_DIM, 1100))\n', (349, 373), True, 'import numpy as np\n'), ((403, 434), 'numpy.zeros', 'np.zeros', ([], {'shape': '(MFCC_DIM, 300)'}), '(shape=(MFCC_DIM, 300))\n', (411, 434), True, 'import numpy as np\n'), ((649, 667), 'numpy.load', 'np.load', (['mfcc_file'], {}), '(mfcc_file)\n', (656, 667), True, 'import numpy as np\n'), ((707, 728), 'numpy.mean', 'np.mean', (['mfcc'], {'axis': '(1)'}), '(mfcc, axis=1)\n', (714, 728), True, 'import numpy as np\n'), ((752, 773), 'numpy.mean', 'np.mean', (['mfcc'], {'axis': '(1)'}), '(mfcc, axis=1)\n', (759, 773), True, 'import numpy as np\n')]
|
import json
from typing import Union, Optional, Tuple, List
import numpy as np
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer, TfidfTransformer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from shared import LANG_TO_INT
class DataSplitter:
def __init__(self, path: str, vectorizer: Optional[Union[DictVectorizer, TfidfVectorizer, CountVectorizer]] = None, seed: Optional[int] = None, scale: bool = True):
self.data_path = path
self.vectorizer = vectorizer or DictVectorizer(sparse=False)
self.transformer = TfidfTransformer() if type(self.vectorizer) == CountVectorizer else None
self.scale = type(self.vectorizer) not in (TfidfVectorizer, CountVectorizer) and scale
self.scaler = StandardScaler()
self.random_seed = seed
def collect_features_data(self) -> Tuple[Union[np.ndarray, List[str]], np.ndarray]:
if type(self.vectorizer) == DictVectorizer:
return self._collect_dict_vectorizer_features()
elif type(self.vectorizer) in (TfidfVectorizer, CountVectorizer):
return self._collect_tfidf_features()
else:
raise NotImplementedError
def _collect_dict_vectorizer_features(self) -> Tuple[np.ndarray, np.ndarray]:
examples = []
ys = []
with open(self.data_path, "r") as file:
for line in file:
info = json.loads(line)
examples.append(info["features"])
ys.append(LANG_TO_INT[info["lang"]])
return np.array(examples), np.array(ys)
def _collect_tfidf_features(self) -> Tuple[List[str], np.ndarray]:
examples = []
ys = []
with open(self.data_path, "r") as file:
for line in file:
info = json.loads(line)
examples.append(info["code"])
ys.append(LANG_TO_INT[info["lang"]])
return examples, np.array(ys)
def prepare_data(self, data: Union[np.ndarray, List[str]], fit: bool = False) -> np.ndarray:
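        # Fit the vectorizer (and optional scaler) only when fit=True, i.e. on training data; otherwise reuse the fitted transforms.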
if type(self.vectorizer) in (TfidfVectorizer, CountVectorizer):
assert not self.scale
if fit:
if self.scale:
transformed = self.scaler.fit_transform(self.vectorizer.fit_transform(data))
else:
transformed = self.vectorizer.fit_transform(data)
elif self.scale:
transformed = self.scaler.transform(self.vectorizer.transform(data))
else:
transformed = self.vectorizer.transform(data)
if type(transformed) != np.ndarray:
transformed = transformed.toarray()
return transformed
def split_train_vali_test(self, X: Union[np.ndarray, List[str]], y: np.ndarray, split_1: float = 0.75, split_2: float = 0.66) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
X_tv, X_test, y_tv, y_test = train_test_split(X, y, train_size=split_1, random_state=self.random_seed)
X_train, X_vali, y_train, y_vali = train_test_split(X_tv, y_tv, train_size=split_2, random_state=self.random_seed)
split_data = (self.prepare_data(X_train, fit=True), self.prepare_data(X_vali), self.prepare_data(X_test), y_train, y_vali, y_test)
if type(self.vectorizer) == CountVectorizer:
for split in split_data:
self.transformer.fit_transform(split.reshape(1, -1))
return split_data
|
[
"sklearn.feature_extraction.text.TfidfTransformer",
"json.loads",
"sklearn.feature_extraction.DictVectorizer",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.StandardScaler",
"numpy.array"
] |
[((870, 886), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (884, 886), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3031, 3104), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'train_size': 'split_1', 'random_state': 'self.random_seed'}), '(X, y, train_size=split_1, random_state=self.random_seed)\n', (3047, 3104), False, 'from sklearn.model_selection import train_test_split\n'), ((3148, 3227), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_tv', 'y_tv'], {'train_size': 'split_2', 'random_state': 'self.random_seed'}), '(X_tv, y_tv, train_size=split_2, random_state=self.random_seed)\n', (3164, 3227), False, 'from sklearn.model_selection import train_test_split\n'), ((624, 652), 'sklearn.feature_extraction.DictVectorizer', 'DictVectorizer', ([], {'sparse': '(False)'}), '(sparse=False)\n', (638, 652), False, 'from sklearn.feature_extraction import DictVectorizer\n'), ((680, 698), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {}), '()\n', (696, 698), False, 'from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer, TfidfTransformer\n'), ((1657, 1675), 'numpy.array', 'np.array', (['examples'], {}), '(examples)\n', (1665, 1675), True, 'import numpy as np\n'), ((1677, 1689), 'numpy.array', 'np.array', (['ys'], {}), '(ys)\n', (1685, 1689), True, 'import numpy as np\n'), ((2044, 2056), 'numpy.array', 'np.array', (['ys'], {}), '(ys)\n', (2052, 2056), True, 'import numpy as np\n'), ((1521, 1537), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (1531, 1537), False, 'import json\n'), ((1902, 1918), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (1912, 1918), False, 'import json\n')]
|
# -*- coding: utf-8 -*-
import random
import numpy as np
import scipy
import pandas as pd
import pandas
import numpy
import json
def resizeFeature(inputData,newSize):
# inputX: (temporal_length,feature_dimension) #
originalSize=len(inputData)
#print originalSize
if originalSize==1:
inputData=np.reshape(inputData,[-1])
return np.stack([inputData]*newSize)
x=numpy.array(range(originalSize))
f=scipy.interpolate.interp1d(x,inputData,axis=0)
x_new=[i*float(originalSize-1)/(newSize-1) for i in range(newSize)]
y_new=f(x_new)
return y_new
def readData(video_name,data_type=["spatial","temporal"]):
spatial_dir="./spatial/csv_action/"
temporal_dir="./temporal/csv_action/"
data=[]
for dtype in data_type:
if dtype=="spatial":
df=pandas.read_csv(spatial_dir+video_name+".csv")
elif dtype=="temporal":
df=pandas.read_csv(temporal_dir+video_name+".csv")
data.append(df.values[:,:])
lens=[len(d) for d in data]
#print lens
min_len=min(lens)
new_data=[d[:min_len] for d in data]
new_data=numpy.concatenate(new_data,axis=1)
return new_data
def load_json(file):
with open(file) as json_file:
data = json.load(json_file)
return data
def getDatasetDict():
df=pd.read_csv("./info/video_info.csv")
json_data= load_json("./info/activity_net.v1-3.min.json")
database=json_data['database']
out_dict={}
for i in range(len(df)):
video_name=df.video.values[i]
video_info=database[video_name[2:]]
video_new_info={}
video_new_info['duration_frame']=df.numFrame.values[i]
video_new_info['duration_second']=df.seconds.values[i]
video_new_info['annotations']=video_info['annotations']
out_dict[video_name]=video_new_info
return out_dict
def poolData(data,videoAnno,num_prop=100,num_bin=1,num_sample_bin=3,pool_type="mean"):
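    # Resample the (T, 400) snippet features onto num_prop uniformly spaced anchor windows via interpolation and mean/max pooling.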
feature_frame=len(data)*16
video_frame=videoAnno['duration_frame']
video_second=videoAnno['duration_second']
corrected_second=float(feature_frame)/video_frame*video_second
fps=float(video_frame)/video_second
st=16/fps
if len(data)==1:
video_feature=np.stack([data]*num_prop)
video_feature=np.reshape(video_feature,[num_prop,400])
return video_feature
x=[st/2+ii*st for ii in range(len(data))]
f=scipy.interpolate.interp1d(x,data,axis=0)
video_feature=[]
zero_sample=np.zeros(num_bin*400)
tmp_anchor_xmin=[1.0/num_prop*i for i in range(num_prop)]
tmp_anchor_xmax=[1.0/num_prop*i for i in range(1,num_prop+1)]
num_sample=num_bin*num_sample_bin
for idx in range(num_prop):
xmin=max(x[0]+0.0001,tmp_anchor_xmin[idx]*corrected_second)
xmax=min(x[-1]-0.0001,tmp_anchor_xmax[idx]*corrected_second)
if xmax<x[0]:
#print "fuck"
video_feature.append(zero_sample)
continue
if xmin>x[-1]:
video_feature.append(zero_sample)
continue
plen=(xmax-xmin)/(num_sample-1)
x_new=[xmin+plen*ii for ii in range(num_sample)]
y_new=f(x_new)
y_new_pool=[]
for b in range(num_bin):
tmp_y_new=y_new[num_sample_bin*b:num_sample_bin*(b+1)]
if pool_type=="mean":
tmp_y_new=np.mean(y_new,axis=0)
elif pool_type=="max":
tmp_y_new=np.max(y_new,axis=0)
y_new_pool.append(tmp_y_new)
y_new_pool=np.stack(y_new_pool)
y_new_pool=np.reshape(y_new_pool,[-1])
video_feature.append(y_new_pool)
video_feature=np.stack(video_feature)
return video_feature
videoDict=getDatasetDict()
videoNameList = list(videoDict.keys())
random.shuffle(videoNameList)
col_names=[]
for i in range(400):
col_names.append("f"+str(i))
for videoName in videoNameList:
videoAnno=videoDict[videoName]
data=readData(videoName)
numFrame=videoAnno['duration_frame']
featureFrame=len(data)*16
videoAnno["feature_frame"]=featureFrame
videoDict[videoName]=videoAnno
print(numFrame,featureFrame)
videoFeature_mean=poolData(data,videoAnno,num_prop=100,num_bin=1,num_sample_bin=3,pool_type="mean")
outDf=pd.DataFrame(videoFeature_mean,columns=col_names)
outDf.to_csv("./csv_mean_100/"+videoName+".csv",index=False)
outfile=open("./anet_anno_anet.json","w")
json.dump(videoDict,outfile)
outfile.close()
|
[
"numpy.mean",
"numpy.reshape",
"random.shuffle",
"pandas.read_csv",
"scipy.interpolate.interp1d",
"json.load",
"numpy.stack",
"numpy.zeros",
"numpy.max",
"numpy.concatenate",
"pandas.DataFrame",
"json.dump"
] |
[((3780, 3809), 'random.shuffle', 'random.shuffle', (['videoNameList'], {}), '(videoNameList)\n', (3794, 3809), False, 'import random\n'), ((4440, 4469), 'json.dump', 'json.dump', (['videoDict', 'outfile'], {}), '(videoDict, outfile)\n', (4449, 4469), False, 'import json\n'), ((437, 485), 'scipy.interpolate.interp1d', 'scipy.interpolate.interp1d', (['x', 'inputData'], {'axis': '(0)'}), '(x, inputData, axis=0)\n', (463, 485), False, 'import scipy\n'), ((1120, 1155), 'numpy.concatenate', 'numpy.concatenate', (['new_data'], {'axis': '(1)'}), '(new_data, axis=1)\n', (1137, 1155), False, 'import numpy\n'), ((1317, 1353), 'pandas.read_csv', 'pd.read_csv', (['"""./info/video_info.csv"""'], {}), "('./info/video_info.csv')\n", (1328, 1353), True, 'import pandas as pd\n'), ((2403, 2446), 'scipy.interpolate.interp1d', 'scipy.interpolate.interp1d', (['x', 'data'], {'axis': '(0)'}), '(x, data, axis=0)\n', (2429, 2446), False, 'import scipy\n'), ((2491, 2514), 'numpy.zeros', 'np.zeros', (['(num_bin * 400)'], {}), '(num_bin * 400)\n', (2499, 2514), True, 'import numpy as np\n'), ((3672, 3695), 'numpy.stack', 'np.stack', (['video_feature'], {}), '(video_feature)\n', (3680, 3695), True, 'import numpy as np\n'), ((4281, 4331), 'pandas.DataFrame', 'pd.DataFrame', (['videoFeature_mean'], {'columns': 'col_names'}), '(videoFeature_mean, columns=col_names)\n', (4293, 4331), True, 'import pandas as pd\n'), ((320, 347), 'numpy.reshape', 'np.reshape', (['inputData', '[-1]'], {}), '(inputData, [-1])\n', (330, 347), True, 'import numpy as np\n'), ((362, 393), 'numpy.stack', 'np.stack', (['([inputData] * newSize)'], {}), '([inputData] * newSize)\n', (370, 393), True, 'import numpy as np\n'), ((1246, 1266), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (1255, 1266), False, 'import json\n'), ((2232, 2259), 'numpy.stack', 'np.stack', (['([data] * num_prop)'], {}), '([data] * num_prop)\n', (2240, 2259), True, 'import numpy as np\n'), ((2280, 2322), 'numpy.reshape', 'np.reshape', (['video_feature', '[num_prop, 400]'], {}), '(video_feature, [num_prop, 400])\n', (2290, 2322), True, 'import numpy as np\n'), ((3545, 3565), 'numpy.stack', 'np.stack', (['y_new_pool'], {}), '(y_new_pool)\n', (3553, 3565), True, 'import numpy as np\n'), ((3585, 3613), 'numpy.reshape', 'np.reshape', (['y_new_pool', '[-1]'], {}), '(y_new_pool, [-1])\n', (3595, 3613), True, 'import numpy as np\n'), ((818, 868), 'pandas.read_csv', 'pandas.read_csv', (["(spatial_dir + video_name + '.csv')"], {}), "(spatial_dir + video_name + '.csv')\n", (833, 868), False, 'import pandas\n'), ((912, 963), 'pandas.read_csv', 'pandas.read_csv', (["(temporal_dir + video_name + '.csv')"], {}), "(temporal_dir + video_name + '.csv')\n", (927, 963), False, 'import pandas\n'), ((3381, 3403), 'numpy.mean', 'np.mean', (['y_new'], {'axis': '(0)'}), '(y_new, axis=0)\n', (3388, 3403), True, 'import numpy as np\n'), ((3464, 3485), 'numpy.max', 'np.max', (['y_new'], {'axis': '(0)'}), '(y_new, axis=0)\n', (3470, 3485), True, 'import numpy as np\n')]
|
"""Test functions for FOOOF analysis."""
import numpy as np
from fooof.analysis import *
###################################################################################################
###################################################################################################
def test_get_band_peak_fm(tfm):
assert np.all(get_band_peak_fm(tfm, (8, 12)))
def test_get_band_peaks_fg(tfg):
assert np.all(get_band_peaks_fg(tfg, (8, 12)))
def test_get_band_peaks_group():
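    # Each row holds peak parameters (center frequency, power, bandwidth) plus a trailing group-index column.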
dat = np.array([[10, 1, 1.8, 0], [13, 1, 2, 2], [14, 2, 4, 2]])
out1 = get_band_peaks_group(dat, [8, 12], 3)
assert out1.shape == (3, 3)
assert np.array_equal(out1[0, :], [10, 1, 1.8])
out2 = get_band_peaks_group(dat, [12, 16], 3)
assert out2.shape == (3, 3)
assert np.array_equal(out2[2, :], [14, 2, 4])
def test_get_band_peak():
dat = np.array([[10, 1, 1.8], [14, 2, 4]])
# Test single result
assert np.array_equal(get_band_peak(dat, [10, 12]), [10, 1, 1.8])
# Test no results - returns nan
assert np.all(np.isnan(get_band_peak(dat, [4, 8])))
    # Test multiple results - return all
assert np.array_equal(get_band_peak(dat, [10, 15], ret_one=False), [[10, 1, 1.8], [14, 2, 4]])
# Test multiple results - return one
assert np.array_equal(get_band_peak(dat, [10, 15], ret_one=True), [14, 2, 4])
def test_get_highest_peak():
dat = np.array([[10, 1, 1.8], [14, 2, 4], [12, 3, 2]])
assert np.array_equal(get_highest_peak(dat), [12, 3, 2])
def test_empty_inputs():
dat = np.empty(shape=[0, 3])
assert np.all(get_band_peak(dat, [8, 12]))
assert np.all(get_highest_peak(dat))
dat = np.empty(shape=[0, 4])
assert np.all(get_band_peaks_group(dat, [8, 12], 0))
|
[
"numpy.array",
"numpy.empty",
"numpy.array_equal"
] |
[((507, 564), 'numpy.array', 'np.array', (['[[10, 1, 1.8, 0], [13, 1, 2, 2], [14, 2, 4, 2]]'], {}), '([[10, 1, 1.8, 0], [13, 1, 2, 2], [14, 2, 4, 2]])\n', (515, 564), True, 'import numpy as np\n'), ((658, 698), 'numpy.array_equal', 'np.array_equal', (['out1[0, :]', '[10, 1, 1.8]'], {}), '(out1[0, :], [10, 1, 1.8])\n', (672, 698), True, 'import numpy as np\n'), ((793, 831), 'numpy.array_equal', 'np.array_equal', (['out2[2, :]', '[14, 2, 4]'], {}), '(out2[2, :], [14, 2, 4])\n', (807, 831), True, 'import numpy as np\n'), ((870, 906), 'numpy.array', 'np.array', (['[[10, 1, 1.8], [14, 2, 4]]'], {}), '([[10, 1, 1.8], [14, 2, 4]])\n', (878, 906), True, 'import numpy as np\n'), ((1401, 1449), 'numpy.array', 'np.array', (['[[10, 1, 1.8], [14, 2, 4], [12, 3, 2]]'], {}), '([[10, 1, 1.8], [14, 2, 4], [12, 3, 2]])\n', (1409, 1449), True, 'import numpy as np\n'), ((1549, 1571), 'numpy.empty', 'np.empty', ([], {'shape': '[0, 3]'}), '(shape=[0, 3])\n', (1557, 1571), True, 'import numpy as np\n'), ((1672, 1694), 'numpy.empty', 'np.empty', ([], {'shape': '[0, 4]'}), '(shape=[0, 4])\n', (1680, 1694), True, 'import numpy as np\n')]
|
import numpy as np
import unittest
import coremltools.models.datatypes as datatypes
from coremltools.models import neural_network as neural_network
from coremltools.models import MLModel
from coremltools.models.neural_network.printer import print_network_spec
from coremltools.converters.nnssa.coreml.graph_pass.mlmodel_passes import \
remove_disconnected_layers, transform_conv_crop, remove_redundant_transposes
import copy
import pytest
DEBUG = False
np.random.seed(100)
class MLModelPassesTest(unittest.TestCase):
def test_load_constant_remove(self):
input_features = [('data', datatypes.Array(*(3, 4)))]
output_features = [('out', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_activation('relu1', 'RELU', 'data', 'relu1')
builder.add_load_constant_nd('const1', 'c1', constant_value=np.ones((5,)), shape=(5,))
builder.add_activation('relu2', 'RELU', 'relu1', 'out')
builder.add_load_constant_nd('const2', 'c2', constant_value=np.ones((5,)), shape=(5,))
builder.add_load_constant_nd('const3', 'c3', constant_value=np.ones((5,)), shape=(5,))
spec = builder.spec
np.testing.assert_equal(5, len(spec.neuralNetwork.layers))
remove_disconnected_layers(spec)
np.testing.assert_equal(2, len(spec.neuralNetwork.layers))
def test_dead_layer_remove(self):
input_features = [('data', datatypes.Array(*(3, 4)))]
output_features = [('out', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_activation('relu1', 'RELU', 'data', 'relu1')
builder.add_load_constant_nd('const1', 'c1', constant_value=np.ones((5,)), shape=(5,))
builder.add_load_constant_nd('const2', 'c2', constant_value=np.ones((5,)), shape=(5,))
builder.add_split_nd('splitnd1', 'const2', ['s1', 's2', 's3'], axis=0, num_splits=3)
builder.add_squeeze('squeeze', 's1', 'squeeze_out')
builder.add_activation('relu4', 'RELU', 's2', 'relu4')
builder.add_activation('relu5', 'RELU', 'relu4', 'relu5')
builder.add_load_constant_nd('const3', 'c3', constant_value=np.ones((5,)), shape=(5,))
builder.add_activation('relu2', 'RELU', 'relu1', 'out')
spec = builder.spec
np.testing.assert_equal(9, len(spec.neuralNetwork.layers))
remove_disconnected_layers(spec)
np.testing.assert_equal(2, len(spec.neuralNetwork.layers))
@pytest.mark.xfail
def test_dead_layer_remove_branch(self):
convergence_tolerance = 1e-8
input_features = [('input', datatypes.Array(*(2,)))]
output_features = [('out', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features, disable_rank5_shape_mapping=True)
# add condition to break from the loop, if convergence criterion is met
builder.add_less_than('cond', ['input'], 'cond', alpha=convergence_tolerance)
branch_layer = builder.add_branch('branch_layer', 'cond')
builder_ifbranch = neural_network.NeuralNetworkBuilder(nn_spec=branch_layer.branch.ifBranch)
builder_ifbranch.add_activation('relu1', 'RELU', 'input', 'relu1_out')
builder_ifbranch.add_activation('relu2_out', 'RELU', 'relu1_out', 'relu2_out')
builder_elsebranch = neural_network.NeuralNetworkBuilder(nn_spec=branch_layer.branch.elseBranch)
builder_elsebranch.add_activation('linear1', 'LINEAR', 'input', 'linear1_out')
builder_elsebranch.add_activation('linear2', 'LINEAR', 'linear1_out', 'relu2_out')
builder.add_squeeze('out', 'input', 'out', squeeze_all=True)
mlmodel = MLModel(builder.spec)
data = np.random.rand(2,)
data_dict = {'input': data}
before_pass_out = mlmodel.predict(data_dict)['out']
if DEBUG:
print('\n mlmodel description before remove disconnected layers pass: \n')
print_network_spec(builder.spec, style='coding')
remove_disconnected_layers(builder.spec)
if DEBUG:
print('\n mlmodel description after remove disconnected layers pass: \n')
print_network_spec(builder.spec, style='coding')
mlmodel = MLModel(builder.spec)
after_pass_out = mlmodel.predict(data_dict)['out']
np.testing.assert_almost_equal(before_pass_out, after_pass_out, decimal=2)
np.testing.assert_equal(len(builder.spec.neuralNetwork.layers), 1)
@pytest.mark.xfail
def test_dead_layer_partial_branch(self):
convergence_tolerance = 1e-8
input_features = [('input', datatypes.Array(*(2,)))]
output_features = [('out', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features, disable_rank5_shape_mapping=True)
# add condition to break from the loop, if convergence criterion is met
builder.add_less_than('cond', ['input'], 'cond', alpha=convergence_tolerance)
branch_layer = builder.add_branch('branch_layer', 'cond')
builder_ifbranch = neural_network.NeuralNetworkBuilder(nn_spec=branch_layer.branch.ifBranch)
builder_ifbranch.add_activation('relu1', 'RELU', 'input', 'relu1_out')
builder_ifbranch.add_activation('relu2_out', 'RELU', 'relu1_out', 'relu2_out')
builder_elsebranch = neural_network.NeuralNetworkBuilder(nn_spec=branch_layer.branch.elseBranch)
builder_elsebranch.add_activation('linear1', 'LINEAR', 'input', 'linear1_out')
builder_elsebranch.add_activation('linear_red_1', 'LINEAR', 'input', 'linear_red1_out')
builder_elsebranch.add_activation('linear_red_2', 'LINEAR', 'linear_red1_out', 'linear_red2_out')
builder_elsebranch.add_activation('linear2', 'LINEAR', 'linear1_out', 'relu2_out')
builder.add_squeeze('out', 'relu2_out', 'out', squeeze_all=True)
mlmodel = MLModel(builder.spec)
data = np.random.rand(2,)
data_dict = {'input': data}
before_pass_out = mlmodel.predict(data_dict)['out']
if DEBUG:
print('\n mlmodel description before remove disconnected layers pass: \n')
print_network_spec(builder.spec, style='coding')
old_spec = copy.copy(builder.spec)
remove_disconnected_layers(builder.spec)
if DEBUG:
print('\n mlmodel description after remove disconnected layers pass: \n')
print_network_spec(builder.spec, style='coding')
mlmodel = MLModel(builder.spec)
after_pass_out = mlmodel.predict(data_dict)['out']
np.testing.assert_almost_equal(before_pass_out, after_pass_out, decimal=2)
np.testing.assert_equal(len(old_spec.neuralNetwork.layers[1].branch.ifBranch.layers),
len(builder.spec.neuralNetwork.layers[1].branch.ifBranch.layers))
np.testing.assert_equal(len(builder.spec.neuralNetwork.layers[1].branch.elseBranch.layers), 2)
def test_conv_crop_bn_to_conv_bn_crop(self):
input_features = [('data', datatypes.Array(1, 10, 10))]
output_features = [('out', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
W = np.ones((2,10,1,10), dtype=np.float32)
builder.add_convolution(name='conv',
kernel_channels=1,
output_channels=2,
height=2, width=2,
stride_height=1, stride_width=1,
border_mode='valid', groups=1,
W=W,
b=None, has_bias=False,
input_name='data', output_name='conv_out')
builder.add_crop(name='crop',
left=1, right=1, top=1, bottom=1, offset=0,
input_names=['conv_out'],
output_name='crop_out')
builder.add_batchnorm(name='bn',
channels=2,
gamma=np.ones(2,).astype(np.float32),
beta=np.ones(2,).astype(np.float32),
mean=np.ones(2,).astype(np.float32),
variance=np.ones(2,).astype(np.float32),
input_name='crop_out',
output_name='out')
# Conv -> Crop -> BN
spec = builder.spec.neuralNetwork
np.testing.assert_equal('crop', spec.layers[1].WhichOneof('layer'))
np.testing.assert_equal('batchnorm', spec.layers[2].WhichOneof('layer'))
# transform the pattern
transform_conv_crop(builder.spec)
# Conv -> BN -> Crop
np.testing.assert_equal('batchnorm', spec.layers[1].WhichOneof('layer'))
np.testing.assert_equal('crop', spec.layers[2].WhichOneof('layer'))
def test_conv_crop_bn_relu_to_conv_bn_relu_crop(self):
input_features = [('data', datatypes.Array(1, 10, 10))]
output_features = [('out', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
W = np.ones((2,10,1,10), dtype=np.float32)
builder.add_convolution(name='conv',
kernel_channels=1,
output_channels=2,
height=2, width=2,
stride_height=1, stride_width=1,
border_mode='valid', groups=1,
W=W,
b=None, has_bias=False,
input_name='data', output_name='conv_out')
builder.add_crop(name='crop',
left=1, right=1, top=1, bottom=1, offset=0,
input_names=['conv_out'],
output_name='crop_out')
builder.add_batchnorm(name='bn',
channels=2,
gamma=np.ones(2,).astype(np.float32),
beta=np.ones(2,).astype(np.float32),
mean=np.ones(2,).astype(np.float32),
variance=np.ones(2,).astype(np.float32),
input_name='crop_out',
output_name='bn_out')
builder.add_activation(name='relu',
non_linearity='RELU',
input_name='bn_out',
output_name='out')
# Conv -> Crop -> BN -> ReLU
spec = builder.spec.neuralNetwork
np.testing.assert_equal('crop', spec.layers[1].WhichOneof('layer'))
np.testing.assert_equal('batchnorm', spec.layers[2].WhichOneof('layer'))
np.testing.assert_equal('activation', spec.layers[3].WhichOneof('layer'))
# transform the pattern
transform_conv_crop(builder.spec)
# Conv -> BN -> ReLU -> Crop
np.testing.assert_equal('batchnorm', spec.layers[1].WhichOneof('layer'))
np.testing.assert_equal('activation', spec.layers[2].WhichOneof('layer'))
np.testing.assert_equal('crop', spec.layers[3].WhichOneof('layer'))
def test_redundant_transposes(self):
def _build_and_test_network(input_size, transpose_layers, expected_layers):
"""
Helper function for testing transpose removal.
Args:
input_size: Size of the input network tensor.
transpose_layers: Array of transpose axes definitions.
expected_layers: Array of indices into transpose_layers indicating
which of the transpose layers should be present after the
graph pass.
"""
input_features = [('data', datatypes.Array(*input_size))]
output_features = [('out', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
last_layer = 'data'
for idx, axes in enumerate(transpose_layers):
name = 't{}'.format(idx)
if idx == len(transpose_layers) - 1:
output_name = 'out'
else:
output_name = name + '_out'
builder.add_transpose(name=name,
axes=axes,
input_name=last_layer,
output_name=output_name)
last_layer = output_name
spec = builder.spec.neuralNetwork
# Check the network before the graph pass.
for idx in range(len(transpose_layers)):
np.testing.assert_equal('transpose', spec.layers[idx].WhichOneof('layer'))
# Run the removal pass.
remove_redundant_transposes(builder.spec)
# Verify only the expected layers remain.
np.testing.assert_equal(len(spec.layers), len(expected_layers))
for output_layer_idx, input_layer_idx in enumerate(expected_layers):
np.testing.assert_equal(
'transpose',
spec.layers[output_layer_idx].WhichOneof('layer')
)
np.testing.assert_array_equal(
transpose_layers[input_layer_idx],
spec.layers[output_layer_idx].transpose.axes
)
_build_and_test_network(
input_size=[1, 10, 10],
# These transposes together are the identity.
transpose_layers=[[2, 0, 1], [1, 2, 0]],
expected_layers=[],
)
_build_and_test_network(
input_size=[1, 10, 10],
# These transposes are not inverses.
transpose_layers=[[2, 0, 1], [2, 0, 1]],
expected_layers=[0, 1],
)
_build_and_test_network(
input_size=[1, 1, 10, 10, 3],
# First two are the identity, then an extra.
transpose_layers=[[2, 4, 1, 0, 3], [3, 2, 0, 4, 1], [1, 0, 2, 3, 4]],
expected_layers=[2],
)
_build_and_test_network(
input_size=[1, 1, 10, 10, 3],
# First is okay, next two are the identity.
transpose_layers=[[1, 0, 2, 3, 4], [2, 4, 1, 0, 3], [3, 2, 0, 4, 1]],
expected_layers=[0],
)
# A slightly more complicated test case where there are two transposes
# in topological order, but are actually in parallel in the graph.
builder = neural_network.NeuralNetworkBuilder(
[('data', datatypes.Array(2, 4, 8))],
[('out', None)]
)
last_layer = 'data'
builder.add_transpose(name='t1',
axes=[0, 2, 1],
input_name='data',
output_name='t1')
builder.add_transpose(name='t2',
axes=[0, 2, 1],
input_name='data',
output_name='t2')
builder.add_stack(name='stack',
input_names=['t1', 't2'],
output_name='out')
spec = builder.spec.neuralNetwork
# Run the removal pass.
remove_redundant_transposes(builder.spec)
# Verify nothing was removed.
np.testing.assert_equal(len(spec.layers), 3)
if __name__ == '__main__':
RUN_ALL_TESTS = True
if RUN_ALL_TESTS:
unittest.main()
else:
suite = unittest.TestSuite()
suite.addTest(MLModelPassesTest('test_load_constant_remove'))
unittest.TextTestRunner().run(suite)
|
[
"coremltools.models.MLModel",
"coremltools.converters.nnssa.coreml.graph_pass.mlmodel_passes.transform_conv_crop",
"unittest.TestSuite",
"numpy.random.rand",
"numpy.ones",
"coremltools.converters.nnssa.coreml.graph_pass.mlmodel_passes.remove_disconnected_layers",
"numpy.testing.assert_almost_equal",
"coremltools.models.datatypes.Array",
"numpy.random.seed",
"coremltools.models.neural_network.NeuralNetworkBuilder",
"unittest.main",
"coremltools.models.neural_network.printer.print_network_spec",
"copy.copy",
"coremltools.converters.nnssa.coreml.graph_pass.mlmodel_passes.remove_redundant_transposes",
"unittest.TextTestRunner",
"numpy.testing.assert_array_equal"
] |
[((462, 481), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (476, 481), True, 'import numpy as np\n'), ((691, 797), 'coremltools.models.neural_network.NeuralNetworkBuilder', 'neural_network.NeuralNetworkBuilder', (['input_features', 'output_features'], {'disable_rank5_shape_mapping': '(True)'}), '(input_features, output_features,\n disable_rank5_shape_mapping=True)\n', (726, 797), True, 'from coremltools.models import neural_network as neural_network\n'), ((1311, 1343), 'coremltools.converters.nnssa.coreml.graph_pass.mlmodel_passes.remove_disconnected_layers', 'remove_disconnected_layers', (['spec'], {}), '(spec)\n', (1337, 1343), False, 'from coremltools.converters.nnssa.coreml.graph_pass.mlmodel_passes import remove_disconnected_layers, transform_conv_crop, remove_redundant_transposes\n'), ((1572, 1678), 'coremltools.models.neural_network.NeuralNetworkBuilder', 'neural_network.NeuralNetworkBuilder', (['input_features', 'output_features'], {'disable_rank5_shape_mapping': '(True)'}), '(input_features, output_features,\n disable_rank5_shape_mapping=True)\n', (1607, 1678), True, 'from coremltools.models import neural_network as neural_network\n'), ((2474, 2506), 'coremltools.converters.nnssa.coreml.graph_pass.mlmodel_passes.remove_disconnected_layers', 'remove_disconnected_layers', (['spec'], {}), '(spec)\n', (2500, 2506), False, 'from coremltools.converters.nnssa.coreml.graph_pass.mlmodel_passes import remove_disconnected_layers, transform_conv_crop, remove_redundant_transposes\n'), ((2803, 2909), 'coremltools.models.neural_network.NeuralNetworkBuilder', 'neural_network.NeuralNetworkBuilder', (['input_features', 'output_features'], {'disable_rank5_shape_mapping': '(True)'}), '(input_features, output_features,\n disable_rank5_shape_mapping=True)\n', (2838, 2909), True, 'from coremltools.models import neural_network as neural_network\n'), ((3165, 3238), 'coremltools.models.neural_network.NeuralNetworkBuilder', 'neural_network.NeuralNetworkBuilder', ([], {'nn_spec': 'branch_layer.branch.ifBranch'}), '(nn_spec=branch_layer.branch.ifBranch)\n', (3200, 3238), True, 'from coremltools.models import neural_network as neural_network\n'), ((3434, 3509), 'coremltools.models.neural_network.NeuralNetworkBuilder', 'neural_network.NeuralNetworkBuilder', ([], {'nn_spec': 'branch_layer.branch.elseBranch'}), '(nn_spec=branch_layer.branch.elseBranch)\n', (3469, 3509), True, 'from coremltools.models import neural_network as neural_network\n'), ((3776, 3797), 'coremltools.models.MLModel', 'MLModel', (['builder.spec'], {}), '(builder.spec)\n', (3783, 3797), False, 'from coremltools.models import MLModel\n'), ((3813, 3830), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (3827, 3830), True, 'import numpy as np\n'), ((4102, 4142), 'coremltools.converters.nnssa.coreml.graph_pass.mlmodel_passes.remove_disconnected_layers', 'remove_disconnected_layers', (['builder.spec'], {}), '(builder.spec)\n', (4128, 4142), False, 'from coremltools.converters.nnssa.coreml.graph_pass.mlmodel_passes import remove_disconnected_layers, transform_conv_crop, remove_redundant_transposes\n'), ((4326, 4347), 'coremltools.models.MLModel', 'MLModel', (['builder.spec'], {}), '(builder.spec)\n', (4333, 4347), False, 'from coremltools.models import MLModel\n'), ((4416, 4490), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['before_pass_out', 'after_pass_out'], {'decimal': '(2)'}), '(before_pass_out, after_pass_out, decimal=2)\n', (4446, 4490), True, 'import numpy as np\n'), ((4796, 
4902), 'coremltools.models.neural_network.NeuralNetworkBuilder', 'neural_network.NeuralNetworkBuilder', (['input_features', 'output_features'], {'disable_rank5_shape_mapping': '(True)'}), '(input_features, output_features,\n disable_rank5_shape_mapping=True)\n', (4831, 4902), True, 'from coremltools.models import neural_network as neural_network\n'), ((5158, 5231), 'coremltools.models.neural_network.NeuralNetworkBuilder', 'neural_network.NeuralNetworkBuilder', ([], {'nn_spec': 'branch_layer.branch.ifBranch'}), '(nn_spec=branch_layer.branch.ifBranch)\n', (5193, 5231), True, 'from coremltools.models import neural_network as neural_network\n'), ((5427, 5502), 'coremltools.models.neural_network.NeuralNetworkBuilder', 'neural_network.NeuralNetworkBuilder', ([], {'nn_spec': 'branch_layer.branch.elseBranch'}), '(nn_spec=branch_layer.branch.elseBranch)\n', (5462, 5502), True, 'from coremltools.models import neural_network as neural_network\n'), ((5975, 5996), 'coremltools.models.MLModel', 'MLModel', (['builder.spec'], {}), '(builder.spec)\n', (5982, 5996), False, 'from coremltools.models import MLModel\n'), ((6012, 6029), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (6026, 6029), True, 'import numpy as np\n'), ((6312, 6335), 'copy.copy', 'copy.copy', (['builder.spec'], {}), '(builder.spec)\n', (6321, 6335), False, 'import copy\n'), ((6344, 6384), 'coremltools.converters.nnssa.coreml.graph_pass.mlmodel_passes.remove_disconnected_layers', 'remove_disconnected_layers', (['builder.spec'], {}), '(builder.spec)\n', (6370, 6384), False, 'from coremltools.converters.nnssa.coreml.graph_pass.mlmodel_passes import remove_disconnected_layers, transform_conv_crop, remove_redundant_transposes\n'), ((6568, 6589), 'coremltools.models.MLModel', 'MLModel', (['builder.spec'], {}), '(builder.spec)\n', (6575, 6589), False, 'from coremltools.models import MLModel\n'), ((6658, 6732), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['before_pass_out', 'after_pass_out'], {'decimal': '(2)'}), '(before_pass_out, after_pass_out, decimal=2)\n', (6688, 6732), True, 'import numpy as np\n'), ((7202, 7270), 'coremltools.models.neural_network.NeuralNetworkBuilder', 'neural_network.NeuralNetworkBuilder', (['input_features', 'output_features'], {}), '(input_features, output_features)\n', (7237, 7270), True, 'from coremltools.models import neural_network as neural_network\n'), ((7283, 7324), 'numpy.ones', 'np.ones', (['(2, 10, 1, 10)'], {'dtype': 'np.float32'}), '((2, 10, 1, 10), dtype=np.float32)\n', (7290, 7324), True, 'import numpy as np\n'), ((8747, 8780), 'coremltools.converters.nnssa.coreml.graph_pass.mlmodel_passes.transform_conv_crop', 'transform_conv_crop', (['builder.spec'], {}), '(builder.spec)\n', (8766, 8780), False, 'from coremltools.converters.nnssa.coreml.graph_pass.mlmodel_passes import remove_disconnected_layers, transform_conv_crop, remove_redundant_transposes\n'), ((9151, 9219), 'coremltools.models.neural_network.NeuralNetworkBuilder', 'neural_network.NeuralNetworkBuilder', (['input_features', 'output_features'], {}), '(input_features, output_features)\n', (9186, 9219), True, 'from coremltools.models import neural_network as neural_network\n'), ((9232, 9273), 'numpy.ones', 'np.ones', (['(2, 10, 1, 10)'], {'dtype': 'np.float32'}), '((2, 10, 1, 10), dtype=np.float32)\n', (9239, 9273), True, 'import numpy as np\n'), ((10988, 11021), 'coremltools.converters.nnssa.coreml.graph_pass.mlmodel_passes.transform_conv_crop', 'transform_conv_crop', (['builder.spec'], {}), 
'(builder.spec)\n', (11007, 11021), False, 'from coremltools.converters.nnssa.coreml.graph_pass.mlmodel_passes import remove_disconnected_layers, transform_conv_crop, remove_redundant_transposes\n'), ((15388, 15429), 'coremltools.converters.nnssa.coreml.graph_pass.mlmodel_passes.remove_redundant_transposes', 'remove_redundant_transposes', (['builder.spec'], {}), '(builder.spec)\n', (15415, 15429), False, 'from coremltools.converters.nnssa.coreml.graph_pass.mlmodel_passes import remove_disconnected_layers, transform_conv_crop, remove_redundant_transposes\n'), ((15605, 15620), 'unittest.main', 'unittest.main', ([], {}), '()\n', (15618, 15620), False, 'import unittest\n'), ((15647, 15667), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (15665, 15667), False, 'import unittest\n'), ((4045, 4093), 'coremltools.models.neural_network.printer.print_network_spec', 'print_network_spec', (['builder.spec'], {'style': '"""coding"""'}), "(builder.spec, style='coding')\n", (4063, 4093), False, 'from coremltools.models.neural_network.printer import print_network_spec\n'), ((4259, 4307), 'coremltools.models.neural_network.printer.print_network_spec', 'print_network_spec', (['builder.spec'], {'style': '"""coding"""'}), "(builder.spec, style='coding')\n", (4277, 4307), False, 'from coremltools.models.neural_network.printer import print_network_spec\n'), ((6244, 6292), 'coremltools.models.neural_network.printer.print_network_spec', 'print_network_spec', (['builder.spec'], {'style': '"""coding"""'}), "(builder.spec, style='coding')\n", (6262, 6292), False, 'from coremltools.models.neural_network.printer import print_network_spec\n'), ((6501, 6549), 'coremltools.models.neural_network.printer.print_network_spec', 'print_network_spec', (['builder.spec'], {'style': '"""coding"""'}), "(builder.spec, style='coding')\n", (6519, 6549), False, 'from coremltools.models.neural_network.printer import print_network_spec\n'), ((11999, 12067), 'coremltools.models.neural_network.NeuralNetworkBuilder', 'neural_network.NeuralNetworkBuilder', (['input_features', 'output_features'], {}), '(input_features, output_features)\n', (12034, 12067), True, 'from coremltools.models import neural_network as neural_network\n'), ((12919, 12960), 'coremltools.converters.nnssa.coreml.graph_pass.mlmodel_passes.remove_redundant_transposes', 'remove_redundant_transposes', (['builder.spec'], {}), '(builder.spec)\n', (12946, 12960), False, 'from coremltools.converters.nnssa.coreml.graph_pass.mlmodel_passes import remove_disconnected_layers, transform_conv_crop, remove_redundant_transposes\n'), ((604, 628), 'coremltools.models.datatypes.Array', 'datatypes.Array', (['*(3, 4)'], {}), '(*(3, 4))\n', (619, 628), True, 'import coremltools.models.datatypes as datatypes\n'), ((927, 940), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (934, 940), True, 'import numpy as np\n'), ((1086, 1099), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (1093, 1099), True, 'import numpy as np\n'), ((1181, 1194), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (1188, 1194), True, 'import numpy as np\n'), ((1485, 1509), 'coremltools.models.datatypes.Array', 'datatypes.Array', (['*(3, 4)'], {}), '(*(3, 4))\n', (1500, 1509), True, 'import coremltools.models.datatypes as datatypes\n'), ((1808, 1821), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (1815, 1821), True, 'import numpy as np\n'), ((1903, 1916), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (1910, 1916), True, 'import numpy as np\n'), ((2280, 2293), 'numpy.ones', 'np.ones', 
(['(5,)'], {}), '((5,))\n', (2287, 2293), True, 'import numpy as np\n'), ((2717, 2739), 'coremltools.models.datatypes.Array', 'datatypes.Array', (['*(2,)'], {}), '(*(2,))\n', (2732, 2739), True, 'import coremltools.models.datatypes as datatypes\n'), ((4710, 4732), 'coremltools.models.datatypes.Array', 'datatypes.Array', (['*(2,)'], {}), '(*(2,))\n', (4725, 4732), True, 'import coremltools.models.datatypes as datatypes\n'), ((7113, 7139), 'coremltools.models.datatypes.Array', 'datatypes.Array', (['(1)', '(10)', '(10)'], {}), '(1, 10, 10)\n', (7128, 7139), True, 'import coremltools.models.datatypes as datatypes\n'), ((9062, 9088), 'coremltools.models.datatypes.Array', 'datatypes.Array', (['(1)', '(10)', '(10)'], {}), '(1, 10, 10)\n', (9077, 9088), True, 'import coremltools.models.datatypes as datatypes\n'), ((13350, 13465), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['transpose_layers[input_layer_idx]', 'spec.layers[output_layer_idx].transpose.axes'], {}), '(transpose_layers[input_layer_idx], spec.\n layers[output_layer_idx].transpose.axes)\n', (13379, 13465), True, 'import numpy as np\n'), ((15746, 15771), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {}), '()\n', (15769, 15771), False, 'import unittest\n'), ((11900, 11928), 'coremltools.models.datatypes.Array', 'datatypes.Array', (['*input_size'], {}), '(*input_size)\n', (11915, 11928), True, 'import coremltools.models.datatypes as datatypes\n'), ((14707, 14731), 'coremltools.models.datatypes.Array', 'datatypes.Array', (['(2)', '(4)', '(8)'], {}), '(2, 4, 8)\n', (14722, 14731), True, 'import coremltools.models.datatypes as datatypes\n'), ((8139, 8149), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (8146, 8149), True, 'import numpy as np\n'), ((8206, 8216), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (8213, 8216), True, 'import numpy as np\n'), ((8273, 8283), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (8280, 8283), True, 'import numpy as np\n'), ((8344, 8354), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (8351, 8354), True, 'import numpy as np\n'), ((10088, 10098), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (10095, 10098), True, 'import numpy as np\n'), ((10155, 10165), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (10162, 10165), True, 'import numpy as np\n'), ((10222, 10232), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (10229, 10232), True, 'import numpy as np\n'), ((10293, 10303), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (10300, 10303), True, 'import numpy as np\n')]
|
import time, datetime, argparse
import os, sys
import numpy as np
np.set_printoptions(precision=2)
import matplotlib.pyplot as plt
import copy as cp
import pickle
PROJECT_PATH = '/home/nbuckman/Dropbox (MIT)/DRL/2020_01_cooperative_mpc/mpc-multiple-vehicles/'
sys.path.append(PROJECT_PATH)
import casadi as cas
import src.MPC_Casadi as mpc
import src.TrafficWorld as tw
import src.IterativeBestResponseMPCMultiple as mibr
import src.car_plotting_multiple as cmplot
##########################################################
svo_theta = np.pi/4.0
# random_seed = args.random_seed[0]
random_seed = 3
NEW = True
if NEW:
optional_suffix = "ellipses"
subdir_name = datetime.datetime.now().strftime("%Y%m%d_%H%M%S") + optional_suffix
folder = "results/" + subdir_name + "/"
os.makedirs(folder)
os.makedirs(folder+"imgs/")
os.makedirs(folder+"data/")
os.makedirs(folder+"vids/")
os.makedirs(folder+"plots/")
else:
subdir_name = "20200224-103456_real_dim_CA"
folder = "results/" + subdir_name + "/"
print(folder)
if random_seed > 0:
np.random.seed(random_seed)
#######################################################################
T = 3 # MPC Planning Horizon
dt = 0.3
N = int(T/dt) #Number of control intervals in MPC
n_rounds_mpc = 6
percent_mpc_executed = 0.5 ## This is the percent of MPC that is executed
number_ctrl_pts_executed = int(np.floor(N*percent_mpc_executed))
XAMB_ONLY = False
n_other = 2
n_rounds_ibr = 2
world = tw.TrafficWorld(2, 0, 1000)
# large_world = tw.TrafficWorld(2, 0, 1000, 5.0)
#########################################################################
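# actual_* arrays record only the executed portion of each receding-horizon MPC plan
# (number_ctrl_pts_executed steps per MPC round), concatenated over all rounds.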
actual_xamb = np.zeros((6, n_rounds_mpc*number_ctrl_pts_executed + 1))
actual_uamb = np.zeros((2, n_rounds_mpc*number_ctrl_pts_executed))
actual_xothers = [np.zeros((6, n_rounds_mpc*number_ctrl_pts_executed + 1)) for i in range(n_other)]
actual_uothers = [np.zeros((2, n_rounds_mpc*number_ctrl_pts_executed)) for i in range(n_other)]
actual_all_other_x0 = [np.zeros((6, 2*N)) for i in range(n_other)]
xamb = np.zeros(shape=(6, N+1))
t_start_time = time.time()
####################################################
## Create the Cars in this Problem
all_other_x0 = []
all_other_u = []
all_other_MPC = []
all_other_x = [np.zeros(shape=(6, N+1)) for i in range(n_other)]
next_x0 = 0
for i in range(n_other):
x1_MPC = mpc.MPC(dt)
x1_MPC.n_circles = 3
x1_MPC.theta_iamb = svo_theta
x1_MPC.N = N
x1_MPC.k_change_u_v = 0.001
x1_MPC.max_delta_u = 50 * np.pi/180 * x1_MPC.dt
x1_MPC.k_u_v = 0.01
x1_MPC.k_u_delta = .00001
x1_MPC.k_change_u_v = 0.01
x1_MPC.k_change_u_delta = 0.001
x1_MPC.k_s = 0
x1_MPC.k_x = 0
x1_MPC.k_x_dot = -1.0 / 100.0
x1_MPC.k_lat = 0.001
x1_MPC.k_lon = 0.0
x1_MPC.k_phi_error = 0.001
x1_MPC.k_phi_dot = 0.01
####Vehicle Initial Conditions
if i%2 == 0:
lane_number = 0
next_x0 += x1_MPC.L + 2*x1_MPC.min_dist
else:
lane_number = 1
initial_speed = 0.75*x1_MPC.max_v
traffic_world = world
x1_MPC.fd = x1_MPC.gen_f_desired_lane(traffic_world, lane_number, True)
x0 = np.array([next_x0, traffic_world.get_lane_centerline_y(lane_number), 0, 0, initial_speed, 0]).T
## Set the initial control of the other vehicles
u1 = np.zeros((2,N))
# u1[0,:] = np.clip(np.pi/180 *np.random.normal(size=(1,N)), -2 * np.pi/180, 2 * np.pi/180)
SAME_SIDE = False
if lane_number == 1 or SAME_SIDE:
u1[0,0] = 2 * np.pi/180
else:
u1[0,0] = -2 * np.pi/180
u1[0,0] = 0
all_other_MPC += [x1_MPC]
all_other_x0 += [x0]
all_other_u += [u1]
# Settings for Ambulance
amb_MPC = cp.deepcopy(x1_MPC)
amb_MPC.theta_iamb = 0.0
amb_MPC.k_u_v = 0.0000
amb_MPC.k_u_delta = .01
amb_MPC.k_change_u_v = 0.0000
amb_MPC.k_change_u_delta = 0
amb_MPC.k_s = 0
amb_MPC.k_x = 0
amb_MPC.k_x_dot = -1.0 / 100.0
amb_MPC.k_x = -1.0/100
amb_MPC.k_x_dot = 0
amb_MPC.k_lat = 0.00001
amb_MPC.k_lon = 0.0
# amb_MPC.min_v = 0.8*initial_speed
amb_MPC.max_v = 35 * 0.447 # m/s
amb_MPC.k_phi_error = 0.1
amb_MPC.k_phi_dot = 0.01
NO_GRASS = False
amb_MPC.min_y = world.y_min
amb_MPC.max_y = world.y_max
if NO_GRASS:
amb_MPC.min_y += world.grass_width
amb_MPC.max_y -= world.grass_width
amb_MPC.fd = amb_MPC.gen_f_desired_lane(world, 0, True)
x0_amb = np.array([0, 0, 0, 0, initial_speed , 0]).T
pickle.dump(x1_MPC, open(folder + "data/"+"mpc%d"%i + ".p",'wb'))
pickle.dump(amb_MPC, open(folder + "data/"+"mpcamb" + ".p",'wb'))
########################################################################
#### SOLVE THE MPC #####################################################
for i_mpc in range(n_rounds_mpc):
min_slack = np.infty
actual_t = i_mpc * number_ctrl_pts_executed
###### Update the initial conditions for all vehicles
if i_mpc > 0:
x0_amb = xamb[:, number_ctrl_pts_executed]
for i in range(len(all_other_x0)):
all_other_x0[i] = all_other_x[i][:, number_ctrl_pts_executed]
###### Initial guess for the other u. This will be updated once the other vehicles
###### solve the best response to the ambulance. Initial guess just looks at the last solution. This could also be a lange change
# Obtain a simulated trajectory from other vehicle control inputs
all_other_x = [np.zeros(shape=(6, N+1)) for i in range(n_other)]
all_other_x_des = [np.zeros(shape=(3, N+1)) for i in range(n_other)]
for i in range(n_other):
if i_mpc == 0:
            all_other_u[i] = np.zeros(shape=(2, N))  # match the (2, N) control arrays defined above
else:
all_other_u[i] = np.concatenate((all_other_u[i][:, number_ctrl_pts_executed:], np.tile(all_other_u[i][:,-1:],(1, number_ctrl_pts_executed))),axis=1) ##
x_mpci, u_all_i, x_0_i = all_other_MPC[i], all_other_u[i], all_other_x0[i]
all_other_x[i], all_other_x_des[i] = x_mpci.forward_simulate_all(x_0_i, u_all_i)
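    # Iterative Best Response: the ambulance plans first against the current predictions
    # of the other vehicles, then (unless XAMB_ONLY) each other vehicle best-responds in turn.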
for i_rounds_ibr in range(n_rounds_ibr):
########## Solve the Ambulance MPC ##########
response_MPC = amb_MPC
response_x0 = x0_amb
nonresponse_MPC_list = all_other_MPC
nonresponse_x0_list = all_other_x0
nonresponse_u_list = all_other_u
nonresponse_x_list = all_other_x
nonresponse_xd_list = all_other_x_des
################# Generate the warm starts ###############################
u_warm_profiles = mibr.generate_warm_u(N, response_MPC)
### Ambulance Warm Start
if i_rounds_ibr > 0: # warm start with the solution from the last IBR round
u_warm_profiles["previous"] = uamb
else:
# take the control inputs of the last MPC and continue the ctrl
if i_mpc > 0:
u_warm_profiles["previous"] = np.concatenate((uamb[:, number_ctrl_pts_executed:], np.tile(uamb[:,-1:],(1, number_ctrl_pts_executed))),axis=1) ##
#######################################################################
min_response_cost = 99999999
for k_warm in u_warm_profiles.keys():
u_warm = u_warm_profiles[k_warm]
x_warm, x_des_warm = response_MPC.forward_simulate_all(response_x0.reshape(6,1), u_warm)
bri = mibr.IterativeBestResponseMPCMultiple(response_MPC, None, nonresponse_MPC_list )
k_slack = 10000.0
k_CA = 0.000000000000000
k_CA_power = 4
wall_CA = True
bri.k_slack = k_slack
bri.k_CA = k_CA
bri.k_CA_power = k_CA_power
bri.world = world
bri.wall_CA = wall_CA
# for slack_var in bri.slack_vars_list: ## Added to constrain slacks
# bri.opti.subject_to(cas.vec(slack_var) <= 1.0)
INFEASIBLE = True
bri.generate_optimization(N, T, response_x0, None, nonresponse_x0_list, 1, slack=False)
bri.opti.set_initial(bri.u_opt, u_warm)
bri.opti.set_initial(bri.x_opt, x_warm)
bri.opti.set_initial(bri.x_desired, x_des_warm)
### Set the trajectories of the nonresponse vehicles (as given)
for i in range(n_other):
bri.opti.set_value(bri.allother_x_opt[i], nonresponse_x_list[i])
bri.opti.set_value(bri.allother_x_desired[i], nonresponse_xd_list[i])
### Solve the Optimization
# Debugging
# plot_range = [N]
# bri.opti.callback(lambda i: bri.debug_callback(i, plot_range))
# bri.opti.callback(lambda i: print("J_i %.03f, J_j %.03f, Slack %.03f, CA %.03f"%(bri.solution.value(bri.response_svo_cost), bri.solution.value(bri.other_svo_cost), bri.solution.value(bri.k_slack*bri.slack_cost), bri.solution.value(bri.k_CA*bri.collision_cost))))
try:
bri.solve(None, nonresponse_u_list)
x1, u1, x1_des, _, _, _, _, _, _ = bri.get_solution()
print("i_mpc %d n_round %d i %02d Cost %.02f Slack %.02f "%(i_mpc, i_rounds_ibr, i, bri.solution.value(bri.total_svo_cost), bri.solution.value(bri.slack_cost)))
print("J_i %.03f, J_j %.03f, Slack %.03f, CA %.03f"%(bri.solution.value(bri.response_svo_cost), bri.solution.value(bri.other_svo_cost), bri.solution.value(bri.k_slack*bri.slack_cost), bri.solution.value(bri.k_CA*bri.collision_cost)))
print("Dir:", subdir_name)
print("k_warm", k_warm)
INFEASIBLE = False
if bri.solution.value(bri.slack_cost) < min_slack:
current_cost = bri.solution.value(bri.total_svo_cost)
if current_cost < min_response_cost:
uamb = u1
xamb = x1
xamb_des = x1_des
min_response_cost = current_cost
min_response_warm = k_warm
min_bri = bri
# file_name = folder + "data/"+'%03d'%ibr_sub_it
# mibr.save_state(file_name, xamb, uamb, xamb_des, all_other_x, all_other_u, all_other_x_des)
# mibr.save_costs(file_name, bri)
except RuntimeError:
print("Infeasibility: k_warm %s"%k_warm)
# ibr_sub_it +=1
########### SOLVE FOR THE OTHER VEHICLES ON THE ROAD
if not XAMB_ONLY:
for i in range(len(all_other_MPC)):
response_MPC = all_other_MPC[i]
response_x0 = all_other_x0[i]
nonresponse_MPC_list = all_other_MPC[:i] + all_other_MPC[i+1:]
nonresponse_x0_list = all_other_x0[:i] + all_other_x0[i+1:]
nonresponse_u_list = all_other_u[:i] + all_other_u[i+1:]
nonresponse_x_list = all_other_x[:i] + all_other_x[i+1:]
nonresponse_xd_list = all_other_x_des[:i] + all_other_x_des[i+1:]
################ Warm Start
u_warm_profiles = mibr.generate_warm_u(N, response_MPC)
if i_rounds_ibr > 0: # warm start with the solution from the last IBR round
u_warm_profiles["previous"] = all_other_u[i]
else:
# take the control inputs of the last MPC and continue the ctrl
if i_mpc > 0:
u_warm_profiles["previous"] = np.concatenate((all_other_u[i][:, number_ctrl_pts_executed:], np.tile(all_other_u[i][:,-1:],(1, number_ctrl_pts_executed))),axis=1) ##
min_response_cost = 99999999
for k_warm in u_warm_profiles.keys():
u_warm = u_warm_profiles[k_warm]
x_warm, x_des_warm = response_MPC.forward_simulate_all(response_x0.reshape(6,1), u_warm)
bri = mibr.IterativeBestResponseMPCMultiple(response_MPC, amb_MPC, nonresponse_MPC_list)
bri.k_slack = k_slack
bri.k_CA = k_CA
bri.k_CA_power = k_CA_power
bri.world = world
bri.wall_CA = wall_CA
INFEASIBLE = True
bri.generate_optimization(N, T, response_x0, x0_amb, nonresponse_x0_list, 1, slack=False)
# for slack_var in bri.slack_vars_list: ## Added to constrain slacks
# bri.opti.subject_to(cas.vec(slack_var) <= 1.0)
bri.opti.set_initial(bri.u_opt, u_warm)
bri.opti.set_initial(bri.x_opt, x_warm)
bri.opti.set_initial(bri.x_desired, x_des_warm)
### Set the trajectories of the nonresponse vehicles (as given)
bri.opti.set_value(bri.xamb_opt, xamb)
for i in range(len(nonresponse_x_list)):
bri.opti.set_value(bri.allother_x_opt[i], nonresponse_x_list[i])
bri.opti.set_value(bri.allother_x_desired[i], nonresponse_xd_list[i])
# Debugging
# bri.opti.callback(lambda i: bri.debug_callback(i, [N]))
# bri.opti.callback(lambda i: print("J_i %.03f, J_j %.03f, Slack %.03f, CA %.03f"%(bri.solution.value(bri.response_svo_cost), bri.solution.value(bri.other_svo_cost), bri.solution.value(bri.k_slack*bri.slack_cost), bri.solution.value(bri.k_CA*bri.collision_cost))))
try: ### Solve the Optimization
bri.solve(uamb, nonresponse_u_list)
x1_nr, u1_nr, x1_des_nr, _, _, _, _, _, _ = bri.get_solution()
print(" i_mpc %d n_round %d i %02d Cost %.02f Slack %.02f "%(i_mpc, i_rounds_ibr, i, bri.solution.value(bri.total_svo_cost), bri.solution.value(bri.slack_cost)))
print(" J_i %.03f, J_j %.03f, Slack %.03f, CA %.03f"%(bri.solution.value(bri.response_svo_cost), bri.solution.value(bri.other_svo_cost), bri.solution.value(bri.k_slack*bri.slack_cost), bri.solution.value(bri.k_CA*bri.collision_cost)))
print(" Dir:", subdir_name)
print(" k_warm", k_warm)
INFEASIBLE = False
if bri.solution.value(bri.slack_cost) < min_slack:
current_cost = bri.solution.value(bri.total_svo_cost)
if current_cost < min_response_cost:
all_other_u[i] = u1_nr
                                all_other_x = all_other_x[:i] + [x1_nr] + all_other_x[i+1:]  # replace vehicle i's entry rather than inserting a new one
                                all_other_u = all_other_u[:i] + [u1_nr] + all_other_u[i+1:]
                                all_other_x_des = all_other_x_des[:i] + [x1_des_nr] + all_other_x_des[i+1:]
min_response_cost = current_cost
min_response_warm = k_warm
min_bri = bri
# file_name = folder + "data/"+'%03d'%ibr_sub_it
# mibr.save_state(file_name, xamb, uamb, xamb_des, all_other_x, all_other_u, all_other_x_des)
# mibr.save_costs(file_name, bri)
except RuntimeError:
print(" Infeasibility: k_warm %s"%k_warm)
# ibr_sub_it +=1
#
print(" IBR Done: Rd %02d / %02d"%(i_rounds_ibr, n_rounds_ibr))
file_name = folder + "data/"+'r%02d%03d'%(i_mpc, i_rounds_ibr)
if not INFEASIBLE:
        mibr.save_state(file_name, xamb, uamb, xamb_des, all_other_x, all_other_u, all_other_x_des)
mibr.save_costs(file_name, bri)
actual_t = i_mpc * number_ctrl_pts_executed
actual_xamb[:,actual_t:actual_t+number_ctrl_pts_executed+1] = xamb[:,:number_ctrl_pts_executed+1]
print(" MPC Done: Rd %02d / %02d"%(i_mpc, n_rounds_mpc))
print(" Full MPC Solution", xamb[0:2,:])
print(" Executed MPC", xamb[0:2,:number_ctrl_pts_executed+1])
print(" Solution Costs...")
for cost in bri.car1_costs_list:
print("%.04f"%bri.solution.value(cost))
print(min_bri.solution.value(min_bri.k_CA * min_bri.collision_cost), min_bri.solution.value(min_bri.collision_cost))
print(min_bri.solution.value(min_bri.k_slack * min_bri.slack_cost), min_bri.solution.value(min_bri.slack_cost))
print(" Save to...", file_name)
actual_uamb[:,actual_t:actual_t+number_ctrl_pts_executed] = uamb[:,:number_ctrl_pts_executed]
plot_range = range(N+1)
for k in plot_range:
            cmplot.plot_multiple_cars( k, min_bri.responseMPC, all_other_x, xamb, True, None, None, None, min_bri.world, 0)
plt.show()
plt.plot(xamb[4,:],'--')
plt.plot(xamb[4,:] * np.cos(xamb[2,:]))
plt.ylabel("Velocity / Vx")
plt.hlines(35*0.447,0,xamb.shape[1])
plt.show()
plt.plot(uamb[1,:],'o')
plt.hlines(amb_MPC.max_v_u,0,xamb.shape[1])
plt.ylabel("delta_u_v")
plt.show()
        for i in range(n_other):
            actual_xothers[i][:,actual_t:actual_t+number_ctrl_pts_executed+1] = all_other_x[i][:,:number_ctrl_pts_executed+1]
            actual_uothers[i][:,actual_t:actual_t+number_ctrl_pts_executed] = all_other_u[i][:,:number_ctrl_pts_executed]
# all_other_u[i] = np.concatenate((uothers[i][:, number_ctrl_pts_executed:],uothers[i][:,:number_ctrl_pts_executed]),axis=1)
else:
raise Exception("Xamb is None", i_mpc, i_rounds_ibr, "slack cost", bri.solution.value(bri.slack_cost))
print("Solver Done! Runtime: %.1d"%(time.time()-t_start_time))
|
[
"src.IterativeBestResponseMPCMultiple.save_state",
"matplotlib.pyplot.ylabel",
"src.TrafficWorld.TrafficWorld",
"numpy.array",
"copy.deepcopy",
"sys.path.append",
"src.IterativeBestResponseMPCMultiple.IterativeBestResponseMPCMultiple",
"matplotlib.pyplot.plot",
"numpy.random.seed",
"src.MPC_Casadi.MPC",
"numpy.tile",
"src.car_plotting_multiple.plot_multiple_cars",
"numpy.floor",
"src.IterativeBestResponseMPCMultiple.generate_warm_u",
"numpy.cos",
"time.time",
"matplotlib.pyplot.show",
"numpy.set_printoptions",
"os.makedirs",
"src.IterativeBestResponseMPCMultiple.save_costs",
"matplotlib.pyplot.hlines",
"datetime.datetime.now",
"numpy.zeros"
] |
[((66, 98), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)'}), '(precision=2)\n', (85, 98), True, 'import numpy as np\n'), ((261, 290), 'sys.path.append', 'sys.path.append', (['PROJECT_PATH'], {}), '(PROJECT_PATH)\n', (276, 290), False, 'import os, sys\n'), ((1477, 1504), 'src.TrafficWorld.TrafficWorld', 'tw.TrafficWorld', (['(2)', '(0)', '(1000)'], {}), '(2, 0, 1000)\n', (1492, 1504), True, 'import src.TrafficWorld as tw\n'), ((1647, 1705), 'numpy.zeros', 'np.zeros', (['(6, n_rounds_mpc * number_ctrl_pts_executed + 1)'], {}), '((6, n_rounds_mpc * number_ctrl_pts_executed + 1))\n', (1655, 1705), True, 'import numpy as np\n'), ((1718, 1772), 'numpy.zeros', 'np.zeros', (['(2, n_rounds_mpc * number_ctrl_pts_executed)'], {}), '((2, n_rounds_mpc * number_ctrl_pts_executed))\n', (1726, 1772), True, 'import numpy as np\n'), ((2043, 2069), 'numpy.zeros', 'np.zeros', ([], {'shape': '(6, N + 1)'}), '(shape=(6, N + 1))\n', (2051, 2069), True, 'import numpy as np\n'), ((2083, 2094), 'time.time', 'time.time', ([], {}), '()\n', (2092, 2094), False, 'import time, datetime, argparse\n'), ((3687, 3706), 'copy.deepcopy', 'cp.deepcopy', (['x1_MPC'], {}), '(x1_MPC)\n', (3698, 3706), True, 'import copy as cp\n'), ((788, 807), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (799, 807), False, 'import os, sys\n'), ((812, 841), 'os.makedirs', 'os.makedirs', (["(folder + 'imgs/')"], {}), "(folder + 'imgs/')\n", (823, 841), False, 'import os, sys\n'), ((844, 873), 'os.makedirs', 'os.makedirs', (["(folder + 'data/')"], {}), "(folder + 'data/')\n", (855, 873), False, 'import os, sys\n'), ((876, 905), 'os.makedirs', 'os.makedirs', (["(folder + 'vids/')"], {}), "(folder + 'vids/')\n", (887, 905), False, 'import os, sys\n'), ((908, 938), 'os.makedirs', 'os.makedirs', (["(folder + 'plots/')"], {}), "(folder + 'plots/')\n", (919, 938), False, 'import os, sys\n'), ((1073, 1100), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (1087, 1100), True, 'import numpy as np\n'), ((1386, 1420), 'numpy.floor', 'np.floor', (['(N * percent_mpc_executed)'], {}), '(N * percent_mpc_executed)\n', (1394, 1420), True, 'import numpy as np\n'), ((1789, 1847), 'numpy.zeros', 'np.zeros', (['(6, n_rounds_mpc * number_ctrl_pts_executed + 1)'], {}), '((6, n_rounds_mpc * number_ctrl_pts_executed + 1))\n', (1797, 1847), True, 'import numpy as np\n'), ((1889, 1943), 'numpy.zeros', 'np.zeros', (['(2, n_rounds_mpc * number_ctrl_pts_executed)'], {}), '((2, n_rounds_mpc * number_ctrl_pts_executed))\n', (1897, 1943), True, 'import numpy as np\n'), ((1990, 2010), 'numpy.zeros', 'np.zeros', (['(6, 2 * N)'], {}), '((6, 2 * N))\n', (1998, 2010), True, 'import numpy as np\n'), ((2252, 2278), 'numpy.zeros', 'np.zeros', ([], {'shape': '(6, N + 1)'}), '(shape=(6, N + 1))\n', (2260, 2278), True, 'import numpy as np\n'), ((2352, 2363), 'src.MPC_Casadi.MPC', 'mpc.MPC', (['dt'], {}), '(dt)\n', (2359, 2363), True, 'import src.MPC_Casadi as mpc\n'), ((3303, 3319), 'numpy.zeros', 'np.zeros', (['(2, N)'], {}), '((2, N))\n', (3311, 3319), True, 'import numpy as np\n'), ((4350, 4390), 'numpy.array', 'np.array', (['[0, 0, 0, 0, initial_speed, 0]'], {}), '([0, 0, 0, 0, initial_speed, 0])\n', (4358, 4390), True, 'import numpy as np\n'), ((5355, 5381), 'numpy.zeros', 'np.zeros', ([], {'shape': '(6, N + 1)'}), '(shape=(6, N + 1))\n', (5363, 5381), True, 'import numpy as np\n'), ((5428, 5454), 'numpy.zeros', 'np.zeros', ([], {'shape': '(3, N + 1)'}), '(shape=(3, N + 1))\n', (5436, 5454), True, 'import numpy 
as np\n'), ((6431, 6468), 'src.IterativeBestResponseMPCMultiple.generate_warm_u', 'mibr.generate_warm_u', (['N', 'response_MPC'], {}), '(N, response_MPC)\n', (6451, 6468), True, 'import src.IterativeBestResponseMPCMultiple as mibr\n'), ((15747, 15826), 'src.IterativeBestResponseMPCMultiple.save_state', 'mibr.save_state', (['file_name', 'xamb', 'uamb', 'xamb_des', 'xothers', 'uothers', 'xothers_des'], {}), '(file_name, xamb, uamb, xamb_des, xothers, uothers, xothers_des)\n', (15762, 15826), True, 'import src.IterativeBestResponseMPCMultiple as mibr\n'), ((15835, 15866), 'src.IterativeBestResponseMPCMultiple.save_costs', 'mibr.save_costs', (['file_name', 'bri'], {}), '(file_name, bri)\n', (15850, 15866), True, 'import src.IterativeBestResponseMPCMultiple as mibr\n'), ((16944, 16970), 'matplotlib.pyplot.plot', 'plt.plot', (['xamb[4, :]', '"""--"""'], {}), "(xamb[4, :], '--')\n", (16952, 16970), True, 'import matplotlib.pyplot as plt\n'), ((17025, 17052), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Velocity / Vx"""'], {}), "('Velocity / Vx')\n", (17035, 17052), True, 'import matplotlib.pyplot as plt\n'), ((17061, 17101), 'matplotlib.pyplot.hlines', 'plt.hlines', (['(35 * 0.447)', '(0)', 'xamb.shape[1]'], {}), '(35 * 0.447, 0, xamb.shape[1])\n', (17071, 17101), True, 'import matplotlib.pyplot as plt\n'), ((17106, 17116), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17114, 17116), True, 'import matplotlib.pyplot as plt\n'), ((17125, 17150), 'matplotlib.pyplot.plot', 'plt.plot', (['uamb[1, :]', '"""o"""'], {}), "(uamb[1, :], 'o')\n", (17133, 17150), True, 'import matplotlib.pyplot as plt\n'), ((17157, 17202), 'matplotlib.pyplot.hlines', 'plt.hlines', (['amb_MPC.max_v_u', '(0)', 'xamb.shape[1]'], {}), '(amb_MPC.max_v_u, 0, xamb.shape[1])\n', (17167, 17202), True, 'import matplotlib.pyplot as plt\n'), ((17209, 17232), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""delta_u_v"""'], {}), "('delta_u_v')\n", (17219, 17232), True, 'import matplotlib.pyplot as plt\n'), ((17241, 17251), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17249, 17251), True, 'import matplotlib.pyplot as plt\n'), ((5559, 5581), 'numpy.zeros', 'np.zeros', ([], {'shape': '(6, N)'}), '(shape=(6, N))\n', (5567, 5581), True, 'import numpy as np\n'), ((7247, 7326), 'src.IterativeBestResponseMPCMultiple.IterativeBestResponseMPCMultiple', 'mibr.IterativeBestResponseMPCMultiple', (['response_MPC', 'None', 'nonresponse_MPC_list'], {}), '(response_MPC, None, nonresponse_MPC_list)\n', (7284, 7326), True, 'import src.IterativeBestResponseMPCMultiple as mibr\n'), ((16800, 16910), 'src.car_plotting_multiple.plot_multiple_cars', 'cmplot.plot_multiple_cars', (['k', 'min_bri.responseMPC', 'xothers', 'xamb', '(True)', 'None', 'None', 'None', 'min_bri.world', '(0)'], {}), '(k, min_bri.responseMPC, xothers, xamb, True, None,\n None, None, min_bri.world, 0)\n', (16825, 16910), True, 'import src.car_plotting_multiple as cmplot\n'), ((16925, 16935), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16933, 16935), True, 'import matplotlib.pyplot as plt\n'), ((17826, 17837), 'time.time', 'time.time', ([], {}), '()\n', (17835, 17837), False, 'import time, datetime, argparse\n'), ((672, 695), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (693, 695), False, 'import time, datetime, argparse\n'), ((11081, 11118), 'src.IterativeBestResponseMPCMultiple.generate_warm_u', 'mibr.generate_warm_u', (['N', 'response_MPC'], {}), '(N, response_MPC)\n', (11101, 11118), True, 'import 
src.IterativeBestResponseMPCMultiple as mibr\n'), ((16998, 17016), 'numpy.cos', 'np.cos', (['xamb[2, :]'], {}), '(xamb[2, :])\n', (17004, 17016), True, 'import numpy as np\n'), ((5686, 5748), 'numpy.tile', 'np.tile', (['all_other_u[i][:, -1:]', '(1, number_ctrl_pts_executed)'], {}), '(all_other_u[i][:, -1:], (1, number_ctrl_pts_executed))\n', (5693, 5748), True, 'import numpy as np\n'), ((11921, 12007), 'src.IterativeBestResponseMPCMultiple.IterativeBestResponseMPCMultiple', 'mibr.IterativeBestResponseMPCMultiple', (['response_MPC', 'amb_MPC', 'nonresponse_MPC_list'], {}), '(response_MPC, amb_MPC,\n nonresponse_MPC_list)\n', (11958, 12007), True, 'import src.IterativeBestResponseMPCMultiple as mibr\n'), ((6853, 6905), 'numpy.tile', 'np.tile', (['uamb[:, -1:]', '(1, number_ctrl_pts_executed)'], {}), '(uamb[:, -1:], (1, number_ctrl_pts_executed))\n', (6860, 6905), True, 'import numpy as np\n'), ((11535, 11597), 'numpy.tile', 'np.tile', (['all_other_u[i][:, -1:]', '(1, number_ctrl_pts_executed)'], {}), '(all_other_u[i][:, -1:], (1, number_ctrl_pts_executed))\n', (11542, 11597), True, 'import numpy as np\n')]
|
import numpy as np
# softmax function
def softmax(a):
exp_a = np.exp(a)
sum_a = np.sum(exp_a)
return exp_a / sum_a
# numerically stable softmax: subtracting max(a) before exp avoids overflow
def modified_softmax(a):
maxA = np.max(a)
exp_a = np.exp(a - maxA)
sum_a = np.sum(exp_a)
return exp_a / sum_a
|
[
"numpy.exp",
"numpy.sum",
"numpy.max"
] |
[((68, 77), 'numpy.exp', 'np.exp', (['a'], {}), '(a)\n', (74, 77), True, 'import numpy as np\n'), ((90, 103), 'numpy.sum', 'np.sum', (['exp_a'], {}), '(exp_a)\n', (96, 103), True, 'import numpy as np\n'), ((195, 204), 'numpy.max', 'np.max', (['a'], {}), '(a)\n', (201, 204), True, 'import numpy as np\n'), ((218, 234), 'numpy.exp', 'np.exp', (['(a - maxA)'], {}), '(a - maxA)\n', (224, 234), True, 'import numpy as np\n'), ((247, 260), 'numpy.sum', 'np.sum', (['exp_a'], {}), '(exp_a)\n', (253, 260), True, 'import numpy as np\n')]
|
from itertools import product
import struct
import pickle
import numpy as np
from scipy import sparse
from scipy import isnan as scipy_isnan
import numpy.matlib
ASCII_FACET = """facet normal 0 0 0
outer loop
vertex {face[0][0]:.4f} {face[0][1]:.4f} {face[0][2]:.4f}
vertex {face[1][0]:.4f} {face[1][1]:.4f} {face[1][2]:.4f}
vertex {face[2][0]:.4f} {face[2][1]:.4f} {face[2][2]:.4f}
endloop
endfacet
"""
BINARY_HEADER ="80sI"
BINARY_FACET = "12fH"
class ASCII_STL_Writer(object):
""" Export 3D objects build of 3 or 4 vertices as ASCII STL file.
"""
def __init__(self, stream):
self.fp = stream
self._write_header()
def _write_header(self):
self.fp.write("solid python\n")
def close(self):
self.fp.write("endsolid python\n")
def _write(self, face):
self.fp.write(ASCII_FACET.format(face=face))
def _split(self, face):
p1, p2, p3, p4 = face
return (p1, p2, p3), (p3, p4, p1)
def add_face(self, face):
""" Add one face with 3 or 4 vertices. """
if len(face) == 4:
face1, face2 = self._split(face)
self._write(face1)
self._write(face2)
elif len(face) == 3:
self._write(face)
else:
raise ValueError('only 3 or 4 vertices for each face')
def add_faces(self, faces):
""" Add many faces. """
for face in faces:
self.add_face(face)
class Binary_STL_Writer(ASCII_STL_Writer):
""" Export 3D objects build of 3 or 4 vertices as binary STL file.
"""
def __init__(self, stream):
self.counter = 0
super(Binary_STL_Writer, self).__init__(stream)
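    # The binary STL header stores the total facet count, so it is (re)written
    # in close(), once all faces have been added and self.counter is final.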
def close(self):
self._write_header()
def _write_header(self):
self.fp.seek(0)
self.fp.write(struct.pack(BINARY_HEADER, b'Python Binary STL Writer', self.counter))
def _write(self, face):
self.counter += 1
data = [
0., 0., 0.,
face[0][0], face[0][1], face[0][2],
face[1][0], face[1][1], face[1][2],
face[2][0], face[2][1], face[2][2],
0
]
self.fp.write(struct.pack(BINARY_FACET, *data))
def get_quad(center, n, side=1.):
x, y, z = np.array(center).astype('float64')
n1, n2, n3 = np.array(n).astype('float64')
l = side/2.
nm = np.linalg.norm
s = np.sign
if any(np.isnan(v) for v in n):
return
if np.allclose(n, np.zeros(n.shape)):
return
# Build two vectors orthogonal between themselves and the normal
if (np.abs(n2) > 0.2 or np.abs(n3) > 0.2):
C = np.array([1, 0, 0])
else:
C = np.array([0, 1, 0])
ortho1 = np.cross(n, C)
ortho1 *= l / np.linalg.norm(ortho1)
ortho2 = np.cross(n, ortho1)
ortho2 *= l / np.linalg.norm(ortho2)
#ortho1[[2,1]] = ortho1[[1,2]]
#ortho2[[2,1]] = ortho2[[1,2]]
ortho1[1] = -ortho1[1]
ortho2[1] = -ortho2[1]
return [[
center + ortho1,
center + ortho2,
center - ortho1,
center - ortho2,
]]
def surfaceFromNormals(normals):
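    # Integrate the per-pixel normal field into a height map z(x, y): build a sparse
    # finite-difference system from the normals and solve it in the least-squares sense.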
valid_indices = ~np.isnan(normals)
w, h, d = normals.shape
nx = np.transpose(np.hstack((
normals[:,:,0].ravel(),
normals[:,:,0].ravel(),
)))
ny = np.transpose(np.hstack((
normals[:,:,1].ravel(),
normals[:,:,1].ravel(),
)))
nz = np.transpose(np.hstack((
normals[:,:,2].ravel(),
normals[:,:,2].ravel(),
)))
vectorsize = nz.shape
valid_idx = ~np.isnan(nz)
M = sparse.dia_matrix((2*w*h, w*h), dtype=np.float64)
# n_z z(x + 1, y) - n_z z(x,y) = n_x
M.setdiag(-nz, 0)
M.setdiag(nz, 1)
# n_z z(x, y + 1) - n_z z(x,y) = n_y
M.setdiag(-nz, -w*h)
M.setdiag(np.hstack(([0] * w, nz)), -w*h + w)
# Boundary values
# n_y ( z(x,y) - z(x + 1, y)) = n_x ( z(x,y) - z(x, y + 1))
# TODO: Redo for boundaries in Y-axis
M = M.tolil()
half_size = valid_idx.size // 2
bidxd = np.hstack((np.diff(valid_idx.astype('int8')[:half_size]), [0]))
inner_boundaries = np.roll(bidxd==1, 1) | (bidxd==-1)
outer_boundaries = (bidxd==1) | (np.roll(bidxd==-1, 1))
nz_t = np.transpose(valid_idx.reshape((w,h,d*2//3)), (1, 0, 2)).ravel()
valid_idx_t = ~np.isnan(nz_t)
bidxd = np.hstack((np.diff(valid_idx_t.astype('int8')[:half_size]), [0]))
inner_boundaries |= np.roll(bidxd==1, 1) | (bidxd==-1)
outer_boundaries |= (bidxd==1) | (np.roll(bidxd==-1, 1))
    bidx = np.zeros((half_size,), dtype=bool)
bidx[inner_boundaries] = True
bidx = np.indices(bidx.shape)[0][bidx]
M[bidx, bidx] = nx[bidx]
M[bidx, bidx + w] = -nx[bidx]
M[bidx + half_size, bidx] = ny[bidx]
M[bidx + half_size, bidx + 1] = -ny[bidx]
M = M.tocsr()[valid_idx]
weight = 1
OB = np.zeros((outer_boundaries.sum(), w*h,))
OB[np.indices((outer_boundaries.sum(),))[0], np.where(outer_boundaries==True)] = weight
M = sparse.vstack((M,OB))
# Build [ n_x n_y ]'
m = np.hstack((
normals[:,:,0].ravel(),
normals[:,:,1].ravel(),
)).reshape(-1, 1)
print(inner_boundaries.shape, m.shape)
i_b = np.hstack((inner_boundaries, inner_boundaries)).reshape(-1,1)
print(i_b.shape, m.shape)
m[i_b] = 0
m = m[valid_idx]
m = np.vstack((
m,
np.zeros((outer_boundaries.sum(), 1)),
))
# Solve least squares
assert not np.isnan(m).any()
# x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var = sparse.linalg.lsqr(M, m)
x, istop, itn, normr, normar, norma, conda, normx = sparse.linalg.lsmr(M, m)
# Build the surface (x, y, z) with the computed values of z
surface = np.dstack((
np.indices((w, h))[0],
np.indices((w, h))[1],
x.reshape((w, h))
))
return surface
def writeMesh(surface, normals, filename):
s = surface
with open(filename, 'wb') as fp:
writer = Binary_STL_Writer(fp)
for x in range(0, s.shape[0], 5):
for y in range(0, s.shape[1], 5):
#for x, y in product(range(s.shape[0]), range(s.shape[1])):
quad = get_quad(
s[x,y,:],
normals[x,y,:],
4,
)
if quad:
writer.add_faces(quad)
writer.close()
def write3dNormals(normals, filename):
with open(filename, 'wb') as fp:
writer = Binary_STL_Writer(fp)
for x in range(0, normals.shape[0], 5):
for y in range(0, normals.shape[1], 5):
quad = get_quad(
(0, x, y),
normals[x,y,:],
4,
)
if quad:
writer.add_faces(quad)
writer.close()
def surfaceToHeight(surface):
minH = np.amin(surface[:,:,2])
maxH = np.amax(surface[:,:,2])
scale = maxH - minH
height = (surface[:,:,2] - minH) / scale
return height
def writeObj(surface, normals, filename):
print('obj here')
if __name__ == '__main__':
with open('data.pkl', 'rb') as fhdl:
normals = pickle.load(fhdl)
    surface = surfaceFromNormals(normals)
    writeMesh(surface, normals, 'surface.stl')  # output filename assumed for illustration
|
[
"scipy.sparse.linalg.lsmr",
"numpy.abs",
"numpy.roll",
"numpy.cross",
"numpy.amin",
"numpy.hstack",
"numpy.where",
"scipy.sparse.dia_matrix",
"pickle.load",
"struct.pack",
"numpy.indices",
"numpy.array",
"numpy.zeros",
"numpy.isnan",
"numpy.linalg.norm",
"scipy.sparse.vstack",
"numpy.amax"
] |
[((2702, 2716), 'numpy.cross', 'np.cross', (['n', 'C'], {}), '(n, C)\n', (2710, 2716), True, 'import numpy as np\n'), ((2771, 2790), 'numpy.cross', 'np.cross', (['n', 'ortho1'], {}), '(n, ortho1)\n', (2779, 2790), True, 'import numpy as np\n'), ((3564, 3619), 'scipy.sparse.dia_matrix', 'sparse.dia_matrix', (['(2 * w * h, w * h)'], {'dtype': 'np.float64'}), '((2 * w * h, w * h), dtype=np.float64)\n', (3581, 3619), False, 'from scipy import sparse\n'), ((4522, 4559), 'numpy.zeros', 'np.zeros', (['(half_size,)'], {'dtype': 'np.bool'}), '((half_size,), dtype=np.bool)\n', (4530, 4559), True, 'import numpy as np\n'), ((4982, 5004), 'scipy.sparse.vstack', 'sparse.vstack', (['(M, OB)'], {}), '((M, OB))\n', (4995, 5004), False, 'from scipy import sparse\n'), ((5615, 5639), 'scipy.sparse.linalg.lsmr', 'sparse.linalg.lsmr', (['M', 'm'], {}), '(M, m)\n', (5633, 5639), False, 'from scipy import sparse\n'), ((6859, 6884), 'numpy.amin', 'np.amin', (['surface[:, :, 2]'], {}), '(surface[:, :, 2])\n', (6866, 6884), True, 'import numpy as np\n'), ((6894, 6919), 'numpy.amax', 'np.amax', (['surface[:, :, 2]'], {}), '(surface[:, :, 2])\n', (6901, 6919), True, 'import numpy as np\n'), ((2463, 2480), 'numpy.zeros', 'np.zeros', (['n.shape'], {}), '(n.shape)\n', (2471, 2480), True, 'import numpy as np\n'), ((2627, 2646), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (2635, 2646), True, 'import numpy as np\n'), ((2669, 2688), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (2677, 2688), True, 'import numpy as np\n'), ((2735, 2757), 'numpy.linalg.norm', 'np.linalg.norm', (['ortho1'], {}), '(ortho1)\n', (2749, 2757), True, 'import numpy as np\n'), ((2809, 2831), 'numpy.linalg.norm', 'np.linalg.norm', (['ortho2'], {}), '(ortho2)\n', (2823, 2831), True, 'import numpy as np\n'), ((3135, 3152), 'numpy.isnan', 'np.isnan', (['normals'], {}), '(normals)\n', (3143, 3152), True, 'import numpy as np\n'), ((3542, 3554), 'numpy.isnan', 'np.isnan', (['nz'], {}), '(nz)\n', (3550, 3554), True, 'import numpy as np\n'), ((3778, 3802), 'numpy.hstack', 'np.hstack', (['([0] * w, nz)'], {}), '(([0] * w, nz))\n', (3787, 3802), True, 'import numpy as np\n'), ((4101, 4123), 'numpy.roll', 'np.roll', (['(bidxd == 1)', '(1)'], {}), '(bidxd == 1, 1)\n', (4108, 4123), True, 'import numpy as np\n'), ((4173, 4196), 'numpy.roll', 'np.roll', (['(bidxd == -1)', '(1)'], {}), '(bidxd == -1, 1)\n', (4180, 4196), True, 'import numpy as np\n'), ((4292, 4306), 'numpy.isnan', 'np.isnan', (['nz_t'], {}), '(nz_t)\n', (4300, 4306), True, 'import numpy as np\n'), ((4409, 4431), 'numpy.roll', 'np.roll', (['(bidxd == 1)', '(1)'], {}), '(bidxd == 1, 1)\n', (4416, 4431), True, 'import numpy as np\n'), ((4482, 4505), 'numpy.roll', 'np.roll', (['(bidxd == -1)', '(1)'], {}), '(bidxd == -1, 1)\n', (4489, 4505), True, 'import numpy as np\n'), ((7165, 7182), 'pickle.load', 'pickle.load', (['fhdl'], {}), '(fhdl)\n', (7176, 7182), False, 'import pickle\n'), ((1808, 1877), 'struct.pack', 'struct.pack', (['BINARY_HEADER', "b'Python Binary STL Writer'", 'self.counter'], {}), "(BINARY_HEADER, b'Python Binary STL Writer', self.counter)\n", (1819, 1877), False, 'import struct\n'), ((2165, 2197), 'struct.pack', 'struct.pack', (['BINARY_FACET', '*data'], {}), '(BINARY_FACET, *data)\n', (2176, 2197), False, 'import struct\n'), ((2249, 2265), 'numpy.array', 'np.array', (['center'], {}), '(center)\n', (2257, 2265), True, 'import numpy as np\n'), ((2301, 2312), 'numpy.array', 'np.array', (['n'], {}), '(n)\n', (2309, 2312), True, 'import numpy as 
np\n'), ((2400, 2411), 'numpy.isnan', 'np.isnan', (['v'], {}), '(v)\n', (2408, 2411), True, 'import numpy as np\n'), ((2576, 2586), 'numpy.abs', 'np.abs', (['n2'], {}), '(n2)\n', (2582, 2586), True, 'import numpy as np\n'), ((2596, 2606), 'numpy.abs', 'np.abs', (['n3'], {}), '(n3)\n', (2602, 2606), True, 'import numpy as np\n'), ((4605, 4627), 'numpy.indices', 'np.indices', (['bidx.shape'], {}), '(bidx.shape)\n', (4615, 4627), True, 'import numpy as np\n'), ((4931, 4965), 'numpy.where', 'np.where', (['(outer_boundaries == True)'], {}), '(outer_boundaries == True)\n', (4939, 4965), True, 'import numpy as np\n'), ((5189, 5236), 'numpy.hstack', 'np.hstack', (['(inner_boundaries, inner_boundaries)'], {}), '((inner_boundaries, inner_boundaries))\n', (5198, 5236), True, 'import numpy as np\n'), ((5444, 5455), 'numpy.isnan', 'np.isnan', (['m'], {}), '(m)\n', (5452, 5455), True, 'import numpy as np\n'), ((5739, 5757), 'numpy.indices', 'np.indices', (['(w, h)'], {}), '((w, h))\n', (5749, 5757), True, 'import numpy as np\n'), ((5770, 5788), 'numpy.indices', 'np.indices', (['(w, h)'], {}), '((w, h))\n', (5780, 5788), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
#from matplotlib import pyplot as plt
from tkinter import filedialog
from tkinter import *
root = Tk()
root.withdraw()
root.filename = filedialog.askopenfilename(initialdir = "/",title = "Select file",filetypes = (("all files",".*"),("jpg files",".jpg")))
img = cv2.imread(root.filename)
root.destroy()
# Convert to gray-scale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Blur the image to reduce noise
img_blur = cv2.medianBlur(gray, 5)
# Apply Hough transform on the blurred image (earlier parameter set: img.shape[0]/16, param1=100, param2=11, minRadius=62, maxRadius=67)
# Draw detected circles; circles = cv2.HoughCircles(img_blur, cv2.HOUGH_GRADIENT, 1, img.shape[0]/16, param1=200, param2=25, minRadius=60, maxRadius=67)
face_cascade = cv2.CascadeClassifier('C:/Users/andre/Desktop/NovenoSemestre/VisionArtificial/Python/haarcascade_frontalface_alt.xml')
gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
center = (x + w//2, y + h//2)
#circles = cv2.HoughCircles(img_blur, cv2.HOUGH_GRADIENT, 1, img.shape[0]/128, param1=100, param2=11, minRadius=50, maxRadius=100)
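    # Constrain the circle radius to roughly half the detected face width (w//2 +/- 10 px)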
circles = cv2.HoughCircles(img_blur, cv2.HOUGH_GRADIENT, 1, img.shape[0]/128, param1=100, param2=11, minRadius=(w//2-10), maxRadius=(w//2+10))
    (h, w) = img_blur.shape[:2] # Get the image size
(pointRefX,pointRefY) = center
puntoMinimo =100
if circles is not None:
circles = np.uint16(np.around(circles))
for i in circles[0, :]:
        # Find the circle closest to the face center
xCercano =np.absolute(i[0]-pointRefX)
yCercano =np.absolute(i[1]-pointRefY)
puntoCercano = xCercano+yCercano
if (puntoCercano < puntoMinimo):
puntoMinimo = puntoCercano
circuloCercano = i
# Draw outer circle
#frame = cv2.ellipse(img, center, (w//2, h//2), 0, 0, 360,(100, 7, 55), 2)
cv2.ellipse(img, (circuloCercano[0], circuloCercano[1]),(circuloCercano[2],circuloCercano[2]+15),0,0,360,(0, 255, 0), 2)
# Draw inner circle
cv2.circle(img, (circuloCercano[0], circuloCercano[1]), circuloCercano[2], (0, 255, 0), 2)
cv2.circle(img, (circuloCercano[0], circuloCercano[1]), 2, (0, 0, 255), 3)
""" cv2.circle(img, (circuloCercano[0], circuloCercano[1]), circuloCercano[2], (0, 255, 0), 2)
# Draw inner circle
cv2.circle(img, (circuloCercano[0], circuloCercano[1]), 2, (0, 0, 255), 3) """
""" if circles is not None:
circles = np.uint16(np.around(circles))
for i in circles[0, :]:
#Definir el circulo mas cercano de la
xCercano =np.absolute(i[0]-pointRefX)
yCercano =np.absolute(i[1]-pointRefY)
puntoCercano = xCercano+yCercano
if (puntoCercano < puntoMinimo):
puntoMinimo = puntoCercano
circuloCercano = i
# Draw outer circle
cv2.circle(img, (i[0], i[1]), i[2], (0, 255, 0), 2)
# Draw inner circle
cv2.circle(img, (i[0], i[1]), 2, (0, 0, 255), 3)
"""
cv2.imshow("Mascara",img)
cv2.waitKey(0)
|
[
"numpy.absolute",
"cv2.medianBlur",
"cv2.HoughCircles",
"cv2.imshow",
"cv2.ellipse",
"cv2.circle",
"cv2.waitKey",
"numpy.around",
"cv2.cvtColor",
"cv2.CascadeClassifier",
"cv2.imread",
"tkinter.filedialog.askopenfilename"
] |
[((208, 332), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'initialdir': '"""/"""', 'title': '"""Select file"""', 'filetypes': "(('all files', '.*'), ('jpg files', '.jpg'))"}), "(initialdir='/', title='Select file', filetypes=(\n ('all files', '.*'), ('jpg files', '.jpg')))\n", (234, 332), False, 'from tkinter import filedialog\n'), ((336, 361), 'cv2.imread', 'cv2.imread', (['root.filename'], {}), '(root.filename)\n', (346, 361), False, 'import cv2\n'), ((413, 450), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (425, 450), False, 'import cv2\n'), ((497, 520), 'cv2.medianBlur', 'cv2.medianBlur', (['gray', '(5)'], {}), '(gray, 5)\n', (511, 520), False, 'import cv2\n'), ((800, 928), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""C:/Users/andre/Desktop/NovenoSemestre/VisionArtificial/Python/haarcascade_frontalface_alt.xml"""'], {}), "(\n 'C:/Users/andre/Desktop/NovenoSemestre/VisionArtificial/Python/haarcascade_frontalface_alt.xml'\n )\n", (821, 928), False, 'import cv2\n'), ((929, 966), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (941, 966), False, 'import cv2\n'), ((1228, 1370), 'cv2.HoughCircles', 'cv2.HoughCircles', (['img_blur', 'cv2.HOUGH_GRADIENT', '(1)', '(img.shape[0] / 128)'], {'param1': '(100)', 'param2': '(11)', 'minRadius': '(w // 2 - 10)', 'maxRadius': '(w // 2 + 10)'}), '(img_blur, cv2.HOUGH_GRADIENT, 1, img.shape[0] / 128,\n param1=100, param2=11, minRadius=w // 2 - 10, maxRadius=w // 2 + 10)\n', (1244, 1370), False, 'import cv2\n'), ((1991, 2123), 'cv2.ellipse', 'cv2.ellipse', (['img', '(circuloCercano[0], circuloCercano[1])', '(circuloCercano[2], circuloCercano[2] + 15)', '(0)', '(0)', '(360)', '(0, 255, 0)', '(2)'], {}), '(img, (circuloCercano[0], circuloCercano[1]), (circuloCercano[2],\n circuloCercano[2] + 15), 0, 0, 360, (0, 255, 0), 2)\n', (2002, 2123), False, 'import cv2\n'), ((2134, 2228), 'cv2.circle', 'cv2.circle', (['img', '(circuloCercano[0], circuloCercano[1])', 'circuloCercano[2]', '(0, 255, 0)', '(2)'], {}), '(img, (circuloCercano[0], circuloCercano[1]), circuloCercano[2],\n (0, 255, 0), 2)\n', (2144, 2228), False, 'import cv2\n'), ((2226, 2300), 'cv2.circle', 'cv2.circle', (['img', '(circuloCercano[0], circuloCercano[1])', '(2)', '(0, 0, 255)', '(3)'], {}), '(img, (circuloCercano[0], circuloCercano[1]), 2, (0, 0, 255), 3)\n', (2236, 2300), False, 'import cv2\n'), ((3134, 3160), 'cv2.imshow', 'cv2.imshow', (['"""Mascara"""', 'img'], {}), "('Mascara', img)\n", (3144, 3160), False, 'import cv2\n'), ((3163, 3177), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3174, 3177), False, 'import cv2\n'), ((1524, 1542), 'numpy.around', 'np.around', (['circles'], {}), '(circles)\n', (1533, 1542), True, 'import numpy as np\n'), ((1653, 1682), 'numpy.absolute', 'np.absolute', (['(i[0] - pointRefX)'], {}), '(i[0] - pointRefX)\n', (1664, 1682), True, 'import numpy as np\n'), ((1701, 1730), 'numpy.absolute', 'np.absolute', (['(i[1] - pointRefY)'], {}), '(i[1] - pointRefY)\n', (1712, 1730), True, 'import numpy as np\n')]
|
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import qtawesome
import matplotlib.pyplot as plt
import csv
import numpy as np
import datetime
import os
class Stack:
def __init__(self):
self.items=[]
def isEmpty(self):
return self.items==[]
def push(self,item):
self.items.append(item)
def peek(self):
return self.items[len(self.items)-1]
def pop(self):
return self.items.pop()
def size(self):
return len(self.items)
class MainUI(QMainWindow):
def __init__(self):
super().__init__()
self.initUI()
self.advice=[]
self.stack=Stack()
self.isLeftPressDown = False
self.dragPosition = 0
self.Numbers = self.enum(UP=0, DOWN=1, LEFT=2, RIGHT=3, LEFTTOP=4, LEFTBOTTOM=5, RIGHTBOTTOM=6, RIGHTTOP=7,NONE=8)
self.dir = self.Numbers.NONE
self.setMouseTracking(True)
def enum(self, **enums):
return type('Enum', (), enums)
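    # The window is created frameless (Qt.FramelessWindowHint), so the mouse event
    # handlers below implement window dragging and edge/corner resizing manually.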
def mouseReleaseEvent(self, event):
if (event.button() == Qt.LeftButton):
self.isLeftPressDown = False
if (self.dir != self.Numbers.NONE):
self.releaseMouse()
def mousePressEvent(self, event):
if (event.button() == Qt.LeftButton):
self.isLeftPressDown = True
if (self.dir != self.Numbers.NONE):
                self.grabMouse()
else:
self.dragPosition = event.globalPos() - self.frameGeometry().topLeft()
def mouseMoveEvent(self, event):
gloPoint = event.globalPos()
rect = self.rect()
tl = self.mapToGlobal(rect.topLeft())
rb = self.mapToGlobal(rect.bottomRight())
if (not self.isLeftPressDown):
self.region(gloPoint)
else:
if (self.dir != self.Numbers.NONE):
rmove = QRect(tl, rb)
if (self.dir == self.Numbers.LEFT):
if (rb.x() - gloPoint.x() <= self.minimumWidth()):
rmove.setX(tl.x())
else:
rmove.setX(gloPoint.x())
elif (self.dir == self.Numbers.RIGHT):
rmove.setWidth(gloPoint.x() - tl.x())
elif (self.dir == self.Numbers.UP):
if (rb.y() - gloPoint.y() <= self.minimumHeight()):
rmove.setY(tl.y())
else:
rmove.setY(gloPoint.y())
elif (self.dir == self.Numbers.DOWN):
rmove.setHeight(gloPoint.y() - tl.y())
elif (self.dir == self.Numbers.LEFTTOP):
if (rb.x() - gloPoint.x() <= self.minimumWidth()):
rmove.setX(tl.x())
else:
rmove.setX(gloPoint.x())
if (rb.y() - gloPoint.y() <= self.minimumHeight()):
rmove.setY(tl.y())
else:
rmove.setY(gloPoint.y())
elif (self.dir == self.Numbers.RIGHTTOP):
rmove.setWidth(gloPoint.x() - tl.x())
rmove.setY(gloPoint.y())
elif (self.dir == self.Numbers.LEFTBOTTOM):
rmove.setX(gloPoint.x())
rmove.setHeight(gloPoint.y() - tl.y())
elif (self.dir == self.Numbers.RIGHTBOTTOM):
rmove.setWidth(gloPoint.x() - tl.x())
rmove.setHeight(gloPoint.y() - tl.y())
else:
pass
self.setGeometry(rmove)
else:
self.move(event.globalPos() - self.dragPosition)
event.accept()
def initUI(self):
self.setFixedSize(1200,900)
self.main_widget = QWidget()
self.main_layout = QGridLayout()
self.main_widget.setLayout(self.main_layout)
self.left_widget = QWidget()
self.left_widget.setObjectName('left_widget')
self.left_layout = QGridLayout()
self.left_widget.setLayout(self.left_layout)
self.right_widget = QWidget()
self.right_widget.setObjectName('right_widget')
self.right_layout = QGridLayout()
self.right_widget.setLayout(self.right_layout)
self.main_layout.addWidget(self.left_widget,0,0,16,2)
self.main_layout.addWidget(self.right_widget,0,2,16,9)
self.setCentralWidget(self.main_widget)
self.left_label_1 = QPushButton("参数设置")
self.left_label_1.setObjectName('left_label')
self.left_label_1.setEnabled(False)
self.left_label_2 = QPushButton("图像显示")
self.left_label_2.setObjectName('left_label')
self.left_label_2.setEnabled(False)
self.left_label_3 = QPushButton("帮助")
self.left_label_3.setObjectName('left_label')
self.left_label_3.setEnabled(False)
self.left_button_1 = QPushButton(qtawesome.icon('fa.rmb', color='white'), "设置期初资金")
self.left_button_1.setObjectName('left_button')
self.left_button_1.clicked.connect(self.buttonDialog1)
self.left_button_2 = QPushButton(qtawesome.icon('fa.hourglass-start', color='white'), "设置交易开始时间")
self.left_button_2.setObjectName('left_button')
self.left_button_2.clicked.connect(self.buttonDialog2)
self.left_button_3 = QPushButton(qtawesome.icon('fa.hourglass-end', color='white'), "设置交易结束时间")
self.left_button_3.setObjectName('left_button')
self.left_button_3.clicked.connect(self.buttonDialog3)
self.left_button_4 = QPushButton(qtawesome.icon('fa.line-chart', color='white'), "修改唐奇安通道")
self.left_button_4.setObjectName('left_button')
self.left_button_4.clicked.connect(self.buttonDialog4)
self.left_button_5 = QPushButton(qtawesome.icon('fa.check-circle-o', color='white'), "修改ATR")
self.left_button_5.setObjectName('left_button')
self.left_button_5.clicked.connect(self.buttonDialog5)
self.left_button_6 = QPushButton(qtawesome.icon('fa.pie-chart', color='white'), "修改手续费")
self.left_button_6.setObjectName('left_button')
self.left_button_6.clicked.connect(self.buttonDialog6)
self.left_button_7 = QPushButton(qtawesome.icon('fa.sort-amount-asc', color='white'), "修改投资系数")
self.left_button_7.setObjectName('left_button')
self.left_button_7.clicked.connect(self.buttonDialog7)
self.left_checkbox_1 = QCheckBox('策略收益')
self.left_checkbox_1.setChecked(True)
self.left_checkbox_2 = QCheckBox('沪深300')
self.left_checkbox_2.setChecked(True)
self.left_checkbox_3 = QCheckBox('仓位图')
self.left_checkbox_3.setChecked(True)
self.left_button_8 = QPushButton(qtawesome.icon('fa.question', color='white'), "专业名词含义查询")
self.left_button_8.setObjectName('left_button')
self.left_button_8.clicked.connect(self.buttonDialog8)
self.left_button_9 = QPushButton(qtawesome.icon('fa.comment', color='white'), "反馈建议")
self.left_button_9.setObjectName('left_button')
self.left_button_9.clicked.connect(self.buttonDialog9)
self.left_button_10 = QPushButton(qtawesome.icon('fa.envelope', color='white'), "联系我们")
self.left_button_10.setObjectName('left_button')
self.left_button_10.clicked.connect(self.buttonDialog10)
self.left_layout.addWidget(self.left_label_1, 0, 0, 1, 3)
self.left_layout.addWidget(self.left_button_1, 1, 0, 1, 3)
self.left_layout.addWidget(self.left_button_2, 2, 0, 1, 3)
self.left_layout.addWidget(self.left_button_3, 3, 0, 1, 3)
self.left_layout.addWidget(self.left_button_4, 4, 0, 1, 3)
self.left_layout.addWidget(self.left_button_5, 5, 0, 1, 3)
self.left_layout.addWidget(self.left_button_6, 6, 0, 1, 3)
self.left_layout.addWidget(self.left_button_7, 7, 0, 1, 3)
self.left_layout.addWidget(self.left_label_2, 8, 0, 1, 3)
self.left_layout.addWidget(self.left_checkbox_1, 9, 0, 1, 3)
self.left_layout.addWidget(self.left_checkbox_2, 10, 0, 1, 3)
self.left_layout.addWidget(self.left_checkbox_3, 11, 0, 1, 3)
self.left_layout.addWidget(self.left_label_3, 12, 0, 1, 3)
self.left_layout.addWidget(self.left_button_8, 13, 0, 1, 3)
self.left_layout.addWidget(self.left_button_9, 14, 0, 1, 3)
self.left_layout.addWidget(self.left_button_10, 15, 0, 1, 3)
self.left_checkbox_1.setStyleSheet("QCheckBox{color:rgb(255,250,250)}")
self.left_checkbox_2.setStyleSheet("QCheckBox{color:rgb(255,250,250)}")
self.left_checkbox_3.setStyleSheet("QCheckBox{color:rgb(255,250,250)}")
self.left_widget.setStyleSheet('''
QCheckBox{font-family: "Helvetica Neue", Helvetica, KaiTi, sans-serif;
font-size:16px}
QPushButton{border:none;
color:white;
text-align: left;
font-family: "Helvetica Neue", Helvetica, KaiTi, sans-serif;
font-size:16px}
QPushButton#left_label{
border:none;
border-bottom:1px solid white;
font-size:20px;
font-weight:700;
font-family: "Helvetica Neue", Helvetica, KaiTi, sans-serif;
}
QPushButton#left_button:hover{border-left:4px solid blue;font-weight:700;}
QWidget#left_widget{
background:gray;
border-top:1px solid white;
border-bottom:1px solid white;
border-left:1px solid white;
border-top-left-radius:10px;
border-bottom-left-radius:10px;
}
''')
self.right_label_0 =QLabel('')
self.right_label_1 = QLabel('期初资金')
self.right_label_1.setAlignment(Qt.AlignCenter)
self.right_label_1.setFont(QFont('KaiTi',12))
self.right_label_2 = QLabel('总资产')
self.right_label_2.setAlignment(Qt.AlignCenter)
self.right_label_2.setFont(QFont('KaiTi', 12))
self.right_label_3 = QLabel('累计盈亏')
self.right_label_3.setAlignment(Qt.AlignCenter)
self.right_label_3.setFont(QFont('KaiTi', 12))
self.right_label_4 = QLabel('可交易天数')
self.right_label_4.setAlignment(Qt.AlignCenter)
self.right_label_4.setFont(QFont('KaiTi', 12))
self.right_label_5 = QLabel('基准收益率')
self.right_label_5.setAlignment(Qt.AlignCenter)
self.right_label_5.setFont(QFont('KaiTi', 12))
self.right_label_6 = QLabel('年化收益率')
self.right_label_6.setAlignment(Qt.AlignCenter)
self.right_label_6.setFont(QFont('KaiTi', 12))
self.right_label_7 = QLabel('开始时间')
self.right_label_7.setAlignment(Qt.AlignCenter)
self.right_label_7.setFont(QFont('KaiTi', 12))
self.right_label_8 = QLabel('结束时间')
self.right_label_8.setAlignment(Qt.AlignCenter)
self.right_label_8.setFont(QFont('KaiTi', 12))
self.right_label_9 = QLabel('胜率')
self.right_label_9.setAlignment(Qt.AlignCenter)
self.right_label_9.setFont(QFont('KaiTi', 12))
self.right_layout.addWidget(self.right_label_0, 0, 3, 1, 3)
self.right_layout.addWidget(self.right_label_1, 1, 3, 1, 1)
self.right_layout.addWidget(self.right_label_2, 1, 4, 1, 1)
self.right_layout.addWidget(self.right_label_3, 1, 5, 1, 1)
self.right_layout.addWidget(self.right_label_4, 1, 6, 1, 1)
self.right_layout.addWidget(self.right_label_5, 1, 7, 1, 1)
self.right_layout.addWidget(self.right_label_6, 1, 8, 1, 1)
self.right_layout.addWidget(self.right_label_7, 1, 9, 1, 1)
self.right_layout.addWidget(self.right_label_8, 1, 10, 1, 1)
self.right_layout.addWidget(self.right_label_9, 1, 11, 1, 1)
self.right_lineEdit_1 = QLineEdit()
self.right_lineEdit_1.setReadOnly(True)
self.right_lineEdit_1.setText('')
self.right_lineEdit_2 = QLineEdit()
self.right_lineEdit_2.setReadOnly(True)
self.right_lineEdit_2.setText('')
self.right_lineEdit_3 = QLineEdit()
self.right_lineEdit_3.setReadOnly(True)
self.right_lineEdit_3.setText('')
self.right_lineEdit_4 = QLineEdit()
self.right_lineEdit_4.setReadOnly(True)
self.right_lineEdit_4.setText('')
self.right_lineEdit_5 = QLineEdit()
self.right_lineEdit_5.setReadOnly(True)
self.right_lineEdit_5.setText('')
self.right_lineEdit_6 = QLineEdit()
self.right_lineEdit_6.setReadOnly(True)
self.right_lineEdit_6.setText('')
self.right_lineEdit_7 = QLineEdit()
self.right_lineEdit_7.setReadOnly(True)
self.right_lineEdit_7.setText('')
self.right_lineEdit_8 = QLineEdit()
self.right_lineEdit_8.setReadOnly(True)
self.right_lineEdit_8.setText('')
self.right_lineEdit_9 = QLineEdit()
self.right_lineEdit_9.setReadOnly(True)
self.right_lineEdit_9.setText('')
self.right_layout.addWidget(self.right_lineEdit_1, 2, 3, 1, 1)
self.right_layout.addWidget(self.right_lineEdit_2, 2, 4, 1, 1)
self.right_layout.addWidget(self.right_lineEdit_3, 2, 5, 1, 1)
self.right_layout.addWidget(self.right_lineEdit_4, 2, 6, 1, 1)
self.right_layout.addWidget(self.right_lineEdit_5, 2, 7, 1, 1)
self.right_layout.addWidget(self.right_lineEdit_6, 2, 8, 1, 1)
self.right_layout.addWidget(self.right_lineEdit_7, 2, 9, 1, 1)
self.right_layout.addWidget(self.right_lineEdit_8, 2, 10, 1, 1)
self.right_layout.addWidget(self.right_lineEdit_9, 2, 11, 1, 1)
self.right_figure_1 = QLabel()
self.figure_1 = QPixmap("猫咪老师4.png")
self.right_figure_1.setPixmap(self.figure_1)
self.right_figure_1.setScaledContents(True)
self.right_figure_2 = QLabel()
self.figure_2 = QPixmap("喵.png")
self.right_figure_2.setPixmap(self.figure_2)
self.right_figure_2.setScaledContents(True)
self.right_layout.addWidget(self.right_figure_1, 3, 3, 7, 9)
self.right_layout.addWidget(self.right_figure_2, 10, 3, 5, 9)
self.right_button_1 = QPushButton(qtawesome.icon('fa.repeat', color='blue'), "测试/重测")
self.right_button_1.clicked.connect(self.start)
self.right_button_1.clicked.connect(self.tryOrRepeat1)
self.right_button_1.clicked.connect(self.tryOrRepeat2)
self.right_button_2 = QPushButton(qtawesome.icon('fa.floppy-o', color='gray'), "删除当前结果")
self.right_button_2.clicked.connect(self.figuredelete)
self.right_button_3 = QPushButton(qtawesome.icon('fa.times', color='red'), "退出")
self.right_button_3.clicked.connect(self.quitApplicaton)
self.right_layout.addWidget(self.right_button_1, 16, 3, 1, 3)
self.right_layout.addWidget(self.right_button_2, 16, 6, 1, 3)
self.right_layout.addWidget(self.right_button_3, 16, 9, 1, 3)
self.right_widget.setStyleSheet('''
QWidget#right_widget{
color:#232C51;
background:white;
border-top:1px solid darkGray;
border-bottom:1px solid darkGray;
border-right:1px solid darkGray;
border-top-right-radius:10px;
border-bottom-right-radius:10px;
}
QLabel{
border:None;
font-weight:700;
        font-size:25px;
font-family: "Helvetica Neue", Helvetica, KaiTi, sans-serif;
}
QLineEdit{
font:bold;
border:1px solid gray;
width:300px;
padding:2px 4px;
background-color:rgb(255,250,250);
selection-color:white;
}
QPushButton{font-family: "Helvetica Neue", Helvetica, KaiTi, sans-serif;
font-size:16px}
''')
self.setAttribute(Qt.WA_TranslucentBackground)
self.setWindowFlag(Qt.FramelessWindowHint)
self.main_layout.setSpacing(0)
def buttonDialog1(self):
self.dialog1 = QDialog()
self.dialog1.setWindowIcon(QIcon("猫咪老师1.jpg"))
self.dialog1.resize(250,100)
self.dialog1.setWindowTitle('设置期初资金')
formLayout = QFormLayout()
label = QLabel('请输入您的期初资金(整数万元)')
self.edit1 = QLineEdit()
self.edit1.setValidator(QIntValidator())
self.edit1.setAlignment(Qt.AlignRight)
self.edit1.setFont(QFont('Arial', 10))
button_ok = QPushButton('OK')
button_ok.clicked.connect(self.okk1)
button_cancel = QPushButton('Cancel')
button_cancel.clicked.connect(self.cancel1)
formLayout.addRow(label)
formLayout.addRow(self.edit1)
formLayout.addRow(button_ok, button_cancel)
self.dialog1.setLayout(formLayout)
self.dialog1.setStyleSheet('''
QPushButton{color:black;text-align: center;}
QLabel{font-family: "Helvetica Neue", Helvetica, KaiTi, sans-serif;font-size:16px}
QDialog{background:lightgray;
border-top:1px solid royalblue;
border-bottom:1px solid royalblue;
border-left:1px solid royalblue;
border-right:1px solid royalblue;
border-top-left-radius:10px;
border-bottom-left-radius:10px;
border-top-right-radius:10px;
border-bottom-right-radius:10px;
}
''')
self.dialog1.setWindowModality(Qt.ApplicationModal)
self.dialog1.exec_()
def okk1(self):
if self.edit1.text() != '':
global initial_cash
global cash
initial_cash=eval(self.edit1.text())*10000
self.dialog1.close()
def cancel1(self):
self.edit1.setText('')
def buttonDialog2(self):
self.dialog2 = QDialog()
self.dialog2.setWindowIcon(QIcon("猫咪老师1.jpg"))
self.dialog2.resize(280,100)
self.dialog2.setWindowTitle('设置交易开始时间')
formLayout = QFormLayout()
label1 = QLabel('请输入您的交易开始时间')
label2 = QLabel('时间格式示例:2011-03-01')
label3 = QLabel('时间范围为2011-03-01至2021-04-01')
self.edit2 = QLineEdit()
self.edit2.setAlignment(Qt.AlignRight)
self.edit2.setFont(QFont('Arial', 10))
button_ok = QPushButton('OK')
button_ok.clicked.connect(self.okk2)
button_cancel = QPushButton('Cancel')
button_cancel.clicked.connect(self.cancel2)
formLayout.addRow(label1)
formLayout.addRow(label2)
formLayout.addRow(label3)
formLayout.addRow(self.edit2)
formLayout.addRow(button_ok, button_cancel)
self.dialog2.setLayout(formLayout)
self.dialog2.setStyleSheet('''
QPushButton{color:black;text-align: center;}
QLabel{font-family: "Helvetica Neue", Helvetica, KaiTi, sans-serif;font-size:16px}
QDialog{background:lightgray;
border-top:1px solid royalblue;
border-bottom:1px solid royalblue;
border-left:1px solid royalblue;
border-right:1px solid royalblue;
border-top-left-radius:10px;
border-bottom-left-radius:10px;
border-top-right-radius:10px;
border-bottom-right-radius:10px;
}
''')
self.dialog2.setWindowModality(Qt.ApplicationModal)
self.dialog2.exec_()
def okk2(self):
if self.edit2.text()!='':
global start_time
start_time=self.edit2.text()
start_time = nearestdate(start_time, 1)
self.dialog2.close()
def cancel2(self):
self.edit2.setText('')
def buttonDialog3(self):
self.dialog3 = QDialog()
self.dialog3.setWindowIcon(QIcon("猫咪老师1.jpg"))
self.dialog3.resize(280,100)
self.dialog3.setWindowTitle('设置交易结束时间')
formLayout = QFormLayout()
label1 = QLabel('请输入您的交易结束时间')
label2 = QLabel('时间格式示例:2021-04-01')
label3 = QLabel('时间范围为2011-03-01至2021-04-01')
self.edit3 = QLineEdit()
self.edit3.setAlignment(Qt.AlignRight)
self.edit3.setFont(QFont('Arial', 10))
button_ok = QPushButton('OK')
button_ok.clicked.connect(self.okk3)
button_cancel = QPushButton('Cancel')
button_cancel.clicked.connect(self.cancel3)
formLayout.addRow(label1)
formLayout.addRow(label2)
formLayout.addRow(label3)
formLayout.addRow(self.edit3)
formLayout.addRow(button_ok, button_cancel)
self.dialog3.setLayout(formLayout)
self.dialog3.setStyleSheet('''
QPushButton{color:black;text-align: center;}
QLabel{font-family: "Helvetica Neue", Helvetica, KaiTi, sans-serif;font-size:16px}
QDialog{background:lightgray;
border-top:1px solid royalblue;
border-bottom:1px solid royalblue;
border-left:1px solid royalblue;
border-right:1px solid royalblue;
border-top-left-radius:10px;
border-bottom-left-radius:10px;
border-top-right-radius:10px;
border-bottom-right-radius:10px;
}
''')
self.dialog3.setWindowModality(Qt.ApplicationModal)
self.dialog3.exec_()
def okk3(self):
if self.edit3.text()!='':
global end_time
end_time=self.edit3.text()
end_time = nearestdate(end_time, -1)
self.dialog3.close()
def cancel3(self):
self.edit3.setText('')
def buttonDialog4(self):
self.dialog4 = QDialog()
self.dialog4.setWindowIcon(QIcon("猫咪老师1.jpg"))
self.dialog4.resize(280,100)
self.dialog4.setWindowTitle('修改唐奇安通道')
formLayout = QFormLayout()
label = QLabel('唐奇安通道修改为(5~50):')
self.edit4 = QLineEdit('20')
self.edit4.setReadOnly(True)
self.edit4.setAlignment(Qt.AlignRight)
self.edit4.setFont(QFont('Arial', 10))
self.slider1 = QSlider(Qt.Horizontal)
self.slider1.setMinimum(5)
self.slider1.setMaximum(50)
self.slider1.setSingleStep(1)
self.slider1.setValue(20)
self.slider1.setTickPosition(QSlider.TicksBelow)
self.slider1.setTickInterval(1)
self.slider1.valueChanged.connect(self.valueChange1)
button_ok = QPushButton('OK')
button_ok.clicked.connect(self.okk4)
button_cancel = QPushButton('Cancel')
button_cancel.clicked.connect(self.cancel4)
formLayout.addRow(label)
formLayout.addRow(self.edit4)
formLayout.addRow(self.slider1)
formLayout.addRow(button_ok, button_cancel)
self.dialog4.setLayout(formLayout)
self.dialog4.setStyleSheet('''
QPushButton{color:black;text-align: center;}
QLabel{font-family: "Helvetica Neue", Helvetica, KaiTi, sans-serif;font-size:16px}
QDialog{background:lightgray;
border-top:1px solid royalblue;
border-bottom:1px solid royalblue;
border-left:1px solid royalblue;
border-right:1px solid royalblue;
border-top-left-radius:10px;
border-bottom-left-radius:10px;
border-top-right-radius:10px;
border-bottom-right-radius:10px;
}
''')
self.dialog4.setWindowModality(Qt.ApplicationModal)
self.dialog4.exec_()
def okk4(self):
global Dontime
Dontime=int(self.edit4.text())
self.dialog4.close()
def cancel4(self):
self.slider1.setValue(20)
def valueChange1(self):
self.edit4.setText('%d'%self.slider1.value())
def buttonDialog5(self):
self.dialog5 = QDialog()
self.dialog5.setWindowIcon(QIcon("猫咪老师1.jpg"))
self.dialog5.resize(250,100)
self.dialog5.setWindowTitle('修改ATR')
formLayout = QFormLayout()
label = QLabel('ATR修改为(5~50):')
self.edit5 = QLineEdit('20')
self.edit5.setReadOnly(True)
self.edit5.setAlignment(Qt.AlignRight)
self.edit5.setFont(QFont('Arial', 10))
self.slider2 = QSlider(Qt.Horizontal)
self.slider2.setMinimum(5)
self.slider2.setMaximum(50)
self.slider2.setSingleStep(1)
self.slider2.setValue(20)
self.slider2.setTickPosition(QSlider.TicksBelow)
self.slider2.setTickInterval(1)
self.slider2.valueChanged.connect(self.valueChange2)
button_ok = QPushButton('OK')
button_ok.clicked.connect(self.okk5)
button_cancel = QPushButton('Cancel')
button_cancel.clicked.connect(self.cancel5)
formLayout.addRow(label)
formLayout.addRow(self.edit5)
formLayout.addRow(self.slider2)
formLayout.addRow(button_ok, button_cancel)
self.dialog5.setLayout(formLayout)
self.dialog5.setStyleSheet('''
QPushButton{color:black;text-align: center;}
QLabel{font-family: "Helvetica Neue", Helvetica, KaiTi, sans-serif;font-size:16px}
QDialog{background:lightgray;
border-top:1px solid royalblue;
border-bottom:1px solid royalblue;
border-left:1px solid royalblue;
border-right:1px solid royalblue;
border-top-left-radius:10px;
border-bottom-left-radius:10px;
border-top-right-radius:10px;
border-bottom-right-radius:10px;
}
''')
self.dialog5.setWindowModality(Qt.ApplicationModal)
self.dialog5.exec_()
def okk5(self):
global atrtime
atrtime=int(self.edit5.text())
self.dialog5.close()
def cancel5(self):
self.slider2.setValue(20)
def valueChange2(self):
self.edit5.setText('%d'%self.slider2.value())
def buttonDialog6(self):
self.dialog6 = QDialog()
self.dialog6.setWindowIcon(QIcon("猫咪老师1.jpg"))
self.dialog6.resize(280,100)
self.dialog6.setWindowTitle('修改手续费')
formLayout = QFormLayout()
label = QLabel('修改手续费为(单位:万分之一):')
self.edit6 = QLineEdit('1')
self.edit6.setValidator(QIntValidator())
self.edit6.setAlignment(Qt.AlignRight)
self.edit6.setFont(QFont('Arial', 10))
button_ok = QPushButton('OK')
button_ok.clicked.connect(self.okk6)
button_cancel = QPushButton('Cancel')
button_cancel.clicked.connect(self.cancel6)
formLayout.addRow(label)
formLayout.addRow(self.edit6)
formLayout.addRow(button_ok, button_cancel)
self.dialog6.setLayout(formLayout)
self.dialog6.setStyleSheet('''
QPushButton{color:black;text-align: center;}
QLabel{font-family: "Helvetica Neue", Helvetica, KaiTi, sans-serif;font-size:16px}
QDialog{background:lightgray;
border-top:1px solid royalblue;
border-bottom:1px solid royalblue;
border-left:1px solid royalblue;
border-right:1px solid royalblue;
border-top-left-radius:10px;
border-bottom-left-radius:10px;
border-top-right-radius:10px;
border-bottom-right-radius:10px;
}
''')
self.dialog6.setWindowModality(Qt.ApplicationModal)
self.dialog6.exec_()
def okk6(self):
if self.edit6.text() != '':
global backtest_commission_ratio
            backtest_commission_ratio = int(self.edit6.text()) / 10000
self.dialog6.close()
def cancel6(self):
self.edit6.setText('1')
def buttonDialog7(self):
self.dialog7 = QDialog()
self.dialog7.setWindowIcon(QIcon("猫咪老师1.jpg"))
self.dialog7.resize(280,100)
self.dialog7.setWindowTitle('修改投资系数')
formLayout = QFormLayout()
label = QLabel('修改投资系数为(单位:百分之一):')
self.edit7 = QLineEdit('1')
self.edit7.setAlignment(Qt.AlignRight)
self.edit7.setFont(QFont('Arial', 10))
button_ok = QPushButton('OK')
button_ok.clicked.connect(self.okk7)
button_cancel = QPushButton('Cancel')
button_cancel.clicked.connect(self.cancel7)
formLayout.addRow(label)
formLayout.addRow(self.edit7)
formLayout.addRow(button_ok, button_cancel)
self.dialog7.setLayout(formLayout)
self.dialog7.setStyleSheet('''
QPushButton{color:black;text-align: center;}
QLabel{font-family: "Helvetica Neue", Helvetica, KaiTi, sans-serif;font-size:16px}
QDialog{background:lightgray;
border-top:1px solid royalblue;
border-bottom:1px solid royalblue;
border-left:1px solid royalblue;
border-right:1px solid royalblue;
border-top-left-radius:10px;
border-bottom-left-radius:10px;
border-top-right-radius:10px;
border-bottom-right-radius:10px;
}
''')
self.dialog7.setWindowModality(Qt.ApplicationModal)
self.dialog7.exec_()
def okk7(self):
if self.edit7.text() != '':
global unit_rate
            unit_rate = float(self.edit7.text()) / 100
self.dialog7.close()
def cancel7(self):
self.edit7.setText('1')
def buttonDialog8(self):
self.dialog8 = QDialog()
self.dialog8.setWindowIcon(QIcon("猫咪老师1.jpg"))
self.dialog8.resize(280,100)
self.dialog8.setWindowTitle('专业名词含义查询')
layout=QVBoxLayout()
self.label = QLabel('请选择专业名词:')
self.cb = QComboBox()
self.cb.addItems(['唐奇安通道', 'ATR', '投资系数', '基准收益率','年化收益率'])
self.cb.currentIndexChanged.connect(self.selectionChange)
layout.addWidget(self.label)
layout.addWidget(self.cb)
self.dialog8.setLayout(layout)
self.dialog8.setStyleSheet('''
QLabel{font-family: "Helvetica Neue", Helvetica, KaiTi, sans-serif;font-size:16px}
QComboBox{font-family: "Helvetica Neue", Helvetica, KaiTi, sans-serif;font-size:16px}
QDialog{background:lightgray;
border-top:1px solid royalblue;
border-bottom:1px solid royalblue;
border-left:1px solid royalblue;
border-right:1px solid royalblue;
border-top-left-radius:10px;
border-bottom-left-radius:10px;
border-top-right-radius:10px;
border-bottom-right-radius:10px;
}
''')
self.dialog8.setWindowModality(Qt.ApplicationModal)
self.dialog8.exec_()
def selectionChange(self,i):
dict0={'唐奇安通道':"唐奇安通道主要是一个突破型趋势跟踪指标,可以提供两种不同的突破信号", 'ATR':"ATR是日内指数最大波动的平均振幅,由当日最高、最低价和上一交易日的收盘价决定", '投资系数':"每一次开仓交易合约数unit的确定是将总资产的投资系数除以价值波动量得到", '基准收益率':"默认沪深300指数收益",'年化收益率':"年化收益率是指投资期限为一年的收益率"}
self.label.setText('%s'%dict0[self.cb.currentText()])
def buttonDialog9(self):
self.dialog9 = QDialog()
self.dialog9.setWindowIcon(QIcon("猫咪老师1.jpg"))
self.dialog9.resize(250,100)
self.dialog9.setWindowTitle('反馈建议')
formlayout=QFormLayout()
label = QLabel('您的反馈与建议是:')
self.edit9 = QTextEdit('')
self.edit9.setAlignment(Qt.AlignLeft)
self.edit9.setFont(QFont('KaiTi', 10))
button_ok = QPushButton('OK')
button_ok.clicked.connect(self.okk9)
button_cancel = QPushButton('Cancel')
button_cancel.clicked.connect(self.cancel9)
formlayout.addRow(label)
formlayout.addRow(self.edit9)
formlayout.addRow(button_ok,button_cancel)
self.dialog9.setLayout(formlayout)
self.dialog9.setStyleSheet('''
QPushButton{color:black;text-align: center;}
QLabel{font-family: "Helvetica Neue", Helvetica, KaiTi, sans-serif;font-size:16px}
QDialog{background:lightgray;
border-top:1px solid royalblue;
border-bottom:1px solid royalblue;
border-left:1px solid royalblue;
border-right:1px solid royalblue;
border-top-left-radius:10px;
border-bottom-left-radius:10px;
border-top-right-radius:10px;
border-bottom-right-radius:10px;
}
''')
self.dialog9.setWindowModality(Qt.ApplicationModal)
self.dialog9.exec_()
def okk9(self):
QMessageBox.about(self,'感谢','感谢您的反馈与建议!基于您的反馈与建议,我们会努力做得更好!')
self.dialog9.close()
def cancel9(self):
self.edit9.setText('')
def buttonDialog10(self):
self.dialog10 = QDialog()
self.dialog10.setWindowIcon(QIcon("猫咪老师1.jpg"))
self.dialog10.resize(250,150)
self.dialog10.setWindowTitle('联系我们')
layout=QVBoxLayout()
label1 = QLabel('欢迎您来信联系我们!')
label2 = QLabel('我们的邮箱是:')
label5 = QLabel('<EMAIL>')
label6 = QLabel('<EMAIL>')
label7 = QLabel('<EMAIL>')
label3 = QLabel('')
label3.setOpenExternalLinks(True)
label3.setText("<A href='https://mail.163.com/'>网易邮箱</a>")
label3.setAlignment(Qt.AlignCenter)
label3.setToolTip('点击进入网易邮箱主页')
label4 = QLabel('')
label4.setOpenExternalLinks(True)
label4.setText("<A href='https://mail.qq.com/'>QQ邮箱</a>")
label4.setAlignment(Qt.AlignCenter)
label4.setToolTip('点击进入QQ邮箱主页')
layout.addWidget(label1)
layout.addWidget(label2)
layout.addWidget(label5)
layout.addWidget(label6)
layout.addWidget(label7)
layout.addWidget(label3)
layout.addWidget(label4)
self.dialog10.setLayout(layout)
self.dialog10.setStyleSheet('''
QLabel{font-family: "Helvetica Neue", Helvetica, KaiTi, sans-serif;font-size:16px}
QDialog{background:lightgray;
border-top:1px solid royalblue;
border-bottom:1px solid royalblue;
border-left:1px solid royalblue;
border-right:1px solid royalblue;
border-top-left-radius:10px;
border-bottom-left-radius:10px;
border-top-right-radius:10px;
border-bottom-right-radius:10px;
}
''')
self.dialog10.setWindowModality(Qt.ApplicationModal)
self.dialog10.exec_()
def tryOrRepeat1(self):
if self.left_checkbox_1.isChecked() or self.left_checkbox_2.isChecked():
plt.figure()
plt.title('Asset-Time')
if self.left_checkbox_1.isChecked():
plt.plot(xs, l_asset, linestyle='-', color='firebrick', linewidth=1.5, label='Asset')
if self.left_checkbox_2.isChecked():
plt.plot(xs, l_index, linestyle='-', color='royalblue', linewidth=1, label='Index')
plt.plot(xs, l_initial, linestyle='--', color='black', label='Initial')
plt.xlabel('Time')
plt.ylabel('Asset')
plt.gcf().autofmt_xdate()
plt.legend()
plt.rcParams['figure.figsize'] = (9.0, 4.0)
theTime1=datetime.datetime.now()
figure_1_name='figure_1'+str(theTime1)+'.png'
figure_1_name = ''.join(figure_1_name.split(':'))
self.stack.push(figure_1_name)
plt.savefig(figure_1_name,dpi=300,bbox_inches='tight')
plt.close()
self.figure_1=QPixmap(figure_1_name)
self.right_figure_1.setPixmap(self.figure_1)
else:
self.figure_1 = QPixmap("猫咪老师4.png")
self.right_figure_1.setPixmap(self.figure_1)
def tryOrRepeat2(self):
if self.left_checkbox_3.isChecked():
plt.figure()
plt.title('Long/Short-Time')
long_tem = []
short_tem = []
initial_bar = []
for i in range(len(position_long)-1):
long_tem.append(position_long[i][1])
short_tem.append(-position_short[i][1])
initial_bar.append(0)
plt.bar(xs, long_tem,linestyle='-', color='firebrick', linewidth=1, label='long')
plt.bar(xs, short_tem,linestyle='-', color='royalblue', linewidth=1, label='short')
plt.plot(xs, initial_bar, linestyle='--', color='black', label='Initial')
plt.xlabel('Time')
plt.ylabel('')
plt.gcf().autofmt_xdate()
plt.legend()
plt.rcParams['figure.figsize'] = (9.0, 4.0)
theTime2 = datetime.datetime.now()
figure_2_name = 'figure_2' + str(theTime2) + '.png'
figure_2_name = ''.join(figure_2_name.split(':'))
self.stack.push(figure_2_name)
plt.savefig(figure_2_name, dpi=300, bbox_inches='tight')
plt.close()
self.figure_2 = QPixmap(figure_2_name)
self.right_figure_2.setPixmap(self.figure_2)
else:
self.figure_2 = QPixmap("喵.png")
self.right_figure_2.setPixmap(self.figure_2)
def quitApplicaton(self):
app = MainUI.instance()
app.quit()
def figuredelete(self):
figure_1_delete=self.stack.pop()
figure_2_delete = self.stack.pop()
os.remove(figure_1_delete)
os.remove(figure_2_delete)
self.right_button_2.setEnabled(False)
def start(self):
global time
global date
global winningRate
global baseline
global annualized_rate
global xs
global l_initial
global position_long
global position_short
global l_time
global l_asset
global l_index
self.right_button_2.setEnabled(True)
position_long = []
position_short = []
for n in range(finddatepos(start_time), finddatepos(end_time) + 1):
position_long.append([result[n][0], 0])
position_short.append([result[n][0], 0])
cash.append([result[n][0], 0])
cash[0][1] = initial_cash
start_date_position = finddatepos(start_time)
end_date_position = finddatepos(end_time)
for d in range(start_date_position + 1, end_date_position + 1):
on_bar(result[d][0], atrtime)
in_bar(result[d][0], atrtime)
l_time = []
l_asset = []
l_index = []
time = 0
for d in range(start_date_position + 1, end_date_position + 1):
time += 1
l_time.append(result[d][0])
l_asset.append(current_asset(result[d][0]))
l_index.append(result[d][4] * initial_cash / result[start_date_position + 1][4])
if position_short[time][1] != position_short[time - 1][1] or position_long[time][1] != \
position_long[time - 1][1]:
date += 1
if current_asset(result[d][0]) >= current_asset(result[d - 1][0]):
winningRate += 1
winningRate /= date
baseline = (l_index[-1] / l_index[0]) - 1
d1 = datetime.datetime(int(start_time.split('-')[0]), int(start_time.split('-')[1]),
int(start_time.split('-')[2]))
d2 = datetime.datetime(int(end_time.split('-')[0]), int(end_time.split('-')[1]), int(end_time.split('-')[2]))
interval = d2 - d1
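        # Simple (non-compounded) annualization: total return over the backtest window scaled by 365 / elapsed days.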
annualized_rate = ((current_asset(end_time) / current_asset(start_time)) - 1) * 365 / interval.days
        xs = [datetime.datetime.strptime(d, '%Y-%m-%d').date() for d in l_time]
        l_initial = [initial_cash] * (end_date_position - start_date_position)
self.right_lineEdit_1.setText('%d' % int(initial_cash))
self.right_lineEdit_2.setText('%d' % int(current_asset(end_time)))
self.right_lineEdit_3.setText('%d' % int(current_asset(end_time)-initial_cash))
self.right_lineEdit_4.setText('%d' % date)
baseline0 = baseline * 100
self.right_lineEdit_5.setText('%.2f' % baseline0 + '%')
annualized_rate0 = annualized_rate * 100
self.right_lineEdit_6.setText('%.2f' % annualized_rate0 + '%')
self.right_lineEdit_7.setText('%s' % start_time)
self.right_lineEdit_8.setText('%s' % end_time)
winningRate0 = winningRate * 100
self.right_lineEdit_9.setText('%.2f' % winningRate0 + '%')
def main():
app = QApplication(sys.argv)
gui = MainUI()
gui.show()
sys.exit(app.exec_())
def finddatepos(date):
i = 0
while result[i][0] != date:
i += 1
return i
def calAtr(result, start_time, end_time, tr_list): # Calculate atr
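    # True range per day: max(high - low, high - prev_close, prev_close - low).
    # The ATR is the mean true range over the [start_time, end_time] window;
    # returns [ATR, ATR/2] as floored integers.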
counter = 0
atr_list = []
for i in range(1, len(result)-1):
if result[i][0] == start_time:
counter = 1
if counter == 1:
tr = max(float(result[i][2])-float(result[i][3]), float(result[i][2])-float(result[i-1][4]), float(result[i-1][4])-float(result[i][3]))
tr_list.append([result[i][0], tr])
atr_list.append(tr)
if result[i][0] == end_time:
counter = 0
atr = int(np.floor(np.mean(atr_list)))
atr_half = int(np.floor(0.5 * atr))
return [atr, atr_half]
def calDon(result, time, atr_half, Dontime = 30): # Calculate Donchian channel
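    # Donchian channel over the previous `Dontime` bars: long add/stop levels sit half an ATR
    # above/below the rolling high, short add/stop levels half an ATR below/above the rolling low.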
for i in range(Dontime, len(result)-1):
high_list = []
low_list = []
if result[i][0] == time:
for j in range(i-Dontime, i):
high_list.append(result[j][2])
low_list.append(result[j][3])
don_open = np.max(high_list)
don_close = np.min(low_list)
short_add_point = don_close - atr_half
short_stop_loss = don_close + atr_half
long_add_point = don_open + atr_half
long_stop_loss = don_open - atr_half
return [long_add_point, long_stop_loss, short_add_point, short_stop_loss]
def on_bar(date, atrtime = 10):
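    # Daily decision on the open price: carry yesterday's positions and cash forward, then open a
    # position on a Donchian-channel breakout, pyramid when price moves a further half ATR, and
    # scale the position back (by roughly one unit) when the stop-loss level is hit.
    # Commission is applied on every fill via backtest_commission_ratio.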
i = 0
while result[i][0] != date:
i += 1
yesterday = result[i-1][0]
startatrday = result[i-atrtime][0]
open = result[i][1]
atr = calAtr(result, startatrday, yesterday, tr_list)[0]
atr_half = calAtr(result, startatrday, yesterday, tr_list)[1]
Donlst = calDon(result, date, atr_half)
long_add_point = Donlst[0]
long_stop_loss = Donlst[1]
short_add_point = Donlst[2]
short_stop_loss = Donlst[3]
date_pos = 0
while cash[date_pos][0] != date:
date_pos += 1
position_long[date_pos][1] = position_long[date_pos - 1][1]
position_short[date_pos][1] = position_short[date_pos - 1][1]
cash[date_pos][1] = cash[date_pos - 1][1]
if position_long[date_pos][1] == 0 and position_short[date_pos][1] == 0:
if open > long_add_point - atr_half:
            # Open a long position on an upward breakout of the Donchian channel
if cash[date_pos][1] >= (1 + backtest_commission_ratio) * open * unit(current_asset(yesterday),yesterday):
position_long[date_pos][1] = unit(current_asset(yesterday),yesterday)
print(date, '开多仓%.1f'%(unit(current_asset(yesterday),yesterday)))
cash[date_pos][1] -= (1 + backtest_commission_ratio) * open * unit(current_asset(yesterday),yesterday)
else:
position_long[date_pos][1] = cash[date_pos][1] / (1 + backtest_commission_ratio) / open
print(date, '开多仓%.1f'%(cash[date_pos][1] / (1 + backtest_commission_ratio) / open))
cash[date_pos][1] = 0
if open < short_add_point + atr_half:
            # Open a short position on a downward breakout of the Donchian channel
position_short[date_pos][1] = unit(current_asset(yesterday),yesterday)
print(date, '开空仓%.1f'%(unit(current_asset(yesterday),yesterday)))
cash[date_pos][1] += (1 - backtest_commission_ratio) * open * unit(current_asset(yesterday),yesterday)
if position_long[date_pos][1] != 0:
if open > long_add_point:
            # Add to the long position when price breaks out by another 1/2 ATR
if cash[date_pos][1] >= (1 + backtest_commission_ratio) * open * unit(current_asset(yesterday), yesterday):
position_long[date_pos][1] += unit(current_asset(yesterday),yesterday)
print(date, '继续加仓%.1f'%(unit(current_asset(yesterday),yesterday)))
cash[date_pos][1] -= (1 + backtest_commission_ratio) * open * unit(current_asset(yesterday), yesterday)
else:
position_long[date_pos][1] += cash[date_pos][1] / (1 + backtest_commission_ratio) / open
print(date, '继续加仓%.1f' % (cash[date_pos][1] / (1 + backtest_commission_ratio) / open))
cash[date_pos][1] = 0
if open < long_stop_loss:
            # Holding a long position: stop-loss handling
if position_long[date_pos][1] - unit(current_asset(yesterday),yesterday) >= 0:
print(date, '平多仓%.1f'%(unit(current_asset(yesterday),yesterday)))
cash[date_pos][1] += (1 - backtest_commission_ratio) * open * unit(current_asset(yesterday),
yesterday)
else:
print(date, '平多仓%.1f' % (position_long[date_pos][1]))
cash[date_pos][1] += (1 - backtest_commission_ratio) * position_long[date_pos][1] * open
position_long[date_pos][1] = max(position_long[date_pos][1] - unit(current_asset(yesterday),yesterday), 0)
'''print(date, '平多仓%.1f'%(position_long[date_pos][1]))
cash[date_pos][1] += (1 - backtest_commission_ratio) * open * position_long[date_pos][1]
position_long[date_pos][1] = 0'''
if position_short[date_pos][1] != 0:
if open < short_add_point:
            # Add to the short position when price breaks out by another 1/2 ATR
position_short[date_pos][1] += unit(current_asset(yesterday),yesterday)
print(date, '继续加仓%.1f'%(unit(current_asset(yesterday),yesterday)))
cash[date_pos][1] += (1 - backtest_commission_ratio) * open * unit(current_asset(yesterday), yesterday)
if open > short_stop_loss:
            # Holding a short position: stop-loss handling
m = min(position_short[date_pos][1] * open, open * unit(current_asset(yesterday),yesterday), cash[date_pos][1] / (1 + backtest_commission_ratio))
print(date, '平空仓%.1f'%(m / open))
cash[date_pos][1] -= (1 + backtest_commission_ratio) * m
position_short[date_pos][1] = position_short[date_pos][1] - m / open
'''m = position_short[date_pos][1] * open
print(date, '平空仓%.1f'%(m / open))
cash[date_pos][1] -= (1 + backtest_commission_ratio) * m
position_short[date_pos][1] = position_short[date_pos][1] - m / open'''
def in_bar(date, atrtime = 10):
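    # Same breakout / add-on / stop-loss logic as on_bar, but evaluated on the day's closing price
    # instead of the open.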
i = 0
while result[i][0] != date:
i += 1
yesterday = result[i-1][0]
startatrday = result[i-atrtime][0]
close = result[i][4]
atr = calAtr(result, startatrday, yesterday, tr_list)[0]
atr_half = calAtr(result, startatrday, yesterday, tr_list)[1]
Donlst = calDon(result, date, atr_half)
long_add_point = Donlst[0]
long_stop_loss = Donlst[1]
short_add_point = Donlst[2]
short_stop_loss = Donlst[3]
date_pos = 0
while cash[date_pos][0] != date:
date_pos += 1
if position_long[date_pos][1] == 0 and position_short[date_pos][1] == 0:
if close > long_add_point - atr_half:
            # Open a long position on an upward breakout of the Donchian channel
if cash[date_pos][1] >= (1 + backtest_commission_ratio) * close * unit(current_asset(yesterday),yesterday):
position_long[date_pos][1] = unit(current_asset(yesterday),yesterday)
print(date, '开多仓%.1f'%(unit(current_asset(yesterday),yesterday)))
cash[date_pos][1] -= (1 + backtest_commission_ratio) * close * unit(current_asset(yesterday),yesterday)
else:
position_long[date_pos][1] = cash[date_pos][1] / (1 + backtest_commission_ratio) / close
print(date, '开多仓%.1f'%(cash[date_pos][1] / (1 + backtest_commission_ratio) / close))
cash[date_pos][1] = 0
if close < short_add_point + atr_half:
            # Open a short position on a downward breakout of the Donchian channel
position_short[date_pos][1] = unit(current_asset(yesterday),yesterday)
print(date, '开空仓%.1f'%(unit(current_asset(yesterday),yesterday)))
cash[date_pos][1] += (1 - backtest_commission_ratio) * close * unit(current_asset(yesterday),yesterday)
if position_long[date_pos][1] != 0:
if close > long_add_point:
            # Add to the long position when price breaks out by another 1/2 ATR
if cash[date_pos][1] >= (1 + backtest_commission_ratio) * close * unit(current_asset(yesterday), yesterday):
position_long[date_pos][1] += unit(current_asset(yesterday),yesterday)
print(date, '继续加仓%.1f'%(unit(current_asset(yesterday),yesterday)))
cash[date_pos][1] -= (1 + backtest_commission_ratio) * close * unit(current_asset(yesterday), yesterday)
else:
position_long[date_pos][1] += cash[date_pos][1] / (1 + backtest_commission_ratio) / close
print(date, '继续加仓%.1f' % (cash[date_pos][1] / (1 + backtest_commission_ratio) / close))
cash[date_pos][1] = 0
if close < long_stop_loss:
            # Holding a long position: stop-loss handling
if position_long[date_pos][1] - unit(current_asset(yesterday),yesterday) >= 0:
print(date, '平多仓%.1f'%(unit(current_asset(yesterday),yesterday)))
cash[date_pos][1] += (1 - backtest_commission_ratio) * close * unit(current_asset(yesterday),
yesterday)
else:
print(date, '平多仓%.1f' % (position_long[date_pos][1]))
cash[date_pos][1] += (1 - backtest_commission_ratio) * position_long[date_pos][1] * close
position_long[date_pos][1] = max(position_long[date_pos][1] - unit(current_asset(yesterday),yesterday), 0)
'''print(date, '平多仓%.1f'%(position_long[date_pos][1]))
cash[date_pos][1] += (1 - backtest_commission_ratio) * close * position_long[date_pos][1]
position_long[date_pos][1] = 0'''
if position_short[date_pos][1] != 0:
if close < short_add_point:
            # Add to the short position when price breaks out by another 1/2 ATR
position_short[date_pos][1] += unit(current_asset(yesterday),yesterday)
print(date, '继续加仓%.1f'%(unit(current_asset(yesterday),yesterday)))
cash[date_pos][1] += (1 - backtest_commission_ratio) * close * unit(current_asset(yesterday), yesterday)
if close > short_stop_loss:
            # Holding a short position: stop-loss handling
m = min(position_short[date_pos][1] * close, close * unit(current_asset(yesterday),yesterday), cash[date_pos][1] / (1 + backtest_commission_ratio))
print(date, '平空仓%.1f'%(m / close))
cash[date_pos][1] -= (1 + backtest_commission_ratio) * m
position_short[date_pos][1] = position_short[date_pos][1] - m / close
'''m = position_short[date_pos][1] * close
print(date, '平空仓%.1f'%(m / close))
cash[date_pos][1] -= (1 + backtest_commission_ratio) * m
position_short[date_pos][1] = position_short[date_pos][1] - m / close'''
def unit(total_asset, date, atrtime = 10):
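    # Position sizing: unit = total_asset * unit_rate / DV, where DV is the ATR computed over the
    # following `atrtime` bars, so each unit corresponds to roughly `unit_rate` of assets per ATR of price move.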
i = 0
while result[i][0] != date:
i += 1
end_time = result[i + atrtime - 1][0]
DV = calAtr(result, date, end_time, tr_list)[0]
return total_asset * unit_rate / DV
def current_asset(date):
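    # Mark-to-market equity: cash plus the net (long - short) position valued at the day's close.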
date_pos = 0
while cash[date_pos][0] != date:
date_pos += 1
return cash[date_pos][1] + (position_long[date_pos][1] - position_short[date_pos][1]) * result[finddatepos(date)][4]
def nearestdate(date, counter = 1):
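    # Walk forward (counter=1) or backward (counter=-1) one day at a time until the date exists in
    # the data; leading zeros are stripped from month/day to match the date format in the CSV.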
dateset = set()
for k in range(len(result)):
dateset.add(result[k][0])
while date not in dateset:
dt = datetime.datetime.strptime(date, '%Y-%m-%d')
if counter == 1:
date = (dt + datetime.timedelta(days=1)).strftime('%Y-%m-%d')
if date[8] == '0':
date = date[:8] + date[9:]
if date[5] == '0':
date = date[:5] + date[6:]
elif counter == -1:
date = (dt - datetime.timedelta(days=1)).strftime('%Y-%m-%d')
if date[8] == '0':
date = date[:8] + date[9:]
if date[5] == '0':
date = date[:5] + date[6:]
return date
if __name__ == '__main__':
csvFile = open("data.csv", "r")
reader = csv.reader(csvFile)
result = []
for item in reader:
# Ignore first line
if reader.line_num == 1:
continue
result.append(
[item[0], float(item[1]), float(item[2]), float(item[3]), float(item[4])]) # date, open, high, low, close
csvFile.close()
initial_cash = 0
backtest_commission_ratio = 0.0001
start_time = '2021-03-01'
end_time = '2021-04-27'
tr_list = []
cash = []
position_short = []
position_long = []
atrtime = 20
Dontime = 30
unit_rate = 0.01
winningRate = 0
date = 0
time = 0
baseline = 0
annualized_rate = 0
l_time = []
l_asset = []
l_index = []
xs=[]
l_initial = []
main()
|
[
"matplotlib.pyplot.ylabel",
"datetime.timedelta",
"os.remove",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.min",
"csv.reader",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gcf",
"numpy.floor",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"qtawesome.icon",
"datetime.datetime.strptime",
"datetime.datetime.now",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.bar"
] |
[((55610, 55629), 'csv.reader', 'csv.reader', (['csvFile'], {}), '(csvFile)\n', (55620, 55629), False, 'import csv\n'), ((40154, 40180), 'os.remove', 'os.remove', (['figure_1_delete'], {}), '(figure_1_delete)\n', (40163, 40180), False, 'import os\n'), ((40190, 40216), 'os.remove', 'os.remove', (['figure_2_delete'], {}), '(figure_2_delete)\n', (40199, 40216), False, 'import os\n'), ((44136, 44155), 'numpy.floor', 'np.floor', (['(0.5 * atr)'], {}), '(0.5 * atr)\n', (44144, 44155), True, 'import numpy as np\n'), ((54950, 54994), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['date', '"""%Y-%m-%d"""'], {}), "(date, '%Y-%m-%d')\n", (54976, 54994), False, 'import datetime\n'), ((5133, 5172), 'qtawesome.icon', 'qtawesome.icon', (['"""fa.rmb"""'], {'color': '"""white"""'}), "('fa.rmb', color='white')\n", (5147, 5172), False, 'import qtawesome\n'), ((5347, 5398), 'qtawesome.icon', 'qtawesome.icon', (['"""fa.hourglass-start"""'], {'color': '"""white"""'}), "('fa.hourglass-start', color='white')\n", (5361, 5398), False, 'import qtawesome\n'), ((5575, 5624), 'qtawesome.icon', 'qtawesome.icon', (['"""fa.hourglass-end"""'], {'color': '"""white"""'}), "('fa.hourglass-end', color='white')\n", (5589, 5624), False, 'import qtawesome\n'), ((5801, 5847), 'qtawesome.icon', 'qtawesome.icon', (['"""fa.line-chart"""'], {'color': '"""white"""'}), "('fa.line-chart', color='white')\n", (5815, 5847), False, 'import qtawesome\n'), ((6023, 6073), 'qtawesome.icon', 'qtawesome.icon', (['"""fa.check-circle-o"""'], {'color': '"""white"""'}), "('fa.check-circle-o', color='white')\n", (6037, 6073), False, 'import qtawesome\n'), ((6247, 6292), 'qtawesome.icon', 'qtawesome.icon', (['"""fa.pie-chart"""'], {'color': '"""white"""'}), "('fa.pie-chart', color='white')\n", (6261, 6292), False, 'import qtawesome\n'), ((6466, 6517), 'qtawesome.icon', 'qtawesome.icon', (['"""fa.sort-amount-asc"""'], {'color': '"""white"""'}), "('fa.sort-amount-asc', color='white')\n", (6480, 6517), False, 'import qtawesome\n'), ((6983, 7027), 'qtawesome.icon', 'qtawesome.icon', (['"""fa.question"""'], {'color': '"""white"""'}), "('fa.question', color='white')\n", (6997, 7027), False, 'import qtawesome\n'), ((7204, 7247), 'qtawesome.icon', 'qtawesome.icon', (['"""fa.comment"""'], {'color': '"""white"""'}), "('fa.comment', color='white')\n", (7218, 7247), False, 'import qtawesome\n'), ((7421, 7465), 'qtawesome.icon', 'qtawesome.icon', (['"""fa.envelope"""'], {'color': '"""white"""'}), "('fa.envelope', color='white')\n", (7435, 7465), False, 'import qtawesome\n'), ((14685, 14726), 'qtawesome.icon', 'qtawesome.icon', (['"""fa.repeat"""'], {'color': '"""blue"""'}), "('fa.repeat', color='blue')\n", (14699, 14726), False, 'import qtawesome\n'), ((14965, 15008), 'qtawesome.icon', 'qtawesome.icon', (['"""fa.floppy-o"""'], {'color': '"""gray"""'}), "('fa.floppy-o', color='gray')\n", (14979, 15008), False, 'import qtawesome\n'), ((15127, 15166), 'qtawesome.icon', 'qtawesome.icon', (['"""fa.times"""'], {'color': '"""red"""'}), "('fa.times', color='red')\n", (15141, 15166), False, 'import qtawesome\n'), ((37332, 37344), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (37342, 37344), True, 'import matplotlib.pyplot as plt\n'), ((37358, 37381), 'matplotlib.pyplot.title', 'plt.title', (['"""Asset-Time"""'], {}), "('Asset-Time')\n", (37367, 37381), True, 'import matplotlib.pyplot as plt\n'), ((37699, 37770), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'l_initial'], {'linestyle': '"""--"""', 'color': '"""black"""', 'label': 
'"""Initial"""'}), "(xs, l_initial, linestyle='--', color='black', label='Initial')\n", (37707, 37770), True, 'import matplotlib.pyplot as plt\n'), ((37784, 37802), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (37794, 37802), True, 'import matplotlib.pyplot as plt\n'), ((37816, 37835), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Asset"""'], {}), "('Asset')\n", (37826, 37835), True, 'import matplotlib.pyplot as plt\n'), ((37888, 37900), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (37898, 37900), True, 'import matplotlib.pyplot as plt\n'), ((37981, 38004), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (38002, 38004), False, 'import datetime\n'), ((38184, 38240), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figure_1_name'], {'dpi': '(300)', 'bbox_inches': '"""tight"""'}), "(figure_1_name, dpi=300, bbox_inches='tight')\n", (38195, 38240), True, 'import matplotlib.pyplot as plt\n'), ((38252, 38263), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (38261, 38263), True, 'import matplotlib.pyplot as plt\n'), ((38595, 38607), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (38605, 38607), True, 'import matplotlib.pyplot as plt\n'), ((38621, 38649), 'matplotlib.pyplot.title', 'plt.title', (['"""Long/Short-Time"""'], {}), "('Long/Short-Time')\n", (38630, 38649), True, 'import matplotlib.pyplot as plt\n'), ((38949, 39036), 'matplotlib.pyplot.bar', 'plt.bar', (['xs', 'long_tem'], {'linestyle': '"""-"""', 'color': '"""firebrick"""', 'linewidth': '(1)', 'label': '"""long"""'}), "(xs, long_tem, linestyle='-', color='firebrick', linewidth=1, label=\n 'long')\n", (38956, 39036), True, 'import matplotlib.pyplot as plt\n'), ((39044, 39133), 'matplotlib.pyplot.bar', 'plt.bar', (['xs', 'short_tem'], {'linestyle': '"""-"""', 'color': '"""royalblue"""', 'linewidth': '(1)', 'label': '"""short"""'}), "(xs, short_tem, linestyle='-', color='royalblue', linewidth=1, label\n ='short')\n", (39051, 39133), True, 'import matplotlib.pyplot as plt\n'), ((39141, 39214), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'initial_bar'], {'linestyle': '"""--"""', 'color': '"""black"""', 'label': '"""Initial"""'}), "(xs, initial_bar, linestyle='--', color='black', label='Initial')\n", (39149, 39214), True, 'import matplotlib.pyplot as plt\n'), ((39228, 39246), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (39238, 39246), True, 'import matplotlib.pyplot as plt\n'), ((39260, 39274), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['""""""'], {}), "('')\n", (39270, 39274), True, 'import matplotlib.pyplot as plt\n'), ((39327, 39339), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (39337, 39339), True, 'import matplotlib.pyplot as plt\n'), ((39422, 39445), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (39443, 39445), False, 'import datetime\n'), ((39631, 39687), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figure_2_name'], {'dpi': '(300)', 'bbox_inches': '"""tight"""'}), "(figure_2_name, dpi=300, bbox_inches='tight')\n", (39642, 39687), True, 'import matplotlib.pyplot as plt\n'), ((39701, 39712), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (39710, 39712), True, 'import matplotlib.pyplot as plt\n'), ((44096, 44113), 'numpy.mean', 'np.mean', (['atr_list'], {}), '(atr_list)\n', (44103, 44113), True, 'import numpy as np\n'), ((44554, 44571), 'numpy.max', 'np.max', (['high_list'], {}), '(high_list)\n', (44560, 44571), True, 'import numpy as np\n'), ((44597, 44613), 
'numpy.min', 'np.min', (['low_list'], {}), '(low_list)\n', (44603, 44613), True, 'import numpy as np\n'), ((37449, 37538), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'l_asset'], {'linestyle': '"""-"""', 'color': '"""firebrick"""', 'linewidth': '(1.5)', 'label': '"""Asset"""'}), "(xs, l_asset, linestyle='-', color='firebrick', linewidth=1.5,\n label='Asset')\n", (37457, 37538), True, 'import matplotlib.pyplot as plt\n'), ((37602, 37690), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'l_index'], {'linestyle': '"""-"""', 'color': '"""royalblue"""', 'linewidth': '(1)', 'label': '"""Index"""'}), "(xs, l_index, linestyle='-', color='royalblue', linewidth=1, label=\n 'Index')\n", (37610, 37690), True, 'import matplotlib.pyplot as plt\n'), ((37849, 37858), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (37856, 37858), True, 'import matplotlib.pyplot as plt\n'), ((39288, 39297), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (39295, 39297), True, 'import matplotlib.pyplot as plt\n'), ((42428, 42469), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['d', '"""%Y-%m-%d"""'], {}), "(d, '%Y-%m-%d')\n", (42454, 42469), False, 'import datetime\n'), ((55047, 55073), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (55065, 55073), False, 'import datetime\n'), ((55303, 55329), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (55321, 55329), False, 'import datetime\n')]
|
#! /usr/bin/python
# encoding=utf-8
import os
import datetime,time
from selenium import webdriver
import config
import threading
import numpy as np
def writelog(msg,log):
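    # Append a timestamped message to the log file by shelling out to `echo`.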
nt=datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S')
text="[%s] %s " % (nt,msg)
    os.system('echo "%s" >> %s' % (text, log))
def create_chrome():
ops=webdriver.ChromeOptions()
ops.add_experimental_option('mobileEmulation',config.mobileEmulation)
web=webdriver.Chrome(chrome_options=ops)
web.set_page_load_timeout(10)
web.set_script_timeout(10)
web.set_window_size(config.mWidth,config.mHeight)
return web
#Create Threading Pool
def threading_pool(tnum,funname):
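    # Start `tnum` daemon threads all running `funname`, then block until they finish.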
threadlist=[]
for i in range(tnum):
t=threading.Thread(target=funname)
threadlist.append(t)
for t in threadlist:
        t.daemon = True
t.start()
for t in threadlist:
t.join()
return threadlist
def set_interval(*args):
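    # Sleep for a random interval, uniformly drawn between `s` and `e` seconds (defaults 3-6).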
s = 3
e = 6
if len(args)>=1:
s = args[0]
if len(args)>=2:
e = args[1]
f = np.random.uniform(s,e)
time.sleep(f)
|
[
"selenium.webdriver.ChromeOptions",
"selenium.webdriver.Chrome",
"time.sleep",
"datetime.datetime.now",
"numpy.random.uniform",
"threading.Thread"
] |
[((355, 380), 'selenium.webdriver.ChromeOptions', 'webdriver.ChromeOptions', ([], {}), '()\n', (378, 380), False, 'from selenium import webdriver\n'), ((464, 500), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'chrome_options': 'ops'}), '(chrome_options=ops)\n', (480, 500), False, 'from selenium import webdriver\n'), ((1096, 1119), 'numpy.random.uniform', 'np.random.uniform', (['s', 'e'], {}), '(s, e)\n', (1113, 1119), True, 'import numpy as np\n'), ((1123, 1136), 'time.sleep', 'time.sleep', (['f'], {}), '(f)\n', (1133, 1136), False, 'import datetime, time\n'), ((748, 780), 'threading.Thread', 'threading.Thread', ([], {'target': 'funname'}), '(target=funname)\n', (764, 780), False, 'import threading\n'), ((181, 204), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (202, 204), False, 'import datetime, time\n')]
|
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model
from difflib import SequenceMatcher
import seaborn as sns
from statistics import mean
from ast import literal_eval
from scipy import stats
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from pygam import LinearGAM, s, l, f
from matplotlib import lines
import six
def extract_boar_teloFISH_as_list(path):
"""
    Pull Kelly's telomere FISH data for the 40 boars into a list, to be made into a dataframe and
    joined with the main dataframe where possible.
    These Excel files take a long time to load, so the objective is to synthesize all of the telomere
    FISH Excel files into one dataframe and save that dataframe to a CSV file for later retrieval:
    loading one CSV containing all the data is much faster than loading the individual parts.
    Along the way, the teloFISH values are normalized using the controls internal to each Excel file.
"""
boar_teloFISH_list = []
for file in os.scandir(path):
if 'Hyb' in file.name:
print(f'Handling {file.name}...')
full_name = path + file.name
# making a dict of excel sheets, where KEY:VALUE pairs are SAMPLE ID:TELO DATA
telo_excel_dict = pd.read_excel(full_name, sheet_name=None, skiprows=4, usecols=[3], nrows=5000)
if 'Telomere Template' in telo_excel_dict.keys():
del telo_excel_dict['Telomere Template']
excel_file_list = []
for sample_id, telos in telo_excel_dict.items():
telos_cleaned = clean_individ_telos(telos)
if sample_id != 'Control':
excel_file_list.append([sample_id, telos_cleaned.values, np.mean(telos_cleaned)])
elif sample_id == 'Control':
control_value = np.mean(telos_cleaned)
#normalize teloFISH values by control value
for sample in excel_file_list:
sample_data = sample
#normalize individual telos
sample_data[1] = np.divide(sample_data[1], control_value)
#normalize telo means
sample_data[2] = np.divide(sample_data[2], control_value)
boar_teloFISH_list.append(sample_data)
print('Finished collecting boar teloFISH data')
return boar_teloFISH_list
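# A minimal usage sketch (hypothetical path and column names), following the docstring's plan of
# building the list once, saving it to CSV, and reloading later:
#   boar_list = extract_boar_teloFISH_as_list('../data/teloFISH/')
#   boar_df = pd.DataFrame(boar_list, columns=['Sample ID', 'telo data', 'telo means'])
#   boar_df.to_csv('boar_teloFISH.csv', index=False)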
def gen_missing_values_andimpute_or_randomsampledown(n_cells, telosPercell, df):
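    # Normalize each sample to n_cells * telosPercell telomere measurements: downsample when there
    # are too many, and resample (with replacement when fewer than half are present) to fill gaps.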
max_telos = n_cells * telosPercell
half_telos = (n_cells * telosPercell) / 2
if df.size > max_telos:
df_sampled = df.sample(max_telos)
return df_sampled
if df.size > 25 and df.size <= half_telos:
missing_data_difference = abs( (n_cells * telosPercell) - df.size )
rsampled = df.sample(missing_data_difference, replace=True, random_state=28)
concat_ed = pd.concat([rsampled, df], sort=False)
np.random.shuffle(concat_ed.to_numpy())
return concat_ed
if df.size > 25 and df.size < max_telos:
missing_data_difference = abs( (n_cells * telosPercell) - df.size )
rsampled = df.sample(missing_data_difference, random_state=28)
concat_ed = pd.concat([rsampled, df], sort=False)
np.random.shuffle(concat_ed.to_numpy())
return concat_ed
else:
return df
def clean_individ_telos(telo_data):
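    # Drop the repeated per-cell header rows, coerce to numeric, remove NaNs and |z| >= 3 outliers,
    # then impute/downsample to a standard 30 cells x 160 telomeres per sample.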
labels=[6, 172, 338, 504, 670, 836, 1002, 1168, 1334, 1500, 1666, 1832,
1998, 2164, 2330, 2496, 2662, 2828, 2994, 3160, 3326, 3492, 3658, 3824,
3990, 4156, 4322, 4488, 4654, 4820]
labels_offset_by6 = [(x-6) for x in labels]
telo_data = telo_data.drop(labels_offset_by6)
telo_data = pd.to_numeric(telo_data.iloc[:,0], errors='coerce')
telo_data = telo_data.dropna(axis=0, how='any')
telo_data = telo_data.to_frame(name=None)
telo_data = telo_data[(np.abs(stats.zscore(telo_data)) < 3).all(axis=1)]
telo_data = pd.Series(telo_data.iloc[:,0])
telo_data = gen_missing_values_andimpute_or_randomsampledown(30, 160, telo_data)
telo_data.reset_index(drop=True, inplace=True)
return telo_data
def remove_dashes_space_sampleIDs(row):
if '-' in str(row):
row = str(row).replace('-', '').replace(' ', '')
if '_' in str(row):
row = str(row).replace('_', '')
if ' ' in str(row):
row = str(row).replace(' ', '')
if 'gps' in str(row):
row = str(row).replace('gps', '')
if 'GPS' in str(row):
row = str(row).replace('GPS', '')
    if 'collar' in str(row):
row = str(row).replace('collar', '')
if 'COLLAR' in str(row):
row = str(row).replace('COLLAR', '')
return str(row)
def readable_snake_df_dummy_variables(snake_df):
Exposure_Status = []
for row in snake_df['Sample ID']:
if row.startswith('C'):
Exposure_Status.append('Control')
elif row.startswith('E'):
Exposure_Status.append('Exposed')
snake_df['Exposure Status'] = Exposure_Status
### making dummy variables for snake exposure status
snake_dum = pd.get_dummies(snake_df['Exposure Status'], prefix='Encoded', drop_first=True)
snake_df['Encoded Exposed'] = snake_dum
return snake_df
def count_shared_sample_IDs(df1, df2, print_names=None):
df1_IDs = set(df1['Sample ID'].unique())
df2_IDs = set(df2['Sample ID'].unique())
# common_IDs = df1_list - (df1_list - df2_list)
common_IDs = list(df1_IDs & df2_IDs)
print(f'The number of sample IDs in common are: {len(common_IDs)}')
if print_names == 'yes' or print_names == 'Yes':
print(f'The sample IDs in common are:\n{common_IDs}')
def average_age_weeks(row):
if '-' in str(row):
numbers = str(row).split('-')
average = (int(numbers[1]) + int(numbers[0])) / len(numbers)
return int(average)
else:
return int(row)
def quartile_cts_rel_to_df1(df1, df2):
df1 = pd.DataFrame(df1)
df2 = pd.DataFrame(df2)
# count how many instances in df2 are below the 0.25 quantile of df1
quartile_1 = df2[df2 <= df1.quantile(0.25)].count()
# count how many instances in df2 are within the 0.25 - 0.75 range quantile of df1
quartile_2_3 = df2[(df2 > df1.quantile(0.25)) & (df2 < df1.quantile(0.75))].count()
# count how many instances in df2 are above 0.75 range quantile of df1
quartile_4 = df2[df2 >= df1.quantile(0.75)].count()
# return counts of values
return int(quartile_1.values), int(quartile_2_3.values), int(quartile_4.values)
def make_quartiles_columns(total_boar_telos, df):
pos_1, pos_2, pos_3 = 17, 18, 19
sample_id, telo_data = 0, 1
for i, row in df.iterrows():
boar_sample_telos = row[telo_data]
df.iat[i, pos_1], df.iat[i, pos_2], df.iat[i, pos_3] = (quartile_cts_rel_to_df1(total_boar_telos, boar_sample_telos))
return df
def linear_regression_graphs_between_variables(x=None, y=None, data=None,
hue=None, col=None,
hue_order=None, col_order=None,
snake=False):
if 'Binary' in y:
ax=sns.lmplot(x=x, y=y, hue=hue, col=col, data=data, logistic=True,
height=5.5, aspect=1, scatter_kws={"s": 175, "edgecolor":'black'})
else:
ax=sns.lmplot(x=x, y=y, hue=hue, col=col, data=data,
height=5.5, aspect=1, scatter_kws={"s": 175, "edgecolor":'black'})
fig = ax.fig
ax.set_xlabels(x, fontsize=18)
ax.set_xticklabels(fontsize=14)
ax.set_ylabels(y, fontsize=18)
ax.set_yticklabels(fontsize=14)
ax.set_titles(size=14)
# if 'Cortisol' in y:
# ax.set(ylim=(0, 40))
plt.subplots_adjust(top=0.88)
if hue == None and col == None:
fig.suptitle(f'{x} vs.\n {y} in Fukushima Wild Boar', fontsize=18,
)
# ax.savefig(f"../graphs/{x} vs {y}.png", dpi=400)
if snake:
fig.suptitle(f'{x} vs.\n {y} in Fukushima Wild Snake', fontsize=18,
)
# elif hue == 'Sex' and col == 'Sex':
# fig.suptitle(f'{x} vs. {y}\nper Sex in Fukushima Wild Boar', fontsize=16, weight='bold')
# fig.legend(fontsize='large')
# ax.savefig(f"../graphs/{x} vs {y} per sex.png", dpi=400)
def graph_dose_age_vs_telos(df=None, x=None, x2=None, y=None, hue=None,):
f, axes = plt.subplots(1, 2, figsize=(12,5), sharey=False, sharex=False)
# dose vs. telomeres
sns.regplot(x=x, y=y, data=df, ax=axes[0],
# hue=hue,
scatter_kws={'alpha':0.8, 'linewidth':1, 'edgecolor':'black', 's':df['Age (months)']*12, })
axes[0].set_xlabel(x, fontsize=14)
axes[0].set_ylabel(y, fontsize=14)
axes[0].tick_params(labelsize=12)
# age vs. telomeres
sns.regplot(x=x2, y=y, data=df, ax=axes[1],
# hue=hue,
scatter_kws={'alpha':0.8, 'linewidth':1, 'edgecolor':'black', 's':175, })
axes[1].set_xlabel(x2, fontsize=14)
axes[1].set_xlim(-4,55)
axes[1].set_ylabel(y, fontsize=14)
if y == 'Mean Telomere Length (FISH)':
axes[1].set_ylim(0.2,1.6)
if y == 'Mean Telomere Length (qPCR)':
axes[1].set_ylim(0.6,1.8)
axes[1].tick_params(labelsize=12)
def score_linear_regressions(x=None, y=None, data=None, sexes=['Overall']):
for sex in sexes:
if sex == 'Overall':
X_r = data[x].values.reshape(-1, len(x))
y_r = data[y].values.reshape(-1, 1)
regression = LinearRegression().fit(X_r,y_r)
print(f'Linear regression for {x} vs. {y}:\nOverall R2 is {regression.score(X_r, y_r):.4f}\n')
return regression
else:
X_r = data[data['Sex'] == sex][x].values.reshape(-1, len(x))
y_r = data[data['Sex'] == sex][y].values.reshape(-1, 1)
regression = LinearRegression().fit(X_r,y_r)
print(f"Linear regression for {x} vs. {y}:\nR2 for {sex}s is {regression.score(X_r, y_r):.4f}")
return regression
def eval_number(x):
if x > 15:
x = 1
return x
elif x < 15:
x = 0
return x
def score_logistic_regressions(x=None, y=None, data=None):
# for y in y_cols:
sexes = [
# 'Male',
# 'Female',
'Overall']
for sex in sexes:
if sex == 'Overall':
X_r = data[x].values.reshape(-1, 1)
y_r = data[y].values.reshape(-1, )
log_reg = LogisticRegression(solver='lbfgs')
regression = log_reg.fit(X_r,y_r)
print(f'Logistic regression for {x} vs. {y}:\nOverall R2 is {regression.score(X_r, y_r):.4f}\n')
else:
X_r = data[data['Sex'] == sex][x].values.reshape(-1, 1)
y_r = data[data['Sex'] == sex][y].values.reshape(-1, )
            regression = LogisticRegression(solver='lbfgs').fit(X_r, y_r)
print(f"Logistic regression for {x} vs. {y}:\nR2 for {sex}s is {regression.score(X_r, y_r):.4f}")
def encode_sex(row):
if row == 'Male':
return 0
elif row == 'Female':
return 1
else:
print(f'ERROR.. row == {row}')
def merge_return_df_cols_interest(dose_df, cortisol_df, cols_of_interest):
merge_dose_cortisol = dose_df.merge(cortisol_df, on=['Sample ID'])
trim_dose_cortisol = merge_dose_cortisol[cols_of_interest].copy()
return trim_dose_cortisol
def enforce_col_types(df):
for col in df.columns:
if col == 'Sample ID' or col == 'Sex':
df[col] = df[col].astype('str')
elif col == 'Age (months)' or col == 'encode sex':
df[col] = df[col].astype('int64')
else:
df[col] = df[col].astype('float64')
def male_or_female(row):
if row == 'M' or row == 'm' or row == 'Male':
return 'Male'
elif row == 'F' or row == 'f' or row == 'Female':
return 'Female'
else:
print(f'error... row == {row}')
return np.NaN
def make_age_class(row):
if row <= 12:
return 'piglet'
elif row > 12 and row < 24:
return 'yearling'
elif row >= 20:
return 'adult'
def linear_regression_scores_X_y(df, y, y_name, dose_types):
"""
specifically for EDA
"""
for Xn in dose_types:
features_list = [[Xn], [Xn, 'Age (months)'], [Xn, 'Age (months)', 'encoded sex']]
for features in features_list:
X = df[features].values.reshape(-1, len(features))
fit_lm = LinearRegression().fit(X, y)
print(f'OLS | {features} vs. {y_name} --> R2: {fit_lm.score(X, y):.4f}')
print('')
return fit_lm
def fit_gam_plot_dependencies(df=None, features=None, target=None,
basis_1=s, basis_2=False, summary=False):
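    # Fit a LinearGAM of the target on up to two features (pyGAM basis terms, lam=60 smoothing),
    # then plot the partial dependence of each fitted term.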
X = df[features]
y = df[target]
if basis_1 and basis_2:
gam = LinearGAM(basis_1(0, lam=60) + basis_2(1, lam=60), fit_intercept=True).fit(X, y)
elif basis_1:
gam = LinearGAM(basis_1(0, lam=60), fit_intercept=True).fit(X, y)
else:
print('no basis called for features.. error')
if summary:
print(gam.summary())
plot_gam_partial_dependencies(gam, features, target)
def plot_gam_partial_dependencies(gam, features, target):
for i, term in enumerate(gam.terms):
if term.isintercept:
continue
XX = gam.generate_X_grid(term=i)
pdep, confi = gam.partial_dependence(term=i, X=XX, width=0.95)
plt.figure()
plt.plot(XX[:, term.feature], pdep)
plt.plot(XX[:, term.feature], confi, c='r', ls='--')
plt.xlabel(f'{features[i]}', fontsize=14)
plt.ylabel(f'{target}', fontsize=14)
plt.title(f'Functional dependence of Y on X', fontsize=14)
plt.show()
def graph_y_vs_dose_age_sex(df=None, x=None, x2=None, x3=None, y=None, hue=None,
dose_x_size='Age (months)', multiplier=12):
f, axes = plt.subplots(1, 3, figsize=(15,5), sharey=True, sharex=False)
fontsize=16
    colors = sns.color_palette('Paired', len(df['Sample ID'].unique()))
    t = (0.7,)
    test = [x + t for x in colors]
# DOSE vs. Y
sns.regplot(x=x, y=y, data=df, ax=axes[0], color=test[4],
scatter_kws={'alpha':.8, 'linewidth':1, 'edgecolor':'black', 's':df[dose_x_size]*multiplier})
# AGE vs. Y
# male O markers
sns.regplot(x=x2, y=y, data=df[df['Sex'] == 'Male'], ax=axes[1], color=test[8], marker='o', fit_reg=False,
scatter_kws={'alpha':.8, 'linewidth':1, 'edgecolor':'black', 's':175,})
# female X markers
sns.regplot(x=x2, y=y, data=df[df['Sex'] == 'Female'], ax=axes[1], color=test[8], marker='X', fit_reg=False,
scatter_kws={'alpha':.8, 'linewidth':1, 'edgecolor':'black', 's':200,})
# plotting just the linear reg
sns.regplot(x=x2, y=y, data=df, ax=axes[1], color=test[8], scatter_kws={'s':0,})
# creating custom legend
handles, labels = [], []
line1 = lines.Line2D([], [], color=test[8], alpha=.8, marker='o', mew=1, mec='black')
line2 = lines.Line2D([], [], color=test[8], alpha=.8, marker='X', mew=1, mec='black')
handles.append(line1)
handles.append(line2)
labels.append('Male')
labels.append('Female')
axes[1].legend(handles, labels, loc='upper right',ncol=1, fancybox=True,
fontsize=fontsize, markerscale=2)
# SEX vs. Y
palette_cust = {'Male':test[0], 'Female':test[10]}
sns.boxplot(x=x3, y=y, dodge=True, palette=palette_cust, order=['Male', 'Female'], data=df, ax=axes[2],)
for patch in axes[2].artists:
r, g, b, a = patch.get_facecolor()
patch.set_facecolor((r, g, b, .6))
sns.swarmplot(x=x3, y=y, dodge=True, palette=palette_cust, order=['Male', 'Female'], data=df, ax=axes[2],
size=12, edgecolor='black', linewidth=1, **{'alpha':0.8})
x_name = 'Reasonable Total Life Time Dose (mGy)'
axes[0].set_xlabel(x_name, fontsize=fontsize)
axes[0].set_ylabel(y, fontsize=fontsize)
axes[0].tick_params(labelsize=fontsize)
axes[1].set_xlabel(x2, fontsize=fontsize)
axes[1].set_ylabel('', fontsize=fontsize)
axes[1].tick_params(labelsize=fontsize)
axes[2].set_xlabel(x3, fontsize=fontsize)
axes[2].set_ylabel('', fontsize=fontsize)
axes[2].tick_params(labelsize=fontsize)
# axes[0].set_xlim(-50,700)
# axes[1].set_xlim(-4,55)
if y == 'Mean Telomere Length (Telo-FISH)':
axes[0].set_ylim(0.2,1.6)
axes[1].set_ylim(0.2,1.6)
y_name = y
elif y == 'Mean Telomere Length (qPCR)':
axes[0].set_ylim(0.6,1.8)
axes[1].set_ylim(0.6,1.8)
y_name = y
elif y == 'Cortisol (pg/mg)':
axes[0].set_ylim(-3, 35)
y_name = y.replace('/', '')
elif y == 'Average # of dicentrics per cell':
axes[0].set_ylim(-0.005, .065)
y_name = y
plt.tight_layout()
plt.savefig(f'graphs/main figures/{y_name} vs {x} and {x2}.png', dpi=600, bbox_inches='tight')
def render_mpl_table(data, col_width=3.0, row_height=0.625, font_size=14,
header_color='#40466e', row_colors=['#f1f1f2', 'w'], edge_color='black',
bbox=[0, 0, 1, 1], header_columns=0, path=None,
ax=None, **kwargs):
if ax is None:
size = (np.array(data.shape[::-1]) + np.array([0, 1])) * np.array([col_width, row_height])
fig, ax = plt.subplots(figsize=size)
ax.axis('off')
mpl_table = ax.table(cellText=data.values, bbox=bbox, colLabels=data.columns, **kwargs)
mpl_table.auto_set_font_size(False)
mpl_table.set_fontsize(font_size)
for k, cell in six.iteritems(mpl_table._cells):
cell.set_edgecolor(edge_color)
if k[0] == 0 or k[1] < header_columns:
cell.set_text_props(weight='bold', color='w')
cell.set_facecolor(header_color)
else:
cell.set_facecolor(row_colors[k[0]%len(row_colors) ])
plt.tight_layout()
if path != None:
plt.savefig(path, dpi=600, bbox_inches='tight')
plt.close()
|
[
"matplotlib.pyplot.ylabel",
"numpy.array",
"pandas.read_excel",
"matplotlib.lines.Line2D",
"numpy.divide",
"numpy.mean",
"seaborn.regplot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"pandas.DataFrame",
"seaborn.swarmplot",
"matplotlib.pyplot.savefig",
"os.scandir",
"scipy.stats.zscore",
"pandas.get_dummies",
"matplotlib.pyplot.title",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show",
"pandas.Series",
"seaborn.lmplot",
"sklearn.linear_model.LogisticRegression",
"seaborn.boxplot",
"matplotlib.pyplot.figure",
"pandas.to_numeric",
"matplotlib.pyplot.tight_layout",
"six.iteritems",
"pandas.concat",
"matplotlib.pyplot.subplots"
] |
[((1115, 1131), 'os.scandir', 'os.scandir', (['path'], {}), '(path)\n', (1125, 1131), False, 'import os\n'), ((3840, 3892), 'pandas.to_numeric', 'pd.to_numeric', (['telo_data.iloc[:, 0]'], {'errors': '"""coerce"""'}), "(telo_data.iloc[:, 0], errors='coerce')\n", (3853, 3892), True, 'import pandas as pd\n'), ((4083, 4114), 'pandas.Series', 'pd.Series', (['telo_data.iloc[:, 0]'], {}), '(telo_data.iloc[:, 0])\n', (4092, 4114), True, 'import pandas as pd\n'), ((5259, 5337), 'pandas.get_dummies', 'pd.get_dummies', (["snake_df['Exposure Status']"], {'prefix': '"""Encoded"""', 'drop_first': '(True)'}), "(snake_df['Exposure Status'], prefix='Encoded', drop_first=True)\n", (5273, 5337), True, 'import pandas as pd\n'), ((6163, 6180), 'pandas.DataFrame', 'pd.DataFrame', (['df1'], {}), '(df1)\n', (6175, 6180), True, 'import pandas as pd\n'), ((6191, 6208), 'pandas.DataFrame', 'pd.DataFrame', (['df2'], {}), '(df2)\n', (6203, 6208), True, 'import pandas as pd\n'), ((8015, 8044), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.88)'}), '(top=0.88)\n', (8034, 8044), True, 'import matplotlib.pyplot as plt\n'), ((8703, 8766), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(12, 5)', 'sharey': '(False)', 'sharex': '(False)'}), '(1, 2, figsize=(12, 5), sharey=False, sharex=False)\n', (8715, 8766), True, 'import matplotlib.pyplot as plt\n'), ((8795, 8937), 'seaborn.regplot', 'sns.regplot', ([], {'x': 'x', 'y': 'y', 'data': 'df', 'ax': 'axes[0]', 'scatter_kws': "{'alpha': 0.8, 'linewidth': 1, 'edgecolor': 'black', 's': df['Age (months)'\n ] * 12}"}), "(x=x, y=y, data=df, ax=axes[0], scatter_kws={'alpha': 0.8,\n 'linewidth': 1, 'edgecolor': 'black', 's': df['Age (months)'] * 12})\n", (8806, 8937), True, 'import seaborn as sns\n'), ((9120, 9243), 'seaborn.regplot', 'sns.regplot', ([], {'x': 'x2', 'y': 'y', 'data': 'df', 'ax': 'axes[1]', 'scatter_kws': "{'alpha': 0.8, 'linewidth': 1, 'edgecolor': 'black', 's': 175}"}), "(x=x2, y=y, data=df, ax=axes[1], scatter_kws={'alpha': 0.8,\n 'linewidth': 1, 'edgecolor': 'black', 's': 175})\n", (9131, 9243), True, 'import seaborn as sns\n'), ((14484, 14546), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(15, 5)', 'sharey': '(True)', 'sharex': '(False)'}), '(1, 3, figsize=(15, 5), sharey=True, sharex=False)\n', (14496, 14546), True, 'import matplotlib.pyplot as plt\n'), ((14725, 14893), 'seaborn.regplot', 'sns.regplot', ([], {'x': 'x', 'y': 'y', 'data': 'df', 'ax': 'axes[0]', 'color': 'test[4]', 'scatter_kws': "{'alpha': 0.8, 'linewidth': 1, 'edgecolor': 'black', 's': df[dose_x_size] *\n multiplier}"}), "(x=x, y=y, data=df, ax=axes[0], color=test[4], scatter_kws={\n 'alpha': 0.8, 'linewidth': 1, 'edgecolor': 'black', 's': df[dose_x_size\n ] * multiplier})\n", (14736, 14893), True, 'import seaborn as sns\n'), ((14939, 15130), 'seaborn.regplot', 'sns.regplot', ([], {'x': 'x2', 'y': 'y', 'data': "df[df['Sex'] == 'Male']", 'ax': 'axes[1]', 'color': 'test[8]', 'marker': '"""o"""', 'fit_reg': '(False)', 'scatter_kws': "{'alpha': 0.8, 'linewidth': 1, 'edgecolor': 'black', 's': 175}"}), "(x=x2, y=y, data=df[df['Sex'] == 'Male'], ax=axes[1], color=test\n [8], marker='o', fit_reg=False, scatter_kws={'alpha': 0.8, 'linewidth':\n 1, 'edgecolor': 'black', 's': 175})\n", (14950, 15130), True, 'import seaborn as sns\n'), ((15161, 15354), 'seaborn.regplot', 'sns.regplot', ([], {'x': 'x2', 'y': 'y', 'data': "df[df['Sex'] == 'Female']", 'ax': 'axes[1]', 'color': 'test[8]', 'marker': '"""X"""', 'fit_reg': 
'(False)', 'scatter_kws': "{'alpha': 0.8, 'linewidth': 1, 'edgecolor': 'black', 's': 200}"}), "(x=x2, y=y, data=df[df['Sex'] == 'Female'], ax=axes[1], color=\n test[8], marker='X', fit_reg=False, scatter_kws={'alpha': 0.8,\n 'linewidth': 1, 'edgecolor': 'black', 's': 200})\n", (15172, 15354), True, 'import seaborn as sns\n'), ((15397, 15482), 'seaborn.regplot', 'sns.regplot', ([], {'x': 'x2', 'y': 'y', 'data': 'df', 'ax': 'axes[1]', 'color': 'test[8]', 'scatter_kws': "{'s': 0}"}), "(x=x2, y=y, data=df, ax=axes[1], color=test[8], scatter_kws={'s': 0}\n )\n", (15408, 15482), True, 'import seaborn as sns\n'), ((15558, 15636), 'matplotlib.lines.Line2D', 'lines.Line2D', (['[]', '[]'], {'color': 'test[8]', 'alpha': '(0.8)', 'marker': '"""o"""', 'mew': '(1)', 'mec': '"""black"""'}), "([], [], color=test[8], alpha=0.8, marker='o', mew=1, mec='black')\n", (15570, 15636), False, 'from matplotlib import lines\n'), ((15648, 15726), 'matplotlib.lines.Line2D', 'lines.Line2D', (['[]', '[]'], {'color': 'test[8]', 'alpha': '(0.8)', 'marker': '"""X"""', 'mew': '(1)', 'mec': '"""black"""'}), "([], [], color=test[8], alpha=0.8, marker='X', mew=1, mec='black')\n", (15660, 15726), False, 'from matplotlib import lines\n'), ((16058, 16165), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': 'x3', 'y': 'y', 'dodge': '(True)', 'palette': 'palette_cust', 'order': "['Male', 'Female']", 'data': 'df', 'ax': 'axes[2]'}), "(x=x3, y=y, dodge=True, palette=palette_cust, order=['Male',\n 'Female'], data=df, ax=axes[2])\n", (16069, 16165), True, 'import seaborn as sns\n'), ((16301, 16473), 'seaborn.swarmplot', 'sns.swarmplot', ([], {'x': 'x3', 'y': 'y', 'dodge': '(True)', 'palette': 'palette_cust', 'order': "['Male', 'Female']", 'data': 'df', 'ax': 'axes[2]', 'size': '(12)', 'edgecolor': '"""black"""', 'linewidth': '(1)'}), "(x=x3, y=y, dodge=True, palette=palette_cust, order=['Male',\n 'Female'], data=df, ax=axes[2], size=12, edgecolor='black', linewidth=1,\n **{'alpha': 0.8})\n", (16314, 16473), True, 'import seaborn as sns\n'), ((17508, 17526), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (17524, 17526), True, 'import matplotlib.pyplot as plt\n'), ((17531, 17629), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""graphs/main figures/{y_name} vs {x} and {x2}.png"""'], {'dpi': '(600)', 'bbox_inches': '"""tight"""'}), "(f'graphs/main figures/{y_name} vs {x} and {x2}.png', dpi=600,\n bbox_inches='tight')\n", (17542, 17629), True, 'import matplotlib.pyplot as plt\n'), ((18292, 18323), 'six.iteritems', 'six.iteritems', (['mpl_table._cells'], {}), '(mpl_table._cells)\n', (18305, 18323), False, 'import six\n'), ((18598, 18616), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (18614, 18616), True, 'import matplotlib.pyplot as plt\n'), ((18703, 18714), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (18712, 18714), True, 'import matplotlib.pyplot as plt\n'), ((3008, 3045), 'pandas.concat', 'pd.concat', (['[rsampled, df]'], {'sort': '(False)'}), '([rsampled, df], sort=False)\n', (3017, 3045), True, 'import pandas as pd\n'), ((3332, 3369), 'pandas.concat', 'pd.concat', (['[rsampled, df]'], {'sort': '(False)'}), '([rsampled, df], sort=False)\n', (3341, 3369), True, 'import pandas as pd\n'), ((7472, 7608), 'seaborn.lmplot', 'sns.lmplot', ([], {'x': 'x', 'y': 'y', 'hue': 'hue', 'col': 'col', 'data': 'data', 'logistic': '(True)', 'height': '(5.5)', 'aspect': '(1)', 'scatter_kws': "{'s': 175, 'edgecolor': 'black'}"}), "(x=x, y=y, hue=hue, col=col, data=data, logistic=True, 
height=5.5,\n aspect=1, scatter_kws={'s': 175, 'edgecolor': 'black'})\n", (7482, 7608), True, 'import seaborn as sns\n'), ((7634, 7755), 'seaborn.lmplot', 'sns.lmplot', ([], {'x': 'x', 'y': 'y', 'hue': 'hue', 'col': 'col', 'data': 'data', 'height': '(5.5)', 'aspect': '(1)', 'scatter_kws': "{'s': 175, 'edgecolor': 'black'}"}), "(x=x, y=y, hue=hue, col=col, data=data, height=5.5, aspect=1,\n scatter_kws={'s': 175, 'edgecolor': 'black'})\n", (7644, 7755), True, 'import seaborn as sns\n'), ((13999, 14011), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (14009, 14011), True, 'import matplotlib.pyplot as plt\n'), ((14020, 14055), 'matplotlib.pyplot.plot', 'plt.plot', (['XX[:, term.feature]', 'pdep'], {}), '(XX[:, term.feature], pdep)\n', (14028, 14055), True, 'import matplotlib.pyplot as plt\n'), ((14064, 14116), 'matplotlib.pyplot.plot', 'plt.plot', (['XX[:, term.feature]', 'confi'], {'c': '"""r"""', 'ls': '"""--"""'}), "(XX[:, term.feature], confi, c='r', ls='--')\n", (14072, 14116), True, 'import matplotlib.pyplot as plt\n'), ((14125, 14166), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['f"""{features[i]}"""'], {'fontsize': '(14)'}), "(f'{features[i]}', fontsize=14)\n", (14135, 14166), True, 'import matplotlib.pyplot as plt\n'), ((14175, 14211), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['f"""{target}"""'], {'fontsize': '(14)'}), "(f'{target}', fontsize=14)\n", (14185, 14211), True, 'import matplotlib.pyplot as plt\n'), ((14220, 14278), 'matplotlib.pyplot.title', 'plt.title', (['f"""Functional dependence of Y on X"""'], {'fontsize': '(14)'}), "(f'Functional dependence of Y on X', fontsize=14)\n", (14229, 14278), True, 'import matplotlib.pyplot as plt\n'), ((14287, 14297), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14295, 14297), True, 'import matplotlib.pyplot as plt\n'), ((18050, 18076), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'size'}), '(figsize=size)\n', (18062, 18076), True, 'import matplotlib.pyplot as plt\n'), ((18651, 18698), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {'dpi': '(600)', 'bbox_inches': '"""tight"""'}), "(path, dpi=600, bbox_inches='tight')\n", (18662, 18698), True, 'import matplotlib.pyplot as plt\n'), ((1372, 1450), 'pandas.read_excel', 'pd.read_excel', (['full_name'], {'sheet_name': 'None', 'skiprows': '(4)', 'usecols': '[3]', 'nrows': '(5000)'}), '(full_name, sheet_name=None, skiprows=4, usecols=[3], nrows=5000)\n', (1385, 1450), True, 'import pandas as pd\n'), ((10867, 10901), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""lbfgs"""'}), "(solver='lbfgs')\n", (10885, 10901), False, 'from sklearn.linear_model import LogisticRegression\n'), ((17998, 18031), 'numpy.array', 'np.array', (['[col_width, row_height]'], {}), '([col_width, row_height])\n', (18006, 18031), True, 'import numpy as np\n'), ((2190, 2230), 'numpy.divide', 'np.divide', (['sample_data[1]', 'control_value'], {}), '(sample_data[1], control_value)\n', (2199, 2230), True, 'import numpy as np\n'), ((2319, 2359), 'numpy.divide', 'np.divide', (['sample_data[2]', 'control_value'], {}), '(sample_data[2], control_value)\n', (2328, 2359), True, 'import numpy as np\n'), ((17949, 17975), 'numpy.array', 'np.array', (['data.shape[::-1]'], {}), '(data.shape[::-1])\n', (17957, 17975), True, 'import numpy as np\n'), ((17978, 17994), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (17986, 17994), True, 'import numpy as np\n'), ((9866, 9884), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), 
'()\n', (9882, 9884), False, 'from sklearn.linear_model import LinearRegression\n'), ((10224, 10242), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (10240, 10242), False, 'from sklearn.linear_model import LinearRegression\n'), ((11233, 11251), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (11249, 11251), False, 'from sklearn.linear_model import LinearRegression\n'), ((12949, 12967), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (12965, 12967), False, 'from sklearn.linear_model import LinearRegression\n'), ((1952, 1974), 'numpy.mean', 'np.mean', (['telos_cleaned'], {}), '(telos_cleaned)\n', (1959, 1974), True, 'import numpy as np\n'), ((4024, 4047), 'scipy.stats.zscore', 'stats.zscore', (['telo_data'], {}), '(telo_data)\n', (4036, 4047), False, 'from scipy import stats\n'), ((1845, 1867), 'numpy.mean', 'np.mean', (['telos_cleaned'], {}), '(telos_cleaned)\n', (1852, 1867), True, 'import numpy as np\n')]
|
import time
from datetime import datetime
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.dates import epoch2num
import device_factory
if __name__ == '__main__':
amount = 50
devices = []
for i in range(amount):
device = device_factory.ecopower_4(i, i)
devices.append(device)
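    # simulation timeline is expressed in whole minutes since the epoch (time.mktime gives seconds, hence the // 60)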
start = int(time.mktime(datetime(2010, 1, 2).timetuple()) // 60)
end = int(time.mktime(datetime(2010, 1, 3).timetuple()) // 60)
sample_time = start + 15 * 24
sample_dur = 16
P = [[] for d in devices]
T = [[] for d in devices]
Th = [[] for d in devices]
for now in range(start, sample_time):
for idx, device in enumerate(devices):
device.step(now)
P[idx].append(device.components.consumer.P)
T[idx].append(device.components.storage.T)
Th[idx].append(device.components.heatsink.in_heat)
samples = []
for d in devices:
# d.components.sampler.setpoint_density = 0.1
samples.append(d.components.sampler.sample(100, sample_dur))
# samples = [d.components.sampler.sample(100, sample_dur) for d in devices]
schedule = np.zeros(sample_dur)
for idx, device in enumerate(devices):
# min_schedule_idx = np.argmin(np.sum(np.abs(samples[idx]), axis=1))
# device.components.scheduler.schedule = samples[idx][min_schedule_idx]
# schedule += samples[idx][min_schedule_idx]
max_schedule_idx = np.argmax(np.sum(np.abs(samples[idx]), axis=1))
device.components.scheduler.schedule = samples[idx][max_schedule_idx]
schedule += samples[idx][max_schedule_idx]
for now in range(sample_time, end):
for idx, device in enumerate(devices):
device.step(now)
P[idx].append(device.components.consumer.P)
T[idx].append(device.components.storage.T)
Th[idx].append(device.components.heatsink.in_heat)
P = np.sum(P, axis=0)
Th = np.sum(Th, axis=0)
T = np.mean(T, axis=0)
ax = plt.subplot(2, 1, 1)
ax.grid(True)
tz = 60 # timezone deviation in minutes
x = epoch2num(np.arange((start + tz) * 60, (end + tz) * 60, 60))
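    # average the per-minute thermal output over 15-minute blocks so it lines up with the 15-minute schedule slots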
Th = np.reshape(Th, (len(x) // 15, 15)).mean(axis=1)
ax.plot_date(x[::15], Th, color='magenta', label='P$_{th,out}$ (kW)', ls='-',
marker=None)
ax.legend()
ax = plt.subplot(2, 1, 2, sharex=ax)
ax.grid(True)
l1 = ax.plot_date(x, P, label='P$_{el}$ (kW)', ls='-', marker=None)
sched_x = epoch2num(np.arange(
(sample_time + tz) * 60, ((sample_time + tz) + sample_dur * 15) * 60, 60))
l2 = ax.plot_date(sched_x[::15], schedule, color='r', label='Schedule',
ls='-', marker=None)
ax = plt.twinx()
l3 = ax.plot_date(x, T, color='g', label='T (\\textdegree C)', ls='-', marker=None)
lines = l1 + l2 + l3
labels = [l.get_label() for l in lines]
ax.legend(lines, labels)
plt.gcf().autofmt_xdate()
# # Samples plot
# fig, ax = plt.subplots(len(samples))
# if len(samples) == 1:
# ax = [ax]
# for i, sample in enumerate(samples):
# t = np.arange(len(sample[0]))
# for s in sample:
# ax[i].plot(t, s)
plt.show()
|
[
"datetime.datetime",
"numpy.mean",
"device_factory.ecopower_4",
"numpy.abs",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.twinx",
"numpy.sum",
"numpy.zeros",
"matplotlib.pyplot.subplot",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((1207, 1227), 'numpy.zeros', 'np.zeros', (['sample_dur'], {}), '(sample_dur)\n', (1215, 1227), True, 'import numpy as np\n'), ((2003, 2020), 'numpy.sum', 'np.sum', (['P'], {'axis': '(0)'}), '(P, axis=0)\n', (2009, 2020), True, 'import numpy as np\n'), ((2031, 2049), 'numpy.sum', 'np.sum', (['Th'], {'axis': '(0)'}), '(Th, axis=0)\n', (2037, 2049), True, 'import numpy as np\n'), ((2059, 2077), 'numpy.mean', 'np.mean', (['T'], {'axis': '(0)'}), '(T, axis=0)\n', (2066, 2077), True, 'import numpy as np\n'), ((2090, 2110), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (2101, 2110), True, 'from matplotlib import pyplot as plt\n'), ((2440, 2471), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {'sharex': 'ax'}), '(2, 1, 2, sharex=ax)\n', (2451, 2471), True, 'from matplotlib import pyplot as plt\n'), ((2809, 2820), 'matplotlib.pyplot.twinx', 'plt.twinx', ([], {}), '()\n', (2818, 2820), True, 'from matplotlib import pyplot as plt\n'), ((3314, 3324), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3322, 3324), True, 'from matplotlib import pyplot as plt\n'), ((282, 313), 'device_factory.ecopower_4', 'device_factory.ecopower_4', (['i', 'i'], {}), '(i, i)\n', (307, 313), False, 'import device_factory\n'), ((2195, 2244), 'numpy.arange', 'np.arange', (['((start + tz) * 60)', '((end + tz) * 60)', '(60)'], {}), '((start + tz) * 60, (end + tz) * 60, 60)\n', (2204, 2244), True, 'import numpy as np\n'), ((2589, 2675), 'numpy.arange', 'np.arange', (['((sample_time + tz) * 60)', '((sample_time + tz + sample_dur * 15) * 60)', '(60)'], {}), '((sample_time + tz) * 60, (sample_time + tz + sample_dur * 15) * \n 60, 60)\n', (2598, 2675), True, 'import numpy as np\n'), ((3016, 3025), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (3023, 3025), True, 'from matplotlib import pyplot as plt\n'), ((1532, 1552), 'numpy.abs', 'np.abs', (['samples[idx]'], {}), '(samples[idx])\n', (1538, 1552), True, 'import numpy as np\n'), ((379, 399), 'datetime.datetime', 'datetime', (['(2010)', '(1)', '(2)'], {}), '(2010, 1, 2)\n', (387, 399), False, 'from datetime import datetime\n'), ((447, 467), 'datetime.datetime', 'datetime', (['(2010)', '(1)', '(3)'], {}), '(2010, 1, 3)\n', (455, 467), False, 'from datetime import datetime\n')]
|
"""
Tests for the h5py.Datatype class.
"""
from __future__ import absolute_import
from itertools import count
import numpy as np
import h5py
from ..common import ut, TestCase
class TestVlen(TestCase):
"""
Check that storage of vlen strings is carried out correctly.
"""
def assertVlenArrayEqual(self, dset, arr, message=None, precision=None):
self.assert_(
dset.shape == arr.shape,
"Shape mismatch (%s vs %s)%s" % (dset.shape, arr.shape, message)
)
for (i, d, a) in zip(count(), dset, arr):
self.assertArrayEqual(d, a, message, precision)
def test_compound(self):
fields = []
fields.append(('field_1', h5py.special_dtype(vlen=str)))
fields.append(('field_2', np.int32))
dt = np.dtype(fields)
self.f['mytype'] = np.dtype(dt)
dt_out = self.f['mytype'].dtype.fields['field_1'][0]
self.assertEqual(h5py.check_dtype(vlen=dt_out), str)
def test_compound_vlen_bool(self):
vidt = h5py.special_dtype(vlen=np.uint8)
def a(items):
return np.array(items, dtype=np.uint8)
f = self.f
dt_vb = np.dtype([
('foo', vidt),
('logical', np.bool)])
vb = f.create_dataset('dt_vb', shape=(4,), dtype=dt_vb)
data = np.array([(a([1,2,3]), True),
(a([1 ]), False),
(a([1,5 ]), True),
(a([], ), False),],
dtype=dt_vb)
vb[:] = data
actual = f['dt_vb'][:]
self.assertVlenArrayEqual(data['foo'], actual['foo'])
self.assertArrayEqual(data['logical'], actual['logical'])
dt_vv = np.dtype([
('foo', vidt),
('bar', vidt)])
f.create_dataset('dt_vv', shape=(4,), dtype=dt_vv)
dt_vvb = np.dtype([
('foo', vidt),
('bar', vidt),
('logical', np.bool)])
vvb = f.create_dataset('dt_vvb', shape=(2,), dtype=dt_vvb)
dt_bvv = np.dtype([
('logical', np.bool),
('foo', vidt),
('bar', vidt)])
bvv = f.create_dataset('dt_bvv', shape=(2,), dtype=dt_bvv)
data = np.array([(True, a([1,2,3]), a([1,2]) ),
(False, a([]), a([2,4,6])),],
                        dtype=dt_bvv)
bvv[:] = data
actual = bvv[:]
self.assertVlenArrayEqual(data['foo'], actual['foo'])
self.assertVlenArrayEqual(data['bar'], actual['bar'])
self.assertArrayEqual(data['logical'], actual['logical'])
def test_compound_vlen_enum(self):
eidt = h5py.special_dtype(enum=(np.uint8, {'OFF': 0, 'ON': 1}))
vidt = h5py.special_dtype(vlen=np.uint8)
def a(items):
return np.array(items, dtype=np.uint8)
f = self.f
dt_vve = np.dtype([
('foo', vidt),
('bar', vidt),
('switch', eidt)])
vve = f.create_dataset('dt_vve', shape=(2,), dtype=dt_vve)
data = np.array([(a([1,2,3]), a([1,2]), 1),
(a([]), a([2,4,6]), 0),],
dtype=dt_vve)
vve[:] = data
actual = vve[:]
self.assertVlenArrayEqual(data['foo'], actual['foo'])
self.assertVlenArrayEqual(data['bar'], actual['bar'])
self.assertArrayEqual(data['switch'], actual['switch'])
def test_vlen_enum(self):
fname = self.mktemp()
arr1 = [[1],[1,2]]
dt1 = h5py.special_dtype(vlen=h5py.special_dtype(
enum=('i', dict(foo=1, bar=2))))
with h5py.File(fname,'w') as f:
df1 = f.create_dataset('test', (len(arr1),), dtype=dt1)
df1[:] = np.array(arr1)
with h5py.File(fname,'r') as f:
df2 = f['test']
dt2 = df2.dtype
arr2 = [e.tolist() for e in df2[:]]
self.assertEqual(arr1, arr2)
self.assertEqual(h5py.check_dtype(enum=h5py.check_dtype(vlen=dt1)),
h5py.check_dtype(enum=h5py.check_dtype(vlen=dt2)))
class TestOffsets(TestCase):
"""
Check that compound members with aligned or manual offsets are handled
correctly.
"""
def test_compound_vlen(self):
vidt = h5py.special_dtype(vlen=np.uint8)
eidt = h5py.special_dtype(enum=(np.uint8, {'OFF': 0, 'ON': 1}))
for np_align in (False, True):
dt = np.dtype([
('a', eidt),
('foo', vidt),
('bar', vidt),
('switch', eidt)], align=np_align)
np_offsets = [dt.fields[i][1] for i in dt.names]
for logical in (False, True):
if logical and np_align:
# Vlen types have different size in the numpy struct
self.assertRaises(TypeError, h5py.h5t.py_create, dt,
logical=logical)
else:
ht = h5py.h5t.py_create(dt, logical=logical)
offsets = [ht.get_member_offset(i)
for i in range(ht.get_nmembers())]
if np_align:
self.assertEqual(np_offsets, offsets)
def test_aligned_offsets(self):
dt = np.dtype('i2,i8', align=True)
ht = h5py.h5t.py_create(dt)
self.assertEqual(dt.itemsize, ht.get_size())
self.assertEqual(
[dt.fields[i][1] for i in dt.names],
[ht.get_member_offset(i) for i in range(ht.get_nmembers())]
)
def test_aligned_data(self):
dt = np.dtype('i2,f8', align=True)
data = np.empty(10, dtype=dt)
data['f0'] = np.array(np.random.randint(-100, 100, size=data.size),
dtype='i2')
data['f1'] = np.random.rand(data.size)
fname = self.mktemp()
with h5py.File(fname, 'w') as f:
f['data'] = data
with h5py.File(fname, 'r') as f:
self.assertArrayEqual(f['data'], data)
def test_out_of_order_offsets(self):
dt = np.dtype({
'names' : ['f1', 'f2', 'f3'],
'formats' : ['<f4', '<i4', '<f8'],
'offsets' : [0, 16, 8]
})
data = np.empty(10, dtype=dt)
data['f1'] = np.random.rand(data.size)
data['f2'] = np.random.random_integers(-10, 10, data.size)
data['f3'] = np.random.rand(data.size)*-1
fname = self.mktemp()
with h5py.File(fname, 'w') as fd:
fd.create_dataset('data', data=data)
with h5py.File(fname, 'r') as fd:
self.assertArrayEqual(fd['data'], data)
|
[
"h5py.check_dtype",
"numpy.random.rand",
"numpy.random.random_integers",
"h5py.File",
"h5py.h5t.py_create",
"numpy.array",
"itertools.count",
"numpy.empty",
"numpy.random.randint",
"h5py.special_dtype",
"numpy.dtype"
] |
[((806, 822), 'numpy.dtype', 'np.dtype', (['fields'], {}), '(fields)\n', (814, 822), True, 'import numpy as np\n'), ((850, 862), 'numpy.dtype', 'np.dtype', (['dt'], {}), '(dt)\n', (858, 862), True, 'import numpy as np\n'), ((1040, 1073), 'h5py.special_dtype', 'h5py.special_dtype', ([], {'vlen': 'np.uint8'}), '(vlen=np.uint8)\n', (1058, 1073), False, 'import h5py\n'), ((1184, 1231), 'numpy.dtype', 'np.dtype', (["[('foo', vidt), ('logical', np.bool)]"], {}), "([('foo', vidt), ('logical', np.bool)])\n", (1192, 1231), True, 'import numpy as np\n'), ((1736, 1776), 'numpy.dtype', 'np.dtype', (["[('foo', vidt), ('bar', vidt)]"], {}), "([('foo', vidt), ('bar', vidt)])\n", (1744, 1776), True, 'import numpy as np\n'), ((1879, 1941), 'numpy.dtype', 'np.dtype', (["[('foo', vidt), ('bar', vidt), ('logical', np.bool)]"], {}), "([('foo', vidt), ('bar', vidt), ('logical', np.bool)])\n", (1887, 1941), True, 'import numpy as np\n'), ((2064, 2126), 'numpy.dtype', 'np.dtype', (["[('logical', np.bool), ('foo', vidt), ('bar', vidt)]"], {}), "([('logical', np.bool), ('foo', vidt), ('bar', vidt)])\n", (2072, 2126), True, 'import numpy as np\n'), ((2675, 2731), 'h5py.special_dtype', 'h5py.special_dtype', ([], {'enum': "(np.uint8, {'OFF': 0, 'ON': 1})"}), "(enum=(np.uint8, {'OFF': 0, 'ON': 1}))\n", (2693, 2731), False, 'import h5py\n'), ((2747, 2780), 'h5py.special_dtype', 'h5py.special_dtype', ([], {'vlen': 'np.uint8'}), '(vlen=np.uint8)\n', (2765, 2780), False, 'import h5py\n'), ((2892, 2950), 'numpy.dtype', 'np.dtype', (["[('foo', vidt), ('bar', vidt), ('switch', eidt)]"], {}), "([('foo', vidt), ('bar', vidt), ('switch', eidt)])\n", (2900, 2950), True, 'import numpy as np\n'), ((4306, 4339), 'h5py.special_dtype', 'h5py.special_dtype', ([], {'vlen': 'np.uint8'}), '(vlen=np.uint8)\n', (4324, 4339), False, 'import h5py\n'), ((4355, 4411), 'h5py.special_dtype', 'h5py.special_dtype', ([], {'enum': "(np.uint8, {'OFF': 0, 'ON': 1})"}), "(enum=(np.uint8, {'OFF': 0, 'ON': 1}))\n", (4373, 4411), False, 'import h5py\n'), ((5311, 5340), 'numpy.dtype', 'np.dtype', (['"""i2,i8"""'], {'align': '(True)'}), "('i2,i8', align=True)\n", (5319, 5340), True, 'import numpy as np\n'), ((5354, 5376), 'h5py.h5t.py_create', 'h5py.h5t.py_create', (['dt'], {}), '(dt)\n', (5372, 5376), False, 'import h5py\n'), ((5635, 5664), 'numpy.dtype', 'np.dtype', (['"""i2,f8"""'], {'align': '(True)'}), "('i2,f8', align=True)\n", (5643, 5664), True, 'import numpy as np\n'), ((5680, 5702), 'numpy.empty', 'np.empty', (['(10)'], {'dtype': 'dt'}), '(10, dtype=dt)\n', (5688, 5702), True, 'import numpy as np\n'), ((5829, 5854), 'numpy.random.rand', 'np.random.rand', (['data.size'], {}), '(data.size)\n', (5843, 5854), True, 'import numpy as np\n'), ((6106, 6206), 'numpy.dtype', 'np.dtype', (["{'names': ['f1', 'f2', 'f3'], 'formats': ['<f4', '<i4', '<f8'], 'offsets':\n [0, 16, 8]}"], {}), "({'names': ['f1', 'f2', 'f3'], 'formats': ['<f4', '<i4', '<f8'],\n 'offsets': [0, 16, 8]})\n", (6114, 6206), True, 'import numpy as np\n'), ((6267, 6289), 'numpy.empty', 'np.empty', (['(10)'], {'dtype': 'dt'}), '(10, dtype=dt)\n', (6275, 6289), True, 'import numpy as np\n'), ((6311, 6336), 'numpy.random.rand', 'np.random.rand', (['data.size'], {}), '(data.size)\n', (6325, 6336), True, 'import numpy as np\n'), ((6358, 6403), 'numpy.random.random_integers', 'np.random.random_integers', (['(-10)', '(10)', 'data.size'], {}), '(-10, 10, data.size)\n', (6383, 6403), True, 'import numpy as np\n'), ((551, 558), 'itertools.count', 'count', ([], {}), '()\n', (556, 558), False, 'from 
itertools import count\n'), ((949, 978), 'h5py.check_dtype', 'h5py.check_dtype', ([], {'vlen': 'dt_out'}), '(vlen=dt_out)\n', (965, 978), False, 'import h5py\n'), ((1115, 1146), 'numpy.array', 'np.array', (['items'], {'dtype': 'np.uint8'}), '(items, dtype=np.uint8)\n', (1123, 1146), True, 'import numpy as np\n'), ((2822, 2853), 'numpy.array', 'np.array', (['items'], {'dtype': 'np.uint8'}), '(items, dtype=np.uint8)\n', (2830, 2853), True, 'import numpy as np\n'), ((3643, 3664), 'h5py.File', 'h5py.File', (['fname', '"""w"""'], {}), "(fname, 'w')\n", (3652, 3664), False, 'import h5py\n'), ((3759, 3773), 'numpy.array', 'np.array', (['arr1'], {}), '(arr1)\n', (3767, 3773), True, 'import numpy as np\n'), ((3788, 3809), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (3797, 3809), False, 'import h5py\n'), ((4469, 4560), 'numpy.dtype', 'np.dtype', (["[('a', eidt), ('foo', vidt), ('bar', vidt), ('switch', eidt)]"], {'align': 'np_align'}), "([('a', eidt), ('foo', vidt), ('bar', vidt), ('switch', eidt)],\n align=np_align)\n", (4477, 4560), True, 'import numpy as np\n'), ((5734, 5778), 'numpy.random.randint', 'np.random.randint', (['(-100)', '(100)'], {'size': 'data.size'}), '(-100, 100, size=data.size)\n', (5751, 5778), True, 'import numpy as np\n'), ((5900, 5921), 'h5py.File', 'h5py.File', (['fname', '"""w"""'], {}), "(fname, 'w')\n", (5909, 5921), False, 'import h5py\n'), ((5971, 5992), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (5980, 5992), False, 'import h5py\n'), ((6425, 6450), 'numpy.random.rand', 'np.random.rand', (['data.size'], {}), '(data.size)\n', (6439, 6450), True, 'import numpy as np\n'), ((6499, 6520), 'h5py.File', 'h5py.File', (['fname', '"""w"""'], {}), "(fname, 'w')\n", (6508, 6520), False, 'import h5py\n'), ((6591, 6612), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (6600, 6612), False, 'import h5py\n'), ((717, 745), 'h5py.special_dtype', 'h5py.special_dtype', ([], {'vlen': 'str'}), '(vlen=str)\n', (735, 745), False, 'import h5py\n'), ((4006, 4032), 'h5py.check_dtype', 'h5py.check_dtype', ([], {'vlen': 'dt1'}), '(vlen=dt1)\n', (4022, 4032), False, 'import h5py\n'), ((4082, 4108), 'h5py.check_dtype', 'h5py.check_dtype', ([], {'vlen': 'dt2'}), '(vlen=dt2)\n', (4098, 4108), False, 'import h5py\n'), ((5005, 5044), 'h5py.h5t.py_create', 'h5py.h5t.py_create', (['dt'], {'logical': 'logical'}), '(dt, logical=logical)\n', (5023, 5044), False, 'import h5py\n')]
|
from pathlib import Path
import numpy as np
import pickle
import argparse
import errno
import sys
def file_exists(path):
return Path(path).is_file()
def dir_exists(path):
return Path(path).is_dir()
def remove_extension(x): return x.split('.')[0]
def print_error(type, file):
print(FileNotFoundError(errno.ENOENT,
'The {} {} does not exist'.format(type, file)))
def calculate_threshold(similarity, output='confusables',
threshold=0.8, verbose=False):
lines = [line.rstrip('\n') for line in open(similarity)]
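    # expected file layout: the first line is a header listing the Unicode characters,
    # and each following line is "<latin char> <similarity> <similarity> ..."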
unicode_characters = np.asarray(lines[0].split(' ')[1:])
data = {}
data['threshold'] = threshold
data['characters'] = {}
for l in lines[1:]:
line = l.split(' ')
latin = line[0]
del line[0]
similarity_row = np.asarray(line, dtype=np.float)
indexes = np.where(similarity_row >= threshold)
        chars = unicode_characters[np.asarray(indexes[0])].tolist()
        data['characters'][latin] = chars
if(verbose):
print('[{}] {}: {}'.format(len(chars), latin, ','.join(chars)))
output = '{}-{}.pickle'.format(output, int(threshold*100))
with open(output, 'wb') as f:
pickle.dump(data, f)
def main():
parser = argparse.ArgumentParser(description='Filter Unicode characters '
'based on a given threshold '
'between 0 and 1 '
'and a similarity matrix')
parser.add_argument('-s', '--similarity', default='similarities.txt')
parser.add_argument('-t', '--threshold', default=0.8, type=float)
parser.add_argument('-o', '--output', default='confusables')
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
similarity = args.similarity
threshold = args.threshold
output = args.output
verbose = args.verbose
if not file_exists(similarity):
print_error('file', similarity)
sys.exit(1)
calculate_threshold(similarity, output, threshold, verbose)
if __name__ == '__main__':
main()
|
[
"pickle.dump",
"argparse.ArgumentParser",
"pathlib.Path",
"numpy.where",
"numpy.asarray",
"sys.exit"
] |
[((1358, 1499), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Filter Unicode characters based on a given threshold between 0 and 1 and a similarity matrix"""'}), "(description=\n 'Filter Unicode characters based on a given threshold between 0 and 1 and a similarity matrix'\n )\n", (1381, 1499), False, 'import argparse\n'), ((846, 878), 'numpy.asarray', 'np.asarray', (['line'], {'dtype': 'np.float'}), '(line, dtype=np.float)\n', (856, 878), True, 'import numpy as np\n'), ((897, 934), 'numpy.where', 'np.where', (['(similarity_row >= threshold)'], {}), '(similarity_row >= threshold)\n', (905, 934), True, 'import numpy as np\n'), ((1310, 1330), 'pickle.dump', 'pickle.dump', (['data', 'f'], {}), '(data, f)\n', (1321, 1330), False, 'import pickle\n'), ((2154, 2165), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2162, 2165), False, 'import sys\n'), ((134, 144), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (138, 144), False, 'from pathlib import Path\n'), ((190, 200), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (194, 200), False, 'from pathlib import Path\n'), ((990, 1012), 'numpy.asarray', 'np.asarray', (['indexes[0]'], {}), '(indexes[0])\n', (1000, 1012), True, 'import numpy as np\n'), ((1073, 1095), 'numpy.asarray', 'np.asarray', (['indexes[0]'], {}), '(indexes[0])\n', (1083, 1095), True, 'import numpy as np\n')]
|
#!/usr/bin/python
from aos.util.trapezoid_profile import TrapezoidProfile
from frc971.control_loops.python import control_loop
from frc971.control_loops.python import angular_system
from frc971.control_loops.python import controls
import copy
import numpy
import sys
from matplotlib import pylab
import gflags
import glog
FLAGS = gflags.FLAGS
try:
gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
except gflags.DuplicateFlagError:
pass
# Wrist alone
# 0.1348
# Wrist with ball
# 0.3007
# Wrist with hatch
# 0.446
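# Measured moments of inertia for the bare wrist and with each game piece
# (units assumed to be kg*m^2); the J constants assigned below correspond to these configurations.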
kWrist = angular_system.AngularSystemParams(
name='Wrist',
motor=control_loop.BAG(),
G=(6.0 / 60.0) * (20.0 / 100.0) * (24.0 / 84.0),
J=0.30,
q_pos=0.20,
q_vel=5.0,
kalman_q_pos=0.12,
kalman_q_vel=2.0,
kalman_q_voltage=4.0,
kalman_r_position=0.05)
kWristBall = copy.copy(kWrist)
kWristBall.J = 0.4007
kWristBall.q_pos = 0.55
kWristBall.q_vel = 5.0
kWristPanel = copy.copy(kWrist)
kWristPanel.J = 0.446
kWristModel = copy.copy(kWrist)
kWristModel.J = 0.1348
def main(argv):
if FLAGS.plot:
R = numpy.matrix([[numpy.pi / 2.0], [0.0]])
angular_system.PlotKick(kWristBall, R, plant_params=kWristBall)
angular_system.PlotMotion(kWristBall, R, plant_params=kWristBall)
# Write the generated constants out to a file.
if len(argv) != 5:
glog.fatal(
'Expected .h file name and .cc file name for the wrist and integral wrist.'
)
else:
namespaces = ['y2019', 'control_loops', 'superstructure', 'wrist']
angular_system.WriteAngularSystem([kWrist, kWristBall, kWristPanel],
argv[1:3], argv[3:5], namespaces)
if __name__ == '__main__':
argv = FLAGS(sys.argv)
glog.init()
sys.exit(main(argv))
|
[
"frc971.control_loops.python.control_loop.BAG",
"gflags.DEFINE_bool",
"frc971.control_loops.python.angular_system.PlotKick",
"glog.fatal",
"frc971.control_loops.python.angular_system.PlotMotion",
"copy.copy",
"numpy.matrix",
"glog.init",
"frc971.control_loops.python.angular_system.WriteAngularSystem"
] |
[((852, 869), 'copy.copy', 'copy.copy', (['kWrist'], {}), '(kWrist)\n', (861, 869), False, 'import copy\n'), ((954, 971), 'copy.copy', 'copy.copy', (['kWrist'], {}), '(kWrist)\n', (963, 971), False, 'import copy\n'), ((1009, 1026), 'copy.copy', 'copy.copy', (['kWrist'], {}), '(kWrist)\n', (1018, 1026), False, 'import copy\n'), ((355, 424), 'gflags.DEFINE_bool', 'gflags.DEFINE_bool', (['"""plot"""', '(False)', '"""If true, plot the loop response."""'], {}), "('plot', False, 'If true, plot the loop response.')\n", (373, 424), False, 'import gflags\n'), ((1776, 1787), 'glog.init', 'glog.init', ([], {}), '()\n', (1785, 1787), False, 'import glog\n'), ((623, 641), 'frc971.control_loops.python.control_loop.BAG', 'control_loop.BAG', ([], {}), '()\n', (639, 641), False, 'from frc971.control_loops.python import control_loop\n'), ((1099, 1138), 'numpy.matrix', 'numpy.matrix', (['[[numpy.pi / 2.0], [0.0]]'], {}), '([[numpy.pi / 2.0], [0.0]])\n', (1111, 1138), False, 'import numpy\n'), ((1147, 1210), 'frc971.control_loops.python.angular_system.PlotKick', 'angular_system.PlotKick', (['kWristBall', 'R'], {'plant_params': 'kWristBall'}), '(kWristBall, R, plant_params=kWristBall)\n', (1170, 1210), False, 'from frc971.control_loops.python import angular_system\n'), ((1219, 1284), 'frc971.control_loops.python.angular_system.PlotMotion', 'angular_system.PlotMotion', (['kWristBall', 'R'], {'plant_params': 'kWristBall'}), '(kWristBall, R, plant_params=kWristBall)\n', (1244, 1284), False, 'from frc971.control_loops.python import angular_system\n'), ((1368, 1465), 'glog.fatal', 'glog.fatal', (['"""Expected .h file name and .cc file name for the wrist and integral wrist."""'], {}), "(\n 'Expected .h file name and .cc file name for the wrist and integral wrist.'\n )\n", (1378, 1465), False, 'import glog\n'), ((1571, 1678), 'frc971.control_loops.python.angular_system.WriteAngularSystem', 'angular_system.WriteAngularSystem', (['[kWrist, kWristBall, kWristPanel]', 'argv[1:3]', 'argv[3:5]', 'namespaces'], {}), '([kWrist, kWristBall, kWristPanel], argv[1\n :3], argv[3:5], namespaces)\n', (1604, 1678), False, 'from frc971.control_loops.python import angular_system\n')]
|
#! -*- coding:utf-8 -*-
# Semantic similarity task, unsupervised: the training set is pretraining data collected from the web, the dev set is sts-b
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, Callback, ListDataset
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from sklearn.metrics.pairwise import paired_cosine_distances
from scipy.stats import pearsonr, spearmanr
import copy
import random
import numpy as np
random.seed(2022)
np.random.seed(2002)
maxlen = 256
batch_size = 8
config_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/bert_config.json'
checkpoint_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/pytorch_model.bin'
dict_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Build the tokenizer
tokenizer = Tokenizer(dict_path, do_lower_case=True)
def collate_fn(batch):
def add_noise(token_ids, del_ratio=0.6):
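        # denoising: keep each token with probability 1 - del_ratio, i.e. randomly delete roughly del_ratio of the tokens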
n = len(token_ids)
keep_or_not = np.random.rand(n) > del_ratio
if sum(keep_or_not) == 0:
keep_or_not[np.random.choice(n)] = True # guarantee that at least one word remains
return list(np.array(token_ids)[keep_or_not])
texts_list = [[] for _ in range(3)]
for text in batch:
token_ids, _ = tokenizer.encode(text, maxlen=maxlen)
texts_list[0].append([tokenizer._token_start_id] + add_noise(token_ids[1:-1]) + [tokenizer._token_end_id])
texts_list[1].append(token_ids[:-1])
texts_list[2].append(token_ids[1:])
for i, texts in enumerate(texts_list):
texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device)
return texts_list[:2], texts_list[2].flatten()
# Load the dataset
def get_data(filename):
train_data = []
with open(filename, encoding='utf-8') as f:
for row, l in enumerate(f):
            if row == 0: # skip the header row
continue
text = l.strip().replace(' ', '')
if len(text) > 0:
train_data.append(text)
return train_data
train_data = get_data('F:/Projects/data/corpus/pretrain/film/film.txt')
train_dataloader = DataLoader(ListDataset(data=train_data), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
from task_sentence_embedding_sbert_sts_b__CosineSimilarityLoss import valid_dataloader
# Define the model structure on top of BERT
class Model(BaseModel):
def __init__(self, pool_method='mean', scale=20.0):
super().__init__()
self.encoder, self.config = build_transformer_model(config_path=config_path, checkpoint_path=checkpoint_path, with_pool=True, with_mlm=True, return_model_config=True, segment_vocab_size=0)
        self.decoder = self.encoder # using copy here (or not) decides whether encoder and decoder are two independent models or a single shared model
self.pool_method = pool_method
self.scale = scale
def forward(self, token_ids_list):
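        # encode the noised sentence into one pooled vector, then have the decoder predict the
        # original (un-noised) token sequence conditioned on that vector (denoising auto-encoder objective)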
token_ids1 = token_ids_list[0]
hidden_state1, pool_cls1, _ = self.encoder([token_ids1])
embeddings_a = self.get_pool_emb(hidden_state1, pool_cls1, attention_mask=token_ids1.gt(0).long())
token_ids2 = token_ids_list[1]
_, _, mlm_score2 = self.decoder([token_ids2, embeddings_a.unsqueeze(1), torch.ones_like(token_ids1)[:, 0:1]])
return mlm_score2.reshape(-1, mlm_score2.shape[-1])
def encode(self, token_ids):
self.eval()
with torch.no_grad():
hidden_state, pool_cls, _ = self.encoder([token_ids])
output = self.get_pool_emb(hidden_state, pool_cls, attention_mask=token_ids.gt(0).long())
return output
def get_pool_emb(self, hidden_state, pool_cls, attention_mask):
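        # pool token-level hidden states into a single sentence embedding: CLS vector, mask-weighted mean, or max over the sequence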
if self.pool_method == 'cls':
return pool_cls
elif self.pool_method == 'mean':
hidden_state = torch.sum(hidden_state * attention_mask[:, :, None], dim=1)
attention_mask = torch.sum(attention_mask, dim=1)[:, None]
return hidden_state / attention_mask
elif self.pool_method == 'max':
seq_state = hidden_state * attention_mask[:, :, None]
return torch.max(seq_state, dim=1)
else:
raise ValueError('pool_method illegal')
model = Model().to(device)
# Define the loss and optimizer to use; custom ones are supported here
model.compile(
loss=nn.CrossEntropyLoss(ignore_index=0),
    optimizer=optim.Adam(model.parameters(), lr=2e-5), # use a sufficiently small learning rate
)
# Define the evaluation function
def evaluate(data):
embeddings1, embeddings2, labels = [], [], []
for (batch_token1_ids, batch_token2_ids), label in data:
embeddings1.append(model.encode(batch_token1_ids))
embeddings2.append(model.encode(batch_token2_ids))
labels.append(label)
embeddings1 = torch.concat(embeddings1).cpu().numpy()
embeddings2 = torch.concat(embeddings2).cpu().numpy()
labels = torch.concat(labels).cpu().numpy()
cosine_scores = 1 - (paired_cosine_distances(embeddings1, embeddings2))
eval_pearson_cosine, _ = pearsonr(labels, cosine_scores)
return eval_pearson_cosine
class Evaluator(Callback):
"""评估与保存
"""
def __init__(self):
self.best_val_consine = 0.
def on_epoch_end(self, global_step, epoch, logs=None):
val_consine = evaluate(valid_dataloader)
if val_consine > self.best_val_consine:
self.best_val_consine = val_consine
# model.save_weights('best_model.pt')
print(f'val_consine: {val_consine:.5f}, best_val_consine: {self.best_val_consine:.5f}\n')
if __name__ == '__main__':
evaluator = Evaluator()
model.fit(train_dataloader,
epochs=20,
steps_per_epoch=100,
callbacks=[evaluator]
)
else:
model.load_weights('best_model.pt')
|
[
"torch.ones_like",
"torch.nn.CrossEntropyLoss",
"sklearn.metrics.pairwise.paired_cosine_distances",
"numpy.random.rand",
"numpy.random.choice",
"torch.max",
"random.seed",
"bert4torch.tokenizers.Tokenizer",
"bert4torch.snippets.sequence_padding",
"torch.cuda.is_available",
"torch.no_grad",
"numpy.random.seed",
"numpy.array",
"scipy.stats.pearsonr",
"torch.sum",
"bert4torch.models.build_transformer_model",
"bert4torch.snippets.ListDataset",
"torch.concat"
] |
[((503, 520), 'random.seed', 'random.seed', (['(2022)'], {}), '(2022)\n', (514, 520), False, 'import random\n'), ((521, 541), 'numpy.random.seed', 'np.random.seed', (['(2002)'], {}), '(2002)\n', (535, 541), True, 'import numpy as np\n'), ((963, 1003), 'bert4torch.tokenizers.Tokenizer', 'Tokenizer', (['dict_path'], {'do_lower_case': '(True)'}), '(dict_path, do_lower_case=True)\n', (972, 1003), False, 'from bert4torch.tokenizers import Tokenizer\n'), ((905, 930), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (928, 930), False, 'import torch\n'), ((2300, 2328), 'bert4torch.snippets.ListDataset', 'ListDataset', ([], {'data': 'train_data'}), '(data=train_data)\n', (2311, 2328), False, 'from bert4torch.snippets import sequence_padding, Callback, ListDataset\n'), ((5036, 5067), 'scipy.stats.pearsonr', 'pearsonr', (['labels', 'cosine_scores'], {}), '(labels, cosine_scores)\n', (5044, 5067), False, 'from scipy.stats import pearsonr, spearmanr\n'), ((2637, 2807), 'bert4torch.models.build_transformer_model', 'build_transformer_model', ([], {'config_path': 'config_path', 'checkpoint_path': 'checkpoint_path', 'with_pool': '(True)', 'with_mlm': '(True)', 'return_model_config': '(True)', 'segment_vocab_size': '(0)'}), '(config_path=config_path, checkpoint_path=\n checkpoint_path, with_pool=True, with_mlm=True, return_model_config=\n True, segment_vocab_size=0)\n', (2660, 2807), False, 'from bert4torch.models import build_transformer_model, BaseModel\n'), ((4372, 4407), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'ignore_index': '(0)'}), '(ignore_index=0)\n', (4391, 4407), True, 'import torch.nn as nn\n'), ((4956, 5005), 'sklearn.metrics.pairwise.paired_cosine_distances', 'paired_cosine_distances', (['embeddings1', 'embeddings2'], {}), '(embeddings1, embeddings2)\n', (4979, 5005), False, 'from sklearn.metrics.pairwise import paired_cosine_distances\n'), ((1122, 1139), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (1136, 1139), True, 'import numpy as np\n'), ((1750, 1773), 'bert4torch.snippets.sequence_padding', 'sequence_padding', (['texts'], {}), '(texts)\n', (1766, 1773), False, 'from bert4torch.snippets import sequence_padding, Callback, ListDataset\n'), ((3476, 3491), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3489, 3491), False, 'import torch\n'), ((1210, 1229), 'numpy.random.choice', 'np.random.choice', (['n'], {}), '(n)\n', (1226, 1229), True, 'import numpy as np\n'), ((1301, 1320), 'numpy.array', 'np.array', (['token_ids'], {}), '(token_ids)\n', (1309, 1320), True, 'import numpy as np\n'), ((3890, 3949), 'torch.sum', 'torch.sum', (['(hidden_state * attention_mask[:, :, None])'], {'dim': '(1)'}), '(hidden_state * attention_mask[:, :, None], dim=1)\n', (3899, 3949), False, 'import torch\n'), ((3310, 3337), 'torch.ones_like', 'torch.ones_like', (['token_ids1'], {}), '(token_ids1)\n', (3325, 3337), False, 'import torch\n'), ((3979, 4011), 'torch.sum', 'torch.sum', (['attention_mask'], {'dim': '(1)'}), '(attention_mask, dim=1)\n', (3988, 4011), False, 'import torch\n'), ((4195, 4222), 'torch.max', 'torch.max', (['seq_state'], {'dim': '(1)'}), '(seq_state, dim=1)\n', (4204, 4222), False, 'import torch\n'), ((4785, 4810), 'torch.concat', 'torch.concat', (['embeddings1'], {}), '(embeddings1)\n', (4797, 4810), False, 'import torch\n'), ((4843, 4868), 'torch.concat', 'torch.concat', (['embeddings2'], {}), '(embeddings2)\n', (4855, 4868), False, 'import torch\n'), ((4896, 4916), 'torch.concat', 'torch.concat', (['labels'], {}), '(labels)\n', 
(4908, 4916), False, 'import torch\n')]
|
# @Time : 2020/10/6
# @Author : <NAME>
# @Email : <EMAIL>
"""
recbole.quick_start
########################
"""
import logging
from logging import getLogger
from recbole.config import Config
from recbole.data import create_dataset, data_preparation
from recbole.utils import init_logger, get_model, get_trainer, init_seed
from recbole.utils.utils import set_color
def run_recbole(model=None, dataset=None, config_file_list=None, config_dict=None, saved=True):
r""" A fast running api, which includes the complete process of
training and testing a model on a specified dataset
Args:
model (str): model name
dataset (str): dataset name
config_file_list (list): config files used to modify experiment parameters
config_dict (dict): parameters dictionary used to modify experiment parameters
saved (bool): whether to save the model
"""
# configurations initialization
config = Config(model=model, dataset=dataset, config_file_list=config_file_list, config_dict=config_dict)
# init_seed(config['seed'], config['reproducibility'])
# logger initialization
init_logger(config)
logger = getLogger()
import os
log_dir = os.path.dirname(logger.handlers[0].baseFilename)
config['log_dir'] = log_dir
logger.info(config)
# dataset filtering
dataset = create_dataset(config)
logger.info(dataset)
# dataset splitting
train_data, valid_data, test_data = data_preparation(config, dataset)
# model loading and initialization
model = get_model(config['model'])(config, train_data).to(config['device'])
logger.info(model)
# trainer loading and initialization
trainer = get_trainer(config['MODEL_TYPE'], config['model'])(config, model)
# model training
best_valid_score, best_valid_result = trainer.fit(
train_data, valid_data, saved=saved, show_progress=config['show_progress']
)
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.decomposition import TruncatedSVD
embedding_matrix = model.item_embedding.weight[1:].cpu().detach().numpy()
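    # project the item embeddings onto their top two singular directions and colour each item by how often it appears in the dataset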
svd = TruncatedSVD(n_components=2)
svd.fit(embedding_matrix)
comp_tr = np.transpose(svd.components_)
proj = np.dot(embedding_matrix, comp_tr)
cnt = {}
for i in dataset['item_id']:
if i.item() in cnt:
cnt[i.item()] += 1
else:
cnt[i.item()] = 1
freq = np.zeros(embedding_matrix.shape[0])
for i in cnt:
freq[i-1] = cnt[i]
# freq /= freq.max()
sns.set(style='darkgrid')
sns.set_context("notebook", font_scale=1.8, rc={"lines.linewidth": 3, 'lines.markersize': 20})
plt.figure(figsize=(6, 4.5))
plt.scatter(proj[:, 0], proj[:, 1], s=1, c=freq, cmap='viridis_r')
plt.colorbar()
plt.xlim(-2, 2)
plt.ylim(-2, 2)
# plt.axis('square')
# plt.show()
plt.savefig(log_dir + '/' + config['model'] + '-' + config['dataset'] + '.pdf', format='pdf', transparent=False, bbox_inches='tight')
from scipy.linalg import svdvals
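    # plot the singular-value spectrum of the item-embedding matrix, normalised by its largest singular value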
svs = svdvals(embedding_matrix)
svs /= svs.max()
np.save(log_dir + '/sv.npy', svs)
sns.set(style='darkgrid')
sns.set_context("notebook", font_scale=1.8, rc={"lines.linewidth": 3, 'lines.markersize': 20})
plt.figure(figsize=(6, 4.5))
plt.plot(svs)
# plt.show()
plt.savefig(log_dir + '/svs.pdf', format='pdf', transparent=False, bbox_inches='tight')
# model evaluation
test_result = trainer.evaluate(test_data, load_best_model=saved, show_progress=config['show_progress'])
logger.info(set_color('best valid ', 'yellow') + f': {best_valid_result}')
logger.info(set_color('test result', 'yellow') + f': {test_result}')
return {
'best_valid_score': best_valid_score,
'valid_score_bigger': config['valid_metric_bigger'],
'best_valid_result': best_valid_result,
'test_result': test_result
}
def objective_function(config_dict=None, config_file_list=None, saved=True):
r""" The default objective_function used in HyperTuning
Args:
config_dict (dict): parameters dictionary used to modify experiment parameters
config_file_list (list): config files used to modify experiment parameters
saved (bool): whether to save the model
"""
config = Config(config_dict=config_dict, config_file_list=config_file_list)
init_seed(config['seed'], config['reproducibility'])
logging.basicConfig(level=logging.ERROR)
dataset = create_dataset(config)
train_data, valid_data, test_data = data_preparation(config, dataset)
model = get_model(config['model'])(config, train_data).to(config['device'])
trainer = get_trainer(config['MODEL_TYPE'], config['model'])(config, model)
best_valid_score, best_valid_result = trainer.fit(train_data, valid_data, verbose=False, saved=saved)
test_result = trainer.evaluate(test_data, load_best_model=saved)
return {
'best_valid_score': best_valid_score,
'valid_score_bigger': config['valid_metric_bigger'],
'best_valid_result': best_valid_result,
'test_result': test_result
}
|
[
"logging.getLogger",
"recbole.utils.init_seed",
"recbole.config.Config",
"numpy.save",
"seaborn.set",
"scipy.linalg.svdvals",
"matplotlib.pyplot.plot",
"recbole.utils.init_logger",
"numpy.dot",
"matplotlib.pyplot.scatter",
"recbole.utils.get_trainer",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.savefig",
"seaborn.set_context",
"sklearn.decomposition.TruncatedSVD",
"os.path.dirname",
"recbole.utils.get_model",
"matplotlib.pyplot.xlim",
"numpy.transpose",
"recbole.data.data_preparation",
"recbole.data.create_dataset",
"logging.basicConfig",
"matplotlib.pyplot.colorbar",
"numpy.zeros",
"matplotlib.pyplot.figure",
"recbole.utils.utils.set_color"
] |
[((944, 1044), 'recbole.config.Config', 'Config', ([], {'model': 'model', 'dataset': 'dataset', 'config_file_list': 'config_file_list', 'config_dict': 'config_dict'}), '(model=model, dataset=dataset, config_file_list=config_file_list,\n config_dict=config_dict)\n', (950, 1044), False, 'from recbole.config import Config\n'), ((1133, 1152), 'recbole.utils.init_logger', 'init_logger', (['config'], {}), '(config)\n', (1144, 1152), False, 'from recbole.utils import init_logger, get_model, get_trainer, init_seed\n'), ((1166, 1177), 'logging.getLogger', 'getLogger', ([], {}), '()\n', (1175, 1177), False, 'from logging import getLogger\n'), ((1207, 1255), 'os.path.dirname', 'os.path.dirname', (['logger.handlers[0].baseFilename'], {}), '(logger.handlers[0].baseFilename)\n', (1222, 1255), False, 'import os\n'), ((1356, 1378), 'recbole.data.create_dataset', 'create_dataset', (['config'], {}), '(config)\n', (1370, 1378), False, 'from recbole.data import create_dataset, data_preparation\n'), ((1469, 1502), 'recbole.data.data_preparation', 'data_preparation', (['config', 'dataset'], {}), '(config, dataset)\n', (1485, 1502), False, 'from recbole.data import create_dataset, data_preparation\n'), ((2160, 2188), 'sklearn.decomposition.TruncatedSVD', 'TruncatedSVD', ([], {'n_components': '(2)'}), '(n_components=2)\n', (2172, 2188), False, 'from sklearn.decomposition import TruncatedSVD\n'), ((2233, 2262), 'numpy.transpose', 'np.transpose', (['svd.components_'], {}), '(svd.components_)\n', (2245, 2262), True, 'import numpy as np\n'), ((2274, 2307), 'numpy.dot', 'np.dot', (['embedding_matrix', 'comp_tr'], {}), '(embedding_matrix, comp_tr)\n', (2280, 2307), True, 'import numpy as np\n'), ((2478, 2513), 'numpy.zeros', 'np.zeros', (['embedding_matrix.shape[0]'], {}), '(embedding_matrix.shape[0])\n', (2486, 2513), True, 'import numpy as np\n'), ((2594, 2619), 'seaborn.set', 'sns.set', ([], {'style': '"""darkgrid"""'}), "(style='darkgrid')\n", (2601, 2619), True, 'import seaborn as sns\n'), ((2624, 2722), 'seaborn.set_context', 'sns.set_context', (['"""notebook"""'], {'font_scale': '(1.8)', 'rc': "{'lines.linewidth': 3, 'lines.markersize': 20}"}), "('notebook', font_scale=1.8, rc={'lines.linewidth': 3,\n 'lines.markersize': 20})\n", (2639, 2722), True, 'import seaborn as sns\n'), ((2723, 2751), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4.5)'}), '(figsize=(6, 4.5))\n', (2733, 2751), True, 'import matplotlib.pyplot as plt\n'), ((2756, 2822), 'matplotlib.pyplot.scatter', 'plt.scatter', (['proj[:, 0]', 'proj[:, 1]'], {'s': '(1)', 'c': 'freq', 'cmap': '"""viridis_r"""'}), "(proj[:, 0], proj[:, 1], s=1, c=freq, cmap='viridis_r')\n", (2767, 2822), True, 'import matplotlib.pyplot as plt\n'), ((2827, 2841), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2839, 2841), True, 'import matplotlib.pyplot as plt\n'), ((2846, 2861), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-2)', '(2)'], {}), '(-2, 2)\n', (2854, 2861), True, 'import matplotlib.pyplot as plt\n'), ((2866, 2881), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-2)', '(2)'], {}), '(-2, 2)\n', (2874, 2881), True, 'import matplotlib.pyplot as plt\n'), ((2928, 3065), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(log_dir + '/' + config['model'] + '-' + config['dataset'] + '.pdf')"], {'format': '"""pdf"""', 'transparent': '(False)', 'bbox_inches': '"""tight"""'}), "(log_dir + '/' + config['model'] + '-' + config['dataset'] +\n '.pdf', format='pdf', transparent=False, bbox_inches='tight')\n", (2939, 3065), True, 'import matplotlib.pyplot as 
plt\n'), ((3114, 3139), 'scipy.linalg.svdvals', 'svdvals', (['embedding_matrix'], {}), '(embedding_matrix)\n', (3121, 3139), False, 'from scipy.linalg import svdvals\n'), ((3165, 3198), 'numpy.save', 'np.save', (["(log_dir + '/sv.npy')", 'svs'], {}), "(log_dir + '/sv.npy', svs)\n", (3172, 3198), True, 'import numpy as np\n'), ((3204, 3229), 'seaborn.set', 'sns.set', ([], {'style': '"""darkgrid"""'}), "(style='darkgrid')\n", (3211, 3229), True, 'import seaborn as sns\n'), ((3234, 3332), 'seaborn.set_context', 'sns.set_context', (['"""notebook"""'], {'font_scale': '(1.8)', 'rc': "{'lines.linewidth': 3, 'lines.markersize': 20}"}), "('notebook', font_scale=1.8, rc={'lines.linewidth': 3,\n 'lines.markersize': 20})\n", (3249, 3332), True, 'import seaborn as sns\n'), ((3333, 3361), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4.5)'}), '(figsize=(6, 4.5))\n', (3343, 3361), True, 'import matplotlib.pyplot as plt\n'), ((3366, 3379), 'matplotlib.pyplot.plot', 'plt.plot', (['svs'], {}), '(svs)\n', (3374, 3379), True, 'import matplotlib.pyplot as plt\n'), ((3401, 3492), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(log_dir + '/svs.pdf')"], {'format': '"""pdf"""', 'transparent': '(False)', 'bbox_inches': '"""tight"""'}), "(log_dir + '/svs.pdf', format='pdf', transparent=False,\n bbox_inches='tight')\n", (3412, 3492), True, 'import matplotlib.pyplot as plt\n'), ((4374, 4440), 'recbole.config.Config', 'Config', ([], {'config_dict': 'config_dict', 'config_file_list': 'config_file_list'}), '(config_dict=config_dict, config_file_list=config_file_list)\n', (4380, 4440), False, 'from recbole.config import Config\n'), ((4445, 4497), 'recbole.utils.init_seed', 'init_seed', (["config['seed']", "config['reproducibility']"], {}), "(config['seed'], config['reproducibility'])\n", (4454, 4497), False, 'from recbole.utils import init_logger, get_model, get_trainer, init_seed\n'), ((4502, 4542), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.ERROR'}), '(level=logging.ERROR)\n', (4521, 4542), False, 'import logging\n'), ((4557, 4579), 'recbole.data.create_dataset', 'create_dataset', (['config'], {}), '(config)\n', (4571, 4579), False, 'from recbole.data import create_dataset, data_preparation\n'), ((4620, 4653), 'recbole.data.data_preparation', 'data_preparation', (['config', 'dataset'], {}), '(config, dataset)\n', (4636, 4653), False, 'from recbole.data import create_dataset, data_preparation\n'), ((1702, 1752), 'recbole.utils.get_trainer', 'get_trainer', (["config['MODEL_TYPE']", "config['model']"], {}), "(config['MODEL_TYPE'], config['model'])\n", (1713, 1752), False, 'from recbole.utils import init_logger, get_model, get_trainer, init_seed\n'), ((4748, 4798), 'recbole.utils.get_trainer', 'get_trainer', (["config['MODEL_TYPE']", "config['model']"], {}), "(config['MODEL_TYPE'], config['model'])\n", (4759, 4798), False, 'from recbole.utils import init_logger, get_model, get_trainer, init_seed\n'), ((3638, 3672), 'recbole.utils.utils.set_color', 'set_color', (['"""best valid """', '"""yellow"""'], {}), "('best valid ', 'yellow')\n", (3647, 3672), False, 'from recbole.utils.utils import set_color\n'), ((3717, 3751), 'recbole.utils.utils.set_color', 'set_color', (['"""test result"""', '"""yellow"""'], {}), "('test result', 'yellow')\n", (3726, 3751), False, 'from recbole.utils.utils import set_color\n'), ((1555, 1581), 'recbole.utils.get_model', 'get_model', (["config['model']"], {}), "(config['model'])\n", (1564, 1581), False, 'from recbole.utils import init_logger, get_model, 
get_trainer, init_seed\n'), ((4666, 4692), 'recbole.utils.get_model', 'get_model', (["config['model']"], {}), "(config['model'])\n", (4675, 4692), False, 'from recbole.utils import init_logger, get_model, get_trainer, init_seed\n')]
|
import matplotlib.pyplot as plt
from matplotlib import collections
from matplotlib.lines import Line2D
def autosize(fig=None, figsize=None):
## Take current figure if no figure provided
if fig is None:
fig = plt.gcf()
if figsize is None:
## Get size of figure
figsize = fig.get_size_inches()
else:
## Set size of figure
fig.set_size_inches(figsize)
## Make font sizes proportional to figure size
fontsize_labels = figsize[0] * 5
fontsize_ticks = fontsize_labels / 2
scatter_size = (figsize[0] * 1.5) ** 2
linewidth = figsize[0]
axes = fig.get_axes()
for ax in axes:
## Set label font sizes
for item in [ax.title, ax.xaxis.label, ax.yaxis.label]:
item.set_fontsize(fontsize_labels)
## Set tick font sizes
for item in ax.get_xticklabels() + ax.get_yticklabels():
item.set_fontsize(fontsize_ticks)
## Set line widths
plot_objs = [child for child in ax.get_children() if isinstance(child, Line2D)]
for plot_obj in plot_objs:
plot_obj.set_linewidth(linewidth)
## Set scatter point sizes
plot_objs = [
child
for child in ax.get_children()
if isinstance(child, collections.PathCollection)
]
for plot_obj in plot_objs:
plot_obj.set_sizes([scatter_size])
## Set tight layout
plt.tight_layout()
if __name__ == "__main__":
import numpy as np
from plottify import autosize
import matplotlib.pyplot as plt
n = 100
x = np.random.uniform(low=-5, high=5, size=n)
y = x + np.random.normal(scale=0.5, size=n)
for size in [3, 10, 20]:
plt.figure(figsize=(size, size))
plt.scatter(x, y)
plt.xlabel("X")
plt.ylabel("Y")
plt.title("Default")
plt.show()
plt.figure(figsize=(size, size))
plt.scatter(x, y)
plt.xlabel("X")
plt.ylabel("Y")
plt.title("Autosized")
autosize()
plt.show()
|
[
"numpy.random.normal",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"numpy.random.uniform",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"plottify.autosize",
"matplotlib.pyplot.show"
] |
[((1442, 1460), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1458, 1460), True, 'import matplotlib.pyplot as plt\n'), ((1603, 1644), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-5)', 'high': '(5)', 'size': 'n'}), '(low=-5, high=5, size=n)\n', (1620, 1644), True, 'import numpy as np\n'), ((227, 236), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (234, 236), True, 'import matplotlib.pyplot as plt\n'), ((1657, 1692), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.5)', 'size': 'n'}), '(scale=0.5, size=n)\n', (1673, 1692), True, 'import numpy as np\n'), ((1732, 1764), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(size, size)'}), '(figsize=(size, size))\n', (1742, 1764), True, 'import matplotlib.pyplot as plt\n'), ((1773, 1790), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {}), '(x, y)\n', (1784, 1790), True, 'import matplotlib.pyplot as plt\n'), ((1799, 1814), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X"""'], {}), "('X')\n", (1809, 1814), True, 'import matplotlib.pyplot as plt\n'), ((1823, 1838), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y"""'], {}), "('Y')\n", (1833, 1838), True, 'import matplotlib.pyplot as plt\n'), ((1847, 1867), 'matplotlib.pyplot.title', 'plt.title', (['"""Default"""'], {}), "('Default')\n", (1856, 1867), True, 'import matplotlib.pyplot as plt\n'), ((1876, 1886), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1884, 1886), True, 'import matplotlib.pyplot as plt\n'), ((1896, 1928), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(size, size)'}), '(figsize=(size, size))\n', (1906, 1928), True, 'import matplotlib.pyplot as plt\n'), ((1937, 1954), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {}), '(x, y)\n', (1948, 1954), True, 'import matplotlib.pyplot as plt\n'), ((1963, 1978), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X"""'], {}), "('X')\n", (1973, 1978), True, 'import matplotlib.pyplot as plt\n'), ((1987, 2002), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y"""'], {}), "('Y')\n", (1997, 2002), True, 'import matplotlib.pyplot as plt\n'), ((2011, 2033), 'matplotlib.pyplot.title', 'plt.title', (['"""Autosized"""'], {}), "('Autosized')\n", (2020, 2033), True, 'import matplotlib.pyplot as plt\n'), ((2042, 2052), 'plottify.autosize', 'autosize', ([], {}), '()\n', (2050, 2052), False, 'from plottify import autosize\n'), ((2061, 2071), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2069, 2071), True, 'import matplotlib.pyplot as plt\n')]
|
import numpy as np
from time import time
import matplotlib.pyplot as plt
measure2index={"y-coordinate":0,"x-coordinate":1,"timestamp":2, "button_status":3,"tilt":4, "elevation":5,"pressure":6}
index2measure=list(measure2index.keys())
task2index={"spiral":0,"l":1,"le":2 ,"les":3,"lektorka" :4,"porovnat":5,"nepopadnout":6, "tram":7}
index2task=list(task2index.keys())
max_lengths=[16071, 4226, 6615, 6827, 7993, 5783, 4423, 7676]#max length per task
token_lengths=[16071,1242,1649,1956]#max length per token
stroke_lengths=[16071,752,1104,1476,3568,2057,2267,1231]#max length per stroke (either on paper or in air)
stroke_avg_plus_std=[2904,277,363,411,484,346,324,218]#stroke avg length + stroke avg length std
max_strokes=[25,15,15,21,29,43,35, 67]#max n° of strokes per task (in air + on paper)
plot2index={"loss":0,"accuracy":1}
index2plot= list(plot2index.keys())
on_paper_value=1.0#on_paper_stroke iff button_status==1.0
one_hot=np.identity(8)
def downsample(task,factor=2):
downsampled=[point for i,point in enumerate(task) if i%factor==0]
downsampled=np.array(downsampled)
return downsampled
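# Illustrative: downsample(task, factor=2) keeps the samples at indices 0, 2, 4, ...,
# i.e. it halves the sampling rate of the recording.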
def upsample(task):
upsampled=[]
for i,point in enumerate(task[:-1]):
upsampled.append(point)
upsampled.append(np.mean(task[i:i+2],axis=0))
upsampled=np.array(upsampled)
    #/!\ np.around button_status after resampling !!
upsampled[:,measure2index["button_status"]]=np.around(upsampled[:,measure2index["button_status"]])
return upsampled
def get_significance(p):
    """used to print the significance of a statistical test given its p-value"""
if p<0.01:
significance="***"
elif p<0.05:
significance="**"
elif p<0.1:
significance="*"
else:
significance="_"
return significance
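# Illustrative: get_significance(0.005) -> "***", get_significance(0.03) -> "**",
# get_significance(0.07) -> "*", get_significance(0.2) -> "_"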
def CorrectPool(out_size,current_pool):
"""makes convolved size divisible by pooling kernel"""
ratio=out_size/current_pool
if (ratio)%1==0:#whole number
return int(current_pool)
else:
whole_ratio=round(ratio)
if whole_ratio==0:
whole_ratio+=1
return int(out_size/whole_ratio)
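# Illustrative: CorrectPool(out_size=10, current_pool=4) -> ratio=2.5 is not whole,
# whole_ratio=round(2.5)=2, so the function returns 10//2=5 (and 10 is divisible by 5).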
def CorrectHyperparameters(input_size,seq_len,hidden_size,conv_kernel,pool_kernel ,padding=0,
stride=1,dilation=1, dropout=0.0,output_size=1,n_seq=1):
"""makes convolved size divisible by pooling kernel and computes size of sequence after convolutions"""
out_size=seq_len
print("seq_len :",out_size)
for i, (h,c,p,pad,d) in enumerate(list(zip(hidden_size,conv_kernel,pool_kernel,padding,dilation))):
print("layer",i+1)
in_size=out_size
out_size=get_out_size(out_size,pad,d,c,stride=1)
print("\tafter conv{} :{}".format(i+1,out_size))
if out_size<1:
c=(in_size-1)//d+1
out_size=get_out_size(in_size,pad,d,c,stride=1)
print("\t\tupdate c. after conv{} :{}".format(i+1,out_size))
conv_kernel[i]=c
pool_kernel[i]=CorrectPool(out_size,p)
out_size=get_out_size(out_size,padding=0,dilation=1,kernel_size=pool_kernel[i],stride=pool_kernel[i])
print("\tafter pool{} :{}".format(i+1,out_size))
out_size*=hidden_size[-1]
print("after flatting",out_size)
return input_size,out_size,hidden_size,conv_kernel,pool_kernel ,padding,stride,dilation, dropout,output_size
def wrong_len_gen(data,good_len):
"""used for splitting tasks into tokens"""
for i,s in enumerate(data):
if len(s) != good_len:
yield i
def get_out_size(in_size,padding,dilation,kernel_size,stride):
"""computes output size after a conv or a pool layer"""
return (in_size+2*padding-dilation*(kernel_size-1)-1)//stride +1
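# Illustrative: get_out_size(in_size=100, padding=0, dilation=1, kernel_size=3, stride=1)
# = (100 + 0 - 1*(3-1) - 1)//1 + 1 = 98, the usual conv/pool output-size formula.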
def min_max_scale(data,min_=0,max_=1):
    return (max_-min_)*(data-np.min(data))/(np.max(data)-np.min(data))+min_
def count_params(model):
"""returns (total n° of parameters, n° of trainable parameters)"""
total_params = sum(p.numel() for p in model.parameters())
trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
return total_params, trainable_params
def plot_task(task,measure2index=measure2index):
plt.plot(task[:,measure2index["x-coordinate"]],task[:,measure2index["y-coordinate"]])
plt.xlabel("x-coordinate")
plt.ylabel("y-coordinate")
def plot_measures(task,subplot=False,figsize=(6,4),index2measure=index2measure):
plt.figure(figsize=figsize)
for i,measure in enumerate(index2measure):
if subplot:
plt.subplot(3,3,i+1)
plt.plot(task[:,i],label=measure)
plt.xlabel("timesteps")
plt.ylabel(measure)
plt.legend()
def return_metrics(tp,tn,fp,fn):
accuracy= (tp+tn)/(tp+tn+fp+fn)
sensitivity = tp/(tp+fn) if (tp+fn) != 0 else 0.0 #without condition positives the sensitivity should be 0
specificity = tn/(tn+fp) if (tn+fp)!= 0 else 0.0 #idem
ppv = tp/(tp+fp) if tp+fp != 0 else 0.0 #without predicted positives the ppv should be 0
npv = tn/(tn+fn) if tn+fn !=0 else 0.0 #idem
return accuracy,sensitivity,specificity,ppv,npv
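# Illustrative: return_metrics(tp=5, tn=3, fp=1, fn=1)
# -> accuracy=0.8, sensitivity=5/6, specificity=0.75, ppv=5/6, npv=0.75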
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise ValueError('Boolean value expected.')
def flat_list(list):
return [item for sublist in list for item in sublist]
def timeSince(since):
now = time()
s = now - since
m = np.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def ReshapeAndVote(model_train_predictions,round_before_voting=True):
"""used to fuse the predictions of n_models models after n_CV CV"""
n_CV=len(model_train_predictions[0])
n_models=len(model_train_predictions)
if round_before_voting:
reshaped_train_predictions=[[np.around(model_train_predictions[i][j]) for i in range(n_models)] for j in range(n_CV)]
else:
reshaped_train_predictions=[[model_train_predictions[i][j] for i in range(n_models)] for j in range(n_CV)]
voted_train_predictions=[np.around(np.mean(reshaped_train_predictions[i],axis=0)) for i in range(n_CV)]
return voted_train_predictions
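# Illustrative (hypothetical values): with n_models=2 and n_CV=1,
# model_train_predictions = [[np.array([0.9, 0.1])], [np.array([0.8, 0.3])]]
# rounds to [1, 0] and [1, 0] per model, so the vote is np.around(np.mean(...)) = [1., 0.].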
def confusion_matrix(y_true,y_pred):
if len(y_true)!=len(y_pred):
raise ValueError("y_true and y_pred should have the same shape, got {} and {}, respectively".format(len(y_true),len(y_pred)))
tn, fp, fn, tp=0,0,0,0
false_i=[]
for i, (target, pred) in enumerate(list(zip(y_true,y_pred))):
if target==0:#condition negative
if pred==0:
tn+=1
elif pred==1:
fp+=1
false_i.append(i)
else:
raise ValueError("model prediction should either be 0 or 1, got {}".format(pred))
elif target==1:#condition positive
if pred==0:
fn+=1
false_i.append(i)
elif pred ==1:
tp+=1
else:
raise ValueError("model prediction should either be 0 or 1, got {}".format(pred))
else:
raise ValueError("target should either be 0 or 1, got {}".format(target))
return tn, fp, fn, tp, false_i
|
[
"numpy.identity",
"numpy.mean",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.floor",
"numpy.max",
"matplotlib.pyplot.subplot",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.around",
"numpy.min",
"time.time",
"matplotlib.pyplot.legend"
] |
[((938, 952), 'numpy.identity', 'np.identity', (['(8)'], {}), '(8)\n', (949, 952), True, 'import numpy as np\n'), ((1071, 1092), 'numpy.array', 'np.array', (['downsampled'], {}), '(downsampled)\n', (1079, 1092), True, 'import numpy as np\n'), ((1295, 1314), 'numpy.array', 'np.array', (['upsampled'], {}), '(upsampled)\n', (1303, 1314), True, 'import numpy as np\n'), ((1416, 1471), 'numpy.around', 'np.around', (["upsampled[:, measure2index['button_status']]"], {}), "(upsampled[:, measure2index['button_status']])\n", (1425, 1471), True, 'import numpy as np\n'), ((4129, 4222), 'matplotlib.pyplot.plot', 'plt.plot', (["task[:, measure2index['x-coordinate']]", "task[:, measure2index['y-coordinate']]"], {}), "(task[:, measure2index['x-coordinate']], task[:, measure2index[\n 'y-coordinate']])\n", (4137, 4222), True, 'import matplotlib.pyplot as plt\n'), ((4219, 4245), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x-coordinate"""'], {}), "('x-coordinate')\n", (4229, 4245), True, 'import matplotlib.pyplot as plt\n'), ((4250, 4276), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y-coordinate"""'], {}), "('y-coordinate')\n", (4260, 4276), True, 'import matplotlib.pyplot as plt\n'), ((4362, 4389), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (4372, 4389), True, 'import matplotlib.pyplot as plt\n'), ((4596, 4608), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4606, 4608), True, 'import matplotlib.pyplot as plt\n'), ((5381, 5387), 'time.time', 'time', ([], {}), '()\n', (5385, 5387), False, 'from time import time\n'), ((5416, 5432), 'numpy.floor', 'np.floor', (['(s / 60)'], {}), '(s / 60)\n', (5424, 5432), True, 'import numpy as np\n'), ((4498, 4533), 'matplotlib.pyplot.plot', 'plt.plot', (['task[:, i]'], {'label': 'measure'}), '(task[:, i], label=measure)\n', (4506, 4533), True, 'import matplotlib.pyplot as plt\n'), ((4540, 4563), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""timesteps"""'], {}), "('timesteps')\n", (4550, 4563), True, 'import matplotlib.pyplot as plt\n'), ((4572, 4591), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['measure'], {}), '(measure)\n', (4582, 4591), True, 'import matplotlib.pyplot as plt\n'), ((1252, 1282), 'numpy.mean', 'np.mean', (['task[i:i + 2]'], {'axis': '(0)'}), '(task[i:i + 2], axis=0)\n', (1259, 1282), True, 'import numpy as np\n'), ((4469, 4493), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', '(i + 1)'], {}), '(3, 3, i + 1)\n', (4480, 4493), True, 'import matplotlib.pyplot as plt\n'), ((6023, 6069), 'numpy.mean', 'np.mean', (['reshaped_train_predictions[i]'], {'axis': '(0)'}), '(reshaped_train_predictions[i], axis=0)\n', (6030, 6069), True, 'import numpy as np\n'), ((5769, 5809), 'numpy.around', 'np.around', (['model_train_predictions[i][j]'], {}), '(model_train_predictions[i][j])\n', (5778, 5809), True, 'import numpy as np\n'), ((3744, 3756), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (3750, 3756), True, 'import numpy as np\n'), ((3758, 3770), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (3764, 3770), True, 'import numpy as np\n'), ((3771, 3783), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (3777, 3783), True, 'import numpy as np\n')]
|
import numpy as np
import torch
from torch_utils import training_stats
from torch_utils import misc
from torch_utils.ops import conv2d_gradfix
import torch.nn.functional as F
import torchvision.transforms as T
import clip
import dnnlib
import random
#----------------------------------------------------------------------------
class Loss:
def accumulate_gradients(self, phase, real_img, real_c, gen_z, gen_c, sync, gain, real_features): # to be overridden by subclass
raise NotImplementedError()
class Model(torch.nn.Module):
def __init__(self, device):
super(Model, self).__init__()
self.linear1 = torch.nn.Linear(512, 1024)
self.linear2 = torch.nn.Linear(1024, 1024)
self.linear3 = torch.nn.Linear(1024, 1024)
self.linear4 = torch.nn.Linear(1024, 512)
self.linear5 = torch.nn.Linear(512, 1024)
self.linear6 = torch.nn.Linear(1024, 1024)
self.linear7 = torch.nn.Linear(1024, 1024)
self.linear8 = torch.nn.Linear(1024, 512)
self.device = device
def forward(self, x):
mu = F.leaky_relu(self.linear1(x))
mu = F.leaky_relu(self.linear2(mu))
mu = F.leaky_relu(self.linear3(mu))
mu = self.linear4(mu)
std = F.leaky_relu(self.linear5(x))
std = F.leaky_relu(self.linear6(std))
std = F.leaky_relu(self.linear7(std))
std = self.linear8(std)
return mu + std.exp()*(torch.randn(mu.shape).to(self.device))
def loss(self, real, fake, temp=0.1, lam=0.5):
sim = torch.cosine_similarity(real.unsqueeze(1), fake.unsqueeze(0), dim=-1)
if temp > 0.:
sim = torch.exp(sim/temp)
sim1 = torch.diagonal(F.softmax(sim, dim=1))*temp
sim2 = torch.diagonal(F.softmax(sim, dim=0))*temp
if 0.<lam < 1.:
return -(lam*torch.log(sim1) + (1.-lam)*torch.log(sim2))
elif lam == 0:
return -torch.log(sim2)
else:
return -torch.log(sim1)
else:
return -torch.diagonal(sim)
#----------------------------------------------------------------------------
class StyleGAN2Loss(Loss):
def __init__(self, device, G_mapping, G_synthesis, G_mani, D, augment_pipe=None, style_mixing_prob=0.9, r1_gamma=10, pl_batch_shrink=2, pl_decay=0.01, pl_weight=2):
super().__init__()
self.device = device
self.G_mapping = G_mapping
self.G_synthesis = G_synthesis
self.G_mani = G_mani
self.D = D
self.augment_pipe = augment_pipe
self.style_mixing_prob = style_mixing_prob
self.r1_gamma = r1_gamma
self.pl_batch_shrink = pl_batch_shrink
self.pl_decay = pl_decay
self.pl_weight = pl_weight
self.pl_mean = torch.zeros([], device=device)
clip_model, _ = clip.load("ViT-B/32", device=device) # Load CLIP model here
self.clip_model = clip_model.eval()
self.mapper = Model(device)
self.mapper.load_state_dict(torch.load('./implicit.0.001.64.True.0.0.pth', map_location='cpu')) # path to the noise mapping network
self.mapper.to(device)
def run_G(self, z, c, sync, txt_fts=None, ):
with misc.ddp_sync(self.G_mapping, sync):
ws = self.G_mapping(z, c)
if self.style_mixing_prob > 0:
new_ws = self.G_mapping(torch.randn_like(z), c, skip_w_avg_update=True)
with torch.autograd.profiler.record_function('style_mixing'):
cutoff = torch.empty([], dtype=torch.int64, device=ws.device).random_(1, ws.shape[1])
cutoff = torch.where(torch.rand([], device=ws.device) < self.style_mixing_prob, cutoff, torch.full_like(cutoff, ws.shape[1]))
ws[:, cutoff:] = new_ws[:, cutoff:]
with misc.ddp_sync(self.G_synthesis, sync):
img = self.G_synthesis(ws, fts=txt_fts)
return img, ws
def run_D(self, img, c, sync, fts=None):
if self.augment_pipe is not None:
img = self.augment_pipe(img)
with misc.ddp_sync(self.D, sync):
logits, d_fts = self.D(img, c, fts=fts)
return logits, d_fts
def normalize(self):
return T.Compose([
T.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
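    # The constants above are the standard CLIP image-normalization mean/std,
    # matching the preprocessing of the released CLIP models.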
def full_preprocess(self, img, mode='bicubic', ratio=0.5):
full_size = img.shape[-2]
if full_size < 224:
pad_1 = torch.randint(0, 224-full_size, ())
pad_2 = torch.randint(0, 224-full_size, ())
m = torch.nn.ConstantPad2d((pad_1, 224-full_size-pad_1, pad_2, 224-full_size-pad_2), 1.)
reshaped_img = m(img)
else:
cut_size = torch.randint(int(ratio*full_size), full_size, ())
left = torch.randint(0, full_size-cut_size, ())
top = torch.randint(0, full_size-cut_size, ())
cropped_img = img[:, :, top:top+cut_size, left:left+cut_size]
reshaped_img = F.interpolate(cropped_img, (224, 224), mode=mode, align_corners=False)
reshaped_img = (reshaped_img + 1.)*0.5 # range in [0., 1.] now
reshaped_img = self.normalize()(reshaped_img)
return reshaped_img
def custom_preprocess(self, img, ind, cut_num, mode='bicubic'): # more to be implemented here
full_size = img.shape[-2]
grid = np.sqrt(cut_num)
most_right = min(int((ind%grid + 1)*full_size/grid), full_size)
most_bottom = min(int((ind//grid + 1)*full_size/grid), full_size)
cut_size = torch.randint(int(full_size//(grid+1)), int(min(min(full_size//2, most_right), most_bottom)), ()) # TODO: tune this later
left = torch.randint(0, most_right-cut_size, ())
top = torch.randint(0, most_bottom-cut_size, ())
cropped_img = img[:, :, top:top+cut_size, left:left+cut_size]
reshaped_img = F.interpolate(cropped_img, (224, 224), mode=mode, align_corners=False)
reshaped_img = (reshaped_img + 1.)*0.5 # range in [0., 1.] now
reshaped_img = self.normalize()(reshaped_img)
return reshaped_img
def contra_loss(self, temp, mat1, mat2, lam):
sim = torch.cosine_similarity(mat1.unsqueeze(1), mat2.unsqueeze(0), dim=-1)
if temp > 0.:
            sim = torch.exp(sim/temp) # This implementation is incorrect; it should be sim = sim/temp.
            # However, this incorrect implementation reproduces our results with the provided hyper-parameters.
            # If you want to use the correct implementation, please revise it manually.
            # The correct implementation should lead to better results, but do not reuse our provided hyper-parameters; lam, temp, itd, itc and the other hyper-parameters need to be re-tuned carefully.
sim1 = torch.diagonal(F.softmax(sim, dim=1))*temp
sim2 = torch.diagonal(F.softmax(sim, dim=0))*temp
if 0.<lam < 1.:
return lam*torch.log(sim1) + (1.-lam)*torch.log(sim2)
elif lam == 0:
return torch.log(sim2)
else:
return torch.log(sim1)
else:
return torch.diagonal(sim)
def accumulate_gradients(self, phase, real_img, real_c, gen_z, gen_c, sync, gain, img_fts, txt_fts, lam, temp, gather, d_use_fts, itd, itc, iid, iic, mixing_prob=0.):
assert phase in ['Gmain', 'Greg', 'Gboth', 'Dmain', 'Dreg', 'Dboth']
do_Gmain = (phase in ['Gmain', 'Gboth'])
do_Dmain = (phase in ['Dmain', 'Dboth'])
do_Gpl = (phase in ['Greg', 'Gboth']) and (self.pl_weight != 0)
do_Dr1 = (phase in ['Dreg', 'Dboth']) and (self.r1_gamma != 0)
# augmentation
aug_level_1 = 0.1
aug_level_2 = 0.75
# print(torch.cosine_similarity(img_fts, txt_fts, dim=-1))
mixing_prob = mixing_prob # probability to use img_fts instead of txt_fts
random_noise = torch.randn(txt_fts.shape).to(img_fts.device)# + torch.randn((1, 512)).to(img_fts.device)
random_noise = random_noise/random_noise.norm(dim=-1, keepdim=True)
txt_fts_ = txt_fts*(1-aug_level_1) + random_noise*aug_level_1
txt_fts_ = txt_fts_/txt_fts_.norm(dim=-1, keepdim=True)
if txt_fts.shape[-1] == img_fts.shape[-1]:
# # Gaussian purterbation
img_fts_ = img_fts*(1-aug_level_2) + random_noise*aug_level_2
# learned generation
# with torch.no_grad():
# normed_real_full_img = self.full_preprocess(real_img, ratio=0.99)
# img_fts_real_full_ = self.clip_model.encode_image(normed_real_full_img).float()
# img_fts_real_full_ = img_fts_real_full_/img_fts_real_full_.norm(dim=-1, keepdim=True)
# # img_fts_real_full_ = img_fts
# img_fts_ = self.mapper(img_fts_real_full_) + img_fts_real_full_
img_fts_ = img_fts_/img_fts_.norm(dim=-1, keepdim=True)
if mixing_prob > 0.99:
txt_fts_ = img_fts_
elif mixing_prob < 0.01:
txt_fts_ = txt_fts_
else:
txt_fts_ = torch.where(torch.rand([txt_fts_.shape[0], 1], device=txt_fts_.device) < mixing_prob, img_fts_, txt_fts_)
img_img_d = iid # discriminator
img_img_c = iic # clip
img_txt_d = itd # discriminator
img_txt_c = itc # clip
temp = temp
lam = lam
def gather_tensor(input_tensor, gather_or_not):
if gather_or_not:
world_size = torch.distributed.get_world_size()
rank = torch.distributed.get_rank()
output_tensor = [torch.zeros_like(input_tensor) for _ in range(world_size)]
torch.distributed.all_gather(output_tensor, input_tensor)
output_tensor[rank] = input_tensor
# # print(torch.cat(output_tensor).size())
return torch.cat(output_tensor)
else:
return input_tensor
txt_fts_all = gather_tensor(txt_fts_, gather)
# Gmain: Maximize logits for generated images.
if do_Gmain:
with torch.autograd.profiler.record_function('Gmain_forward'):
gen_img, _gen_ws = self.run_G(gen_z, gen_c, txt_fts=txt_fts_, sync=(sync and not do_Gpl)) # May get synced by Gpl.
gen_logits, gen_d_fts = self.run_D(gen_img, gen_c, sync=False, fts=txt_fts_)
gen_d_fts_all = gather_tensor(gen_d_fts, gather)
training_stats.report('Loss/scores/fake', gen_logits)
training_stats.report('Loss/signs/fake', gen_logits.sign())
loss_Gmain = torch.nn.functional.softplus(-gen_logits) # -log(sigmoid(gen_logits))
normed_gen_full_img = self.full_preprocess(gen_img)
img_fts_gen_full = self.clip_model.encode_image(normed_gen_full_img)
img_fts_gen_full = img_fts_gen_full/img_fts_gen_full.norm(dim=-1, keepdim=True)
img_fts_gen_full_all = gather_tensor(img_fts_gen_full, gather)
img_fts_all = gather_tensor(img_fts, gather)
if img_txt_c > 0.:
clip_loss_img_txt = self.contra_loss(temp, img_fts_gen_full_all, txt_fts_all, lam)
loss_Gmain = loss_Gmain - img_txt_c*clip_loss_img_txt.mean()
if img_img_c > 0.:
clip_loss_img_img = self.contra_loss(temp, img_fts_gen_full_all, img_fts_all, lam)
loss_Gmain = loss_Gmain - img_img_c*clip_loss_img_img.mean()
if img_txt_d > 0.:
loss_Gmain = loss_Gmain - img_txt_d*self.contra_loss(temp, gen_d_fts_all, txt_fts_all, lam).mean()
if img_img_d > 0.:
with torch.no_grad():
_, g_real_d_fts = self.run_D(real_img.detach(), real_c, sync=False, fts=txt_fts_)
g_real_d_fts_all = gather_tensor(g_real_d_fts, gather)
loss_Gmain = loss_Gmain - img_img_d*self.contra_loss(temp, g_real_d_fts_all, gen_d_fts_all, lam).mean()
training_stats.report('Loss/G/loss', loss_Gmain)
with torch.autograd.profiler.record_function('Gmain_backward'):
loss_Gmain.mean().mul(gain).backward()
# Gpl: Apply path length regularization.
if do_Gpl:
with torch.autograd.profiler.record_function('Gpl_forward'):
batch_size = gen_z.shape[0] // self.pl_batch_shrink
txt_fts_0 = txt_fts_[:batch_size]
txt_fts_0.requires_grad_()
gen_img, gen_ws = self.run_G(gen_z[:batch_size], gen_c[:batch_size], txt_fts=txt_fts_0, sync=sync)
pl_noise = torch.randn_like(gen_img) / np.sqrt(gen_img.shape[2] * gen_img.shape[3])
with torch.autograd.profiler.record_function('pl_grads'), conv2d_gradfix.no_weight_gradients():
if d_use_fts:
pl_grads = torch.autograd.grad(outputs=[(gen_img * pl_noise).sum()], inputs=[gen_ws, txt_fts_0], create_graph=True, only_inputs=True)[0]
else:
pl_grads = torch.autograd.grad(outputs=[(gen_img * pl_noise).sum()], inputs=[gen_ws], create_graph=True, only_inputs=True)[0]
pl_lengths = pl_grads.square().sum(2).mean(1).sqrt()
pl_mean = self.pl_mean.lerp(pl_lengths.mean(), self.pl_decay)
self.pl_mean.copy_(pl_mean.detach())
pl_penalty = (pl_lengths - pl_mean).square()
training_stats.report('Loss/pl_penalty', pl_penalty)
loss_Gpl = pl_penalty * self.pl_weight
training_stats.report('Loss/G/reg', loss_Gpl)
with torch.autograd.profiler.record_function('Gpl_backward'):
(gen_img[:, 0, 0, 0] * 0 + loss_Gpl).mean().mul(gain).backward()
# Dmain: Minimize logits for generated images.
loss_Dgen = 0
if do_Dmain:
with torch.autograd.profiler.record_function('Dgen_forward'):
gen_img, _gen_ws = self.run_G(gen_z, gen_c, txt_fts=txt_fts_, sync=False)
gen_logits, gen_d_fts = self.run_D(gen_img, gen_c, sync=False, fts=txt_fts_) # Gets synced by loss_Dreal.
training_stats.report('Loss/scores/fake', gen_logits)
training_stats.report('Loss/signs/fake', gen_logits.sign())
loss_Dgen = torch.nn.functional.softplus(gen_logits) # -log(1 - sigmoid(gen_logits))
with torch.autograd.profiler.record_function('Dgen_backward'):
loss_Dgen.mean().mul(gain).backward()
# Dmain: Maximize logits for real images.
# Dr1: Apply R1 regularization.
if do_Dmain or do_Dr1:
name = 'Dreal_Dr1' if do_Dmain and do_Dr1 else 'Dreal' if do_Dmain else 'Dr1'
with torch.autograd.profiler.record_function(name + '_forward'):
real_img_tmp = real_img.detach().requires_grad_(do_Dr1)
real_logits, real_d_fts = self.run_D(real_img_tmp, real_c, sync=sync, fts=txt_fts_)
training_stats.report('Loss/scores/real', real_logits)
training_stats.report('Loss/signs/real', real_logits.sign())
loss_Dreal = 0
if do_Dmain:
loss_Dreal = torch.nn.functional.softplus(-real_logits) # -log(sigmoid(real_logits))
if img_txt_d > 0.:
real_d_fts_all = gather_tensor(real_d_fts, gather)
loss_Dreal = loss_Dreal - img_txt_d*self.contra_loss(temp, real_d_fts_all, txt_fts_all, lam).mean()
training_stats.report('Loss/D/loss', loss_Dgen + loss_Dreal)
loss_Dr1 = 0
if do_Dr1:
with torch.autograd.profiler.record_function('r1_grads'), conv2d_gradfix.no_weight_gradients():
r1_grads = torch.autograd.grad(outputs=[real_logits.sum()], inputs=[real_img_tmp], create_graph=True, only_inputs=True)[0]
r1_penalty = r1_grads.square().sum([1,2,3])
loss_Dr1 = r1_penalty * (self.r1_gamma / 2)
training_stats.report('Loss/r1_penalty', r1_penalty)
training_stats.report('Loss/D/reg', loss_Dr1)
with torch.autograd.profiler.record_function(name + '_backward'):
(real_logits * 0 + loss_Dreal + loss_Dr1).mean().mul(gain).backward()
# ----------------------------------------------------------------------------
|
[
"numpy.sqrt",
"torch.exp",
"torch.full_like",
"torch.autograd.profiler.record_function",
"torch.nn.functional.interpolate",
"torch.distributed.get_rank",
"torch_utils.training_stats.report",
"torch.nn.functional.softmax",
"torch.randint",
"torch.zeros_like",
"torch.randn",
"torch.distributed.all_gather",
"torch.distributed.get_world_size",
"torch.nn.functional.softplus",
"torch.randn_like",
"torch_utils.ops.conv2d_gradfix.no_weight_gradients",
"torchvision.transforms.Normalize",
"clip.load",
"torch_utils.misc.ddp_sync",
"torch.empty",
"torch.cat",
"torch.diagonal",
"torch.log",
"torch.load",
"torch.nn.Linear",
"torch.no_grad",
"torch.nn.ConstantPad2d",
"torch.zeros",
"torch.rand"
] |
[((635, 661), 'torch.nn.Linear', 'torch.nn.Linear', (['(512)', '(1024)'], {}), '(512, 1024)\n', (650, 661), False, 'import torch\n'), ((685, 712), 'torch.nn.Linear', 'torch.nn.Linear', (['(1024)', '(1024)'], {}), '(1024, 1024)\n', (700, 712), False, 'import torch\n'), ((736, 763), 'torch.nn.Linear', 'torch.nn.Linear', (['(1024)', '(1024)'], {}), '(1024, 1024)\n', (751, 763), False, 'import torch\n'), ((787, 813), 'torch.nn.Linear', 'torch.nn.Linear', (['(1024)', '(512)'], {}), '(1024, 512)\n', (802, 813), False, 'import torch\n'), ((837, 863), 'torch.nn.Linear', 'torch.nn.Linear', (['(512)', '(1024)'], {}), '(512, 1024)\n', (852, 863), False, 'import torch\n'), ((887, 914), 'torch.nn.Linear', 'torch.nn.Linear', (['(1024)', '(1024)'], {}), '(1024, 1024)\n', (902, 914), False, 'import torch\n'), ((938, 965), 'torch.nn.Linear', 'torch.nn.Linear', (['(1024)', '(1024)'], {}), '(1024, 1024)\n', (953, 965), False, 'import torch\n'), ((989, 1015), 'torch.nn.Linear', 'torch.nn.Linear', (['(1024)', '(512)'], {}), '(1024, 512)\n', (1004, 1015), False, 'import torch\n'), ((2791, 2821), 'torch.zeros', 'torch.zeros', (['[]'], {'device': 'device'}), '([], device=device)\n', (2802, 2821), False, 'import torch\n'), ((2846, 2882), 'clip.load', 'clip.load', (['"""ViT-B/32"""'], {'device': 'device'}), "('ViT-B/32', device=device)\n", (2855, 2882), False, 'import clip\n'), ((5486, 5502), 'numpy.sqrt', 'np.sqrt', (['cut_num'], {}), '(cut_num)\n', (5493, 5502), True, 'import numpy as np\n'), ((5814, 5857), 'torch.randint', 'torch.randint', (['(0)', '(most_right - cut_size)', '()'], {}), '(0, most_right - cut_size, ())\n', (5827, 5857), False, 'import torch\n'), ((5870, 5914), 'torch.randint', 'torch.randint', (['(0)', '(most_bottom - cut_size)', '()'], {}), '(0, most_bottom - cut_size, ())\n', (5883, 5914), False, 'import torch\n'), ((6006, 6076), 'torch.nn.functional.interpolate', 'F.interpolate', (['cropped_img', '(224, 224)'], {'mode': 'mode', 'align_corners': '(False)'}), '(cropped_img, (224, 224), mode=mode, align_corners=False)\n', (6019, 6076), True, 'import torch.nn.functional as F\n'), ((1651, 1672), 'torch.exp', 'torch.exp', (['(sim / temp)'], {}), '(sim / temp)\n', (1660, 1672), False, 'import torch\n'), ((3023, 3089), 'torch.load', 'torch.load', (['"""./implicit.0.001.64.True.0.0.pth"""'], {'map_location': '"""cpu"""'}), "('./implicit.0.001.64.True.0.0.pth', map_location='cpu')\n", (3033, 3089), False, 'import torch\n'), ((3238, 3273), 'torch_utils.misc.ddp_sync', 'misc.ddp_sync', (['self.G_mapping', 'sync'], {}), '(self.G_mapping, sync)\n', (3251, 3273), False, 'from torch_utils import misc\n'), ((3878, 3915), 'torch_utils.misc.ddp_sync', 'misc.ddp_sync', (['self.G_synthesis', 'sync'], {}), '(self.G_synthesis, sync)\n', (3891, 3915), False, 'from torch_utils import misc\n'), ((4134, 4161), 'torch_utils.misc.ddp_sync', 'misc.ddp_sync', (['self.D', 'sync'], {}), '(self.D, sync)\n', (4147, 4161), False, 'from torch_utils import misc\n'), ((4567, 4604), 'torch.randint', 'torch.randint', (['(0)', '(224 - full_size)', '()'], {}), '(0, 224 - full_size, ())\n', (4580, 4604), False, 'import torch\n'), ((4623, 4660), 'torch.randint', 'torch.randint', (['(0)', '(224 - full_size)', '()'], {}), '(0, 224 - full_size, ())\n', (4636, 4660), False, 'import torch\n'), ((4675, 4772), 'torch.nn.ConstantPad2d', 'torch.nn.ConstantPad2d', (['(pad_1, 224 - full_size - pad_1, pad_2, 224 - full_size - pad_2)', '(1.0)'], {}), '((pad_1, 224 - full_size - pad_1, pad_2, 224 -\n full_size - pad_2), 1.0)\n', (4697, 4772), False, 
'import torch\n'), ((4901, 4943), 'torch.randint', 'torch.randint', (['(0)', '(full_size - cut_size)', '()'], {}), '(0, full_size - cut_size, ())\n', (4914, 4943), False, 'import torch\n'), ((4960, 5002), 'torch.randint', 'torch.randint', (['(0)', '(full_size - cut_size)', '()'], {}), '(0, full_size - cut_size, ())\n', (4973, 5002), False, 'import torch\n'), ((5102, 5172), 'torch.nn.functional.interpolate', 'F.interpolate', (['cropped_img', '(224, 224)'], {'mode': 'mode', 'align_corners': '(False)'}), '(cropped_img, (224, 224), mode=mode, align_corners=False)\n', (5115, 5172), True, 'import torch.nn.functional as F\n'), ((6418, 6439), 'torch.exp', 'torch.exp', (['(sim / temp)'], {}), '(sim / temp)\n', (6427, 6439), False, 'import torch\n'), ((7269, 7288), 'torch.diagonal', 'torch.diagonal', (['sim'], {}), '(sim)\n', (7283, 7288), False, 'import torch\n'), ((2055, 2074), 'torch.diagonal', 'torch.diagonal', (['sim'], {}), '(sim)\n', (2069, 2074), False, 'import torch\n'), ((4313, 4404), 'torchvision.transforms.Normalize', 'T.Normalize', (['(0.48145466, 0.4578275, 0.40821073)', '(0.26862954, 0.26130258, 0.27577711)'], {}), '((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, \n 0.27577711))\n', (4324, 4404), True, 'import torchvision.transforms as T\n'), ((8042, 8068), 'torch.randn', 'torch.randn', (['txt_fts.shape'], {}), '(txt_fts.shape)\n', (8053, 8068), False, 'import torch\n'), ((9714, 9748), 'torch.distributed.get_world_size', 'torch.distributed.get_world_size', ([], {}), '()\n', (9746, 9748), False, 'import torch\n'), ((9772, 9800), 'torch.distributed.get_rank', 'torch.distributed.get_rank', ([], {}), '()\n', (9798, 9800), False, 'import torch\n'), ((9909, 9966), 'torch.distributed.all_gather', 'torch.distributed.all_gather', (['output_tensor', 'input_tensor'], {}), '(output_tensor, input_tensor)\n', (9937, 9966), False, 'import torch\n'), ((10098, 10122), 'torch.cat', 'torch.cat', (['output_tensor'], {}), '(output_tensor)\n', (10107, 10122), False, 'import torch\n'), ((10334, 10390), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (['"""Gmain_forward"""'], {}), "('Gmain_forward')\n", (10373, 10390), False, 'import torch\n'), ((10731, 10784), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/scores/fake"""', 'gen_logits'], {}), "('Loss/scores/fake', gen_logits)\n", (10752, 10784), False, 'from torch_utils import training_stats\n'), ((10905, 10946), 'torch.nn.functional.softplus', 'torch.nn.functional.softplus', (['(-gen_logits)'], {}), '(-gen_logits)\n', (10933, 10946), False, 'import torch\n'), ((12451, 12499), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/G/loss"""', 'loss_Gmain'], {}), "('Loss/G/loss', loss_Gmain)\n", (12472, 12499), False, 'from torch_utils import training_stats\n'), ((12517, 12574), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (['"""Gmain_backward"""'], {}), "('Gmain_backward')\n", (12556, 12574), False, 'import torch\n'), ((12717, 12771), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (['"""Gpl_forward"""'], {}), "('Gpl_forward')\n", (12756, 12771), False, 'import torch\n'), ((13910, 13962), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/pl_penalty"""', 'pl_penalty'], {}), "('Loss/pl_penalty', pl_penalty)\n", (13931, 13962), False, 'from torch_utils import training_stats\n'), ((14034, 14079), 'torch_utils.training_stats.report', 'training_stats.report', 
(['"""Loss/G/reg"""', 'loss_Gpl'], {}), "('Loss/G/reg', loss_Gpl)\n", (14055, 14079), False, 'from torch_utils import training_stats\n'), ((14097, 14152), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (['"""Gpl_backward"""'], {}), "('Gpl_backward')\n", (14136, 14152), False, 'import torch\n'), ((14351, 14406), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (['"""Dgen_forward"""'], {}), "('Dgen_forward')\n", (14390, 14406), False, 'import torch\n'), ((14636, 14689), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/scores/fake"""', 'gen_logits'], {}), "('Loss/scores/fake', gen_logits)\n", (14657, 14689), False, 'from torch_utils import training_stats\n'), ((14794, 14834), 'torch.nn.functional.softplus', 'torch.nn.functional.softplus', (['gen_logits'], {}), '(gen_logits)\n', (14822, 14834), False, 'import torch\n'), ((14884, 14940), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (['"""Dgen_backward"""'], {}), "('Dgen_backward')\n", (14923, 14940), False, 'import torch\n'), ((15225, 15283), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (["(name + '_forward')"], {}), "(name + '_forward')\n", (15264, 15283), False, 'import torch\n'), ((15473, 15527), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/scores/real"""', 'real_logits'], {}), "('Loss/scores/real', real_logits)\n", (15494, 15527), False, 'from torch_utils import training_stats\n'), ((16716, 16775), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (["(name + '_backward')"], {}), "(name + '_backward')\n", (16755, 16775), False, 'import torch\n'), ((1705, 1726), 'torch.nn.functional.softmax', 'F.softmax', (['sim'], {'dim': '(1)'}), '(sim, dim=1)\n', (1714, 1726), True, 'import torch.nn.functional as F\n'), ((1767, 1788), 'torch.nn.functional.softmax', 'F.softmax', (['sim'], {'dim': '(0)'}), '(sim, dim=0)\n', (1776, 1788), True, 'import torch.nn.functional as F\n'), ((3413, 3432), 'torch.randn_like', 'torch.randn_like', (['z'], {}), '(z)\n', (3429, 3432), False, 'import torch\n'), ((3483, 3538), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (['"""style_mixing"""'], {}), "('style_mixing')\n", (3522, 3538), False, 'import torch\n'), ((6925, 6946), 'torch.nn.functional.softmax', 'F.softmax', (['sim'], {'dim': '(1)'}), '(sim, dim=1)\n', (6934, 6946), True, 'import torch.nn.functional as F\n'), ((6987, 7008), 'torch.nn.functional.softmax', 'F.softmax', (['sim'], {'dim': '(0)'}), '(sim, dim=0)\n', (6996, 7008), True, 'import torch.nn.functional as F\n'), ((7163, 7178), 'torch.log', 'torch.log', (['sim2'], {}), '(sim2)\n', (7172, 7178), False, 'import torch\n'), ((7220, 7235), 'torch.log', 'torch.log', (['sim1'], {}), '(sim1)\n', (7229, 7235), False, 'import torch\n'), ((9834, 9864), 'torch.zeros_like', 'torch.zeros_like', (['input_tensor'], {}), '(input_tensor)\n', (9850, 9864), False, 'import torch\n'), ((13076, 13101), 'torch.randn_like', 'torch.randn_like', (['gen_img'], {}), '(gen_img)\n', (13092, 13101), False, 'import torch\n'), ((13104, 13148), 'numpy.sqrt', 'np.sqrt', (['(gen_img.shape[2] * gen_img.shape[3])'], {}), '(gen_img.shape[2] * gen_img.shape[3])\n', (13111, 13148), True, 'import numpy as np\n'), ((13170, 13221), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (['"""pl_grads"""'], {}), "('pl_grads')\n", (13209, 
13221), False, 'import torch\n'), ((13223, 13259), 'torch_utils.ops.conv2d_gradfix.no_weight_gradients', 'conv2d_gradfix.no_weight_gradients', ([], {}), '()\n', (13257, 13259), False, 'from torch_utils.ops import conv2d_gradfix\n'), ((15699, 15741), 'torch.nn.functional.softplus', 'torch.nn.functional.softplus', (['(-real_logits)'], {}), '(-real_logits)\n', (15727, 15741), False, 'import torch\n'), ((16050, 16110), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/D/loss"""', '(loss_Dgen + loss_Dreal)'], {}), "('Loss/D/loss', loss_Dgen + loss_Dreal)\n", (16071, 16110), False, 'from torch_utils import training_stats\n'), ((16579, 16631), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/r1_penalty"""', 'r1_penalty'], {}), "('Loss/r1_penalty', r1_penalty)\n", (16600, 16631), False, 'from torch_utils import training_stats\n'), ((16652, 16697), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/D/reg"""', 'loss_Dr1'], {}), "('Loss/D/reg', loss_Dr1)\n", (16673, 16697), False, 'from torch_utils import training_stats\n'), ((1432, 1453), 'torch.randn', 'torch.randn', (['mu.shape'], {}), '(mu.shape)\n', (1443, 1453), False, 'import torch\n'), ((1947, 1962), 'torch.log', 'torch.log', (['sim2'], {}), '(sim2)\n', (1956, 1962), False, 'import torch\n'), ((2005, 2020), 'torch.log', 'torch.log', (['sim1'], {}), '(sim1)\n', (2014, 2020), False, 'import torch\n'), ((3754, 3790), 'torch.full_like', 'torch.full_like', (['cutoff', 'ws.shape[1]'], {}), '(cutoff, ws.shape[1])\n', (3769, 3790), False, 'import torch\n'), ((7070, 7085), 'torch.log', 'torch.log', (['sim1'], {}), '(sim1)\n', (7079, 7085), False, 'import torch\n'), ((7097, 7112), 'torch.log', 'torch.log', (['sim2'], {}), '(sim2)\n', (7106, 7112), False, 'import torch\n'), ((12112, 12127), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12125, 12127), False, 'import torch\n'), ((16193, 16244), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (['"""r1_grads"""'], {}), "('r1_grads')\n", (16232, 16244), False, 'import torch\n'), ((16246, 16282), 'torch_utils.ops.conv2d_gradfix.no_weight_gradients', 'conv2d_gradfix.no_weight_gradients', ([], {}), '()\n', (16280, 16282), False, 'from torch_utils.ops import conv2d_gradfix\n'), ((1852, 1867), 'torch.log', 'torch.log', (['sim1'], {}), '(sim1)\n', (1861, 1867), False, 'import torch\n'), ((1879, 1894), 'torch.log', 'torch.log', (['sim2'], {}), '(sim2)\n', (1888, 1894), False, 'import torch\n'), ((3569, 3621), 'torch.empty', 'torch.empty', (['[]'], {'dtype': 'torch.int64', 'device': 'ws.device'}), '([], dtype=torch.int64, device=ws.device)\n', (3580, 3621), False, 'import torch\n'), ((3687, 3719), 'torch.rand', 'torch.rand', (['[]'], {'device': 'ws.device'}), '([], device=ws.device)\n', (3697, 3719), False, 'import torch\n'), ((9300, 9358), 'torch.rand', 'torch.rand', (['[txt_fts_.shape[0], 1]'], {'device': 'txt_fts_.device'}), '([txt_fts_.shape[0], 1], device=txt_fts_.device)\n', (9310, 9358), False, 'import torch\n')]
|
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def do_ml(merged_df, test_size, ml_model, **kwargs):
train_data = merged_df.drop(
columns=[
"lagged_poc",
"price_date",
"label_id",
# "Low",
# "High",
# "Open",
# "Close",
# "Adj Close",
# "positive_poc",
"negative_poc",
]
)
target = merged_df[["lagged_poc"]]
X_train, X_test, y_train, y_test = train_test_split(
np.array(train_data), np.array(target), test_size=test_size, random_state=1
)
model = ml_model(**kwargs)
# Fit on training data
model.fit(X_train, np.ravel(y_train))
# Actual class predictions
predictions = model.predict(X_test)
confusion_matrix = metrics.confusion_matrix(y_test, predictions)
accuracy_score = metrics.accuracy_score(y_test, predictions)
# feature importance
plot_feature_importance(model, train_data)
return confusion_matrix, accuracy_score
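# Illustrative usage (hypothetical estimator and DataFrame assumed):
#   from sklearn.ensemble import RandomForestClassifier
#   cm, acc = do_ml(merged_df, test_size=0.2, ml_model=RandomForestClassifier, n_estimators=100)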
def plot_feature_importance(model, train_data):
featureImportances = model.feature_importances_
fiDF = pd.DataFrame()
fiDF["fi"] = featureImportances
fiDF["f"] = train_data.columns
fiDF = fiDF.sort_values("fi", ascending=False)
fiDF.head()
nf = 50
plt.rcParams.update({"font.size": 8})
plt.figure(figsize=(8, 4))
plt.plot(fiDF.f.iloc[0:nf], fiDF.fi.iloc[0:nf])
plt.xticks(rotation=90)
plt.show()
|
[
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.figure",
"numpy.array",
"pandas.DataFrame",
"numpy.ravel",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.confusion_matrix"
] |
[((992, 1037), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (1016, 1037), False, 'from sklearn import metrics\n'), ((1059, 1102), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (1081, 1102), False, 'from sklearn import metrics\n'), ((1334, 1348), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1346, 1348), True, 'import pandas as pd\n'), ((1503, 1540), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 8}"], {}), "({'font.size': 8})\n", (1522, 1540), True, 'import matplotlib.pyplot as plt\n'), ((1545, 1571), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 4)'}), '(figsize=(8, 4))\n', (1555, 1571), True, 'import matplotlib.pyplot as plt\n'), ((1576, 1623), 'matplotlib.pyplot.plot', 'plt.plot', (['fiDF.f.iloc[0:nf]', 'fiDF.fi.iloc[0:nf]'], {}), '(fiDF.f.iloc[0:nf], fiDF.fi.iloc[0:nf])\n', (1584, 1623), True, 'import matplotlib.pyplot as plt\n'), ((1628, 1651), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (1638, 1651), True, 'import matplotlib.pyplot as plt\n'), ((1656, 1666), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1664, 1666), True, 'import matplotlib.pyplot as plt\n'), ((712, 732), 'numpy.array', 'np.array', (['train_data'], {}), '(train_data)\n', (720, 732), True, 'import numpy as np\n'), ((734, 750), 'numpy.array', 'np.array', (['target'], {}), '(target)\n', (742, 750), True, 'import numpy as np\n'), ((877, 894), 'numpy.ravel', 'np.ravel', (['y_train'], {}), '(y_train)\n', (885, 894), True, 'import numpy as np\n')]
|
import numpy as np
import tensorflow as tf
from keras import backend as K
from tqdm import tqdm
def write_log(callback, names, logs, batch_no):
for name, value in zip(names, logs):
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value
summary_value.tag = name
callback.writer.add_summary(summary, batch_no)
callback.writer.flush()
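# Note: tf.Summary is the TensorFlow 1.x summary protobuf; under TensorFlow 2 this pattern
# is typically replaced by tf.summary.scalar written through tf.summary.create_file_writer.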
def fit_one_epoch(model_rpn, model_all, loss_history, callback, epoch, epoch_step, epoch_step_val, gen, gen_val, Epoch, anchors, bbox_util, roi_helper):
total_loss = 0
rpn_loc_loss = 0
rpn_cls_loss = 0
roi_loc_loss = 0
roi_cls_loss = 0
val_loss = 0
with tqdm(total=epoch_step,desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:
for iteration, batch in enumerate(gen):
if iteration >= epoch_step:
break
X, Y, boxes = batch[0], batch[1], batch[2]
P_rpn = model_rpn.predict_on_batch(X)
results = bbox_util.detection_out_rpn(P_rpn, anchors)
roi_inputs = []
out_classes = []
out_regrs = []
for i in range(len(X)):
R = results[i]
X2, Y1, Y2 = roi_helper.calc_iou(R, boxes[i])
roi_inputs.append(X2)
out_classes.append(Y1)
out_regrs.append(Y2)
loss_class = model_all.train_on_batch([X, np.array(roi_inputs)], [Y[0], Y[1], np.array(out_classes), np.array(out_regrs)])
write_log(callback, ['total_loss','rpn_cls_loss', 'rpn_reg_loss', 'detection_cls_loss', 'detection_reg_loss'], loss_class, iteration)
rpn_cls_loss += loss_class[1]
rpn_loc_loss += loss_class[2]
roi_cls_loss += loss_class[3]
roi_loc_loss += loss_class[4]
total_loss = rpn_loc_loss + rpn_cls_loss + roi_loc_loss + roi_cls_loss
pbar.set_postfix(**{'total' : total_loss / (iteration + 1),
'rpn_cls' : rpn_cls_loss / (iteration + 1),
'rpn_loc' : rpn_loc_loss / (iteration + 1),
'roi_cls' : roi_cls_loss / (iteration + 1),
'roi_loc' : roi_loc_loss / (iteration + 1),
'lr' : K.get_value(model_rpn.optimizer.lr)})
pbar.update(1)
print('Start Validation')
with tqdm(total=epoch_step_val, desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:
for iteration, batch in enumerate(gen_val):
if iteration >= epoch_step_val:
break
X, Y, boxes = batch[0], batch[1], batch[2]
P_rpn = model_rpn.predict_on_batch(X)
results = bbox_util.detection_out_rpn(P_rpn, anchors)
roi_inputs = []
out_classes = []
out_regrs = []
for i in range(len(X)):
R = results[i]
X2, Y1, Y2 = roi_helper.calc_iou(R, boxes[i])
roi_inputs.append(X2)
out_classes.append(Y1)
out_regrs.append(Y2)
loss_class = model_all.test_on_batch([X, np.array(roi_inputs)], [Y[0], Y[1], np.array(out_classes), np.array(out_regrs)])
val_loss += loss_class[0]
pbar.set_postfix(**{'total' : val_loss / (iteration + 1)})
pbar.update(1)
logs = {'loss': total_loss / epoch_step, 'val_loss': val_loss / epoch_step_val}
loss_history.on_epoch_end([], logs)
print('Epoch:'+ str(epoch+1) + '/' + str(Epoch))
print('Total Loss: %.3f || Val Loss: %.3f ' % (total_loss / epoch_step, val_loss / epoch_step_val))
model_all.save_weights('logs/ep%03d-loss%.3f-val_loss%.3f.h5' % (epoch + 1, total_loss / epoch_step, val_loss / epoch_step_val))
|
[
"numpy.array",
"tqdm.tqdm",
"tensorflow.Summary",
"keras.backend.get_value"
] |
[((211, 223), 'tensorflow.Summary', 'tf.Summary', ([], {}), '()\n', (221, 223), True, 'import tensorflow as tf\n'), ((730, 822), 'tqdm.tqdm', 'tqdm', ([], {'total': 'epoch_step', 'desc': 'f"""Epoch {epoch + 1}/{Epoch}"""', 'postfix': 'dict', 'mininterval': '(0.3)'}), "(total=epoch_step, desc=f'Epoch {epoch + 1}/{Epoch}', postfix=dict,\n mininterval=0.3)\n", (734, 822), False, 'from tqdm import tqdm\n'), ((2572, 2668), 'tqdm.tqdm', 'tqdm', ([], {'total': 'epoch_step_val', 'desc': 'f"""Epoch {epoch + 1}/{Epoch}"""', 'postfix': 'dict', 'mininterval': '(0.3)'}), "(total=epoch_step_val, desc=f'Epoch {epoch + 1}/{Epoch}', postfix=dict,\n mininterval=0.3)\n", (2576, 2668), False, 'from tqdm import tqdm\n'), ((1515, 1535), 'numpy.array', 'np.array', (['roi_inputs'], {}), '(roi_inputs)\n', (1523, 1535), True, 'import numpy as np\n'), ((1551, 1572), 'numpy.array', 'np.array', (['out_classes'], {}), '(out_classes)\n', (1559, 1572), True, 'import numpy as np\n'), ((1574, 1593), 'numpy.array', 'np.array', (['out_regrs'], {}), '(out_regrs)\n', (1582, 1593), True, 'import numpy as np\n'), ((3397, 3417), 'numpy.array', 'np.array', (['roi_inputs'], {}), '(roi_inputs)\n', (3405, 3417), True, 'import numpy as np\n'), ((3433, 3454), 'numpy.array', 'np.array', (['out_classes'], {}), '(out_classes)\n', (3441, 3454), True, 'import numpy as np\n'), ((3456, 3475), 'numpy.array', 'np.array', (['out_regrs'], {}), '(out_regrs)\n', (3464, 3475), True, 'import numpy as np\n'), ((2463, 2498), 'keras.backend.get_value', 'K.get_value', (['model_rpn.optimizer.lr'], {}), '(model_rpn.optimizer.lr)\n', (2474, 2498), True, 'from keras import backend as K\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 15 08:32:03 2021
@author: User
"""
import numpy as np
import matplotlib.pyplot as plt
a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
print(a)
print(a[0])
print(a.ndim) # tells you the number of axes (or dimensions) of the array
print(a.shape) # gives you a tuple of integers indicating the number of elements along each axis
print(a.size)
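# For the 3x4 array defined above: a.ndim == 2, a.shape == (3, 4), a.size == 12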
#%%
vec_fila = a[np.newaxis, :]
print(vec_fila.shape, a.shape)
#%%
print(a.sum())
print(a.min())
print(a.max())
#%%
print(a)
print(a.max(axis=1))
print(a.max(axis=0))
#%%
print(np.random.random(3))
|
[
"numpy.random.random",
"numpy.array"
] |
[((138, 193), 'numpy.array', 'np.array', (['[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]'], {}), '([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])\n', (146, 193), True, 'import numpy as np\n'), ((577, 596), 'numpy.random.random', 'np.random.random', (['(3)'], {}), '(3)\n', (593, 596), True, 'import numpy as np\n')]
|
import numpy as np
import os, sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from tensorflow.keras.models import Model
import tensorflow as tf
from PIL import Image
from utils_rtp import ProMP
class Predictor:
def __init__(self, encoder_model_path, predictor_model_path):
self.all_phi = self.promp_train()
encoder_model = tf.keras.models.load_model(encoder_model_path)
self.encoder = Model(encoder_model.input, encoder_model.get_layer("bottleneck").output)
self.exp_model = tf.keras.models.load_model(predictor_model_path, compile=False)
def promp_train(self):
phi = ProMP().basis_func_gauss_glb()
zeros = np.zeros([phi.shape[0], 8])
h1 = np.hstack((phi, zeros, zeros, zeros, zeros, zeros, zeros))
h2 = np.hstack((zeros, phi, zeros, zeros, zeros, zeros, zeros))
h3 = np.hstack((zeros, zeros, phi, zeros, zeros, zeros, zeros))
h4 = np.hstack((zeros, zeros, zeros, phi, zeros, zeros, zeros))
h5 = np.hstack((zeros, zeros, zeros, zeros, phi, zeros, zeros))
h6 = np.hstack((zeros, zeros, zeros, zeros, zeros, phi, zeros))
h7 = np.hstack((zeros, zeros, zeros, zeros, zeros, zeros, phi))
vstack = np.vstack((h1, h2, h3, h4, h5, h6, h7))
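        # The stacked matrix is block-diagonal, with one Gaussian basis block per output
        # dimension (7, presumably one per joint); assuming phi has 8 basis-function columns
        # (matching the zero blocks), the result has shape (7*phi.shape[0], 7*8).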
vstack = tf.cast(vstack, tf.float32)
return vstack
def preprocess_image(self, image):
return np.asarray(image.resize((256, 256)))
def predict(self, image_numpy):
# image_numpy = np.expand_dims(image_numpy, axis=0)
latent_img = self.encoder.predict(image_numpy/255)
q_val_pred = self.exp_model.predict(latent_img)
traj_pred = np.matmul(self.all_phi, np.transpose(q_val_pred)).squeeze()
return traj_pred #np.reshape(traj_pred, (-1, 150))
if __name__ == "__main__":
ENCODED_MODEL_PATH = "/home/arshad/Documents/reach_to_palpate_validation_models/encoded_model_regions"
PREDICTOR_MODEL = "/home/arshad/Documents/reach_to_palpate_validation_models/model_cnn_rgb_1"
image = np.load( "/home/arshad/catkin_ws/image_xy_rtp.npy" )
predictor = Predictor(ENCODED_MODEL_PATH, PREDICTOR_MODEL)
traj = predictor.predict(image)
np.save("/home/arshad/catkin_ws/predicted_joints_values_rtp.npy", traj)
print ("\n Predicted ProMPs weights for RTP task. Joint trajectory is saved in the file. \n Press 'p' to display the trajectory...")
|
[
"tensorflow.cast",
"numpy.transpose",
"utils_rtp.ProMP",
"numpy.hstack",
"numpy.zeros",
"tensorflow.keras.models.load_model",
"numpy.vstack",
"numpy.load",
"numpy.save"
] |
[((2017, 2067), 'numpy.load', 'np.load', (['"""/home/arshad/catkin_ws/image_xy_rtp.npy"""'], {}), "('/home/arshad/catkin_ws/image_xy_rtp.npy')\n", (2024, 2067), True, 'import numpy as np\n'), ((2175, 2246), 'numpy.save', 'np.save', (['"""/home/arshad/catkin_ws/predicted_joints_values_rtp.npy"""', 'traj'], {}), "('/home/arshad/catkin_ws/predicted_joints_values_rtp.npy', traj)\n", (2182, 2246), True, 'import numpy as np\n'), ((347, 393), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['encoder_model_path'], {}), '(encoder_model_path)\n', (373, 393), True, 'import tensorflow as tf\n'), ((515, 578), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['predictor_model_path'], {'compile': '(False)'}), '(predictor_model_path, compile=False)\n', (541, 578), True, 'import tensorflow as tf\n'), ((669, 696), 'numpy.zeros', 'np.zeros', (['[phi.shape[0], 8]'], {}), '([phi.shape[0], 8])\n', (677, 696), True, 'import numpy as np\n'), ((710, 768), 'numpy.hstack', 'np.hstack', (['(phi, zeros, zeros, zeros, zeros, zeros, zeros)'], {}), '((phi, zeros, zeros, zeros, zeros, zeros, zeros))\n', (719, 768), True, 'import numpy as np\n'), ((782, 840), 'numpy.hstack', 'np.hstack', (['(zeros, phi, zeros, zeros, zeros, zeros, zeros)'], {}), '((zeros, phi, zeros, zeros, zeros, zeros, zeros))\n', (791, 840), True, 'import numpy as np\n'), ((854, 912), 'numpy.hstack', 'np.hstack', (['(zeros, zeros, phi, zeros, zeros, zeros, zeros)'], {}), '((zeros, zeros, phi, zeros, zeros, zeros, zeros))\n', (863, 912), True, 'import numpy as np\n'), ((926, 984), 'numpy.hstack', 'np.hstack', (['(zeros, zeros, zeros, phi, zeros, zeros, zeros)'], {}), '((zeros, zeros, zeros, phi, zeros, zeros, zeros))\n', (935, 984), True, 'import numpy as np\n'), ((998, 1056), 'numpy.hstack', 'np.hstack', (['(zeros, zeros, zeros, zeros, phi, zeros, zeros)'], {}), '((zeros, zeros, zeros, zeros, phi, zeros, zeros))\n', (1007, 1056), True, 'import numpy as np\n'), ((1070, 1128), 'numpy.hstack', 'np.hstack', (['(zeros, zeros, zeros, zeros, zeros, phi, zeros)'], {}), '((zeros, zeros, zeros, zeros, zeros, phi, zeros))\n', (1079, 1128), True, 'import numpy as np\n'), ((1142, 1200), 'numpy.hstack', 'np.hstack', (['(zeros, zeros, zeros, zeros, zeros, zeros, phi)'], {}), '((zeros, zeros, zeros, zeros, zeros, zeros, phi))\n', (1151, 1200), True, 'import numpy as np\n'), ((1219, 1258), 'numpy.vstack', 'np.vstack', (['(h1, h2, h3, h4, h5, h6, h7)'], {}), '((h1, h2, h3, h4, h5, h6, h7))\n', (1228, 1258), True, 'import numpy as np\n'), ((1276, 1303), 'tensorflow.cast', 'tf.cast', (['vstack', 'tf.float32'], {}), '(vstack, tf.float32)\n', (1283, 1303), True, 'import tensorflow as tf\n'), ((621, 628), 'utils_rtp.ProMP', 'ProMP', ([], {}), '()\n', (626, 628), False, 'from utils_rtp import ProMP\n'), ((1675, 1699), 'numpy.transpose', 'np.transpose', (['q_val_pred'], {}), '(q_val_pred)\n', (1687, 1699), True, 'import numpy as np\n')]
|
'''
--- I M P O R T S T A T E M E N T S ---
'''
import coloredlogs, logging
coloredlogs.install()
import numpy as np
'''
=== S T A R T O F C L A S S E V A L M E T R I C ===
[About]
Object class for calculating average values.
[Init Args]
- name: String for the variable name to calculate average value for.
[Methods]
- __init__ : Class initialiser
- update : Function to be implemented by the children sub-classes.
- reset : Function for resetting the number of instances and the sum of the metric.
- get : Calculation of the average value based on the number of instances and the provided sum.
- get_name_value : Function for returning the name(s) and the value(s).
- check_label_shapes : Function responsible for type and shape checking.
'''
class EvalMetric(object):
def __init__(self, name, **kwargs):
self.name = str(name)
self.reset()
def update(self, preds, labels, losses, lr, batch_size):
raise NotImplementedError('Must be implemented in child classes!')
def reset(self):
self.num_inst = 0
self.sum_metric = 0.0
def get(self):
# case that instances are 0 -> return NaN
if self.num_inst == 0:
return (self.name, float('nan'))
# case that instances are 1 -> return their sum
if self.num_inst == 1:
            return (self.name, self.sum_metric)
# case that instances are >1 -> return average
else:
return (self.name, self.sum_metric / self.num_inst)
def get_name_value(self):
name, value = self.get()
if not isinstance(name, list):
name = [name]
if not isinstance(value, list):
value = [value]
return list(zip(name, value))
def check_label_shapes(self, preds, labels):
# raise if the shape is inconsistent
if (type(labels) is list) and (type(preds) is list):
label_shape, pred_shape = len(labels), len(preds)
else:
label_shape, pred_shape = labels.shape[0], preds.shape[0]
if label_shape != pred_shape:
raise NotImplementedError("")
'''
=== E N D O F C L A S S E V A L M E T R I C ===
'''
'''
=== S T A R T O F C L A S S M E T R I C L I S T ===
[About]
EvalMetric class for creating a list containing Evalmetric objects.
[Init Args]
- name: String for the variable name.
[Methods]
- __init__ : Class initialiser
- update : Function to update the list of EvalMetric objects.
- reset : Function for resetting the list.
- get : Function for getting each of the EvalMetric objects in the list.
- get_name_value : Function for getting the name of the list items.
'''
class MetricList(EvalMetric):
def __init__(self, *args, name="metric_list"):
assert all([issubclass(type(x), EvalMetric) for x in args]), \
"MetricList input is illegal: {}".format(args)
self.metrics = [metric for metric in args]
super(MetricList, self).__init__(name=name)
def update(self, preds, labels, losses=None, lr=None, batch_size=None):
preds = [preds] if type(preds) is not list else preds
labels = [labels] if type(labels) is not list else labels
losses = [losses] if type(losses) is not list else losses
lr = [lr] if type(lr) is not list else lr
batch_size = [batch_size] if type(batch_size) is not list else batch_size
for metric in self.metrics:
metric.update(preds, labels, losses, lr, batch_size)
def reset(self):
if hasattr(self, 'metrics'):
for metric in self.metrics:
metric.reset()
else:
logging.warning("No metric defined.")
def get(self):
        outputs = []
        for metric in self.metrics:
            outputs.append(metric.get())
        return outputs
    def get_name_value(self):
        outputs = []
        for metric in self.metrics:
            outputs.append(metric.get_name_value())
        return outputs
'''
=== E N D O F C L A S S M E T R I C L I S T ===
'''
'''
=== S T A R T O F C L A S S A C C U R A C Y ===
[About]
EvalMetric class for creating an accuracy estimate.
[Init Args]
- name: String for the variable name. Defaults to `accuracy`.
- topk: Number of top predictions to be used for the score (top-1, top-5, etc.).
Defaults to 1.
[Methods]
- __init__ : Class initialiser
- update : Function to update scores.
'''
class Accuracy(EvalMetric):
def __init__(self, name='accuracy', topk=1):
super(Accuracy, self).__init__(name)
self.topk = topk
def update(self, preds, labels, losses, lr, batch_size):
preds = [preds] if type(preds) is not list else preds
labels = [labels] if type(labels) is not list else labels
self.check_label_shapes(preds, labels)
for pred, label in zip(preds, labels):
            assert self.topk <= pred.shape[1], \
                "topk({}) should be no larger than the pred dim({})".format(self.topk, pred.shape[1])
_, pred_topk = pred.topk(self.topk, 1, True, True)
pred_topk = pred_topk.t()
correct = pred_topk.eq(label.view(1, -1).expand_as(pred_topk))
self.sum_metric += float(correct.reshape(-1).float().sum(0, keepdim=True).numpy())
self.num_inst += label.shape[0]
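        # Illustrative: with topk=1, pred=[[0.7, 0.3]] and label=[0], the top-1 class is 0,
        # so sum_metric increases by 1 and num_inst by 1 (accuracy 1.0 for this batch).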
'''
=== E N D O F C L A S S A C C U R A C Y ===
'''
'''
=== S T A R T O F C L A S S L O S S ===
[About]
EvalMetric class for creating a loss score. The class acts as a `dummy estimate`,
as no further calculations are required for the loss. Instead it is primarily
used to easily/directly print the loss.
[Init Args]
- name: String for the variable name. Defaults to `loss`.
[Methods]
- __init__ : Class initialiser
- update : Function to update scores.
'''
class Loss(EvalMetric):
def __init__(self, name='loss'):
super(Loss, self).__init__(name)
def update(self, preds, labels, losses, lr, batch_size):
assert losses is not None, "Loss undefined."
for loss in losses:
self.sum_metric += float(loss.numpy().sum())
self.num_inst += 1
'''
=== E N D O F C L A S S L O S S ===
'''
'''
=== S T A R T O F C L A S S B A T C H S I Z E ===
[About]
EvalMetric class for the batch size used. The class acts as a `dummy estimate`
as no further calculations are required for the size of the batch. Instead it is primarily
used to easily/directly print the batch size.
[Init Args]
- name: String for the variable name. Defaults to `batch-size`.
[Methods]
- __init__ : Class initialiser
- update : Function used for updates.
'''
class BatchSize(EvalMetric):
def __init__(self, name='batch-size'):
super(BatchSize, self).__init__(name)
def update(self, preds, labels, losses, lrs, batch_sizes):
assert batch_sizes is not None, "Batch size undefined."
self.sum_metric = batch_sizes
self.num_inst = 1
'''
=== E N D O F C L A S S B A T C H S I Z E ===
'''
'''
=== S T A R T O F C L A S S L E A R N I N G R A T E ===
[About]
EvalMetric class for the learning rate used. The class acts as a `dummy estimate`
as no further calculations are required for the learning rate. Instead it is primarily
used to easily/directly print the learning rate.
[Init Args]
- name: String for the variable name. Defaults to `lr`.
[Methods]
- __init__ : Class initialiser
- update : Function used for updates.
'''
class LearningRate(EvalMetric):
def __init__(self, name='lr'):
super(LearningRate, self).__init__(name)
def update(self, preds, labels, losses, lrs, batch_sizes):
assert lrs is not None, "Learning rate undefined."
self.sum_metric = lrs[-1]
self.num_inst = 1
'''
=== E N D O F C L A S S L E A R N I N G R A T E ===
'''
if __name__ == "__main__":
import torch
# Test Accuracy
predicts = [torch.from_numpy(np.array([[0.7, 0.3], [0, 1.], [0.4, 0.6]]))]
labels = [torch.from_numpy(np.array([ 0, 1, 1 ]))]
losses = [torch.from_numpy(np.array([ 0.3, 0.4, 0.5 ]))]
logging.getLogger().setLevel(logging.DEBUG)
logging.debug("input pred: {}".format(predicts))
logging.debug("input label: {}".format(labels))
logging.debug("input loss: {}".format(labels))
acc = Accuracy()
acc.update(preds=predicts, labels=labels, losses=losses, lr=0, batch_size=1)
logging.info(acc.get())
# Test MetricList
metrics = MetricList(Loss(name="ce-loss"),
Accuracy(topk=1, name="acc-top1"),
Accuracy(topk=2, name="acc-top2"),
)
metrics.update(preds=predicts, labels=labels, losses=losses, lr=0, batch_size=1)
logging.info("------------")
logging.info(metrics.get())
acc.get_name_value()
|
[
"logging.getLogger",
"coloredlogs.install",
"logging.warning",
"numpy.array",
"logging.info"
] |
[((79, 100), 'coloredlogs.install', 'coloredlogs.install', ([], {}), '()\n', (98, 100), False, 'import coloredlogs, logging\n'), ((9112, 9140), 'logging.info', 'logging.info', (['"""------------"""'], {}), "('------------')\n", (9124, 9140), False, 'import coloredlogs, logging\n'), ((3791, 3828), 'logging.warning', 'logging.warning', (['"""No metric defined."""'], {}), "('No metric defined.')\n", (3806, 3828), False, 'import coloredlogs, logging\n'), ((8262, 8306), 'numpy.array', 'np.array', (['[[0.7, 0.3], [0, 1.0], [0.4, 0.6]]'], {}), '([[0.7, 0.3], [0, 1.0], [0.4, 0.6]])\n', (8270, 8306), True, 'import numpy as np\n'), ((8341, 8360), 'numpy.array', 'np.array', (['[0, 1, 1]'], {}), '([0, 1, 1])\n', (8349, 8360), True, 'import numpy as np\n'), ((8420, 8445), 'numpy.array', 'np.array', (['[0.3, 0.4, 0.5]'], {}), '([0.3, 0.4, 0.5])\n', (8428, 8445), True, 'import numpy as np\n'), ((8471, 8490), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (8488, 8490), False, 'import coloredlogs, logging\n')]
|
import os,sys
import pandas as pd
import numpy as np
import subprocess
from tqdm import tqdm
from ras_method import ras_method
import warnings
warnings.filterwarnings('ignore')
def est_trade_value(x,output_new,sector):
"""
    Function to estimate the trade value between two regions for a given sector
"""
if (sector is not 'other1') & (sector is not 'other2'):
sec_output = output_new.sum(axis=1).loc[output_new.sum(axis=1).index.get_level_values(1) == sector].reset_index()
else:
sec_output = output_new.sum(axis=1).loc[output_new.sum(axis=1).index.get_level_values(1) == 'IMP'].reset_index()
x['gdp'] = x.gdp*min(sec_output.loc[sec_output.region==x.reg1].values[0][2],sec_output.loc[sec_output.region==x.reg2].values[0][2])
return x
def estimate(table='INDEC',year=2015,print_output=False,print_progress=True):
"""
Function to create a province-level MRIO table, based on a national IO table. The default is the INDEC table.
"""
data_path = os.path.join('..','data')
# load sector data
sectors = list(pd.read_excel(os.path.join(data_path,'other_sources',
'industry_high_level_classification.xlsx'))['SEC_CODE'].values)
# load provincial mappers
reg_mapper = pd.read_excel(os.path.join(data_path,'INDEC','sh_cou_06_16.xls'),sheet_name='reg_mapper',header=None).iloc[:,:2]
reg_mapper = dict(zip(reg_mapper[0],reg_mapper[1]))
# load provincial data
prov_data = pd.read_excel(os.path.join(data_path,'INDEC','PIB_provincial_06_17.xls'),sheet_name='VBP',
skiprows=3,index_col=[0],header=[0],nrows=71)
prov_data = prov_data.loc[[x.isupper() for x in prov_data.index],:]
prov_data.columns = [x.replace(' ','_') for x in ['Ciudad de Buenos Aires', 'Buenos Aires', 'Catamarca', 'Cordoba',
'Corrientes', 'Chaco', 'Chubut', 'Entre Rios', 'Formosa', 'Jujuy',
'La Pampa', 'La Rioja', 'Mendoza', 'Misiones', 'Neuquen', 'Rio Negro',
'Salta', 'San Juan', 'San Luis', 'Santa Cruz', 'Santa Fe',
'Santiago del Estero', 'Tucuman', 'Tierra del Fuego',
'No distribuido', 'Total']]
region_names = list(prov_data.columns)[:-2]
prov_data.index = sectors+['TOTAL']
prov_data = prov_data.replace(0, 1)
### Create proxy data for first iteration
sectors+['other1','other2']
# proxy level 2
proxy_reg_arg = pd.DataFrame(prov_data.iloc[-1,:24]/prov_data.iloc[-1,:24].sum()).reset_index()
proxy_reg_arg['year'] = 2016
proxy_reg_arg = proxy_reg_arg[['year','index','TOTAL']]
proxy_reg_arg.columns = ['year','id','gdp']
proxy_reg_arg.to_csv(os.path.join('..','mrio_downscaling','proxy_reg_arg.csv'),index=False)
# proxy level 4
for iter_,sector in enumerate(sectors+['other1','other2']):
if (sector is not 'other1') & (sector is not 'other2'):
proxy_sector = pd.DataFrame(prov_data.iloc[iter_,:24]/prov_data.iloc[iter_,:24].sum()).reset_index()
proxy_sector['year'] = 2016
proxy_sector['sector'] = 'sec{}'.format(sector)
proxy_sector = proxy_sector[['year','sector','index',sector]]
proxy_sector.columns = ['year','sector','region','gdp']
proxy_sector.to_csv(os.path.join('..','mrio_downscaling','proxy_sec{}.csv'.format(sector)),index=False)
else:
proxy_sector = pd.DataFrame(prov_data.iloc[-1,:24]/prov_data.iloc[-1,:24].sum()).reset_index()
proxy_sector['year'] = 2016
proxy_sector['sector'] = sector+'1'
proxy_sector = proxy_sector[['year','sector','index','TOTAL']]
proxy_sector.columns = ['year','sector','region','gdp']
proxy_sector.to_csv(os.path.join('..','mrio_downscaling','proxy_{}.csv'.format(sector)),index=False)
# proxy level 18
def change_name(x):
if x in sectors:
return 'sec'+x
elif x == 'other1':
return 'other11'
else:
return 'other21'
mi_index = pd.MultiIndex.from_product([sectors+['other1','other2'], region_names, sectors+['other1','other2'], region_names],
names=['sec1', 'reg1','sec2','reg2'])
for iter_,sector in enumerate(sectors+['other1','other2']):
if (sector is not 'other1') & (sector is not 'other2'):
proxy_trade = pd.DataFrame(columns=['year','gdp'],index= mi_index).reset_index()
proxy_trade['year'] = 2016
proxy_trade['gdp'] = 0
proxy_trade = proxy_trade.query("reg1 != reg2")
proxy_trade = proxy_trade.loc[proxy_trade.sec1 == sector]
proxy_trade['sec1'] = proxy_trade.sec1.apply(change_name)
proxy_trade['sec2'] = proxy_trade.sec2.apply(change_name)
proxy_trade = proxy_trade[['year','sec1','reg1','sec2','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','sector','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade_sec{}.csv'.format(sector)),index=False)
else:
proxy_trade = pd.DataFrame(columns=['year','gdp'],index= mi_index).reset_index()
proxy_trade['year'] = 2016
proxy_trade['gdp'] = 0
proxy_trade = proxy_trade.query("reg1 != reg2")
proxy_trade = proxy_trade.loc[proxy_trade.sec1 == sector]
proxy_trade['sec1'] = proxy_trade.sec1.apply(change_name)
proxy_trade['sec2'] = proxy_trade.sec2.apply(change_name)
proxy_trade = proxy_trade[['year','sec1','reg1','sec2','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','sector','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade_{}.csv'.format(sector)),index=False)
"""
Create first version of MRIO for Argentina, without trade
"""
### save basetable for disaggregation usin the specific source:
basetable = pd.read_csv(os.path.join(data_path,'national_tables','{}_{}.csv'.format(year,table)),index_col=[0])
basetable.to_csv(os.path.join('..','mrio_downscaling','basetable.csv'),header=False,index=False)
### run libmrio
p = subprocess.Popen([r'..\mrio_downscaling\mrio_disaggregate', 'settings_notrade.yml'],
cwd=os.path.join('..','mrio_downscaling'))
p.wait()
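    # NOTE (assumption): the call above relies on the external libmrio binary
    # `mrio_disaggregate` having been built beforehand in ../mrio_downscaling,
    # together with the settings_notrade.yml configuration referenced above.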
### load data and reorder
region_names_list = [item for sublist in [[x]*(len(sectors)+2) for x in region_names]
for item in sublist]
rows = ([x for x in sectors+['VA','IMP']])*len(region_names)
cols = ([x for x in sectors+['FD','EXP']])*len(region_names)
index_mi = pd.MultiIndex.from_arrays([region_names_list, rows], names=('region', 'row'))
column_mi = pd.MultiIndex.from_arrays([region_names_list, cols], names=('region', 'col'))
MRIO = pd.read_csv(os.path.join('..','mrio_downscaling','output1.csv'),header=None,index_col=None)
MRIO.index = index_mi
MRIO.columns = column_mi
# create predefined index and col, which is easier to read
sector_only = [x for x in sectors]*len(region_names)
col_only = ['FD']*len(region_names)
region_col = [item for sublist in [[x]*len(sectors) for x in region_names] for item in sublist] + \
[item for sublist in [[x]*1 for x in region_names] for item in sublist]
column_mi_reorder = pd.MultiIndex.from_arrays(
[region_col, sector_only+col_only], names=('region', 'col'))
# sum va and imports
valueA = MRIO.xs('VA', level=1, axis=0).sum(axis=0)
valueA.drop('FD', level=1,axis=0,inplace=True)
valueA.drop('EXP', level=1,axis=0,inplace=True)
imports = MRIO.xs('IMP', level=1, axis=0).sum(axis=0)
imports.drop('FD', level=1,axis=0,inplace=True)
imports.drop('EXP', level=1,axis=0,inplace=True)
FinalD = MRIO.xs('FD', level=1, axis=1).sum(axis=1)
FinalD.drop('VA', level=1,axis=0,inplace=True)
FinalD.drop('IMP', level=1,axis=0,inplace=True)
Export = MRIO.xs('EXP', level=1, axis=1).sum(axis=1)
Export.drop('VA', level=1,axis=0,inplace=True)
Export.drop('IMP', level=1,axis=0,inplace=True)
output_new = MRIO.copy()
"""
Balance first MRIO version
"""
# convert to numpy matrix
    X0 = MRIO.values  # DataFrame.as_matrix() was removed in newer pandas versions
# get sum of rows and columns
u = X0.sum(axis=1)
v = X0.sum(axis=0)
# and only keep T
v[:(len(u)-2)] = u[:-2]
# apply RAS method to rebalance the table
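    # (RAS, a.k.a. iterative proportional fitting, alternately rescales the rows and
    # columns of X0 so that its row and column sums converge to the target margins
    # u and v, stopping once the change falls below eps.)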
X1 = ras_method(X0, u, v, eps=1e-5,print_out=print_output)
#translate to pandas dataframe
output_new = pd.DataFrame(X1)
output_new.index = index_mi
output_new.columns = column_mi
if print_progress:
print('NOTE : Balanced MRIO table without trade finished using {} data'.format(table))
"""
Create second version of MRIO for Argentina, with trade
"""
### Load OD matrix
od_matrix_total = pd.DataFrame(pd.read_excel(os.path.join(data_path,'OD_data','province_ods.xlsx'),
sheet_name='total',index_col=[0,1],usecols =[0,1,2,3,4,5,6,7])).unstack(1).fillna(0)
od_matrix_total.columns.set_levels(['A','G','C','D','B','I'],level=0,inplace=True)
od_matrix_total.index = od_matrix_total.index.map(reg_mapper)
od_matrix_total = od_matrix_total.stack(0)
od_matrix_total.columns = od_matrix_total.columns.map(reg_mapper)
od_matrix_total = od_matrix_total.swaplevel(i=-2, j=-1, axis=0)
od_matrix_total = od_matrix_total.loc[:, od_matrix_total.columns.notnull()]
### Create proxy data
# proxy level 14
mi_index = pd.MultiIndex.from_product([sectors+['other1','other2'], region_names, region_names],
names=['sec1', 'reg1','reg2'])
for iter_,sector in enumerate((sectors+['other1','other2'])):
if sector in ['A','G','C','D','B','I']:
proxy_trade = (od_matrix_total.sum(level=1).divide(od_matrix_total.sum(level=1).sum(axis=1),axis='rows')).stack(0).reset_index()
proxy_trade.columns = ['reg1','reg2','gdp']
proxy_trade['year'] = 2016
proxy_trade = proxy_trade.apply(lambda x: est_trade_value(x,output_new,sector),axis=1)
proxy_trade['sec1'] = 'sec{}'.format(sector)
proxy_trade = proxy_trade[['year','sec1','reg1','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade14_sec{}.csv'.format(sector)),index=False)
elif (sector is not 'other1') & (sector is not 'other2') & (sector not in ['A','G','C','D','B','I']): # & (sector not in ['L','M','N','O','P']):
proxy_trade = (od_matrix_total.sum(level=1).divide(od_matrix_total.sum(level=1).sum(axis=1),axis='rows')).stack(0).reset_index()
#proxy_trade[0].loc[(proxy_trade.origin_province == proxy_trade.destination_province)] = 0.9
#proxy_trade[0].loc[~(proxy_trade.origin_province == proxy_trade.destination_province)] = 0.1
proxy_trade.columns = ['reg1','reg2','gdp']
proxy_trade['year'] = 2016
proxy_trade = proxy_trade.apply(lambda x: est_trade_value(x,output_new,sector),axis=1)
proxy_trade['sec1'] = 'sec{}'.format(sector)
proxy_trade = proxy_trade[['year','sec1','reg1','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade14_sec{}.csv'.format(sector)),index=False)
else:
proxy_trade = (od_matrix_total.sum(level=1).divide(od_matrix_total.sum(level=1).sum(axis=1),axis='rows')).stack(0).reset_index()
proxy_trade.columns = ['reg1','reg2','gdp']
proxy_trade['year'] = 2016
proxy_trade = proxy_trade.apply(lambda x: est_trade_value(x,output_new,sector),axis=1)
proxy_trade['sec1'] = sector+'1'
proxy_trade = proxy_trade[['year','sec1','reg1','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade14_{}.csv'.format(sector)),index=False)
# proxy level 18
mi_index = pd.MultiIndex.from_product([sectors+['other1','other2'], region_names, sectors+['other1','other2'], region_names],
names=['sec1', 'reg1','sec2','reg2'])
for iter_,sector in enumerate((sectors+['other1','other2'])):
if (sector is not 'other1') & (sector is not 'other2'):
proxy_trade = pd.DataFrame(columns=['year','gdp'],index= mi_index).reset_index()
proxy_trade['year'] = 2016
proxy_trade['gdp'] = 0
proxy_trade = proxy_trade.query("reg1 != reg2")
proxy_trade = proxy_trade.loc[proxy_trade.sec1 == sector]
proxy_trade = proxy_trade.loc[proxy_trade.sec2.isin(['L','M','N','O','P'])]
proxy_trade['sec1'] = proxy_trade.sec1.apply(change_name)
proxy_trade['sec2'] = proxy_trade.sec2.apply(change_name)
proxy_trade = proxy_trade.query("reg1 == reg2")
proxy_trade = proxy_trade[['year','sec1','reg1','sec2','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','sector','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade_sec{}.csv'.format(sector)),index=False)
else:
proxy_trade = pd.DataFrame(columns=['year','gdp'],index= mi_index).reset_index()
proxy_trade['year'] = 2016
proxy_trade['gdp'] = 0
proxy_trade = proxy_trade.query("reg1 != reg2")
proxy_trade = proxy_trade.loc[proxy_trade.sec1 == sector]
proxy_trade = proxy_trade.loc[proxy_trade.sec2.isin(['L','M','N','O','P'])]
proxy_trade['sec1'] = proxy_trade.sec1.apply(change_name)
proxy_trade['sec2'] = proxy_trade.sec2.apply(change_name)
proxy_trade = proxy_trade.query("reg1 == reg2")
proxy_trade = proxy_trade[['year','sec1','reg1','sec2','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','sector','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade_{}.csv'.format(sector)),index=False)
### run libmrio
p = subprocess.Popen([r'..\mrio_downscaling\mrio_disaggregate', 'settings_trade.yml'],
cwd=os.path.join('..','mrio_downscaling'))
p.wait()
# load data and reorder
region_names_list = [item for sublist in [[x]*(len(sectors)+2) for x in region_names]
for item in sublist]
rows = ([x for x in sectors+['VA','IMP']])*len(region_names)
cols = ([x for x in sectors+['FD','EXP']])*len(region_names)
index_mi = pd.MultiIndex.from_arrays([region_names_list, rows], names=('region', 'row'))
column_mi = pd.MultiIndex.from_arrays([region_names_list, cols], names=('region', 'col'))
MRIO = pd.read_csv(os.path.join('..','mrio_downscaling','output2.csv'),header=None,index_col=None)
MRIO.index = index_mi
MRIO.columns = column_mi
# create predefined index and col, which is easier to read
sector_only = [x for x in sectors]*len(region_names)
col_only = ['FD','EXP']*len(region_names)
region_col = [item for sublist in [[x]*len(sectors) for x in region_names] for item in sublist] + \
[item for sublist in [[x]*2 for x in region_names] for item in sublist]
column_mi_reorder = pd.MultiIndex.from_arrays(
[region_col, sector_only+col_only], names=('region', 'col'))
# sum va and imports
valueA = pd.DataFrame(MRIO.loc[MRIO.index.get_level_values(1) == 'VA'].sum(axis='index'))
valueA.columns = pd.MultiIndex.from_product([['Total'],['ValueA']],names=['region','row'])
IMP = pd.DataFrame(MRIO.loc[MRIO.index.get_level_values(1) == 'IMP'].sum(axis='index'))
IMP.columns = pd.MultiIndex.from_product([['Total'],['IMP']],names=['region','row'])
output = pd.concat([MRIO.loc[~MRIO.index.get_level_values(1).isin(['FD','EXP'])]])
output = output.drop(['VA','IMP'], level=1)
output = pd.concat([output,valueA.T,IMP.T])
output = output.reindex(column_mi_reorder, axis='columns')
mrio_arg = ras_method(np.array(output).T,np.array(list(output.sum(axis=1))[:384]+list(output.sum(axis=0)[-48:])),
np.array(list(output.sum(axis=1))[:384]+[output.loc[('Total','ValueA'),:].sum(),output.loc[('Total','IMP'),:].sum()]),
eps=1e-3,print_out=print_output)
mrio_argentina = pd.DataFrame(mrio_arg.T,index=output.index,columns=output.columns)
mrio_argentina.to_csv(os.path.join(data_path,'MRIO','MRIO_Argentina_{}_{}.csv'.format(table,year)))
if print_progress:
print('NOTE : Balanced MRIO table with trade finished using {} data'.format(table))
def prepare_table_mria(table='INDEC',year='2015',print_output=True):
"""
Convert MRIO table to an excel file in which all elements of the table are disaggregated.
"""
data_path = os.path.join('..','data')
# load table
MRIO = pd.read_csv(os.path.join(data_path,'MRIO','MRIO_Argentina_{}_{}.csv'.format(table,year)),index_col=[0,1],header=[0,1])
Xnew = MRIO.copy()
Xnew = Xnew+1e-6
# write to excel
writer = pd.ExcelWriter(os.path.join(data_path,'MRIO', 'mrio_argentina_disaggregated_{}_{}.xlsx'.format(table,year)))
# write T
df_T = Xnew.iloc[:384, :384]
df_T.columns = df_T.columns.droplevel()
df_labels_T = pd.DataFrame(df_T.reset_index()[['region', 'row']])
df_T.reset_index(inplace=True, drop=True)
df_T.to_excel(writer, 'T', index=False, header=False)
df_labels_T.to_excel(writer, 'labels_T', index=False, header=False)
# write FD
df_FD = Xnew.iloc[:384, 384:].iloc[:, Xnew.iloc[:384, 384:].columns.get_level_values(1)=='FD']
df_labels_FD = pd.DataFrame(list(df_FD.columns))
df_FD.columns = df_FD.columns.droplevel()
df_FD.reset_index(inplace=True, drop=True)
df_FD.to_excel(writer, 'FD', index=False, header=False)
df_labels_FD.to_excel(writer, 'labels_FD', index=False, header=False)
# write ExpROW
df_ExpROW = pd.DataFrame(Xnew.iloc[:384, 384:].iloc[:, Xnew.iloc[:384, 384:].columns.get_level_values(1)=='EXP'].sum(axis=1))
df_labels_ExpROW = pd.DataFrame(['Export'])
df_ExpROW.reset_index(inplace=True, drop=True)
df_ExpROW.to_excel(writer, 'ExpROW', index=False, header=False)
df_labels_ExpROW.reset_index(inplace=True, drop=True)
df_labels_ExpROW.columns = ['Export']
df_labels_ExpROW.to_excel(writer, 'labels_ExpROW', index=False, header=False)
# write VA
df_VA = pd.DataFrame(Xnew.iloc[384:, :409].T[('Total', 'ValueA')])
df_VA.columns = ['VA']
df_VA['imports'] = pd.DataFrame(Xnew.iloc[384:, :].T[('Total', 'IMP')])
df_VA.reset_index(inplace=True, drop=True)
df_VA.to_excel(writer, 'VA', index=False, header=False)
df_labels_VA = pd.DataFrame(['Import', 'VA']).T
df_labels_VA.to_excel(writer, 'labels_VA', index=False, header=False)
# save excel
writer.save()
if print_output:
print('NOTE : MRIO table ready to use for MRIA model using {} data'.format(table))
if __name__ == "__main__":
estimate(table='GTAP',year='2014',print_output=True)
prepare_table_mria(table='GTAP',year='2014',print_output=True)
|
[
"pandas.MultiIndex.from_product",
"pandas.MultiIndex.from_arrays",
"os.path.join",
"ras_method.ras_method",
"numpy.array",
"pandas.DataFrame",
"pandas.concat",
"warnings.filterwarnings"
] |
[((144, 177), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (167, 177), False, 'import warnings\n'), ((986, 1012), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""'], {}), "('..', 'data')\n", (998, 1012), False, 'import os, sys\n'), ((3994, 4163), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (["[sectors + ['other1', 'other2'], region_names, sectors + ['other1',\n 'other2'], region_names]"], {'names': "['sec1', 'reg1', 'sec2', 'reg2']"}), "([sectors + ['other1', 'other2'], region_names, \n sectors + ['other1', 'other2'], region_names], names=['sec1', 'reg1',\n 'sec2', 'reg2'])\n", (4020, 4163), True, 'import pandas as pd\n'), ((6657, 6734), 'pandas.MultiIndex.from_arrays', 'pd.MultiIndex.from_arrays', (['[region_names_list, rows]'], {'names': "('region', 'row')"}), "([region_names_list, rows], names=('region', 'row'))\n", (6682, 6734), True, 'import pandas as pd\n'), ((6751, 6828), 'pandas.MultiIndex.from_arrays', 'pd.MultiIndex.from_arrays', (['[region_names_list, cols]'], {'names': "('region', 'col')"}), "([region_names_list, cols], names=('region', 'col'))\n", (6776, 6828), True, 'import pandas as pd\n'), ((7359, 7452), 'pandas.MultiIndex.from_arrays', 'pd.MultiIndex.from_arrays', (['[region_col, sector_only + col_only]'], {'names': "('region', 'col')"}), "([region_col, sector_only + col_only], names=(\n 'region', 'col'))\n", (7384, 7452), True, 'import pandas as pd\n'), ((8445, 8500), 'ras_method.ras_method', 'ras_method', (['X0', 'u', 'v'], {'eps': '(1e-05)', 'print_out': 'print_output'}), '(X0, u, v, eps=1e-05, print_out=print_output)\n', (8455, 8500), False, 'from ras_method import ras_method\n'), ((8552, 8568), 'pandas.DataFrame', 'pd.DataFrame', (['X1'], {}), '(X1)\n', (8564, 8568), True, 'import pandas as pd\n'), ((9555, 9679), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (["[sectors + ['other1', 'other2'], region_names, region_names]"], {'names': "['sec1', 'reg1', 'reg2']"}), "([sectors + ['other1', 'other2'], region_names,\n region_names], names=['sec1', 'reg1', 'reg2'])\n", (9581, 9679), True, 'import pandas as pd\n'), ((12232, 12401), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (["[sectors + ['other1', 'other2'], region_names, sectors + ['other1',\n 'other2'], region_names]"], {'names': "['sec1', 'reg1', 'sec2', 'reg2']"}), "([sectors + ['other1', 'other2'], region_names, \n sectors + ['other1', 'other2'], region_names], names=['sec1', 'reg1',\n 'sec2', 'reg2'])\n", (12258, 12401), True, 'import pandas as pd\n'), ((14857, 14934), 'pandas.MultiIndex.from_arrays', 'pd.MultiIndex.from_arrays', (['[region_names_list, rows]'], {'names': "('region', 'row')"}), "([region_names_list, rows], names=('region', 'row'))\n", (14882, 14934), True, 'import pandas as pd\n'), ((14951, 15028), 'pandas.MultiIndex.from_arrays', 'pd.MultiIndex.from_arrays', (['[region_names_list, cols]'], {'names': "('region', 'col')"}), "([region_names_list, cols], names=('region', 'col'))\n", (14976, 15028), True, 'import pandas as pd\n'), ((15565, 15658), 'pandas.MultiIndex.from_arrays', 'pd.MultiIndex.from_arrays', (['[region_col, sector_only + col_only]'], {'names': "('region', 'col')"}), "([region_col, sector_only + col_only], names=(\n 'region', 'col'))\n", (15590, 15658), True, 'import pandas as pd\n'), ((15802, 15878), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (["[['Total'], ['ValueA']]"], {'names': "['region', 'row']"}), "([['Total'], ['ValueA']], names=['region', 
'row'])\n", (15828, 15878), True, 'import pandas as pd\n'), ((15987, 16060), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (["[['Total'], ['IMP']]"], {'names': "['region', 'row']"}), "([['Total'], ['IMP']], names=['region', 'row'])\n", (16013, 16060), True, 'import pandas as pd\n'), ((16207, 16243), 'pandas.concat', 'pd.concat', (['[output, valueA.T, IMP.T]'], {}), '([output, valueA.T, IMP.T])\n', (16216, 16243), True, 'import pandas as pd\n'), ((16648, 16716), 'pandas.DataFrame', 'pd.DataFrame', (['mrio_arg.T'], {'index': 'output.index', 'columns': 'output.columns'}), '(mrio_arg.T, index=output.index, columns=output.columns)\n', (16660, 16716), True, 'import pandas as pd\n'), ((17132, 17158), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""'], {}), "('..', 'data')\n", (17144, 17158), False, 'import os, sys\n'), ((18402, 18426), 'pandas.DataFrame', 'pd.DataFrame', (["['Export']"], {}), "(['Export'])\n", (18414, 18426), True, 'import pandas as pd\n'), ((18756, 18812), 'pandas.DataFrame', 'pd.DataFrame', (["Xnew.iloc[384:, :409].T['Total', 'ValueA']"], {}), "(Xnew.iloc[384:, :409].T['Total', 'ValueA'])\n", (18768, 18812), True, 'import pandas as pd\n'), ((18865, 18915), 'pandas.DataFrame', 'pd.DataFrame', (["Xnew.iloc[384:, :].T['Total', 'IMP']"], {}), "(Xnew.iloc[384:, :].T['Total', 'IMP'])\n", (18877, 18915), True, 'import pandas as pd\n'), ((1468, 1528), 'os.path.join', 'os.path.join', (['data_path', '"""INDEC"""', '"""PIB_provincial_06_17.xls"""'], {}), "(data_path, 'INDEC', 'PIB_provincial_06_17.xls')\n", (1480, 1528), False, 'import os, sys\n'), ((2623, 2682), 'os.path.join', 'os.path.join', (['""".."""', '"""mrio_downscaling"""', '"""proxy_reg_arg.csv"""'], {}), "('..', 'mrio_downscaling', 'proxy_reg_arg.csv')\n", (2635, 2682), False, 'import os, sys\n'), ((6076, 6131), 'os.path.join', 'os.path.join', (['""".."""', '"""mrio_downscaling"""', '"""basetable.csv"""'], {}), "('..', 'mrio_downscaling', 'basetable.csv')\n", (6088, 6131), False, 'import os, sys\n'), ((6853, 6906), 'os.path.join', 'os.path.join', (['""".."""', '"""mrio_downscaling"""', '"""output1.csv"""'], {}), "('..', 'mrio_downscaling', 'output1.csv')\n", (6865, 6906), False, 'import os, sys\n'), ((15053, 15106), 'os.path.join', 'os.path.join', (['""".."""', '"""mrio_downscaling"""', '"""output2.csv"""'], {}), "('..', 'mrio_downscaling', 'output2.csv')\n", (15065, 15106), False, 'import os, sys\n'), ((19044, 19074), 'pandas.DataFrame', 'pd.DataFrame', (["['Import', 'VA']"], {}), "(['Import', 'VA'])\n", (19056, 19074), True, 'import pandas as pd\n'), ((6295, 6333), 'os.path.join', 'os.path.join', (['""".."""', '"""mrio_downscaling"""'], {}), "('..', 'mrio_downscaling')\n", (6307, 6333), False, 'import os, sys\n'), ((14497, 14535), 'os.path.join', 'os.path.join', (['""".."""', '"""mrio_downscaling"""'], {}), "('..', 'mrio_downscaling')\n", (14509, 14535), False, 'import os, sys\n'), ((16333, 16349), 'numpy.array', 'np.array', (['output'], {}), '(output)\n', (16341, 16349), True, 'import numpy as np\n'), ((1255, 1307), 'os.path.join', 'os.path.join', (['data_path', '"""INDEC"""', '"""sh_cou_06_16.xls"""'], {}), "(data_path, 'INDEC', 'sh_cou_06_16.xls')\n", (1267, 1307), False, 'import os, sys\n'), ((1069, 1156), 'os.path.join', 'os.path.join', (['data_path', '"""other_sources"""', '"""industry_high_level_classification.xlsx"""'], {}), "(data_path, 'other_sources',\n 'industry_high_level_classification.xlsx')\n", (1081, 1156), False, 'import os, sys\n'), ((4341, 4394), 'pandas.DataFrame', 
'pd.DataFrame', ([], {'columns': "['year', 'gdp']", 'index': 'mi_index'}), "(columns=['year', 'gdp'], index=mi_index)\n", (4353, 4394), True, 'import pandas as pd\n'), ((5084, 5137), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['year', 'gdp']", 'index': 'mi_index'}), "(columns=['year', 'gdp'], index=mi_index)\n", (5096, 5137), True, 'import pandas as pd\n'), ((12582, 12635), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['year', 'gdp']", 'index': 'mi_index'}), "(columns=['year', 'gdp'], index=mi_index)\n", (12594, 12635), True, 'import pandas as pd\n'), ((13497, 13550), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['year', 'gdp']", 'index': 'mi_index'}), "(columns=['year', 'gdp'], index=mi_index)\n", (13509, 13550), True, 'import pandas as pd\n'), ((8905, 8960), 'os.path.join', 'os.path.join', (['data_path', '"""OD_data"""', '"""province_ods.xlsx"""'], {}), "(data_path, 'OD_data', 'province_ods.xlsx')\n", (8917, 8960), False, 'import os, sys\n')]
|
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
def node_match(n1, n2):
if n1['op'] == n2['op']:
return True
else:
return False
def edge_match(e1, e2):
return True
def gen_graph(adj, ops):
G = nx.DiGraph()
for k, op in enumerate(ops):
G.add_node(k, op=op)
assert adj.shape[0] == adj.shape[1] == len(ops)
for row in range(len(ops)):
for col in range(row + 1, len(ops)):
if adj[row, col] > 0:
G.add_edge(row, col)
return G
def preprocess_adj_op(adj, op):
def counting_trailing_false(l):
count = 0
for TF in l[-1::-1]:
if TF:
break
else:
count += 1
return count
def transform_op(op):
idx2op = {0:'input', 1:'conv1x1-bn-relu', 2:'conv3x3-bn-relu', 3:'maxpool3x3', 4:'output'}
return [idx2op[idx] for idx in op.argmax(axis=1)]
adj = np.array(adj).astype(int)
op = np.array(op).astype(int)
assert op.shape[0] == adj.shape[0] == adj.shape[1]
    # count trailing all-zero columns of the adjacency matrix
    adj_zero_col = counting_trailing_false(adj.any(axis=0))
    # count trailing all-zero rows of the adjacency matrix
    adj_zero_row = counting_trailing_false(adj.any(axis=1))
    # count trailing all-zero rows of the one-hot op matrix
    op_zero_row = counting_trailing_false(op.any(axis=1))
    assert adj_zero_col == op_zero_row == adj_zero_row - 1, 'Inconsistent result {}={}={}'.format(adj_zero_col, op_zero_row, adj_zero_row - 1)
N = op.shape[0] - adj_zero_col
adj = adj[:N, :N]
op = op[:N]
return adj, transform_op(op)
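# preprocess_adj_op trims the trailing all-zero rows/columns that pad unused nodes and
# converts the one-hot op matrix back to op-name strings. The op vocabulary used above
# (input / conv1x1-bn-relu / conv3x3-bn-relu / maxpool3x3 / output) appears to follow the
# NAS-Bench-101 cell encoding, so graph_edit_distance can then compare decoded architectures.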
if __name__ == '__main__':
adj1 = np.array([[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 0]])
op1 = ['in', 'conv1x1', 'conv3x3', 'mp3x3', 'out']
adj2 = np.array([[0, 1, 1, 1, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 0]])
op2 = ['in', 'conv1x1', 'mp3x3', 'conv3x3', 'out']
adj3 = np.array([[0, 1, 1, 1, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0]])
op3 = ['in', 'conv1x1', 'conv3x3', 'mp3x3', 'out','out2']
adj4 = np.array([[0, 1, 1, 1, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]])
op4 = np.array([[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 0]])
adj4, op4 = preprocess_adj_op(adj4, op4)
G1 = gen_graph(adj1, op1)
G2 = gen_graph(adj2, op2)
G3 = gen_graph(adj3, op3)
G4 = gen_graph(adj4, op4)
plt.subplot(141)
nx.draw(G1, with_labels=True, font_weight='bold')
plt.subplot(142)
nx.draw(G2, with_labels=True, font_weight='bold')
plt.subplot(143)
nx.draw(G3, with_labels=True, font_weight='bold')
plt.subplot(144)
nx.draw(G4, with_labels=True, font_weight='bold')
    # print the computed graph edit distances (otherwise the results are silently discarded)
    print(nx.graph_edit_distance(G1, G2, node_match=node_match, edge_match=edge_match))
    print(nx.graph_edit_distance(G2, G3, node_match=node_match, edge_match=edge_match))
    plt.show()
|
[
"networkx.DiGraph",
"numpy.array",
"networkx.graph_edit_distance",
"matplotlib.pyplot.subplot",
"networkx.draw"
] |
[((253, 265), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (263, 265), True, 'import networkx as nx\n'), ((1623, 1723), 'numpy.array', 'np.array', (['[[0, 1, 1, 1, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 1], [0, 0, 0, 0, 1], [0, 0,\n 0, 0, 0]]'], {}), '([[0, 1, 1, 1, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 1], [0, 0, 0, 0, 1\n ], [0, 0, 0, 0, 0]])\n', (1631, 1723), True, 'import numpy as np\n'), ((1870, 1970), 'numpy.array', 'np.array', (['[[0, 1, 1, 1, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1], [0, 0, 0, 0, 1], [0, 0,\n 0, 0, 0]]'], {}), '([[0, 1, 1, 1, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1], [0, 0, 0, 0, 1\n ], [0, 0, 0, 0, 0]])\n', (1878, 1970), True, 'import numpy as np\n'), ((2118, 2252), 'numpy.array', 'np.array', (['[[0, 1, 1, 1, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1,\n 0], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0]]'], {}), '([[0, 1, 1, 1, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0,\n 0, 0, 1, 0], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0]])\n', (2126, 2252), True, 'import numpy as np\n'), ((2428, 2562), 'numpy.array', 'np.array', (['[[0, 1, 1, 1, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1,\n 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]'], {}), '([[0, 1, 1, 1, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0,\n 0, 0, 1, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]])\n', (2436, 2562), True, 'import numpy as np\n'), ((2674, 2791), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0,\n 0, 0, 1], [0, 0, 0, 0, 0]]'], {}), '([[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0\n ], [0, 0, 0, 0, 1], [0, 0, 0, 0, 0]])\n', (2682, 2791), True, 'import numpy as np\n'), ((3061, 3077), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(141)'], {}), '(141)\n', (3072, 3077), True, 'import matplotlib.pyplot as plt\n'), ((3082, 3131), 'networkx.draw', 'nx.draw', (['G1'], {'with_labels': '(True)', 'font_weight': '"""bold"""'}), "(G1, with_labels=True, font_weight='bold')\n", (3089, 3131), True, 'import networkx as nx\n'), ((3136, 3152), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(142)'], {}), '(142)\n', (3147, 3152), True, 'import matplotlib.pyplot as plt\n'), ((3157, 3206), 'networkx.draw', 'nx.draw', (['G2'], {'with_labels': '(True)', 'font_weight': '"""bold"""'}), "(G2, with_labels=True, font_weight='bold')\n", (3164, 3206), True, 'import networkx as nx\n'), ((3211, 3227), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(143)'], {}), '(143)\n', (3222, 3227), True, 'import matplotlib.pyplot as plt\n'), ((3232, 3281), 'networkx.draw', 'nx.draw', (['G3'], {'with_labels': '(True)', 'font_weight': '"""bold"""'}), "(G3, with_labels=True, font_weight='bold')\n", (3239, 3281), True, 'import networkx as nx\n'), ((3286, 3302), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(144)'], {}), '(144)\n', (3297, 3302), True, 'import matplotlib.pyplot as plt\n'), ((3307, 3356), 'networkx.draw', 'nx.draw', (['G4'], {'with_labels': '(True)', 'font_weight': '"""bold"""'}), "(G4, with_labels=True, font_weight='bold')\n", (3314, 3356), True, 'import networkx as nx\n'), ((3362, 3438), 'networkx.graph_edit_distance', 'nx.graph_edit_distance', (['G1', 'G2'], {'node_match': 'node_match', 'edge_match': 'edge_match'}), '(G1, G2, node_match=node_match, edge_match=edge_match)\n', (3384, 3438), True, 'import networkx as nx\n'), ((3442, 3518), 'networkx.graph_edit_distance', 'nx.graph_edit_distance', (['G2', 'G3'], {'node_match': 'node_match', 'edge_match': 'edge_match'}), '(G2, G3, node_match=node_match, 
edge_match=edge_match)\n', (3464, 3518), True, 'import networkx as nx\n'), ((959, 972), 'numpy.array', 'np.array', (['adj'], {}), '(adj)\n', (967, 972), True, 'import numpy as np\n'), ((994, 1006), 'numpy.array', 'np.array', (['op'], {}), '(op)\n', (1002, 1006), True, 'import numpy as np\n')]
|
import os
import pickle
import numpy as np
from PIL import Image
import torch
from torch.utils.data import Dataset
from torchvision import transforms
import h5py
from transforms import Scale
class CLEVR(Dataset):
def __init__(self, root, split='train', transform=None):
features_path = os.path.join(root, 'features')
with open('{}/{}.pkl'.format(features_path, split), 'rb') as f:
self.data = pickle.load(f)
# self.transform = transform
self.root = root
self.split = split
self.h = h5py.File('{}/{}_features.hdf5'.format(features_path, split), 'r')
self.img = self.h['data']
def close(self):
self.h.close()
def __getitem__(self, index):
imgfile, question, answer, family = self.data[index]
# img = Image.open(os.path.join(self.root, 'images',
# self.split, imgfile)).convert('RGB')
# img = self.transform(img)
id = int(imgfile.rsplit('_', 1)[1][:-4])
img = torch.from_numpy(self.img[id])
return img, question, len(question), answer, family, index
def __len__(self):
return len(self.data)
transform = transforms.Compose([
Scale([224, 224]),
transforms.Pad(4),
transforms.RandomCrop([224, 224]),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5])
])
def collate_data(batch):
images, lengths, answers, families, idxs = [], [], [], [], []
batch_size = len(batch)
max_len = max(map(lambda x: len(x[1]), batch))
questions = np.zeros((batch_size, max_len), dtype=np.int64)
sort_by_len = sorted(batch, key=lambda x: len(x[1]), reverse=True)
for i, b in enumerate(sort_by_len):
image, question, length, answer, family, idx = b
images.append(image)
length = len(question)
questions[i, :length] = question
lengths.append(length)
answers.append(answer)
families.append(family)
idxs.append(idx)
return torch.stack(images), torch.from_numpy(questions), \
lengths, torch.LongTensor(answers), families, idxs
|
[
"transforms.Scale",
"torch.LongTensor",
"torch.stack",
"os.path.join",
"pickle.load",
"torch.from_numpy",
"torchvision.transforms.RandomCrop",
"numpy.zeros",
"torchvision.transforms.Normalize",
"torchvision.transforms.Pad",
"torchvision.transforms.ToTensor"
] |
[((1611, 1658), 'numpy.zeros', 'np.zeros', (['(batch_size, max_len)'], {'dtype': 'np.int64'}), '((batch_size, max_len), dtype=np.int64)\n', (1619, 1658), True, 'import numpy as np\n'), ((301, 331), 'os.path.join', 'os.path.join', (['root', '"""features"""'], {}), "(root, 'features')\n", (313, 331), False, 'import os\n'), ((1028, 1058), 'torch.from_numpy', 'torch.from_numpy', (['self.img[id]'], {}), '(self.img[id])\n', (1044, 1058), False, 'import torch\n'), ((1219, 1236), 'transforms.Scale', 'Scale', (['[224, 224]'], {}), '([224, 224])\n', (1224, 1236), False, 'from transforms import Scale\n'), ((1242, 1259), 'torchvision.transforms.Pad', 'transforms.Pad', (['(4)'], {}), '(4)\n', (1256, 1259), False, 'from torchvision import transforms\n'), ((1265, 1298), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['[224, 224]'], {}), '([224, 224])\n', (1286, 1298), False, 'from torchvision import transforms\n'), ((1304, 1325), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1323, 1325), False, 'from torchvision import transforms\n'), ((1331, 1394), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.5, 0.5, 0.5]', 'std': '[0.5, 0.5, 0.5]'}), '(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n', (1351, 1394), False, 'from torchvision import transforms\n'), ((2060, 2079), 'torch.stack', 'torch.stack', (['images'], {}), '(images)\n', (2071, 2079), False, 'import torch\n'), ((2081, 2108), 'torch.from_numpy', 'torch.from_numpy', (['questions'], {}), '(questions)\n', (2097, 2108), False, 'import torch\n'), ((2129, 2154), 'torch.LongTensor', 'torch.LongTensor', (['answers'], {}), '(answers)\n', (2145, 2154), False, 'import torch\n'), ((428, 442), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (439, 442), False, 'import pickle\n')]
|
import pytest
import numpy as np
import pandas as pd
from xgboost_distribution.distributions import LogNormal
@pytest.fixture
def lognormal():
return LogNormal()
def test_target_validation(lognormal):
valid_target = np.array([0.5, 1, 4, 5, 10])
lognormal.check_target(valid_target)
@pytest.mark.parametrize(
"invalid_target",
[np.array([0, 1.2]), pd.Series([-1.1, 0.4, 2.3])],
)
def test_target_validation_raises(lognormal, invalid_target):
with pytest.raises(ValueError):
lognormal.check_target(invalid_target)
@pytest.mark.parametrize(
"y, params, natural_gradient, expected_grad",
[
(
np.array([1, 1]),
np.array([[np.log(1), 2], [1, 0]]),
True,
np.array([[0, 0.5], [1, 0]]),
),
(
np.array([1, 1]),
np.array([[np.log(1), 2], [1, 0]]),
False,
np.array([[0, 1], [1, 0]]),
),
],
)
def test_gradient_calculation(lognormal, y, params, natural_gradient, expected_grad):
grad, hess = lognormal.gradient_and_hessian(
y, params, natural_gradient=natural_gradient
)
np.testing.assert_array_equal(grad, expected_grad)
def test_loss(lognormal):
loss_name, loss_value = lognormal.loss(
# fmt: off
y=np.array([0, ]),
params=np.array([[1, 0], ]),
)
assert loss_name == "LogNormalError"
assert loss_value == np.inf
|
[
"pandas.Series",
"numpy.log",
"xgboost_distribution.distributions.LogNormal",
"numpy.array",
"pytest.raises",
"numpy.testing.assert_array_equal"
] |
[((158, 169), 'xgboost_distribution.distributions.LogNormal', 'LogNormal', ([], {}), '()\n', (167, 169), False, 'from xgboost_distribution.distributions import LogNormal\n'), ((230, 258), 'numpy.array', 'np.array', (['[0.5, 1, 4, 5, 10]'], {}), '([0.5, 1, 4, 5, 10])\n', (238, 258), True, 'import numpy as np\n'), ((1160, 1210), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['grad', 'expected_grad'], {}), '(grad, expected_grad)\n', (1189, 1210), True, 'import numpy as np\n'), ((478, 503), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (491, 503), False, 'import pytest\n'), ((355, 373), 'numpy.array', 'np.array', (['[0, 1.2]'], {}), '([0, 1.2])\n', (363, 373), True, 'import numpy as np\n'), ((375, 402), 'pandas.Series', 'pd.Series', (['[-1.1, 0.4, 2.3]'], {}), '([-1.1, 0.4, 2.3])\n', (384, 402), True, 'import pandas as pd\n'), ((658, 674), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (666, 674), True, 'import numpy as np\n'), ((754, 782), 'numpy.array', 'np.array', (['[[0, 0.5], [1, 0]]'], {}), '([[0, 0.5], [1, 0]])\n', (762, 782), True, 'import numpy as np\n'), ((817, 833), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (825, 833), True, 'import numpy as np\n'), ((914, 940), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (922, 940), True, 'import numpy as np\n'), ((1312, 1325), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (1320, 1325), True, 'import numpy as np\n'), ((1344, 1362), 'numpy.array', 'np.array', (['[[1, 0]]'], {}), '([[1, 0]])\n', (1352, 1362), True, 'import numpy as np\n'), ((699, 708), 'numpy.log', 'np.log', (['(1)'], {}), '(1)\n', (705, 708), True, 'import numpy as np\n'), ((858, 867), 'numpy.log', 'np.log', (['(1)'], {}), '(1)\n', (864, 867), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
"""
Main script for workload forecasting.
Example usage:
- Generate data (runs OLTP benchmark on the built database) and perform training, and save the trained model
./forecaster --gen_data --models=LSTM --model_save_path=model.pickle
- Use the trained models (LSTM) to generate predictions.
./forecaster --model_load_path=model.pickle --test_file=test_query.csv --test_model=LSTM
TODO:
- Better metrics for training and prediction (currently not focusing on models' accuracy yet)
- Multiple models (currently only simple-one-layer-untuned LSTM used)
- API and interaction with Pilot
"""
import argparse
import json
import pickle
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ..testing.self_driving.constants import (DEFAULT_ITER_NUM,
DEFAULT_QUERY_TRACE_FILE,
DEFAULT_TPCC_WEIGHTS,
DEFAULT_WORKLOAD_PATTERN)
from ..testing.self_driving.forecast import gen_oltp_trace
from ..testing.util.constants import LOG
from .cluster import QueryCluster
from .data_loader import DataLoader
from .models import ForecastModel, get_models
# Interval duration for aggregation in microseconds
INTERVAL_MICRO_SEC = 500000
# Number of Microseconds per second
MICRO_SEC_PER_SEC = 1000000
# Number of data points in a sequence
SEQ_LEN = 10 * MICRO_SEC_PER_SEC // INTERVAL_MICRO_SEC
# Number of data points for the horizon
HORIZON_LEN = 30 * MICRO_SEC_PER_SEC // INTERVAL_MICRO_SEC
# Number of data points for testing set
EVAL_DATA_SIZE = 2 * SEQ_LEN + HORIZON_LEN
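# With the defaults above (0.5 s aggregation intervals): SEQ_LEN = 10 s / 0.5 s = 20 points,
# HORIZON_LEN = 30 s / 0.5 s = 60 points, and EVAL_DATA_SIZE = 2 * 20 + 60 = 100 points.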
argp = argparse.ArgumentParser(description="Query Load Forecaster")
# Generation stage related options
argp.add_argument(
"--gen_data",
default=False,
action="store_true",
help="If specified, OLTP benchmark would be downloaded and built to generate the query trace data")
argp.add_argument(
"--tpcc_weight",
type=str,
default=DEFAULT_TPCC_WEIGHTS,
help="Workload weights for the TPCC")
argp.add_argument(
"--tpcc_rates",
nargs="+",
default=DEFAULT_WORKLOAD_PATTERN,
help="Rate array for the TPCC workload")
argp.add_argument(
"--pattern_iter",
type=int,
default=DEFAULT_ITER_NUM,
help="Number of iterations the DEFAULT_WORKLOAD_PATTERN should be run")
argp.add_argument("--trace_file", default=DEFAULT_QUERY_TRACE_FILE,
help="Path to the query trace file", metavar="FILE")
# Model specific
argp.add_argument("--models", nargs='+', type=str, help="Models to use")
argp.add_argument("--models_config", type=str, metavar="FILE",
help="Models and init arguments JSON config file")
argp.add_argument("--seq_len", type=int, default=SEQ_LEN,
help="Length of one sequence in number of data points")
argp.add_argument(
"--horizon_len",
type=int,
default=HORIZON_LEN,
help="Length of the horizon in number of data points, "
"aka, how many further in the a sequence is used for prediction"
)
# Training stage related options
argp.add_argument("--model_save_path", metavar="FILE",
help="Where the model trained will be stored")
argp.add_argument(
"--eval_size",
type=int,
default=EVAL_DATA_SIZE,
help="Length of the evaluation data set length in number of data points")
argp.add_argument("--lr", type=float, default=0.001, help="Learning rate")
argp.add_argument("--epochs", type=int, default=10,
help="Number of epochs for training")
# Testing stage related options
argp.add_argument(
"--model_load_path",
default="model.pickle",
metavar="FILE",
help="Where the model should be loaded from")
argp.add_argument(
"--test_file",
help="Path to the test query trace file",
metavar="FILE")
argp.add_argument(
"--test_model",
type=str,
help="Model to be used for forecasting"
)
class Forecaster:
"""
    A wrapper around various ForecastModels that prepares training and evaluation data.
"""
TRAIN_DATA_IDX = 0
TEST_DATA_IDX = 1
def __init__(
self,
trace_file: str,
interval_us: int = INTERVAL_MICRO_SEC,
test_mode: bool = False,
eval_size: int = EVAL_DATA_SIZE,
seq_len: int = SEQ_LEN,
horizon_len: int = HORIZON_LEN) -> None:
"""
Initializer
:param trace_file: trace file for the forecaster
:param interval_us: number of microseconds for the time-series interval
        :param test_mode: True if the Loader is for testing
:param eval_size: Number of data points used for evaluation(testing)
:param seq_len: Length of a sequence
:param horizon_len: Horizon length
"""
self._seq_len = seq_len
self._horizon_len = horizon_len
self._test_mode = test_mode
self._eval_data_size = eval_size
self._data_loader = DataLoader(
query_trace_file=trace_file,
interval_us=interval_us)
self._make_clusters()
def _make_clusters(self) -> None:
"""
Extract data from the DataLoader and put them into different clusters.
:return: None
"""
# FIXME:
# Assuming all the queries in the current trace file are from
# the same cluster for now. A future TODO would have a clustering
# process that separates traces into multiple clusters
self._clusters = [QueryCluster(self._data_loader.get_ts_data())]
self._cluster_data = []
for cluster in self._clusters:
# Aggregated time-series from the cluster
data = cluster.get_timeseries()
train_raw_data, test_raw_data = self._split_data(data)
self._cluster_data.append((train_raw_data, test_raw_data))
def _split_data(self, data: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
Split the raw data into a training set, and a testing(evaluation) set.
:param data: All the raw data
        :return: training, test raw data sets
"""
if self._test_mode:
self._test_set_size = len(data)
else:
self._test_set_size = self._eval_data_size
if self._test_set_size > len(data):
raise ValueError(
"Eval data size is too small. Not enough data points.")
split_idx = len(data) - self._test_set_size
# First part as the training set
train_raw_data = data[:split_idx]
# Last part as the testing set
test_raw_data = data[split_idx:]
return train_raw_data, test_raw_data
def _make_seqs(self,
input_data: np.ndarray,
start: int,
end: int,
with_label: bool = False) -> List[Union[Tuple[np.ndarray,
np.ndarray],
np.ndarray]]:
"""
Create time-series sequences of fixed sequence length from a continuous range of time-series.
:param input_data: Input time-series
:param start: Start index (inclusive) of the first sequence to be made
:param end: End index (exclusive) of the last sequence to be made
:param with_label: True if label in a certain horizon is added
:return: Sequences of fixed length if with_label is False,
or List of fixed length sequence and label if with_label is True
"""
seq_len = self._seq_len
horizon = self._horizon_len
seq_start = start
if with_label:
# Reserve space for horizon
seq_end = end - seq_len - horizon
else:
# Use all data for prediction
seq_end = end - seq_len
if seq_end <= seq_start:
raise IndexError(f"Not enough data points to make sequences")
seqs = []
for i in range(seq_start, seq_end):
seq = input_data[i:i + seq_len].reshape(-1, 1)
# Look beyond the horizon to get the label
if with_label:
label_i = i + seq_len + horizon
label = input_data[label_i: label_i + 1].reshape(1, -1)
seqs.append((seq, label))
else:
seqs.append(seq)
return seqs
@lru_cache(maxsize=32)
def _cluster_seqs(self,
cluster_id: int,
test_mode: bool = False,
with_label: bool = False) -> List[Union[Tuple[np.ndarray,
np.ndarray],
np.ndarray]]:
"""
Create time-series sequences of fixed sequence length from a continuous range of time-series. A cached wrapper
over _make_seqs with different options.
:param cluster_id: Cluster id
:param test_mode: True if using test dataset, otherwise use the training dataset
:param with_label: True if label (time-series data in a horizon from the sequence) is also added.
:return: Sequences of fixed length if with_label is False,
or List of fixed length sequence and label if with_label is True
"""
if test_mode:
input_data = self._cluster_data[cluster_id][self.TEST_DATA_IDX]
else:
input_data = self._cluster_data[cluster_id][self.TRAIN_DATA_IDX]
seqs = self._make_seqs(
input_data,
0,
len(input_data),
with_label=with_label)
return seqs
def train(self, models_kwargs: Dict) -> List[List[ForecastModel]]:
"""
:param models_kwargs: A dictionary of models' init arguments
        :return: List containing, for each cluster, its fitted models.
"""
models = []
for cid in range(len(self._cluster_data)):
cluster_models = get_models(models_kwargs)
train_seqs = self._cluster_seqs(
cid, test_mode=False, with_label=True)
for model_name, model in cluster_models.items():
# Fit the model
model.fit(train_seqs)
self.eval(cid, model)
models.append(cluster_models)
return models
def eval(self, cid: int, model: ForecastModel) -> None:
"""
Evaluate a fitted model on the test dataset.
:param cid: Cluster id
:param model: Model to use
"""
eval_seqs = self._cluster_seqs(cid, test_mode=True, with_label=True)
preds = []
gts = []
for seq, label in eval_seqs:
pred = model.predict(seq)
preds.append(pred)
gts.append(label.item())
# FIXME:
# simple L2 norm for comparing the prediction and results
l2norm = np.linalg.norm(np.array(preds) - np.array(gts))
LOG.info(
f"[{model.name}] has L2 norm(prediction, ground truth) = {l2norm}")
def predict(self, cid: int, model: ForecastModel) -> Dict:
"""
Output prediction on the test dataset, and segregate the predicted cluster time-series into individual queries
        :param cid: Cluster id
:param model: Model to use
:return: Dict of {query_id -> time-series}
"""
test_seqs = self._cluster_seqs(cid, test_mode=True, with_label=False)
preds = list([model.predict(seq) for seq in test_seqs])
query_preds = self._clusters[cid].segregate(preds)
return query_preds
def parse_model_config(model_names: Optional[List[str]],
models_config: Optional[str]) -> Dict:
"""
    Load and merge model configurations from the list of model names and an optional JSON config file.
:param model_names: List of model names
:param models_config: JSON model config file
:return: Merged model config Dict
"""
    model_kwargs = dict([(model_name, {}) for model_name in (model_names or [])])
if models_config is not None:
with open(models_config, 'r') as f:
custom_config = json.load(f)
# Simple and non-recursive merging of options
model_kwargs.update(custom_config)
if len(model_kwargs) < 1:
raise ValueError("At least 1 model needs to be used.")
return model_kwargs
if __name__ == "__main__":
args = argp.parse_args()
if args.test_file is None:
# Parse models arguments
models_kwargs = parse_model_config(args.models, args.models_config)
# Generate OLTP trace file
if args.gen_data:
gen_oltp_trace(
tpcc_weight=args.tpcc_weight,
tpcc_rates=args.tpcc_rates,
pattern_iter=args.pattern_iter)
trace_file = DEFAULT_QUERY_TRACE_FILE
else:
trace_file = args.trace_file
forecaster = Forecaster(
trace_file=trace_file,
interval_us=INTERVAL_MICRO_SEC,
seq_len=args.seq_len,
eval_size=args.eval_size,
horizon_len=args.horizon_len)
models = forecaster.train(models_kwargs)
# Save the model
if args.model_save_path:
with open(args.model_save_path, "wb") as f:
pickle.dump(models, f)
else:
# Do inference on a trained model
with open(args.model_load_path, "rb") as f:
models = pickle.load(f)
forecaster = Forecaster(
trace_file=args.test_file,
test_mode=True,
interval_us=INTERVAL_MICRO_SEC,
seq_len=args.seq_len,
eval_size=args.eval_size,
horizon_len=args.horizon_len)
# FIXME:
# Assuming all the queries in the current trace file are from
# the same cluster for now
query_pred = forecaster.predict(0, models[0][args.test_model])
# TODO:
# How are we consuming predictions?
for qid, ts in query_pred.items():
LOG.info(f"[Query: {qid}] pred={ts[:10]}")
|
[
"pickle.dump",
"argparse.ArgumentParser",
"pickle.load",
"numpy.array",
"json.load",
"functools.lru_cache"
] |
[((1707, 1767), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Query Load Forecaster"""'}), "(description='Query Load Forecaster')\n", (1730, 1767), False, 'import argparse\n'), ((8504, 8525), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(32)'}), '(maxsize=32)\n', (8513, 8525), False, 'from functools import lru_cache\n'), ((12216, 12228), 'json.load', 'json.load', (['f'], {}), '(f)\n', (12225, 12228), False, 'import json\n'), ((13542, 13556), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (13553, 13556), False, 'import pickle\n'), ((11063, 11078), 'numpy.array', 'np.array', (['preds'], {}), '(preds)\n', (11071, 11078), True, 'import numpy as np\n'), ((11081, 11094), 'numpy.array', 'np.array', (['gts'], {}), '(gts)\n', (11089, 11094), True, 'import numpy as np\n'), ((13394, 13416), 'pickle.dump', 'pickle.dump', (['models', 'f'], {}), '(models, f)\n', (13405, 13416), False, 'import pickle\n')]
|
import os, sys
from distutils.util import strtobool
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.python.util import nest, tf_inspect
from tensorflow.python.eager import tape
# from tensorflow.python.ops.custom_gradient import graph_mode_decorator
# Whether to use recomputation (gradient checkpointing)
do_recompute = strtobool(os.environ.get('RECOMPUTE', '0'))
# Zhihu: https://zhuanlan.zhihu.com/p/349492378
# Paper (GELU): https://arxiv.53yu.com/pdf/1606.08415.pdf
def gelu_erf(x):
"""根据erf直接计算gelu
"""
# np的精度更高,默认64位,tf默认32位
return 0.5 * x * (1.0 + tf.math.erf(x / np.sqrt(2.0)))
def gelu_tanh(x):
cdf = 0.5 * (
1 + K.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * K.pow(x,3)))
)
return x * cdf
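# gelu_erf computes the exact form GELU(x) = 0.5 * x * (1 + erf(x / sqrt(2))),
# while gelu_tanh uses the tanh approximation from the paper linked above:
# GELU(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).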
def set_gelu(version):
"""设置gelu版本
"""
version = version.lower()
assert version in ['erf', 'tanh'], 'gelu version must in erf or tanh'
if version == 'erf':
tf.keras.utils.get_custom_objects()['gelu'] = gelu_erf
elif version == 'tanh':
tf.keras.utils.get_custom_objects()['gelu'] = gelu_tanh
def align(tensor, axes, ndim=None):
"""重新对齐tensor(批量版expand_dims)感觉更像是transpose
axes: 原来的第i维对齐新tensor的第axes[i]维;
ndim: 新tensor的维度
Example:
>>> tensor = tf.constant(np.arange(12).reshape(3,4), dtype=tf.float32)
>>> print(tensor)
tf.Tensor(
[[ 0. 1. 2. 3.]
[ 4. 5. 6. 7.]
[ 8. 9. 10. 11.]], shape=(3, 4), dtype=float32)
>>> same_dim = align(tensor, [0, -1], 2)
>>> print(same_dim)
tf.Tensor(
[[ 0. 1. 2. 3.]
[ 4. 5. 6. 7.]
[ 8. 9. 10. 11.]], shape=(3, 4), dtype=float32)
>>> more_dim = align(tensor, [0, -1], 3)
>>> print(more_dim)
tf.Tensor(
[[[ 0. 1. 2. 3.]]
<BLANKLINE>
[[ 4. 5. 6. 7.]]
<BLANKLINE>
[[ 8. 9. 10. 11.]]], shape=(3, 1, 4), dtype=float32)
"""
assert len(axes) == K.ndim(tensor)
indices = [None] * (ndim or max(axes))
for i in axes:
indices[i] = slice(None)
return tensor[indices]
def sequence_masking(x, mask, value=0, axis=None):
"""为序列条件mask的函数
parameters:
-----------
x: tensor
输入张量
mask: tensor
形如(batch_size, seq_len)的0-1矩阵
value: float or str
mask部分要被替换成的值,允许'inf'与'-inf'
axis: int
序列所在的轴,默认为1
"""
if mask is None:
return x
    # make sure x has a dtype that supports the * operation
x_type = K.dtype(x)
if x_type == 'bool':
x = K.cast(x, 'int32')
    # make sure mask has the same dtype as x
if K.dtype(mask) != K.dtype(x):
mask = K.cast(mask, K.dtype(x))
    if value == '-inf':
        # K.infinity is registered below as a function (like K.epsilon()), so it must be called
        value = -K.infinity()
    elif value == 'inf':
        value = K.infinity()
value = K.cast(value, K.dtype(x))
    # determine the axis
if axis is None:
axis = 1
if axis < 0:
axis = K.ndim(x) + axis
assert axis > 0, 'axis must be greater than 0'
    # unify shapes
    for _ in range(axis - 1): # only takes effect when axis > 1
        mask = K.expand_dims(mask, 1) # keep dim 0 for batch_size
for _ in range(K.ndim(x) - K.ndim(mask)):
mask = K.expand_dims(mask, K.ndim(mask))
x = x * mask + value * (1 - mask)
    # cast back to the original dtype of x
if x_type == 'bool':
x = K.cast(x, x_type)
return x
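# Illustrative usage sketch (not part of the original module): zero out padded timesteps
# with sequence_masking. The shapes and values below are invented for demonstration only.
def _example_sequence_masking():
    x = tf.ones((2, 5, 8))                                   # (batch, seq_len, hidden)
    mask = tf.constant([[1, 1, 1, 0, 0],
                        [1, 1, 0, 0, 0]], dtype=tf.float32)  # (batch, seq_len)
    # padded positions become 0; pass value='-inf' instead before a softmax
    return sequence_masking(x, mask, value=0, axis=1)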
def recompute_grad(call):
    """Recompute (gradient-checkpointing) decorator for a Keras layer's call function.
    Goal: reduce GPU memory usage at the cost of extra computation.
    Paper: https://arxiv.org/abs/1604.06174
"""
if not do_recompute:
return call
def inner(self, inputs, **kwargs):
        # in TF 2.x, tf.nest.flatten does not flatten numpy arrays or tf.Tensors
flat_inputs = nest.flatten(inputs)
call_args = tf_inspect.getfullargspec(call).args
for key in ['mask', 'training']:
if key not in call_args and key in kwargs:
del kwargs[key]
def kernel_call():
"""定义前向计算
"""
return call(self, inputs, **kwargs)
def call_and_grad(*inputs):
"""定义前向计算和反向计算
"""
with tape.stop_recording():
outputs = kernel_call()
outputs = tf.identity(outputs)
def grad_fn(doutputs, variables=None):
watches = list(inputs)
if variables is not None:
watches += list(variables)
with tf.GradientTape() as t:
t.watch(watches)
with tf.control_dependencies([doutputs]):
outputs = kernel_call()
grads = t.gradient(
outputs, watches, output_gradients=[doutputs]
)
del t
return grads[:len(inputs)], grads[len(inputs):]
return outputs, grad_fn
outputs, grad_fn = call_and_grad(*flat_inputs)
flat_outputs = nest.flatten(outputs)
def actual_grad_fn(*doutputs):
grads = grad_fn(*doutputs, variables=self.trainable_weights)
return grads[0] + grads[1]
watches = flat_inputs + self.trainable_weights
watches = [tf.convert_to_tensor(x) for x in watches]
tape.record_operation(
call.__name__, flat_outputs, watches, actual_grad_fn
)
return outputs
return inner
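# Illustrative usage sketch (not part of the original module): recompute_grad is meant to
# decorate a Keras layer's call method and is a pass-through unless RECOMPUTE=1 is set.
# The layer below is hypothetical, written only to show where the decorator goes.
class _RecomputeDenseExample(tf.keras.layers.Layer):
    def __init__(self, units, **kwargs):
        super(_RecomputeDenseExample, self).__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(units)

    @recompute_grad
    def call(self, inputs):
        return self.dense(inputs)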
def infinity():
"""返回默认的代表无穷大的数值
"""
return tf.keras.utils.get_custom_objects().get('infinity', 1e12)
def set_infinity(value):
"""设置新的代表无穷大的数值
"""
tf.keras.utils.get_custom_objects()['infinity'] = value
# attach to keras.backend so they can be used like K.epsilon()
K.infinity = infinity
K.set_infinity = set_infinity
sys.modules['tensorflow.keras.backend'] = K
custom_objects = {
'gelu_erf': gelu_erf,
'gelu_tanh': gelu_tanh,
'gelu': gelu_erf,
}
tf.keras.utils.get_custom_objects().update(custom_objects)
if __name__ == '__main__':
import doctest
doctest.testmod()
|
[
"tensorflow.python.eager.tape.stop_recording",
"numpy.sqrt",
"os.environ.get",
"tensorflow.python.util.nest.flatten",
"tensorflow.keras.backend.ndim",
"tensorflow.python.util.tf_inspect.getfullargspec",
"tensorflow.GradientTape",
"tensorflow.keras.backend.pow",
"tensorflow.keras.backend.dtype",
"tensorflow.python.eager.tape.record_operation",
"doctest.testmod",
"tensorflow.keras.backend.cast",
"tensorflow.control_dependencies",
"tensorflow.identity",
"tensorflow.convert_to_tensor",
"tensorflow.keras.utils.get_custom_objects",
"tensorflow.keras.backend.expand_dims"
] |
[((348, 380), 'os.environ.get', 'os.environ.get', (['"""RECOMPUTE"""', '"""0"""'], {}), "('RECOMPUTE', '0')\n", (362, 380), False, 'import os, sys\n'), ((2470, 2480), 'tensorflow.keras.backend.dtype', 'K.dtype', (['x'], {}), '(x)\n', (2477, 2480), True, 'import tensorflow.keras.backend as K\n'), ((6013, 6030), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (6028, 6030), False, 'import doctest\n'), ((1947, 1961), 'tensorflow.keras.backend.ndim', 'K.ndim', (['tensor'], {}), '(tensor)\n', (1953, 1961), True, 'import tensorflow.keras.backend as K\n'), ((2520, 2538), 'tensorflow.keras.backend.cast', 'K.cast', (['x', '"""int32"""'], {}), "(x, 'int32')\n", (2526, 2538), True, 'import tensorflow.keras.backend as K\n'), ((2569, 2582), 'tensorflow.keras.backend.dtype', 'K.dtype', (['mask'], {}), '(mask)\n', (2576, 2582), True, 'import tensorflow.keras.backend as K\n'), ((2586, 2596), 'tensorflow.keras.backend.dtype', 'K.dtype', (['x'], {}), '(x)\n', (2593, 2596), True, 'import tensorflow.keras.backend as K\n'), ((2817, 2827), 'tensorflow.keras.backend.dtype', 'K.dtype', (['x'], {}), '(x)\n', (2824, 2827), True, 'import tensorflow.keras.backend as K\n'), ((3058, 3080), 'tensorflow.keras.backend.expand_dims', 'K.expand_dims', (['mask', '(1)'], {}), '(mask, 1)\n', (3071, 3080), True, 'import tensorflow.keras.backend as K\n'), ((3293, 3310), 'tensorflow.keras.backend.cast', 'K.cast', (['x', 'x_type'], {}), '(x, x_type)\n', (3299, 3310), True, 'import tensorflow.keras.backend as K\n'), ((3693, 3713), 'tensorflow.python.util.nest.flatten', 'nest.flatten', (['inputs'], {}), '(inputs)\n', (3705, 3713), False, 'from tensorflow.python.util import nest, tf_inspect\n'), ((4949, 4970), 'tensorflow.python.util.nest.flatten', 'nest.flatten', (['outputs'], {}), '(outputs)\n', (4961, 4970), False, 'from tensorflow.python.util import nest, tf_inspect\n'), ((5256, 5331), 'tensorflow.python.eager.tape.record_operation', 'tape.record_operation', (['call.__name__', 'flat_outputs', 'watches', 'actual_grad_fn'], {}), '(call.__name__, flat_outputs, watches, actual_grad_fn)\n', (5277, 5331), False, 'from tensorflow.python.eager import tape\n'), ((5589, 5624), 'tensorflow.keras.utils.get_custom_objects', 'tf.keras.utils.get_custom_objects', ([], {}), '()\n', (5622, 5624), True, 'import tensorflow as tf\n'), ((5899, 5934), 'tensorflow.keras.utils.get_custom_objects', 'tf.keras.utils.get_custom_objects', ([], {}), '()\n', (5932, 5934), True, 'import tensorflow as tf\n'), ((951, 986), 'tensorflow.keras.utils.get_custom_objects', 'tf.keras.utils.get_custom_objects', ([], {}), '()\n', (984, 986), True, 'import tensorflow as tf\n'), ((2627, 2637), 'tensorflow.keras.backend.dtype', 'K.dtype', (['x'], {}), '(x)\n', (2634, 2637), True, 'import tensorflow.keras.backend as K\n'), ((2917, 2926), 'tensorflow.keras.backend.ndim', 'K.ndim', (['x'], {}), '(x)\n', (2923, 2926), True, 'import tensorflow.keras.backend as K\n'), ((3121, 3130), 'tensorflow.keras.backend.ndim', 'K.ndim', (['x'], {}), '(x)\n', (3127, 3130), True, 'import tensorflow.keras.backend as K\n'), ((3133, 3145), 'tensorflow.keras.backend.ndim', 'K.ndim', (['mask'], {}), '(mask)\n', (3139, 3145), True, 'import tensorflow.keras.backend as K\n'), ((3184, 3196), 'tensorflow.keras.backend.ndim', 'K.ndim', (['mask'], {}), '(mask)\n', (3190, 3196), True, 'import tensorflow.keras.backend as K\n'), ((3735, 3766), 'tensorflow.python.util.tf_inspect.getfullargspec', 'tf_inspect.getfullargspec', (['call'], {}), '(call)\n', (3760, 3766), False, 'from tensorflow.python.util 
import nest, tf_inspect\n'), ((5205, 5228), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['x'], {}), '(x)\n', (5225, 5228), True, 'import tensorflow as tf\n'), ((5466, 5501), 'tensorflow.keras.utils.get_custom_objects', 'tf.keras.utils.get_custom_objects', ([], {}), '()\n', (5499, 5501), True, 'import tensorflow as tf\n'), ((1044, 1079), 'tensorflow.keras.utils.get_custom_objects', 'tf.keras.utils.get_custom_objects', ([], {}), '()\n', (1077, 1079), True, 'import tensorflow as tf\n'), ((4124, 4145), 'tensorflow.python.eager.tape.stop_recording', 'tape.stop_recording', ([], {}), '()\n', (4143, 4145), False, 'from tensorflow.python.eager import tape\n'), ((4215, 4235), 'tensorflow.identity', 'tf.identity', (['outputs'], {}), '(outputs)\n', (4226, 4235), True, 'import tensorflow as tf\n'), ((602, 614), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (609, 614), True, 'import numpy as np\n'), ((679, 697), 'numpy.sqrt', 'np.sqrt', (['(2 / np.pi)'], {}), '(2 / np.pi)\n', (686, 697), True, 'import numpy as np\n'), ((4443, 4460), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (4458, 4460), True, 'import tensorflow as tf\n'), ((4531, 4566), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[doutputs]'], {}), '([doutputs])\n', (4554, 4566), True, 'import tensorflow as tf\n'), ((716, 727), 'tensorflow.keras.backend.pow', 'K.pow', (['x', '(3)'], {}), '(x, 3)\n', (721, 727), True, 'import tensorflow.keras.backend as K\n')]
|
# utils for working with 3d-protein structures
import os
import numpy as np
import torch
from functools import wraps
from einops import rearrange, repeat
# import torch_sparse # only needed for sparse nth_deg adj calculation
# bio
from Bio import SeqIO
import itertools
import string
# sidechainnet
from sidechainnet.utils.sequence import ProteinVocabulary, ONE_TO_THREE_LETTER_MAP
from sidechainnet.utils.measure import GLOBAL_PAD_CHAR
from sidechainnet.structure.build_info import NUM_COORDS_PER_RES, BB_BUILD_INFO, SC_BUILD_INFO
from sidechainnet.structure.StructureBuilder import _get_residue_build_iter
# build vocabulary
VOCAB = ProteinVocabulary()
# constants
import alphafold2_pytorch.constants as constants
# helpers
def exists(val):
return val is not None
# constants: same as in alphafold2.py
DISTANCE_THRESHOLDS = torch.linspace(2, 20, steps = constants.DISTOGRAM_BUCKETS)
# distance binning function
def get_bucketed_distance_matrix(coords, mask, num_buckets = constants.DISTOGRAM_BUCKETS, ignore_index = -100):
distances = torch.cdist(coords, coords, p=2)
boundaries = torch.linspace(2, 20, steps = num_buckets, device = coords.device)
discretized_distances = torch.bucketize(distances, boundaries[:-1])
discretized_distances.masked_fill_(~(mask[..., None] & mask[..., None, :]), ignore_index)
return discretized_distances
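# Illustrative usage sketch (not part of the original module): bucketize pairwise distances
# for a toy batch. Shapes and values are invented for demonstration only.
def _example_bucketed_distances():
    coords = torch.randn(1, 8, 3) * 5      # (batch, length, 3) fake coordinates
    mask = torch.ones(1, 8).bool()        # all positions valid
    return get_bucketed_distance_matrix(coords, mask)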
# decorators
def set_backend_kwarg(fn):
@wraps(fn)
def inner(*args, backend = 'auto', **kwargs):
if backend == 'auto':
backend = 'torch' if isinstance(args[0], torch.Tensor) else 'numpy'
kwargs.update(backend = backend)
return fn(*args, **kwargs)
return inner
def expand_dims_to(t, length = 3):
if length == 0:
return t
return t.reshape(*((1,) * length), *t.shape) # will work with both torch and numpy
def expand_arg_dims(dim_len = 3):
""" pack here for reuse.
turns input into (B x D x N)
"""
def outer(fn):
@wraps(fn)
def inner(x, y, **kwargs):
assert len(x.shape) == len(y.shape), "Shapes of A and B must match."
remaining_len = dim_len - len(x.shape)
x = expand_dims_to(x, length = remaining_len)
y = expand_dims_to(y, length = remaining_len)
return fn(x, y, **kwargs)
return inner
return outer
def invoke_torch_or_numpy(torch_fn, numpy_fn):
def outer(fn):
@wraps(fn)
def inner(*args, **kwargs):
backend = kwargs.pop('backend')
passed_args = fn(*args, **kwargs)
passed_args = list(passed_args)
if isinstance(passed_args[-1], dict):
passed_kwargs = passed_args.pop()
else:
passed_kwargs = {}
backend_fn = torch_fn if backend == 'torch' else numpy_fn
return backend_fn(*passed_args, **passed_kwargs)
return inner
return outer
# preprocess data
def get_atom_ids_dict():
""" Get's a dict mapping each atom to a token. """
ids = set(["", "N", "CA", "C", "O"])
for k,v in SC_BUILD_INFO.items():
for name in v["atom-names"]:
ids.add(name)
return {k: i for i,k in enumerate(sorted(ids))}
def make_cloud_mask(aa):
""" relevent points will be 1. paddings will be 0. """
mask = np.zeros(14)
# early stop if padding token
if aa == "_":
return mask
# get num of atoms in aa
n_atoms = 4+len( SC_BUILD_INFO[ ONE_TO_THREE_LETTER_MAP[aa] ]["atom-names"] )
mask[:n_atoms] = 1
return mask
def make_atom_id_embedds(aa, atom_ids):
""" Return the tokens for each atom in the aa. """
mask = np.zeros(14)
# early stop if padding token
if aa == "_":
return mask
# get atom id
atom_list = ["N", "CA", "C", "O"] + SC_BUILD_INFO[ ONE_TO_THREE_LETTER_MAP[aa] ]["atom-names"]
for i,atom in enumerate(atom_list):
mask[i] = ATOM_IDS[atom]
return mask
ATOM_IDS = get_atom_ids_dict()
CUSTOM_INFO = {k: {"cloud_mask": make_cloud_mask(k),
"atom_id_embedd": make_atom_id_embedds(k, atom_ids=ATOM_IDS),
} for k in "ARNDCQEGHILKMFPSTWYV_"}
# common utils
# parsing to pdb for easier visualization - other example from sidechainnet is:
# https://github.com/jonathanking/sidechainnet/tree/master/sidechainnet/structure
def download_pdb(name, route):
""" Downloads a PDB entry from the RCSB PDB.
Inputs:
* name: str. the PDB entry id. 4 characters, capitalized.
        * route: str. route of the destination file. usually ".pdb" extension
        Output: route of destination file
"""
os.system(f"curl https://files.rcsb.org/download/{name}.pdb > {route}")
return route
def clean_pdb(name, route=None, chain_num=None):
""" Cleans the structure to only leave the important part.
Inputs:
* name: str. route of the input .pdb file
* route: str. route of the output. will overwrite input if not provided
* chain_num: int. index of chain to select (1-indexed as pdb files)
        Output: route of destination file.
"""
import mdtraj
destin = route if route is not None else name
# read input
raw_prot = mdtraj.load_pdb(name)
# iterate over prot and select the specified chains
idxs = []
for chain in raw_prot.topology.chains:
# if arg passed, only select that chain
if chain_num is not None:
if chain_num != chain.index:
continue
# select indexes of chain
chain_idxs = raw_prot.topology.select(f"chainid == {str(chain.index)}")
idxs.extend( chain_idxs.tolist() )
# sort: topology and xyz selection are ordered
idxs = sorted(idxs)
    # get new trajectory from the selected subset of indexes and save
prot = mdtraj.Trajectory(xyz=raw_prot.xyz[:, idxs],
topology=raw_prot.topology.subset(idxs))
prot.save(destin)
return destin
def custom2pdb(coords, proteinnet_id, route):
""" Takes a custom representation and turns into a .pdb file.
Inputs:
* coords: array/tensor of shape (3 x N) or (N x 3). in Angstroms.
                  same order as in proteinnet is assumed (same as raw pdb file)
* proteinnet_id: str. proteinnet id format (<class>#<pdb_id>_<chain_number>_<chain_id>)
see: https://github.com/aqlaboratory/proteinnet/
        * route: str. destination route.
Output: tuple of routes: (original, generated) for the structures.
"""
import mdtraj
# convert to numpy
if isinstance(coords, torch.Tensor):
coords = coords.detach().cpu().numpy()
    # ensure (1, N, 3): transpose (3, N) inputs to (N, 3), then add the frame axis
    if coords.shape[0] == 3:
        coords = coords.T
    # np.newaxis is an alias for None and is not callable; add the batch axis explicitly
    coords = coords[None, ...]
# get pdb id and chain num
pdb_name, chain_num = proteinnet_id.split("#")[-1].split("_")[:-1]
pdb_destin = "/".join(route.split("/")[:-1])+"/"+pdb_name+".pdb"
    # download pdb file and select the appropriate chain
download_pdb(pdb_name, pdb_destin)
clean_pdb(pdb_destin, chain_num=chain_num)
# load trajectory scaffold and replace coordinates - assumes same order
scaffold = mdtraj.load_pdb(pdb_destin)
scaffold.xyz = coords
scaffold.save(route)
return pdb_destin, route
def coords2pdb(seq, coords, cloud_mask, prefix="", name="af2_struct.pdb"):
""" Turns coordinates into PDB files ready to be visualized.
Inputs:
* seq: (L,) tensor of ints (sidechainnet aa-key pairs)
* coords: (3, N) coords of atoms
* cloud_mask: (L, C) boolean mask of occupied spaces in scn format
* prefix: str. directory to save files.
* name: str. name of destin file (ex: pred1.pdb)
"""
    scaffold = torch.zeros( *cloud_mask.shape, 3 )
scaffold[cloud_mask] = coords.cpu().float()
    # build structures and save ("scn" was never imported, so import the builder explicitly)
    from sidechainnet.structure.StructureBuilder import StructureBuilder
    pred = StructureBuilder( seq, crd=scaffold )
    pred.to_pdb(prefix+name)
# adapted from https://github.com/facebookresearch/esm
def remove_insertions(sequence: str) -> str:
""" Removes any insertions into the sequence. Needed to load aligned sequences in an MSA. """
deletekeys = dict.fromkeys(string.ascii_lowercase)
deletekeys["."] = None
deletekeys["*"] = None
translation = str.maketrans(deletekeys)
return sequence.translate(translation)
def read_msa(filename: str, nseq: int):
""" Reads the first nseq sequences from an MSA file, automatically removes insertions."""
return [(record.description, remove_insertions(str(record.seq)))
for record in itertools.islice(SeqIO.parse(filename, "fasta"), nseq)]
# sidechainnet / MSA / other data utils
def ids_to_embed_input(x):
""" Returns the amino acid string input for calculating the ESM and MSA transformer embeddings
Inputs:
* x: any deeply nested list of integers that correspond with amino acid id
"""
assert isinstance(x, list), 'input must be a list'
id2aa = VOCAB._int2char
out = []
for el in x:
if isinstance(el, list):
out.append(ids_to_embed_input(el))
elif isinstance(el, int):
out.append(id2aa[el])
else:
raise TypeError('type must be either list or character')
if all(map(lambda c: isinstance(c, str), out)):
return (None, ''.join(out))
return out
def get_msa_embedd(msa, embedd_model, batch_converter, device = None):
""" Returns the MSA_tr embeddings for a protein.
Inputs:
* seq: ( (b,) L,) tensor of ints (in sidechainnet int-char convention)
* embedd_model: MSA_tr model (see train_end2end.py for an example)
* batch_converter: MSA_tr batch converter (see train_end2end.py for an example)
Outputs: tensor of (batch, n_seqs, L, embedd_dim)
* n_seqs: number of sequences in the MSA
* embedd_dim: number of embedding dimensions. 768 for MSA_Transformer
"""
# use MSA transformer
REPR_LAYER_NUM = 12
device = embedd_model.device
max_seq_len = msa.shape[-1]
embedd_inputs = ids_to_embed_input(msa.cpu().tolist())
msa_batch_labels, msa_batch_strs, msa_batch_tokens = batch_converter(embedd_inputs)
with torch.no_grad():
results = embedd_model(msa_batch_tokens.to(device), repr_layers=[REPR_LAYER_NUM], return_contacts=False)
# index 0 is for start token. so take from 1 one
token_reps = results["representations"][REPR_LAYER_NUM][..., 1:, :]
return token_reps
def get_esm_embedd(seq, embedd_model, batch_converter, msa_data=None):
""" Returns the ESM embeddings for a protein.
Inputs:
* seq: ( (b,) L,) tensor of ints (in sidechainnet int-char convention)
* embedd_model: ESM model (see train_end2end.py for an example)
* batch_converter: ESM batch converter (see train_end2end.py for an example)
Outputs: tensor of (batch, n_seqs, L, embedd_dim)
* n_seqs: number of sequences in the MSA. 1 for ESM-1b
* embedd_dim: number of embedding dimensions. 1280 for ESM-1b
"""
# use ESM transformer
device = embedd_model.device
REPR_LAYER_NUM = 33
max_seq_len = seq.shape[-1]
embedd_inputs = ids_to_embed_input(seq.cpu().tolist())
batch_labels, batch_strs, batch_tokens = batch_converter(embedd_inputs)
with torch.no_grad():
results = embedd_model(batch_tokens.to(device), repr_layers=[REPR_LAYER_NUM], return_contacts=False)
# index 0 is for start token. so take from 1 one
token_reps = results["representations"][REPR_LAYER_NUM][..., 1:, :].unsqueeze(dim=1)
return token_reps
def get_all_protein_ids(dataloader, verbose=False):
""" Given a sidechainnet dataloader for a CASP version,
Returns all the ids belonging to proteins.
Inputs:
* dataloader: a sidechainnet dataloader for a CASP version
Outputs: a set containing the ids for all protein entries.
"""
# store ids here
ids = set([])
# iterate for all batches
    for i, batch in enumerate(dataloader['train']):  # tqdm was not imported; iterate the dataloader argument
# for breaking from 2 loops at once
try:
for i in range(batch.int_seqs.shape[0]):
# check if all fragments are : 4_LETTER_PDB + NUM + CHAIN
max_len_10 = len(batch.pids[i]) < 10
fragments = [len(x) <= 4 for x in batch.pids[i].split("_")]
fragments_under_4 = sum(fragments) == len(fragments) # AND CONDITION
# record id
if max_len_10 and fragments_under_4:
ids.add(batch.pids[i])
else:
if verbose:
print("skip:", batch.pids[i], "under 4", fragments)
except StopIteration:
break
# returns set of ids
return ids
def scn_cloud_mask(scn_seq, boolean=True, coords=None):
""" Gets the boolean mask atom positions (not all aas have same atoms).
Inputs:
* scn_seq: (batch, length) sequence as provided by Sidechainnet package
* boolean: whether to return as array of idxs or boolean values
* coords: optional .(batch, lc, 3). sidechainnet coords.
returns the true mask (solves potential atoms that might not be provided)
Outputs: (batch, length, NUM_COORDS_PER_RES) boolean mask
"""
scn_seq = expand_dims_to(scn_seq, 2 - len(scn_seq.shape))
# early check for coords mask
if coords is not None:
batch_mask = ( rearrange(coords, '... (l c) d -> ... l c d', c=14) == 0 ).sum(dim=-1) < coords.shape[-1]
if boolean:
return batch_mask.bool()
else:
return batch_mask.nonzero()
# do loop in cpu
device = scn_seq.device
batch_mask = []
scn_seq = scn_seq.cpu().tolist()
for i, seq in enumerate(scn_seq):
# get masks for each prot (points for each aa)
batch_mask.append( torch.tensor([CUSTOM_INFO[VOCAB.int2char(aa)]['cloud_mask'] \
for aa in seq]).bool().to(device).unsqueeze(0) )
# concat in last dim
batch_mask = torch.cat(batch_mask, dim=0)
# return mask (boolean or indexes)
if boolean:
return batch_mask.bool()
else:
return batch_mask.nonzero()
def scn_backbone_mask(scn_seq, boolean=True, n_aa=3):
""" Gets the boolean mask for N and CA positions.
Inputs:
* scn_seq: sequence(s) as provided by Sidechainnet package (int tensor/s)
* n_aa: number of atoms in a backbone. (may include cbeta as 4th pos)
* bool: whether to return as array of idxs or boolean values
Outputs: (N_mask, CA_mask, C_mask)
"""
wrapper = torch.zeros(*scn_seq.shape, n_aa).to(scn_seq.device)
# N is the first atom in every AA. CA is the 2nd.
wrapper[..., 0] = 1
wrapper[..., 1] = 2
wrapper[..., 2] = 3
wrapper = rearrange(wrapper, '... l c -> ... (l c)')
# find idxs
N_mask = wrapper == 1
CA_mask = wrapper == 2
C_mask = wrapper == 3
if boolean:
return N_mask, CA_mask, C_mask
return torch.nonzero(N_mask), torch.nonzero(CA_mask), torch.nonzero(C_mask)
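# Illustrative usage sketch (not part of the original module): recover backbone masks for a
# toy integer sequence (n_aa=3 backbone slots per residue). Values are invented.
def _example_backbone_mask():
    seq = torch.zeros(1, 4).long()                       # (batch, length) fake sequence
    N_mask, CA_mask, C_mask = scn_backbone_mask(seq, boolean=True)
    return N_mask.shape                                  # (1, 4 * 3) == (1, 12)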
def scn_atom_embedd(scn_seq):
""" Returns the token for each atom in the aa.
Inputs:
* scn_seq: sequence(s) as provided by Sidechainnet package (int tensor/s)
"""
device = scn_seq.device
batch_tokens = []
# do loop in cpu
scn_seq = scn_seq.cpu()
for i,seq in enumerate(scn_seq):
batch_tokens.append( torch.tensor([CUSTOM_INFO[VOCAB.int2char(aa.item())]["atom_id_embedd"] \
for aa in seq]).long().to(device).unsqueeze(0) )
batch_tokens = torch.cat(batch_tokens, dim=0)
return batch_tokens
def nth_deg_adjacency(adj_mat, n=1, sparse=False):
""" Calculates the n-th degree adjacency matrix.
Performs mm of adj_mat and adds the newly added.
Default is dense. Mods for sparse version are done when needed.
Inputs:
* adj_mat: (N, N) adjacency tensor
* n: int. degree of the output adjacency
* sparse: bool. whether to use torch-sparse module
Outputs:
* edge_idxs: ij positions of the adjacency matrix
* edge_attrs: degree of connectivity (1 for neighs, 2 for neighs^2, ... )
"""
adj_mat = adj_mat.float()
attr_mat = torch.zeros_like(adj_mat)
new_adj_mat = adj_mat.clone()
for i in range(n):
if i == 0:
attr_mat += adj_mat
continue
if i == 1 and sparse:
idxs = adj_mat.nonzero().t()
vals = adj_mat[idxs[0], idxs[1]]
new_idxs = idxs.clone()
new_vals = vals.clone()
m, k, n = 3 * [adj_mat.shape[0]] # (m, n) * (n, k) , but adj_mats are squared: m=n=k
if sparse:
new_idxs, new_vals = torch_sparse.spspmm(new_idxs, new_vals, idxs, vals, m=m, k=k, n=n)
new_vals = new_vals.bool().float()
new_adj_mat = torch.zeros_like(attr_mat)
new_adj_mat[new_idxs[0], new_idxs[1]] = new_vals
# sparse to dense is slower
# torch.sparse.FloatTensor(idxs, vals).to_dense()
else:
new_adj_mat = (new_adj_mat @ adj_mat).bool().float()
attr_mat.masked_fill( (new_adj_mat - attr_mat.bool().float()).bool(), i+1 )
return new_adj_mat, attr_mat
def prot_covalent_bond(seqs, adj_degree=1, cloud_mask=None, mat=True):
""" Returns the idxs of covalent bonds for a protein.
Inputs
* seq: (b, n) torch long.
* adj_degree: int. adjacency degree
* cloud_mask: mask selecting the present atoms.
* mat: whether to return as indexes or matrices.
for indexes, only 1 seq is supported
Outputs: edge_idxs, edge_attrs
"""
device = seqs.device
# get starting poses for every aa
adj_mat = torch.zeros(seqs.shape[0], seqs.shape[1]*14, seqs.shape[1]*14)
    # no need to move to device since it is only used for indices.
scaff = torch.zeros(seqs.shape[1], 14)
scaff[:, 0] = 1
idxs = torch.nonzero(scaff).reshape(-1)
for s,seq in enumerate(seqs):
for i,idx in enumerate(idxs):
if i >= seq.shape[0]:
break
# offset by pos in chain ( intra-aa bonds + with next aa )
bonds = idx + torch.tensor( constants.AA_DATA[VOCAB.int2char(seq[i].item())]['bonds'] + [[2, 14]] ).t()
# delete link with next if final AA in seq
if i == idxs.shape[0]-1:
bonds = bonds[:, :-1]
# modify adj mat
adj_mat[s, bonds[0], bonds[1]] = 1
# convert to undirected
adj_mat[s] = adj_mat[s] + adj_mat[s].t()
# do N_th degree adjacency
adj_mat, attr_mat = nth_deg_adjacency(adj_mat, n=adj_degree, sparse=False) # True
if mat:
return attr_mat.bool().to(seqs.device), attr_mat.to(device)
else:
edge_idxs = attr_mat[0].nonzero().t().long()
edge_attrs = attr_mat[0, edge_idxs[0], edge_idxs[1]]
return edge_idxs.to(seqs.device), edge_attrs.to(seqs.device)
def nerf_torch(a, b, c, l, theta, chi):
""" Custom Natural extension of Reference Frame.
Inputs:
* a: (batch, 3) or (3,). point(s) of the plane, not connected to d
* b: (batch, 3) or (3,). point(s) of the plane, not connected to d
* c: (batch, 3) or (3,). point(s) of the plane, connected to d
* theta: (batch,) or (float). angle(s) between b-c-d
* chi: (batch,) or float. dihedral angle(s) between the a-b-c and b-c-d planes
Outputs: d (batch, 3) or (3,). the next point in the sequence, linked to c
"""
# safety check
if not ( (-np.pi <= theta) * (theta <= np.pi) ).all().item():
raise ValueError(f"theta(s) must be in radians and in [-pi, pi]. theta(s) = {theta}")
# calc vecs
ba = b-a
cb = c-b
# calc rotation matrix. based on plane normals and normalized
n_plane = torch.cross(ba, cb, dim=-1)
n_plane_ = torch.cross(n_plane, cb, dim=-1)
rotate = torch.stack([cb, n_plane_, n_plane], dim=-1)
rotate /= torch.norm(rotate, dim=-2, keepdim=True)
# calc proto point, rotate
d = torch.stack([-torch.cos(theta),
torch.sin(theta) * torch.cos(chi),
torch.sin(theta) * torch.sin(chi)], dim=-1).unsqueeze(-1)
# extend base point, set length
return c + l.unsqueeze(-1) * torch.matmul(rotate, d).squeeze()
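# Illustrative usage sketch (not part of the original module): place a 4th atom from three
# known atoms plus a bond length, bond angle and dihedral. All values are invented.
def _example_nerf():
    a = torch.tensor([0.0, 0.0, 0.0])
    b = torch.tensor([1.5, 0.0, 0.0])
    c = torch.tensor([2.3, 1.2, 0.0])
    bond_len = torch.tensor(1.5)
    theta = torch.tensor(2.0)                            # bond angle, radians, in [-pi, pi]
    chi = torch.tensor(1.0)                              # dihedral, radians
    return nerf_torch(a, b, c, bond_len, theta, chi)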
def sidechain_container(backbones, n_aa, cloud_mask=None, place_oxygen=False,
n_atoms=NUM_COORDS_PER_RES, padding=GLOBAL_PAD_CHAR):
""" Gets a backbone of the protein, returns the whole coordinates
with sidechains (same format as sidechainnet). Keeps differentiability.
Inputs:
* backbones: (batch, L*3, 3): assume batch=1 (could be extended later).
Coords for (N-term, C-alpha, C-term) of every aa.
* n_aa: int. number of points for each aa in the backbones.
        * cloud_mask: (batch, l, c). optional. cloud mask from scn_cloud_mask().
                      sets points outside the mask to 0 if passed, else to c_alpha
        * place_oxygen: whether to calculate the oxygen of the
carbonyl group via NeRF
* n_atoms: int. n of atom positions / atom. same as in sidechainnet: 14
* padding: int. padding token. same as in sidechainnet: 0
Outputs: whole coordinates of shape (batch, L, n_atoms, 3)
"""
device = backbones.device
batch, length = backbones.shape[0], backbones.shape[1] // n_aa
# build scaffold from (N, CA, C, CB)
new_coords = torch.zeros(batch, length, NUM_COORDS_PER_RES, 3).to(device)
predicted = rearrange(backbones, 'b (l back) d -> b l back d', l=length)
# set backbone positions
new_coords[:, :, :3] = predicted[:, :, :3]
# set rest of positions to c_beta if present, else c_alpha
if n_aa == 4:
new_coords[:, :, 4:] = repeat(predicted[:, :, -1], 'b l d -> b l scn d', scn=10)
else:
new_coords[:, :, 4:] = repeat(new_coords[:, :, 1], 'b l d -> b l scn d', scn=10)
if cloud_mask is not None:
new_coords[torch.logical_not(cloud_mask)] = 0.
# hard-calculate oxygen position of carbonyl group with parallel version of NERF
if place_oxygen:
            # build (=O) position of every aa in each chain
for s in range(batch):
# dihedrals phi=f(c-1, n, ca, c) & psi=f(n, ca, c, n+1)
# phi = get_dihedral_torch(*backbone[s, i*3 - 1 : i*3 + 3]) if i>0 else None
            psis = torch.tensor([ get_dihedral_torch(*backbones[s, i*3 + 0 : i*3 + 4]) if i < length-1 else np.pi*5/4 \
for i in range(length) ])
# the angle for placing oxygen is opposite to psi of current res.
# psi not available for last one so pi/4 taken for now
bond_lens = repeat(torch.tensor(BB_BUILD_INFO["BONDLENS"]["c-o"]), ' -> b', b=length).to(psis.device)
bond_angs = repeat(torch.tensor(BB_BUILD_INFO["BONDANGS"]["ca-c-o"]), ' -> b', b=length).to(psis.device)
correction = repeat(torch.tensor(-np.pi), ' -> b', b=length).to(psis.device)
new_coords[:, :, 3] = nerf_torch(new_coords[:, :, 0],
new_coords[:, :, 1],
new_coords[:, :, 2],
bond_lens, bond_angs, psis + correction)
else:
# init oxygen to carbonyl
new_coords[:, :, 3] = predicted[:, :, 2]
return new_coords
# distance utils (distogram to dist mat + masking)
def center_distogram_torch(distogram, bins=DISTANCE_THRESHOLDS, min_t=1., center="mean", wide="std"):
""" Returns the central estimate of a distogram. Median for now.
Inputs:
* distogram: (batch, N, N, B) where B is the number of buckets.
* bins: (B,) containing the cutoffs for the different buckets
* min_t: float. lower bound for distances.
Outputs:
* central: (batch, N, N)
* dispersion: (batch, N, N)
* weights: (batch, N, N)
"""
shape, device = distogram.shape, distogram.device
# threshold to weights and find mean value of each bin
n_bins = ( bins - 0.5 * (bins[2] - bins[1]) ).to(device)
n_bins[0] = 1.5
n_bins[-1] = 1.33*bins[-1] # above last threshold is ignored
max_bin_allowed = torch.tensor(n_bins.shape[0]-1).to(device).long()
# calculate measures of centrality and dispersion -
magnitudes = distogram.sum(dim=-1)
if center == "median":
cum_dist = torch.cumsum(distogram, dim=-1)
medium = 0.5 * cum_dist[..., -1:]
central = torch.searchsorted(cum_dist, medium).squeeze()
central = n_bins[ torch.min(central, max_bin_allowed) ]
elif center == "mean":
central = (distogram * n_bins).sum(dim=-1) / magnitudes
# create mask for last class - (IGNORE_INDEX)
mask = (central <= bins[-2].item()).float()
# mask diagonal to 0 dist - don't do masked filling to avoid inplace errors
diag_idxs = np.arange(shape[-2])
central = expand_dims_to(central, 3 - len(central.shape))
central[:, diag_idxs, diag_idxs] *= 0.
# provide weights
if wide == "var":
dispersion = (distogram * (n_bins - central.unsqueeze(-1))**2).sum(dim=-1) / magnitudes
elif wide == "std":
dispersion = ((distogram * (n_bins - central.unsqueeze(-1))**2).sum(dim=-1) / magnitudes).sqrt()
else:
dispersion = torch.zeros_like(central, device=device)
# rescale to 0-1. lower std / var --> weight=1. set potential nan's to 0
weights = mask / (1+dispersion)
weights[weights != weights] *= 0.
weights[:, diag_idxs, diag_idxs] *= 0.
return central, weights
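# Illustrative usage sketch (not part of the original module): collapse a softmaxed distogram
# into central distances and confidence weights. The logits are random, for demonstration only.
def _example_center_distogram():
    logits = torch.randn(1, 6, 6, constants.DISTOGRAM_BUCKETS)
    distogram = torch.softmax(logits, dim=-1)
    central, weights = center_distogram_torch(distogram, center="mean", wide="std")
    return central.shape, weights.shape                  # both (1, 6, 6)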
# distance matrix to 3d coords: https://github.com/scikit-learn/scikit-learn/blob/42aff4e2e/sklearn/manifold/_mds.py#L279
def mds_torch(pre_dist_mat, weights=None, iters=10, tol=1e-5, eigen=False, verbose=2):
""" Gets distance matrix. Outputs 3d. See below for wrapper.
Assumes (for now) distogram is (N x N) and symmetric
Outs:
* best_3d_coords: (batch x 3 x N)
* historic_stresses: (batch x steps)
"""
device, dtype = pre_dist_mat.device, pre_dist_mat.type()
# ensure batched MDS
pre_dist_mat = expand_dims_to(pre_dist_mat, length = ( 3 - len(pre_dist_mat.shape) ))
# start
batch, N, _ = pre_dist_mat.shape
diag_idxs = np.arange(N)
his = [torch.tensor([np.inf]*batch, device=device)]
# initialize by eigendecomposition: https://www.lptmc.jussieu.fr/user/lesne/bioinformatics.pdf
# follow : https://www.biorxiv.org/content/10.1101/2020.11.27.401232v1.full.pdf
D = pre_dist_mat**2
M = 0.5 * (D[:, :1, :] + D[:, :, :1] - D)
# do loop svd bc it's faster: (2-3x in CPU and 1-2x in GPU)
# https://discuss.pytorch.org/t/batched-svd-lowrank-being-much-slower-than-loop-implementation-both-cpu-and-gpu/119336
svds = [torch.svd_lowrank(mi) for mi in M]
u = torch.stack([svd[0] for svd in svds], dim=0)
s = torch.stack([svd[1] for svd in svds], dim=0)
v = torch.stack([svd[2] for svd in svds], dim=0)
best_3d_coords = torch.bmm(u, torch.diag_embed(s).sqrt())[..., :3]
# only eigen - way faster but not weights
if weights is None and eigen==True:
return torch.transpose( best_3d_coords, -1, -2), torch.zeros_like(torch.stack(his, dim=0))
elif eigen==True:
if verbose:
print("Can't use eigen flag if weights are active. Fallback to iterative")
# continue the iterative way
if weights is None:
weights = torch.ones_like(pre_dist_mat)
# iterative updates:
for i in range(iters):
# compute distance matrix of coords and stress
best_3d_coords = best_3d_coords.contiguous()
dist_mat = torch.cdist(best_3d_coords, best_3d_coords, p=2).clone()
stress = ( weights * (dist_mat - pre_dist_mat)**2 ).sum(dim=(-1,-2)) * 0.5
# perturb - update X using the Guttman transform - sklearn-like
dist_mat[ dist_mat <= 0 ] += 1e-7
ratio = weights * (pre_dist_mat / dist_mat)
B = -ratio
B[:, diag_idxs, diag_idxs] += ratio.sum(dim=-1)
# update
coords = (1. / N * torch.matmul(B, best_3d_coords))
dis = torch.norm(coords, dim=(-1, -2))
if verbose >= 2:
print('it: %d, stress %s' % (i, stress))
# update metrics if relative improvement above tolerance
if (his[-1] - stress / dis).mean() <= tol:
if verbose:
print('breaking at iteration %d with stress %s' % (i,
stress / dis))
break
best_3d_coords = coords
his.append( stress / dis )
return torch.transpose(best_3d_coords, -1,-2), torch.stack(his, dim=0)
def mds_numpy(pre_dist_mat, weights=None, iters=10, tol=1e-5, eigen=False, verbose=2):
""" Gets distance matrix. Outputs 3d. See below for wrapper.
        Assumes (for now) distogram is (N x N) and symmetric
Out:
* best_3d_coords: (3 x N)
* historic_stress
"""
if weights is None:
weights = np.ones_like(pre_dist_mat)
# ensure batched MDS
pre_dist_mat = expand_dims_to(pre_dist_mat, length = ( 3 - len(pre_dist_mat.shape) ))
# start
batch, N, _ = pre_dist_mat.shape
his = [np.inf]
# init random coords
best_stress = np.inf * np.ones(batch)
best_3d_coords = 2*np.random.rand(batch, 3, N) - 1
# iterative updates:
for i in range(iters):
# compute distance matrix of coords and stress
dist_mat = np.linalg.norm(best_3d_coords[:, :, :, None] - best_3d_coords[:, :, None, :], axis=-3)
stress = (( weights * (dist_mat - pre_dist_mat) )**2).sum(axis=(-1, -2)) * 0.5
# perturb - update X using the Guttman transform - sklearn-like
dist_mat[dist_mat == 0] = 1e-7
ratio = weights * (pre_dist_mat / dist_mat)
B = -ratio
B[:, np.arange(N), np.arange(N)] += ratio.sum(axis=-1)
# update - double transpose. TODO: consider fix
coords = (1. / N * np.matmul(best_3d_coords, B))
dis = np.linalg.norm(coords, axis=(-1, -2))
if verbose >= 2:
print('it: %d, stress %s' % (i, stress))
# update metrics if relative improvement above tolerance
if (best_stress - stress / dis).mean() <= tol:
if verbose:
print('breaking at iteration %d with stress %s' % (i,
stress / dis))
break
best_3d_coords = coords
best_stress = stress / dis
his.append(best_stress)
return best_3d_coords, np.array(his)
def get_dihedral_torch(c1, c2, c3, c4):
""" Returns the dihedral angle in radians.
Will use atan2 formula from:
https://en.wikipedia.org/wiki/Dihedral_angle#In_polymer_physics
Can't use torch.dot bc it does not broadcast
Inputs:
        * c1: (batch, 3) or (3,)
        * c2: (batch, 3) or (3,)
        * c3: (batch, 3) or (3,)
        * c4: (batch, 3) or (3,)
"""
u1 = c2 - c1
u2 = c3 - c2
u3 = c4 - c3
return torch.atan2( ( (torch.norm(u2, dim=-1, keepdim=True) * u1) * torch.cross(u2,u3, dim=-1) ).sum(dim=-1) ,
( torch.cross(u1,u2, dim=-1) * torch.cross(u2, u3, dim=-1) ).sum(dim=-1) )
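# Illustrative usage sketch (not part of the original module): the dihedral of four points
# arranged on two perpendicular planes comes out at roughly +/- pi/2. Coordinates are invented.
def _example_dihedral():
    c1 = torch.tensor([[1.0, 0.0, 0.0]])
    c2 = torch.tensor([[0.0, 0.0, 0.0]])
    c3 = torch.tensor([[0.0, 1.0, 0.0]])
    c4 = torch.tensor([[0.0, 1.0, 1.0]])
    return get_dihedral_torch(c1, c2, c3, c4)            # ~ -pi/2 for this geometry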
def get_dihedral_numpy(c1, c2, c3, c4):
""" Returns the dihedral angle in radians.
Will use atan2 formula from:
https://en.wikipedia.org/wiki/Dihedral_angle#In_polymer_physics
Inputs:
        * c1: (batch, 3) or (3,)
        * c2: (batch, 3) or (3,)
        * c3: (batch, 3) or (3,)
        * c4: (batch, 3) or (3,)
"""
u1 = c2 - c1
u2 = c3 - c2
u3 = c4 - c3
return np.arctan2( ( (np.linalg.norm(u2, axis=-1, keepdims=True) * u1) * np.cross(u2,u3, axis=-1)).sum(axis=-1),
( np.cross(u1,u2, axis=-1) * np.cross(u2, u3, axis=-1) ).sum(axis=-1) )
def calc_phis_torch(pred_coords, N_mask, CA_mask, C_mask=None,
prop=True, verbose=0):
""" Filters mirrors selecting the 1 with most N of negative phis.
Used as part of the MDScaling wrapper if arg is passed. See below.
Angle Phi between planes: (Cterm{-1}, N, Ca{0}) and (N{0}, Ca{+1}, Cterm{+1})
Inputs:
* pred_coords: (batch, 3, N) predicted coordinates
* N_mask: (batch, N) boolean mask for N-term positions
* CA_mask: (batch, N) boolean mask for C-alpha positions
* C_mask: (batch, N) or None. boolean mask for C-alpha positions or
automatically calculate from N_mask and CA_mask if None.
* prop: bool. whether to return as a proportion of negative phis.
* verbose: bool. verbosity level
Output: (batch, N) containing the phi angles or (batch,) containing
the proportions.
Note: use [0] since all prots in batch have same backbone
"""
# detach gradients for angle calculation - mirror selection
pred_coords_ = torch.transpose(pred_coords.detach(), -1 , -2).cpu()
# ensure dims
N_mask = expand_dims_to( N_mask, 2-len(N_mask.shape) )
CA_mask = expand_dims_to( CA_mask, 2-len(CA_mask.shape) )
if C_mask is not None:
C_mask = expand_dims_to( C_mask, 2-len(C_mask.shape) )
else:
C_mask = torch.logical_not(torch.logical_or(N_mask,CA_mask))
# select points
n_terms = pred_coords_[:, N_mask[0].squeeze()]
c_alphas = pred_coords_[:, CA_mask[0].squeeze()]
c_terms = pred_coords_[:, C_mask[0].squeeze()]
    # compute phis for every protein in the batch
phis = [get_dihedral_torch(c_terms[i, :-1],
n_terms[i, 1:],
c_alphas[i, 1:],
c_terms[i, 1:]) for i in range(pred_coords.shape[0])]
# return percentage of lower than 0
if prop:
return torch.tensor( [(x<0).float().mean().item() for x in phis] )
return phis
def calc_phis_numpy(pred_coords, N_mask, CA_mask, C_mask=None,
prop=True, verbose=0):
""" Filters mirrors selecting the 1 with most N of negative phis.
Used as part of the MDScaling wrapper if arg is passed. See below.
Angle Phi between planes: (Cterm{-1}, N, Ca{0}) and (N{0}, Ca{+1}, Cterm{+1})
Inputs:
* pred_coords: (batch, 3, N) predicted coordinates
* N_mask: (N, ) boolean mask for N-term positions
* CA_mask: (N, ) boolean mask for C-alpha positions
* C_mask: (N, ) or None. boolean mask for C-alpha positions or
automatically calculate from N_mask and CA_mask if None.
* prop: bool. whether to return as a proportion of negative phis.
* verbose: bool. verbosity level
Output: (batch, N) containing the phi angles or (batch,) containing
the proportions.
"""
# detach gradients for angle calculation - mirror selection
pred_coords_ = np.transpose(pred_coords, (0, 2, 1))
n_terms = pred_coords_[:, N_mask.squeeze()]
c_alphas = pred_coords_[:, CA_mask.squeeze()]
# select c_term auto if not passed
if C_mask is not None:
c_terms = pred_coords_[:, C_mask]
else:
c_terms = pred_coords_[:, (np.ones_like(N_mask)-N_mask-CA_mask).squeeze().astype(bool) ]
    # compute phis for every protein in the batch
phis = [get_dihedral_numpy(c_terms[i, :-1],
n_terms[i, 1:],
c_alphas[i, 1:],
c_terms[i, 1:]) for i in range(pred_coords.shape[0])]
# return percentage of lower than 0
if prop:
return np.array( [(x<0).mean() for x in phis] )
return phis
# alignment by centering + rotation to compute optimal RMSD
# adapted from : https://github.com/charnley/rmsd/
def kabsch_torch(X, Y, cpu=True):
""" Kabsch alignment of X into Y.
Assumes X,Y are both (Dims x N_points). See below for wrapper.
"""
device = X.device
# center X and Y to the origin
X_ = X - X.mean(dim=-1, keepdim=True)
Y_ = Y - Y.mean(dim=-1, keepdim=True)
    # calculate covariance matrix (for each prot in the batch)
C = torch.matmul(X_, Y_.t()).detach()
if cpu:
C = C.cpu()
# Optimal rotation matrix via SVD
if int(torch.__version__.split(".")[1]) < 8:
# warning! int torch 1.<8 : W must be transposed
V, S, W = torch.svd(C)
W = W.t()
else:
V, S, W = torch.linalg.svd(C)
# determinant sign for direction correction
d = (torch.det(V) * torch.det(W)) < 0.0
if d:
S[-1] = S[-1] * (-1)
V[:, -1] = V[:, -1] * (-1)
# Create Rotation matrix U
U = torch.matmul(V, W).to(device)
# calculate rotations
X_ = torch.matmul(X_.t(), U).t()
# return centered and aligned
return X_, Y_
def kabsch_numpy(X, Y):
""" Kabsch alignment of X into Y.
Assumes X,Y are both (Dims x N_points). See below for wrapper.
"""
# center X and Y to the origin
X_ = X - X.mean(axis=-1, keepdims=True)
Y_ = Y - Y.mean(axis=-1, keepdims=True)
    # calculate covariance matrix (for each prot in the batch)
C = np.dot(X_, Y_.transpose())
# Optimal rotation matrix via SVD
V, S, W = np.linalg.svd(C)
# determinant sign for direction correction
d = (np.linalg.det(V) * np.linalg.det(W)) < 0.0
if d:
S[-1] = S[-1] * (-1)
V[:, -1] = V[:, -1] * (-1)
# Create Rotation matrix U
U = np.dot(V, W)
# calculate rotations
X_ = np.dot(X_.T, U).T
# return centered and aligned
return X_, Y_
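# Illustrative usage sketch (not part of the original module): Kabsch-align a rotated copy of
# a point cloud back onto the original; the RMSD afterwards should be ~0. Values are invented.
def _example_kabsch():
    X = np.random.rand(3, 10)
    angle = np.pi / 5
    R = np.array([[np.cos(angle), -np.sin(angle), 0.0],
                  [np.sin(angle),  np.cos(angle), 0.0],
                  [0.0, 0.0, 1.0]])
    X_aligned, Y_centered = kabsch_numpy(R @ X, X)
    return rmsd_numpy(X_aligned, Y_centered)             # ~ 0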
# metrics - more formulas here: http://predictioncenter.org/casp12/doc/help.html
def distmat_loss_torch(X=None, Y=None, X_mat=None, Y_mat=None, p=2, q=2, custom=None, distmat_mask=None):
""" Calculates a loss on the distance matrix - no need to align structs.
Inputs:
* X: (N, d) tensor. the predicted structure. One of (X, X_mat) is needed.
        * X_mat: (N, N) tensor. the predicted distance matrix. Optional.
        * Y: (N, d) tensor. the true structure. One of (Y, Y_mat) is needed.
        * Y_mat: (N, N) tensor. the true distance matrix. Optional.
* p: int. power for the distance calculation (2 for euclidean)
* q: float. power for the scaling of the loss (2 for MSE, 1 for MAE, etc)
* custom: func or None. custom loss over distance matrices.
ex: lambda x,y: 1 - 1/ (1 + ((x-y))**2) (1 is very bad. 0 is good)
* distmat_mask: (N, N) mask (boolean or weights for each ij pos). optional.
"""
assert (X is not None or X_mat is not None) and \
(Y is not None or Y_mat is not None), "The true and predicted coords or dist mats must be provided"
# calculate distance matrices
if X_mat is None:
X_mat = torch.cdist(X, X, p=p)
if Y_mat is None:
Y_mat = torch.cdist(Y, Y, p=p)
if distmat_mask is None:
distmat_mask = torch.ones_like(Y_mat).bool()
# do custom expression if passed
if custom is not None:
        loss = custom(X_mat, Y_mat)  # keep elementwise so the masked mean below applies
# **2 ensures always positive. Later scale back to desired power
else:
loss = ( X_mat - Y_mat )**2
if q != 2:
loss = loss**(q/2)
return loss[distmat_mask].mean()
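# Illustrative usage sketch (not part of the original module): distance-matrix MSE between a
# structure and a noisy copy, with no alignment step required. Values are invented.
def _example_distmat_loss():
    true_struct = torch.randn(10, 3)
    noisy_struct = true_struct + 0.1 * torch.randn(10, 3)
    return distmat_loss_torch(X=noisy_struct, Y=true_struct, p=2, q=2)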
def rmsd_torch(X, Y):
""" Assumes x,y are both (B x D x N). See below for wrapper. """
return torch.sqrt( torch.mean((X - Y)**2, axis=(-1, -2)) )
def rmsd_numpy(X, Y):
""" Assumes x,y are both (B x D x N). See below for wrapper. """
return np.sqrt( np.mean((X - Y)**2, axis=(-1, -2)) )
def gdt_torch(X, Y, cutoffs, weights=None):
""" Assumes x,y are both (B x D x N). see below for wrapper.
* cutoffs is a list of `K` thresholds
* weights is a list of `K` weights (1 x each threshold)
"""
device = X.device
if weights is None:
weights = torch.ones(1,len(cutoffs))
else:
weights = torch.tensor([weights]).to(device)
# set zeros and fill with values
GDT = torch.zeros(X.shape[0], len(cutoffs), device=device)
dist = ((X - Y)**2).sum(dim=1).sqrt()
# iterate over thresholds
for i,cutoff in enumerate(cutoffs):
GDT[:, i] = (dist <= cutoff).float().mean(dim=-1)
# weighted mean
return (GDT*weights).mean(-1)
def gdt_numpy(X, Y, cutoffs, weights=None):
""" Assumes x,y are both (B x D x N). see below for wrapper.
* cutoffs is a list of `K` thresholds
* weights is a list of `K` weights (1 x each threshold)
"""
if weights is None:
weights = np.ones( (1,len(cutoffs)) )
else:
weights = np.array([weights])
# set zeros and fill with values
GDT = np.zeros( (X.shape[0], len(cutoffs)) )
dist = np.sqrt( ((X - Y)**2).sum(axis=1) )
# iterate over thresholds
for i,cutoff in enumerate(cutoffs):
GDT[:, i] = (dist <= cutoff).mean(axis=-1)
# weighted mean
return (GDT*weights).mean(-1)
def tmscore_torch(X, Y):
""" Assumes x,y are both (B x D x N). see below for wrapper. """
L = X.shape[-1]
d0 = 1.24 * np.cbrt(L - 15) - 1.8
# get distance
dist = ((X - Y)**2).sum(dim=1).sqrt()
# formula (see wrapper for source):
return (1 / (1 + (dist/d0)**2)).mean(dim=-1)
def tmscore_numpy(X, Y):
""" Assumes x,y are both (B x D x N). see below for wrapper. """
L = X.shape[-1]
d0 = 1.24 * np.cbrt(L - 15) - 1.8
# get distance
dist = np.sqrt( ((X - Y)**2).sum(axis=1) )
# formula (see wrapper for source):
return (1 / (1 + (dist/d0)**2)).mean(axis=-1)
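# Illustrative usage sketch (not part of the original module): TM-score of a structure against
# a slightly perturbed copy, in the batched (B x 3 x N) layout. Values are invented.
def _example_tmscore():
    native = np.random.rand(1, 3, 50) * 10
    model = native + 0.5 * np.random.rand(1, 3, 50)
    return tmscore_numpy(model, native)                  # close to 1 for small perturbations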
def mdscaling_torch(pre_dist_mat, weights=None, iters=10, tol=1e-5,
fix_mirror=True, N_mask=None, CA_mask=None, C_mask=None,
eigen=False, verbose=2):
""" Handles the specifics of MDS for proteins (mirrors, ...) """
# batched mds for full parallel
preds, stresses = mds_torch(pre_dist_mat, weights=weights,iters=iters,
tol=tol, eigen=eigen, verbose=verbose)
if not fix_mirror:
return preds, stresses
    # no need to calculate multiple mirrors - just correct Z axis
phi_ratios = calc_phis_torch(preds, N_mask, CA_mask, C_mask, prop=True)
to_correct = torch.nonzero( (phi_ratios < 0.5)).view(-1)
# fix mirrors by (-1)*Z if more (+) than (-) phi angles
preds[to_correct, -1] = (-1)*preds[to_correct, -1]
if verbose == 2:
print("Corrected mirror idxs:", to_correct)
return preds, stresses
def mdscaling_numpy(pre_dist_mat, weights=None, iters=10, tol=1e-5,
fix_mirror=True, N_mask=None, CA_mask=None, C_mask=None, verbose=2):
""" Handles the specifics of MDS for proteins (mirrors, ...) """
# batched mds for full parallel
preds, stresses = mds_numpy(pre_dist_mat, weights=weights,iters=iters,
tol=tol, verbose=verbose)
if not fix_mirror:
return preds, stresses
    # no need to calculate multiple mirrors - just correct Z axis
phi_ratios = calc_phis_numpy(preds, N_mask, CA_mask, C_mask, prop=True)
for i,pred in enumerate(preds):
# fix mirrors by (-1)*Z if more (+) than (-) phi angles
        if phi_ratios[i] < 0.5:
preds[i, -1] = (-1)*preds[i, -1]
if verbose == 2:
print("Corrected mirror in struct no.", i)
return preds, stresses
def lddt_ca_torch(true_coords, pred_coords, cloud_mask, r_0=15.):
""" Computes the lddt score for each C_alpha.
https://academic.oup.com/bioinformatics/article/29/21/2722/195896
Inputs:
* true_coords: (b, l, c, d) in sidechainnet format.
* pred_coords: (b, l, c, d) in sidechainnet format.
* cloud_mask : (b, l, c) adapted for scn format.
* r_0: float. maximum inclusion radius in reference struct.
Outputs:
* (b, l) lddt for c_alpha scores (ranging between 0 and 1)
See wrapper below.
"""
device, dtype = true_coords.device, true_coords.type()
thresholds = torch.tensor([0.5, 1, 2, 4], device=device).type(dtype)
# adapt masks
cloud_mask = cloud_mask.bool().cpu()
c_alpha_mask = torch.zeros(cloud_mask.shape[1:], device=device).bool() # doesn't have batch dim
c_alpha_mask[..., 1] = True
# container for c_alpha scores (between 0,1)
wrapper = torch.zeros(true_coords.shape[:2], device=device).type(dtype)
for bi, seq in enumerate(true_coords):
# select atoms for study
c_alphas = cloud_mask[bi]*c_alpha_mask # only pick c_alpha positions
selected_pred = pred_coords[bi, c_alphas, :]
selected_target = true_coords[bi, c_alphas, :]
# get number under distance
dist_mat_pred = torch.cdist(selected_pred, selected_pred, p=2)
dist_mat_target = torch.cdist(selected_target, selected_target, p=2)
under_r0_target = dist_mat_target < r_0
compare_dists = torch.abs(dist_mat_pred - dist_mat_target)[under_r0_target]
# measure diff below threshold
score = torch.zeros_like(under_r0_target).float()
max_score = torch.zeros_like(under_r0_target).float()
max_score[under_r0_target] = 4.
# measure under how many thresholds
score[under_r0_target] = thresholds.shape[0] - \
torch.bucketize( compare_dists, boundaries=thresholds ).float()
# dont include diagonal
l_mask = c_alphas.float().sum(dim=-1).bool()
wrapper[bi, l_mask] = ( score.sum(dim=-1) - thresholds.shape[0] ) / \
( max_score.sum(dim=-1) - thresholds.shape[0] )
return wrapper
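# Illustrative usage sketch (not part of the original module): per-residue lDDT of C-alphas
# for a tiny random structure against a perturbed copy, in sidechainnet layout. Values invented.
def _example_lddt_ca():
    true_coords = torch.randn(1, 6, 14, 3)
    pred_coords = true_coords + 0.1 * torch.randn(1, 6, 14, 3)
    cloud_mask = torch.ones(1, 6, 14).bool()
    return lddt_ca_torch(true_coords, pred_coords, cloud_mask, r_0=15.)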
################
### WRAPPERS ###
################
@set_backend_kwarg
@invoke_torch_or_numpy(mdscaling_torch, mdscaling_numpy)
def MDScaling(pre_dist_mat, **kwargs):
""" Gets distance matrix (-ces). Outputs 3d.
        Assumes (for now) distogram is (N x N) and symmetric.
        For support of distograms: see `center_distogram_torch()`
Inputs:
* pre_dist_mat: (1, N, N) distance matrix.
* weights: optional. (N x N) pairwise relative weights .
* iters: number of iterations to run the algorithm on
* tol: relative tolerance at which to stop the algorithm if no better
improvement is achieved
* backend: one of ["numpy", "torch", "auto"] for backend choice
* fix_mirror: int. number of iterations to run the 3d generation and
pick the best mirror (highest number of negative phis)
* N_mask: indexing array/tensor for indices of backbone N.
Only used if fix_mirror > 0.
* CA_mask: indexing array/tensor for indices of backbone C_alpha.
Only used if fix_mirror > 0.
* verbose: whether to print logs
Outputs:
* best_3d_coords: (3 x N)
* historic_stress: (timesteps, )
"""
pre_dist_mat = expand_dims_to(pre_dist_mat, 3 - len(pre_dist_mat.shape))
return pre_dist_mat, kwargs
@expand_arg_dims(dim_len = 2)
@set_backend_kwarg
@invoke_torch_or_numpy(kabsch_torch, kabsch_numpy)
def Kabsch(A, B):
""" Returns Kabsch-rotated matrices resulting
from aligning A into B.
Adapted from: https://github.com/charnley/rmsd/
* Inputs:
* A,B are (3 x N)
* backend: one of ["numpy", "torch", "auto"] for backend choice
* Outputs: tensor/array of shape (3 x N)
"""
# run calcs - pick the 0th bc an additional dim was created
return A, B
@expand_arg_dims()
@set_backend_kwarg
@invoke_torch_or_numpy(rmsd_torch, rmsd_numpy)
def RMSD(A, B):
""" Returns RMSD score as defined here (lower is better):
https://en.wikipedia.org/wiki/
Root-mean-square_deviation_of_atomic_positions
* Inputs:
* A,B are (B x 3 x N) or (3 x N)
* backend: one of ["numpy", "torch", "auto"] for backend choice
* Outputs: tensor/array of size (B,)
"""
return A, B
@expand_arg_dims()
@set_backend_kwarg
@invoke_torch_or_numpy(gdt_torch, gdt_numpy)
def GDT(A, B, *, mode="TS", cutoffs=[1,2,4,8], weights=None):
""" Returns GDT score as defined here (highre is better):
Supports both TS and HA
http://predictioncenter.org/casp12/doc/help.html
* Inputs:
* A,B are (B x 3 x N) (np.array or torch.tensor)
* cutoffs: defines thresholds for gdt
* weights: list containing the weights
* mode: one of ["numpy", "torch", "auto"] for backend
* Outputs: tensor/array of size (B,)
"""
# define cutoffs for each type of gdt and weights
cutoffs = [0.5,1,2,4] if mode in ["HA", "ha"] else [1,2,4,8]
# calculate GDT
return A, B, cutoffs, {'weights': weights}
@expand_arg_dims()
@set_backend_kwarg
@invoke_torch_or_numpy(tmscore_torch, tmscore_numpy)
def TMscore(A, B):
""" Returns TMscore as defined here (higher is better):
        >0.5 (likely) / >0.6 (highly likely) indicates the same fold;
        random pairs score around 0.2. https://en.wikipedia.org/wiki/Template_modeling_score
Warning! It's not exactly the code in:
https://zhanglab.ccmb.med.umich.edu/TM-score/TMscore.cpp
but will suffice for now.
Inputs:
* A,B are (B x 3 x N) (np.array or torch.tensor)
* mode: one of ["numpy", "torch", "auto"] for backend
Outputs: tensor/array of size (B,)
"""
return A, B
|
[
"numpy.random.rand",
"torch.sin",
"torch.det",
"torch.searchsorted",
"torch.cdist",
"torch.min",
"numpy.array",
"torch.cos",
"numpy.linalg.norm",
"numpy.arange",
"torch.logical_or",
"numpy.mean",
"numpy.cross",
"torch.mean",
"einops.repeat",
"torch.logical_not",
"functools.wraps",
"numpy.dot",
"torch.matmul",
"numpy.matmul",
"torch.zeros_like",
"torch.diag_embed",
"sidechainnet.structure.build_info.SC_BUILD_INFO.items",
"numpy.cbrt",
"torch.ones_like",
"torch.abs",
"numpy.ones",
"torch.__version__.split",
"sidechainnet.utils.sequence.ProteinVocabulary",
"einops.rearrange",
"torch.transpose",
"torch.norm",
"torch.svd",
"numpy.linalg.svd",
"torch.no_grad",
"numpy.transpose",
"torch.bucketize",
"torch.cumsum",
"torch.cat",
"torch.svd_lowrank",
"numpy.ones_like",
"torch.linalg.svd",
"torch.stack",
"numpy.linalg.det",
"torch.nonzero",
"torch.tensor",
"numpy.zeros",
"mdtraj.load_pdb",
"Bio.SeqIO.parse",
"os.system",
"numpy.newaxis",
"torch.zeros",
"torch.linspace",
"torch.cross"
] |
[((641, 660), 'sidechainnet.utils.sequence.ProteinVocabulary', 'ProteinVocabulary', ([], {}), '()\n', (658, 660), False, 'from sidechainnet.utils.sequence import ProteinVocabulary, ONE_TO_THREE_LETTER_MAP\n'), ((842, 898), 'torch.linspace', 'torch.linspace', (['(2)', '(20)'], {'steps': 'constants.DISTOGRAM_BUCKETS'}), '(2, 20, steps=constants.DISTOGRAM_BUCKETS)\n', (856, 898), False, 'import torch\n'), ((1059, 1091), 'torch.cdist', 'torch.cdist', (['coords', 'coords'], {'p': '(2)'}), '(coords, coords, p=2)\n', (1070, 1091), False, 'import torch\n'), ((1109, 1171), 'torch.linspace', 'torch.linspace', (['(2)', '(20)'], {'steps': 'num_buckets', 'device': 'coords.device'}), '(2, 20, steps=num_buckets, device=coords.device)\n', (1123, 1171), False, 'import torch\n'), ((1204, 1247), 'torch.bucketize', 'torch.bucketize', (['distances', 'boundaries[:-1]'], {}), '(distances, boundaries[:-1])\n', (1219, 1247), False, 'import torch\n'), ((1422, 1431), 'functools.wraps', 'wraps', (['fn'], {}), '(fn)\n', (1427, 1431), False, 'from functools import wraps\n'), ((3088, 3109), 'sidechainnet.structure.build_info.SC_BUILD_INFO.items', 'SC_BUILD_INFO.items', ([], {}), '()\n', (3107, 3109), False, 'from sidechainnet.structure.build_info import NUM_COORDS_PER_RES, BB_BUILD_INFO, SC_BUILD_INFO\n'), ((3335, 3347), 'numpy.zeros', 'np.zeros', (['(14)'], {}), '(14)\n', (3343, 3347), True, 'import numpy as np\n'), ((3677, 3689), 'numpy.zeros', 'np.zeros', (['(14)'], {}), '(14)\n', (3685, 3689), True, 'import numpy as np\n'), ((4654, 4725), 'os.system', 'os.system', (['f"""curl https://files.rcsb.org/download/{name}.pdb > {route}"""'], {}), "(f'curl https://files.rcsb.org/download/{name}.pdb > {route}')\n", (4663, 4725), False, 'import os\n'), ((5225, 5246), 'mdtraj.load_pdb', 'mdtraj.load_pdb', (['name'], {}), '(name)\n', (5240, 5246), False, 'import mdtraj\n'), ((6776, 6802), 'numpy.newaxis', 'np.newaxis', (['coords'], {'axis': '(0)'}), '(coords, axis=0)\n', (6786, 6802), True, 'import numpy as np\n'), ((7198, 7225), 'mdtraj.load_pdb', 'mdtraj.load_pdb', (['pdb_destin'], {}), '(pdb_destin)\n', (7213, 7225), False, 'import mdtraj\n'), ((7772, 7804), 'torch.zeros', 'torch.zeros', (['cloud_mask.shape', '(3)'], {}), '(cloud_mask.shape, 3)\n', (7783, 7804), False, 'import torch\n'), ((14153, 14181), 'torch.cat', 'torch.cat', (['batch_mask'], {'dim': '(0)'}), '(batch_mask, dim=0)\n', (14162, 14181), False, 'import torch\n'), ((14937, 14979), 'einops.rearrange', 'rearrange', (['wrapper', '"""... l c -> ... (l c)"""'], {}), "(wrapper, '... l c -> ... 
(l c)')\n", (14946, 14979), False, 'from einops import rearrange, repeat\n'), ((15752, 15782), 'torch.cat', 'torch.cat', (['batch_tokens'], {'dim': '(0)'}), '(batch_tokens, dim=0)\n', (15761, 15782), False, 'import torch\n'), ((16420, 16445), 'torch.zeros_like', 'torch.zeros_like', (['adj_mat'], {}), '(adj_mat)\n', (16436, 16445), False, 'import torch\n'), ((17981, 18047), 'torch.zeros', 'torch.zeros', (['seqs.shape[0]', '(seqs.shape[1] * 14)', '(seqs.shape[1] * 14)'], {}), '(seqs.shape[0], seqs.shape[1] * 14, seqs.shape[1] * 14)\n', (17992, 18047), False, 'import torch\n'), ((18112, 18142), 'torch.zeros', 'torch.zeros', (['seqs.shape[1]', '(14)'], {}), '(seqs.shape[1], 14)\n', (18123, 18142), False, 'import torch\n'), ((20086, 20113), 'torch.cross', 'torch.cross', (['ba', 'cb'], {'dim': '(-1)'}), '(ba, cb, dim=-1)\n', (20097, 20113), False, 'import torch\n'), ((20129, 20161), 'torch.cross', 'torch.cross', (['n_plane', 'cb'], {'dim': '(-1)'}), '(n_plane, cb, dim=-1)\n', (20140, 20161), False, 'import torch\n'), ((20177, 20221), 'torch.stack', 'torch.stack', (['[cb, n_plane_, n_plane]'], {'dim': '(-1)'}), '([cb, n_plane_, n_plane], dim=-1)\n', (20188, 20221), False, 'import torch\n'), ((20237, 20277), 'torch.norm', 'torch.norm', (['rotate'], {'dim': '(-2)', 'keepdim': '(True)'}), '(rotate, dim=-2, keepdim=True)\n', (20247, 20277), False, 'import torch\n'), ((21850, 21910), 'einops.rearrange', 'rearrange', (['backbones', '"""b (l back) d -> b l back d"""'], {'l': 'length'}), "(backbones, 'b (l back) d -> b l back d', l=length)\n", (21859, 21910), False, 'from einops import rearrange, repeat\n'), ((25278, 25298), 'numpy.arange', 'np.arange', (['shape[-2]'], {}), '(shape[-2])\n', (25287, 25298), True, 'import numpy as np\n'), ((26661, 26673), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (26670, 26673), True, 'import numpy as np\n'), ((27228, 27272), 'torch.stack', 'torch.stack', (['[svd[0] for svd in svds]'], {'dim': '(0)'}), '([svd[0] for svd in svds], dim=0)\n', (27239, 27272), False, 'import torch\n'), ((27281, 27325), 'torch.stack', 'torch.stack', (['[svd[1] for svd in svds]'], {'dim': '(0)'}), '([svd[1] for svd in svds], dim=0)\n', (27292, 27325), False, 'import torch\n'), ((27334, 27378), 'torch.stack', 'torch.stack', (['[svd[2] for svd in svds]'], {'dim': '(0)'}), '([svd[2] for svd in svds], dim=0)\n', (27345, 27378), False, 'import torch\n'), ((35352, 35388), 'numpy.transpose', 'np.transpose', (['pred_coords', '(0, 2, 1)'], {}), '(pred_coords, (0, 2, 1))\n', (35364, 35388), True, 'import numpy as np\n'), ((37677, 37693), 'numpy.linalg.svd', 'np.linalg.svd', (['C'], {}), '(C)\n', (37690, 37693), True, 'import numpy as np\n'), ((37910, 37922), 'numpy.dot', 'np.dot', (['V', 'W'], {}), '(V, W)\n', (37916, 37922), True, 'import numpy as np\n'), ((1983, 1992), 'functools.wraps', 'wraps', (['fn'], {}), '(fn)\n', (1988, 1992), False, 'from functools import wraps\n'), ((2428, 2437), 'functools.wraps', 'wraps', (['fn'], {}), '(fn)\n', (2433, 2437), False, 'from functools import wraps\n'), ((10236, 10251), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10249, 10251), False, 'import torch\n'), ((11355, 11370), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11368, 11370), False, 'import torch\n'), ((15144, 15165), 'torch.nonzero', 'torch.nonzero', (['N_mask'], {}), '(N_mask)\n', (15157, 15165), False, 'import torch\n'), ((15167, 15189), 'torch.nonzero', 'torch.nonzero', (['CA_mask'], {}), '(CA_mask)\n', (15180, 15189), False, 'import torch\n'), ((15191, 15212), 'torch.nonzero', 
'torch.nonzero', (['C_mask'], {}), '(C_mask)\n', (15204, 15212), False, 'import torch\n'), ((22099, 22156), 'einops.repeat', 'repeat', (['predicted[:, :, -1]', '"""b l d -> b l scn d"""'], {'scn': '(10)'}), "(predicted[:, :, -1], 'b l d -> b l scn d', scn=10)\n", (22105, 22156), False, 'from einops import rearrange, repeat\n'), ((22198, 22255), 'einops.repeat', 'repeat', (['new_coords[:, :, 1]', '"""b l d -> b l scn d"""'], {'scn': '(10)'}), "(new_coords[:, :, 1], 'b l d -> b l scn d', scn=10)\n", (22204, 22255), False, 'from einops import rearrange, repeat\n'), ((24782, 24813), 'torch.cumsum', 'torch.cumsum', (['distogram'], {'dim': '(-1)'}), '(distogram, dim=-1)\n', (24794, 24813), False, 'import torch\n'), ((26685, 26730), 'torch.tensor', 'torch.tensor', (['([np.inf] * batch)'], {'device': 'device'}), '([np.inf] * batch, device=device)\n', (26697, 26730), False, 'import torch\n'), ((27185, 27206), 'torch.svd_lowrank', 'torch.svd_lowrank', (['mi'], {}), '(mi)\n', (27202, 27206), False, 'import torch\n'), ((27841, 27870), 'torch.ones_like', 'torch.ones_like', (['pre_dist_mat'], {}), '(pre_dist_mat)\n', (27856, 27870), False, 'import torch\n'), ((28527, 28559), 'torch.norm', 'torch.norm', (['coords'], {'dim': '(-1, -2)'}), '(coords, dim=(-1, -2))\n', (28537, 28559), False, 'import torch\n'), ((29029, 29068), 'torch.transpose', 'torch.transpose', (['best_3d_coords', '(-1)', '(-2)'], {}), '(best_3d_coords, -1, -2)\n', (29044, 29068), False, 'import torch\n'), ((29069, 29092), 'torch.stack', 'torch.stack', (['his'], {'dim': '(0)'}), '(his, dim=0)\n', (29080, 29092), False, 'import torch\n'), ((29433, 29459), 'numpy.ones_like', 'np.ones_like', (['pre_dist_mat'], {}), '(pre_dist_mat)\n', (29445, 29459), True, 'import numpy as np\n'), ((29696, 29710), 'numpy.ones', 'np.ones', (['batch'], {}), '(batch)\n', (29703, 29710), True, 'import numpy as np\n'), ((29892, 29983), 'numpy.linalg.norm', 'np.linalg.norm', (['(best_3d_coords[:, :, :, None] - best_3d_coords[:, :, None, :])'], {'axis': '(-3)'}), '(best_3d_coords[:, :, :, None] - best_3d_coords[:, :, None, :\n ], axis=-3)\n', (29906, 29983), True, 'import numpy as np\n'), ((30441, 30478), 'numpy.linalg.norm', 'np.linalg.norm', (['coords'], {'axis': '(-1, -2)'}), '(coords, axis=(-1, -2))\n', (30455, 30478), True, 'import numpy as np\n'), ((30999, 31012), 'numpy.array', 'np.array', (['his'], {}), '(his)\n', (31007, 31012), True, 'import numpy as np\n'), ((36822, 36834), 'torch.svd', 'torch.svd', (['C'], {}), '(C)\n', (36831, 36834), False, 'import torch\n'), ((36882, 36901), 'torch.linalg.svd', 'torch.linalg.svd', (['C'], {}), '(C)\n', (36898, 36901), False, 'import torch\n'), ((37958, 37973), 'numpy.dot', 'np.dot', (['X_.T', 'U'], {}), '(X_.T, U)\n', (37964, 37973), True, 'import numpy as np\n'), ((39258, 39280), 'torch.cdist', 'torch.cdist', (['X', 'X'], {'p': 'p'}), '(X, X, p=p)\n', (39269, 39280), False, 'import torch\n'), ((39320, 39342), 'torch.cdist', 'torch.cdist', (['Y', 'Y'], {'p': 'p'}), '(Y, Y, p=p)\n', (39331, 39342), False, 'import torch\n'), ((39855, 39894), 'torch.mean', 'torch.mean', (['((X - Y) ** 2)'], {'axis': '(-1, -2)'}), '((X - Y) ** 2, axis=(-1, -2))\n', (39865, 39894), False, 'import torch\n'), ((40007, 40043), 'numpy.mean', 'np.mean', (['((X - Y) ** 2)'], {'axis': '(-1, -2)'}), '((X - Y) ** 2, axis=(-1, -2))\n', (40014, 40043), True, 'import numpy as np\n'), ((41076, 41095), 'numpy.array', 'np.array', (['[weights]'], {}), '([weights])\n', (41084, 41095), True, 'import numpy as np\n'), ((45207, 45253), 'torch.cdist', 
'torch.cdist', (['selected_pred', 'selected_pred'], {'p': '(2)'}), '(selected_pred, selected_pred, p=2)\n', (45218, 45253), False, 'import torch\n'), ((45280, 45330), 'torch.cdist', 'torch.cdist', (['selected_target', 'selected_target'], {'p': '(2)'}), '(selected_target, selected_target, p=2)\n', (45291, 45330), False, 'import torch\n'), ((14744, 14777), 'torch.zeros', 'torch.zeros', (['*scn_seq.shape', 'n_aa'], {}), '(*scn_seq.shape, n_aa)\n', (14755, 14777), False, 'import torch\n'), ((17077, 17103), 'torch.zeros_like', 'torch.zeros_like', (['attr_mat'], {}), '(attr_mat)\n', (17093, 17103), False, 'import torch\n'), ((18174, 18194), 'torch.nonzero', 'torch.nonzero', (['scaff'], {}), '(scaff)\n', (18187, 18194), False, 'import torch\n'), ((21772, 21821), 'torch.zeros', 'torch.zeros', (['batch', 'length', 'NUM_COORDS_PER_RES', '(3)'], {}), '(batch, length, NUM_COORDS_PER_RES, 3)\n', (21783, 21821), False, 'import torch\n'), ((22306, 22335), 'torch.logical_not', 'torch.logical_not', (['cloud_mask'], {}), '(cloud_mask)\n', (22323, 22335), False, 'import torch\n'), ((24951, 24986), 'torch.min', 'torch.min', (['central', 'max_bin_allowed'], {}), '(central, max_bin_allowed)\n', (24960, 24986), False, 'import torch\n'), ((25707, 25747), 'torch.zeros_like', 'torch.zeros_like', (['central'], {'device': 'device'}), '(central, device=device)\n', (25723, 25747), False, 'import torch\n'), ((27552, 27591), 'torch.transpose', 'torch.transpose', (['best_3d_coords', '(-1)', '(-2)'], {}), '(best_3d_coords, -1, -2)\n', (27567, 27591), False, 'import torch\n'), ((28480, 28511), 'torch.matmul', 'torch.matmul', (['B', 'best_3d_coords'], {}), '(B, best_3d_coords)\n', (28492, 28511), False, 'import torch\n'), ((29734, 29761), 'numpy.random.rand', 'np.random.rand', (['batch', '(3)', 'N'], {}), '(batch, 3, N)\n', (29748, 29761), True, 'import numpy as np\n'), ((30397, 30425), 'numpy.matmul', 'np.matmul', (['best_3d_coords', 'B'], {}), '(best_3d_coords, B)\n', (30406, 30425), True, 'import numpy as np\n'), ((33718, 33751), 'torch.logical_or', 'torch.logical_or', (['N_mask', 'CA_mask'], {}), '(N_mask, CA_mask)\n', (33734, 33751), False, 'import torch\n'), ((36964, 36976), 'torch.det', 'torch.det', (['V'], {}), '(V)\n', (36973, 36976), False, 'import torch\n'), ((36979, 36991), 'torch.det', 'torch.det', (['W'], {}), '(W)\n', (36988, 36991), False, 'import torch\n'), ((37115, 37133), 'torch.matmul', 'torch.matmul', (['V', 'W'], {}), '(V, W)\n', (37127, 37133), False, 'import torch\n'), ((37751, 37767), 'numpy.linalg.det', 'np.linalg.det', (['V'], {}), '(V)\n', (37764, 37767), True, 'import numpy as np\n'), ((37770, 37786), 'numpy.linalg.det', 'np.linalg.det', (['W'], {}), '(W)\n', (37783, 37786), True, 'import numpy as np\n'), ((41535, 41550), 'numpy.cbrt', 'np.cbrt', (['(L - 15)'], {}), '(L - 15)\n', (41542, 41550), True, 'import numpy as np\n'), ((41839, 41854), 'numpy.cbrt', 'np.cbrt', (['(L - 15)'], {}), '(L - 15)\n', (41846, 41854), True, 'import numpy as np\n'), ((42691, 42722), 'torch.nonzero', 'torch.nonzero', (['(phi_ratios < 0.5)'], {}), '(phi_ratios < 0.5)\n', (42704, 42722), False, 'import torch\n'), ((44511, 44554), 'torch.tensor', 'torch.tensor', (['[0.5, 1, 2, 4]'], {'device': 'device'}), '([0.5, 1, 2, 4], device=device)\n', (44523, 44554), False, 'import torch\n'), ((44646, 44694), 'torch.zeros', 'torch.zeros', (['cloud_mask.shape[1:]'], {'device': 'device'}), '(cloud_mask.shape[1:], device=device)\n', (44657, 44694), False, 'import torch\n'), ((44822, 44871), 'torch.zeros', 'torch.zeros', 
(['true_coords.shape[:2]'], {'device': 'device'}), '(true_coords.shape[:2], device=device)\n', (44833, 44871), False, 'import torch\n'), ((45404, 45446), 'torch.abs', 'torch.abs', (['(dist_mat_pred - dist_mat_target)'], {}), '(dist_mat_pred - dist_mat_target)\n', (45413, 45446), False, 'import torch\n'), ((8614, 8644), 'Bio.SeqIO.parse', 'SeqIO.parse', (['filename', '"""fasta"""'], {}), "(filename, 'fasta')\n", (8625, 8644), False, 'from Bio import SeqIO\n'), ((24877, 24913), 'torch.searchsorted', 'torch.searchsorted', (['cum_dist', 'medium'], {}), '(cum_dist, medium)\n', (24895, 24913), False, 'import torch\n'), ((27611, 27634), 'torch.stack', 'torch.stack', (['his'], {'dim': '(0)'}), '(his, dim=0)\n', (27622, 27634), False, 'import torch\n'), ((28051, 28099), 'torch.cdist', 'torch.cdist', (['best_3d_coords', 'best_3d_coords'], {'p': '(2)'}), '(best_3d_coords, best_3d_coords, p=2)\n', (28062, 28099), False, 'import torch\n'), ((30264, 30276), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (30273, 30276), True, 'import numpy as np\n'), ((30278, 30290), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (30287, 30290), True, 'import numpy as np\n'), ((36709, 36737), 'torch.__version__.split', 'torch.__version__.split', (['"""."""'], {}), "('.')\n", (36732, 36737), False, 'import torch\n'), ((39395, 39417), 'torch.ones_like', 'torch.ones_like', (['Y_mat'], {}), '(Y_mat)\n', (39410, 39417), False, 'import torch\n'), ((40391, 40414), 'torch.tensor', 'torch.tensor', (['[weights]'], {}), '([weights])\n', (40403, 40414), False, 'import torch\n'), ((45519, 45552), 'torch.zeros_like', 'torch.zeros_like', (['under_r0_target'], {}), '(under_r0_target)\n', (45535, 45552), False, 'import torch\n'), ((45581, 45614), 'torch.zeros_like', 'torch.zeros_like', (['under_r0_target'], {}), '(under_r0_target)\n', (45597, 45614), False, 'import torch\n'), ((20555, 20578), 'torch.matmul', 'torch.matmul', (['rotate', 'd'], {}), '(rotate, d)\n', (20567, 20578), False, 'import torch\n'), ((24590, 24623), 'torch.tensor', 'torch.tensor', (['(n_bins.shape[0] - 1)'], {}), '(n_bins.shape[0] - 1)\n', (24602, 24623), False, 'import torch\n'), ((27413, 27432), 'torch.diag_embed', 'torch.diag_embed', (['s'], {}), '(s)\n', (27429, 27432), False, 'import torch\n'), ((31545, 31572), 'torch.cross', 'torch.cross', (['u2', 'u3'], {'dim': '(-1)'}), '(u2, u3, dim=-1)\n', (31556, 31572), False, 'import torch\n'), ((31617, 31644), 'torch.cross', 'torch.cross', (['u1', 'u2'], {'dim': '(-1)'}), '(u1, u2, dim=-1)\n', (31628, 31644), False, 'import torch\n'), ((31646, 31673), 'torch.cross', 'torch.cross', (['u2', 'u3'], {'dim': '(-1)'}), '(u2, u3, dim=-1)\n', (31657, 31673), False, 'import torch\n'), ((32176, 32201), 'numpy.cross', 'np.cross', (['u2', 'u3'], {'axis': '(-1)'}), '(u2, u3, axis=-1)\n', (32184, 32201), True, 'import numpy as np\n'), ((32243, 32268), 'numpy.cross', 'np.cross', (['u1', 'u2'], {'axis': '(-1)'}), '(u1, u2, axis=-1)\n', (32251, 32268), True, 'import numpy as np\n'), ((32270, 32295), 'numpy.cross', 'np.cross', (['u2', 'u3'], {'axis': '(-1)'}), '(u2, u3, axis=-1)\n', (32278, 32295), True, 'import numpy as np\n'), ((45797, 45850), 'torch.bucketize', 'torch.bucketize', (['compare_dists'], {'boundaries': 'thresholds'}), '(compare_dists, boundaries=thresholds)\n', (45812, 45850), False, 'import torch\n'), ((13530, 13581), 'einops.rearrange', 'rearrange', (['coords', '"""... (l c) d -> ... l c d"""'], {'c': '(14)'}), "(coords, '... (l c) d -> ... 
l c d', c=14)\n", (13539, 13581), False, 'from einops import rearrange, repeat\n'), ((20331, 20347), 'torch.cos', 'torch.cos', (['theta'], {}), '(theta)\n', (20340, 20347), False, 'import torch\n'), ((20371, 20387), 'torch.sin', 'torch.sin', (['theta'], {}), '(theta)\n', (20380, 20387), False, 'import torch\n'), ((20390, 20404), 'torch.cos', 'torch.cos', (['chi'], {}), '(chi)\n', (20399, 20404), False, 'import torch\n'), ((20428, 20444), 'torch.sin', 'torch.sin', (['theta'], {}), '(theta)\n', (20437, 20444), False, 'import torch\n'), ((20447, 20461), 'torch.sin', 'torch.sin', (['chi'], {}), '(chi)\n', (20456, 20461), False, 'import torch\n'), ((23051, 23097), 'torch.tensor', 'torch.tensor', (["BB_BUILD_INFO['BONDLENS']['c-o']"], {}), "(BB_BUILD_INFO['BONDLENS']['c-o'])\n", (23063, 23097), False, 'import torch\n'), ((23166, 23215), 'torch.tensor', 'torch.tensor', (["BB_BUILD_INFO['BONDANGS']['ca-c-o']"], {}), "(BB_BUILD_INFO['BONDANGS']['ca-c-o'])\n", (23178, 23215), False, 'import torch\n'), ((23284, 23304), 'torch.tensor', 'torch.tensor', (['(-np.pi)'], {}), '(-np.pi)\n', (23296, 23304), False, 'import torch\n'), ((31500, 31536), 'torch.norm', 'torch.norm', (['u2'], {'dim': '(-1)', 'keepdim': '(True)'}), '(u2, dim=-1, keepdim=True)\n', (31510, 31536), False, 'import torch\n'), ((32125, 32167), 'numpy.linalg.norm', 'np.linalg.norm', (['u2'], {'axis': '(-1)', 'keepdims': '(True)'}), '(u2, axis=-1, keepdims=True)\n', (32139, 32167), True, 'import numpy as np\n'), ((35643, 35663), 'numpy.ones_like', 'np.ones_like', (['N_mask'], {}), '(N_mask)\n', (35655, 35663), True, 'import numpy as np\n')]
|
import numpy as np
import tectosaur.util.gpu as gpu
from tectosaur.fmm.c2e import build_c2e
import logging
logger = logging.getLogger(__name__)
def make_tree(m, cfg, max_pts_per_cell):
tri_pts = m[0][m[1]]
centers = np.mean(tri_pts, axis = 1)
pt_dist = tri_pts - centers[:,np.newaxis,:]
Rs = np.max(np.linalg.norm(pt_dist, axis = 2), axis = 1)
tree = cfg.traversal_module.Tree.build(centers, Rs, max_pts_per_cell)
return tree
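# A minimal sketch of the per-triangle geometry computed in make_tree, on a
# single made-up triangle and without the cfg/traversal_module dependency:
# the centroid and bounding-sphere radius that the tree build consumes.
def _demo_tree_geometry():
    pts = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    tris = np.array([[0, 1, 2]])
    tri_pts = pts[tris]                      # (n_tris, 3, 3)
    centers = np.mean(tri_pts, axis = 1)   # centroid of each triangle
    pt_dist = tri_pts - centers[:,np.newaxis,:]
    Rs = np.max(np.linalg.norm(pt_dist, axis = 2), axis = 1)
    return centers, Rs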
class FMM:
def __init__(self, obs_tree, obs_m, src_tree, src_m, cfg):
self.cfg = cfg
self.obs_tree = obs_tree
self.obs_m = obs_m
self.src_tree = src_tree
self.src_m = src_m
self.gpu_data = dict()
self.setup_interactions()
self.collect_gpu_ops()
self.setup_output_sizes()
self.params_to_gpu()
self.tree_to_gpu(obs_m, src_m)
self.interactions_to_gpu()
self.d2e_u2e_ops_to_gpu()
def setup_interactions(self):
self.interactions = self.cfg.traversal_module.fmmmm_interactions(
self.obs_tree, self.src_tree, self.cfg.inner_r, self.cfg.outer_r,
self.cfg.order, self.cfg.treecode
)
def collect_gpu_ops(self):
self.gpu_ops = dict()
for a in ['s', 'p']:
for b in ['s', 'p']:
name = a + '2' + b
self.gpu_ops[name] = getattr(self.cfg.gpu_module, name + '_' + self.cfg.K.name)
self.gpu_ops['c2e1'] = self.cfg.gpu_module.c2e_kernel1
self.gpu_ops['c2e2'] = self.cfg.gpu_module.c2e_kernel2
def setup_output_sizes(self):
self.n_surf_tris = self.cfg.surf[1].shape[0]
self.n_surf_dofs = self.n_surf_tris * 9
self.n_multipoles = self.n_surf_dofs * self.src_tree.n_nodes
self.n_locals = self.n_surf_dofs * self.obs_tree.n_nodes
self.n_input = self.src_m[1].shape[0] * 9
self.n_output = self.obs_m[1].shape[0] * 9
def float_gpu(self, arr):
return gpu.to_gpu(arr, self.cfg.float_type)
def int_gpu(self, arr):
return gpu.to_gpu(arr, np.int32)
def params_to_gpu(self):
self.gpu_data['params'] = self.float_gpu(self.cfg.params)
def tree_to_gpu(self, obs_m, src_m):
gd = self.gpu_data
gd['obs_pts'] = self.float_gpu(obs_m[0])
gd['obs_tris'] = self.int_gpu(obs_m[1][self.obs_tree.orig_idxs])
gd['src_pts'] = self.float_gpu(src_m[0])
gd['src_tris'] = self.int_gpu(src_m[1][self.src_tree.orig_idxs])
obs_tree_nodes = self.obs_tree.nodes
src_tree_nodes = self.src_tree.nodes
for name, tree in [('src', self.src_tree), ('obs', self.obs_tree)]:
gd[name + '_n_C'] = self.float_gpu(tree.node_centers)
gd[name + '_n_R'] = self.float_gpu(tree.node_Rs)
for name, tree in [('src', src_tree_nodes), ('obs', obs_tree_nodes)]:
gd[name + '_n_start'] = self.int_gpu(np.array([n.start for n in tree]))
gd[name + '_n_end'] = self.int_gpu(np.array([n.end for n in tree]))
def interactions_to_gpu(self):
op_names = ['p2p', 'p2m', 'p2l', 'm2p', 'm2m', 'm2l', 'l2p', 'l2l']
for name in op_names:
op = getattr(self.interactions, name)
if type(op) is list:
for i, op_level in enumerate(op):
self.op_to_gpu(name + str(i), op_level)
else:
self.op_to_gpu(name, op)
def op_to_gpu(self, name, op):
for data_name in ['obs_n_idxs', 'obs_src_starts', 'src_n_idxs']:
self.gpu_data[name + '_' + data_name] = self.int_gpu(
np.array(getattr(op, data_name), copy = False)
)
def d2e_u2e_ops_to_gpu(self):
gd = self.gpu_data
gd['u2e_obs_n_idxs'] = [
self.int_gpu(np.array(self.interactions.u2e[level].obs_n_idxs, copy = False))
for level in range(len(self.interactions.m2m))
]
gd['d2e_obs_n_idxs'] = [
self.int_gpu(np.array(self.interactions.d2e[level].obs_n_idxs, copy = False))
for level in range(len(self.interactions.l2l))
]
u2e_UT, u2e_E, u2e_V = build_c2e(
self.src_tree, self.cfg.outer_r, self.cfg.inner_r, self.cfg
)
gd['u2e_V'] = self.float_gpu(u2e_V)
gd['u2e_E'] = self.float_gpu(u2e_E)
gd['u2e_UT'] = self.float_gpu(u2e_UT)
d2e_UT, d2e_E, d2e_V = build_c2e(
self.obs_tree, self.cfg.inner_r, self.cfg.outer_r, self.cfg
)
gd['d2e_V'] = self.float_gpu(d2e_V)
gd['d2e_E'] = self.float_gpu(d2e_E)
gd['d2e_UT'] = self.float_gpu(d2e_UT)
def to_tree(self, input_orig):
orig_idxs = np.array(self.src_tree.orig_idxs)
input_orig = input_orig.reshape((-1,9))
return input_orig[orig_idxs,:].flatten()
def to_orig(self, output_tree):
orig_idxs = np.array(self.obs_tree.orig_idxs)
output_tree = output_tree.reshape((-1, 9))
output_orig = np.empty_like(output_tree)
output_orig[orig_idxs,:] = output_tree
return output_orig.flatten()
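# A minimal sketch of the reordering done by to_tree/to_orig above, with a
# made-up 3-triangle permutation: to_tree gathers 9-dof blocks into tree
# order, to_orig scatters results back, and the round trip is the identity.
def _demo_dof_reordering():
    orig_idxs = np.array([2, 0, 1])
    input_orig = np.arange(3 * 9, dtype = np.float64)
    tree_vals = input_orig.reshape((-1, 9))[orig_idxs,:].flatten()
    output_orig = np.empty_like(tree_vals.reshape((-1, 9)))
    output_orig[orig_idxs,:] = tree_vals.reshape((-1, 9))
    assert np.allclose(output_orig.flatten(), input_orig)
    return tree_vals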
def report_interactions(fmm_obj):
dim = fmm_obj.obs_m[1].shape[1]
order = fmm_obj.cfg.surf[1].shape[0]
def count_interactions(op_name, op):
obs_surf = False if op_name[2] == 'p' else True
src_surf = False if op_name[0] == 'p' else True
return fmm_obj.cfg.traversal_module.count_interactions(
op, fmm_obj.obs_tree, fmm_obj.src_tree,
obs_surf, src_surf, order
)
n_obs_tris = fmm_obj.obs_m[1].shape[0]
n_src_tris = fmm_obj.src_m[1].shape[0]
level_ops = ['m2m', 'l2l']
ops = ['p2m', 'p2l', 'm2l', 'p2p', 'm2p', 'l2p']
interactions = dict()
for op_name in ops:
op = getattr(fmm_obj.interactions, op_name)
interactions[op_name] = count_interactions(op_name, op)
for op_name in level_ops:
ops = getattr(fmm_obj.interactions, op_name)
for op in ops:
if op_name not in interactions:
interactions[op_name] = 0
interactions[op_name] += count_interactions(op_name, op)
direct_i = n_obs_tris * n_src_tris
fmm_i = sum([v for k,v in interactions.items()])
logger.info('compression factor: ' + str(fmm_i / direct_i))
logger.info('# obs tris: ' + str(n_obs_tris))
logger.info('# src tris: ' + str(n_src_tris))
logger.info('total tree interactions: %e' % fmm_i)
for k, v in interactions.items():
logger.info('total %s interactions: %e' % (k, v))
|
[
"logging.getLogger",
"numpy.mean",
"tectosaur.fmm.c2e.build_c2e",
"tectosaur.util.gpu.to_gpu",
"numpy.array",
"numpy.empty_like",
"numpy.linalg.norm"
] |
[((118, 145), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (135, 145), False, 'import logging\n'), ((227, 251), 'numpy.mean', 'np.mean', (['tri_pts'], {'axis': '(1)'}), '(tri_pts, axis=1)\n', (234, 251), True, 'import numpy as np\n'), ((318, 349), 'numpy.linalg.norm', 'np.linalg.norm', (['pt_dist'], {'axis': '(2)'}), '(pt_dist, axis=2)\n', (332, 349), True, 'import numpy as np\n'), ((1980, 2016), 'tectosaur.util.gpu.to_gpu', 'gpu.to_gpu', (['arr', 'self.cfg.float_type'], {}), '(arr, self.cfg.float_type)\n', (1990, 2016), True, 'import tectosaur.util.gpu as gpu\n'), ((2061, 2086), 'tectosaur.util.gpu.to_gpu', 'gpu.to_gpu', (['arr', 'np.int32'], {}), '(arr, np.int32)\n', (2071, 2086), True, 'import tectosaur.util.gpu as gpu\n'), ((4161, 4231), 'tectosaur.fmm.c2e.build_c2e', 'build_c2e', (['self.src_tree', 'self.cfg.outer_r', 'self.cfg.inner_r', 'self.cfg'], {}), '(self.src_tree, self.cfg.outer_r, self.cfg.inner_r, self.cfg)\n', (4170, 4231), False, 'from tectosaur.fmm.c2e import build_c2e\n'), ((4420, 4490), 'tectosaur.fmm.c2e.build_c2e', 'build_c2e', (['self.obs_tree', 'self.cfg.inner_r', 'self.cfg.outer_r', 'self.cfg'], {}), '(self.obs_tree, self.cfg.inner_r, self.cfg.outer_r, self.cfg)\n', (4429, 4490), False, 'from tectosaur.fmm.c2e import build_c2e\n'), ((4704, 4737), 'numpy.array', 'np.array', (['self.src_tree.orig_idxs'], {}), '(self.src_tree.orig_idxs)\n', (4712, 4737), True, 'import numpy as np\n'), ((4892, 4925), 'numpy.array', 'np.array', (['self.obs_tree.orig_idxs'], {}), '(self.obs_tree.orig_idxs)\n', (4900, 4925), True, 'import numpy as np\n'), ((4999, 5025), 'numpy.empty_like', 'np.empty_like', (['output_tree'], {}), '(output_tree)\n', (5012, 5025), True, 'import numpy as np\n'), ((2920, 2953), 'numpy.array', 'np.array', (['[n.start for n in tree]'], {}), '([n.start for n in tree])\n', (2928, 2953), True, 'import numpy as np\n'), ((3002, 3033), 'numpy.array', 'np.array', (['[n.end for n in tree]'], {}), '([n.end for n in tree])\n', (3010, 3033), True, 'import numpy as np\n'), ((3802, 3863), 'numpy.array', 'np.array', (['self.interactions.u2e[level].obs_n_idxs'], {'copy': '(False)'}), '(self.interactions.u2e[level].obs_n_idxs, copy=False)\n', (3810, 3863), True, 'import numpy as np\n'), ((3995, 4056), 'numpy.array', 'np.array', (['self.interactions.d2e[level].obs_n_idxs'], {'copy': '(False)'}), '(self.interactions.d2e[level].obs_n_idxs, copy=False)\n', (4003, 4056), True, 'import numpy as np\n')]
|
import __init__
import os
#os.environ['LD_LIBRARY_PATH'] += ':/usr/local/cuda-11.1/bin64:/usr/local/cuda-11.2/bin64'
import numpy as np
import torch
import torch.multiprocessing as mp
import torch_geometric.datasets as GeoData
from torch_geometric.loader import DenseDataLoader
import torch_geometric.transforms as T
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data.distributed import DistributedSampler
from config import OptInit
from architecture import DenseDeepGCN, CustomDenseDeepGCN
from utils.ckpt_util import load_pretrained_models, load_pretrained_optimizer, save_checkpoint
from utils.metrics import AverageMeter
import logging
from tqdm import tqdm
from parallel_wrapper import launch
import comm
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter(log_dir='log/mlp4')
def train(model, train_loader, optimizer, criterion, opt, cur_rank):
opt.losses.reset()
model.train()
with tqdm(train_loader) as tqdm_loader:
for i, data in enumerate(tqdm_loader):
opt.iter += 1
desc = 'Epoch:{} Iter:{} [{}/{}] Loss:{Losses.avg: .4f}'\
.format(opt.epoch, opt.iter, i + 1, len(train_loader), Losses=opt.losses)
tqdm_loader.set_description(desc)
inputs = torch.cat((data.pos.transpose(2, 1).unsqueeze(3), data.x.transpose(2, 1).unsqueeze(3)), 1)
gt = data.y.to(opt.device)
# ------------------ zero, output, loss
optimizer.zero_grad()
out = model(inputs)
loss = criterion(out, gt)
# ------------------ optimization
loss.backward()
optimizer.step()
opt.losses.update(loss.item())
def test(model, loader, opt, cur_rank):
Is = np.empty((len(loader), opt.n_classes))
Us = np.empty((len(loader), opt.n_classes))
model.eval()
with torch.no_grad():
for i, data in enumerate(tqdm(loader)):
inputs = torch.cat((data.pos.transpose(2, 1).unsqueeze(3), data.x.transpose(2, 1).unsqueeze(3)), 1)
gt = data.y
out = model(inputs)
pred = out.max(dim=1)[1]
pred_np = pred.cpu().numpy()
target_np = gt.cpu().numpy()
for cl in range(opt.n_classes):
cur_gt_mask = (target_np == cl)
cur_pred_mask = (pred_np == cl)
I = np.sum(np.logical_and(cur_pred_mask, cur_gt_mask), dtype=np.float32)
U = np.sum(np.logical_or(cur_pred_mask, cur_gt_mask), dtype=np.float32)
Is[i, cl] = I
Us[i, cl] = U
ious = np.divide(np.sum(Is, 0), np.sum(Us, 0))
ious[np.isnan(ious)] = 1
iou = np.mean(ious)
if opt.phase == 'test':
for cl in range(opt.n_classes):
logging.info("===> mIOU for class {}: {}".format(cl, ious[cl]))
opt.test_value = iou
logging.info('TEST Epoch: [{}]\t mIoU: {:.4f}\t'.format(opt.epoch, opt.test_value))
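# A minimal sketch of the per-class intersection/union bookkeeping used in
# test() above, on made-up 1-D prediction/target arrays with 3 classes
# instead of real network output.
def _demo_per_class_iou():
    pred_np = np.array([0, 0, 1, 2, 2, 1])
    target_np = np.array([0, 1, 1, 2, 2, 2])
    n_classes = 3
    ious = []
    for cl in range(n_classes):
        cur_gt_mask = (target_np == cl)
        cur_pred_mask = (pred_np == cl)
        I = np.sum(np.logical_and(cur_pred_mask, cur_gt_mask), dtype=np.float32)
        U = np.sum(np.logical_or(cur_pred_mask, cur_gt_mask), dtype=np.float32)
        ious.append(I / U if U > 0 else 1.0)
    return np.mean(ious)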
def epochs(opt):
logging.info('===> Creating dataloader ...')
train_dataset = GeoData.S3DIS(opt.data_dir, opt.area, True, pre_transform=T.NormalizeScale())
train_sampler = DistributedSampler(train_dataset, shuffle=True, seed=opt.seed)
train_loader = DenseDataLoader(train_dataset, batch_size=opt.batch_size, shuffle=False, sampler = train_sampler, num_workers=opt.n_gpus)
test_dataset = GeoData.S3DIS(opt.data_dir, opt.area, train=False, pre_transform=T.NormalizeScale())
test_sampler = DistributedSampler(test_dataset, shuffle=False, seed=opt.seed)
test_loader = DenseDataLoader(test_dataset, batch_size=opt.batch_size, shuffle=False, sampler = test_sampler, num_workers=opt.n_gpus)
opt.n_classes = train_loader.dataset.num_classes
cur_rank = comm.get_local_rank()
logging.info('===> Loading the network ...')
model = DistributedDataParallel(CustomDenseDeepGCN(opt).to(cur_rank),device_ids=[cur_rank], output_device=cur_rank,broadcast_buffers=False).to(cur_rank)
logging.info('===> loading pre-trained ...')
model, opt.best_value, opt.epoch = load_pretrained_models(model, opt.pretrained_model, opt.phase)
logging.info(model)
logging.info('===> Init the optimizer ...')
criterion = torch.nn.CrossEntropyLoss().to(cur_rank)
optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, opt.lr_adjust_freq, opt.lr_decay_rate)
optimizer, scheduler, opt.lr = load_pretrained_optimizer(opt.pretrained_model, optimizer, scheduler, opt.lr)
logging.info('===> Init Metric ...')
opt.losses = AverageMeter()
opt.test_value = 0.
logging.info('===> start training ...')
for _ in range(opt.epoch, opt.total_epochs):
opt.epoch += 1
train_sampler.set_epoch(opt.epoch)
test_sampler.set_epoch(opt.epoch)
logging.info('Epoch:{}'.format(opt.epoch))
train(model, train_loader, optimizer, criterion, opt, cur_rank)
if opt.epoch % opt.eval_freq == 0 and opt.eval_freq != -1:
test(model, test_loader, opt, cur_rank)
scheduler.step()
if comm.is_main_process():
# ------------------ save checkpoints
# min or max. based on the metrics
            is_best = (opt.test_value > opt.best_value)
opt.best_value = max(opt.test_value, opt.best_value)
model_cpu = {k: v.cpu() for k, v in model.state_dict().items()}
save_checkpoint({
'epoch': opt.epoch,
'state_dict': model_cpu,
'optimizer_state_dict': optimizer.state_dict(),
'scheduler_state_dict': scheduler.state_dict(),
'best_value': opt.best_value,
}, is_best, opt.ckpt_dir, opt.exp_name)
# ------------------ tensorboard log
info = {
'loss': opt.losses.avg,
'test_value': opt.test_value,
'lr': scheduler.get_lr()[0]
}
writer.add_scalar('Train Loss', info['loss'], opt.epoch)
writer.add_scalar('Test IOU', info['test_value'], opt.epoch)
writer.add_scalar('lr', info['lr'], opt.epoch)
logging.info('Saving the final model.Finish!')
def hola():
print('Hola')
def main():
opt = OptInit().get_args()
'''
This wrapper taken from detectron2 (https://github.com/facebookresearch/detectron2/blob/main/detectron2/engine/launch.py),
creates n_gpus processes and launches epochs function on each of them.
'''
launch(
epochs,
num_gpus_per_machine=opt.n_gpus,
num_machines=1,
machine_rank=0,
dist_url='auto',
args=(opt,)
)
#epochs(opt)
if __name__ == '__main__':
main()
|
[
"utils.metrics.AverageMeter",
"torch.nn.CrossEntropyLoss",
"torch.utils.data.distributed.DistributedSampler",
"logging.info",
"torch.utils.tensorboard.SummaryWriter",
"numpy.mean",
"config.OptInit",
"comm.is_main_process",
"comm.get_local_rank",
"numpy.isnan",
"parallel_wrapper.launch",
"utils.ckpt_util.load_pretrained_optimizer",
"numpy.logical_and",
"architecture.CustomDenseDeepGCN",
"tqdm.tqdm",
"torch.optim.lr_scheduler.StepLR",
"torch_geometric.transforms.NormalizeScale",
"numpy.logical_or",
"utils.ckpt_util.load_pretrained_models",
"numpy.sum",
"torch.no_grad",
"torch_geometric.loader.DenseDataLoader"
] |
[((799, 832), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {'log_dir': '"""log/mlp4"""'}), "(log_dir='log/mlp4')\n", (812, 832), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((2714, 2727), 'numpy.mean', 'np.mean', (['ious'], {}), '(ious)\n', (2721, 2727), True, 'import numpy as np\n'), ((3013, 3057), 'logging.info', 'logging.info', (['"""===> Creating dataloader ..."""'], {}), "('===> Creating dataloader ...')\n", (3025, 3057), False, 'import logging\n'), ((3176, 3238), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['train_dataset'], {'shuffle': '(True)', 'seed': 'opt.seed'}), '(train_dataset, shuffle=True, seed=opt.seed)\n', (3194, 3238), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((3258, 3381), 'torch_geometric.loader.DenseDataLoader', 'DenseDataLoader', (['train_dataset'], {'batch_size': 'opt.batch_size', 'shuffle': '(False)', 'sampler': 'train_sampler', 'num_workers': 'opt.n_gpus'}), '(train_dataset, batch_size=opt.batch_size, shuffle=False,\n sampler=train_sampler, num_workers=opt.n_gpus)\n', (3273, 3381), False, 'from torch_geometric.loader import DenseDataLoader\n'), ((3503, 3565), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['test_dataset'], {'shuffle': '(False)', 'seed': 'opt.seed'}), '(test_dataset, shuffle=False, seed=opt.seed)\n', (3521, 3565), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((3584, 3705), 'torch_geometric.loader.DenseDataLoader', 'DenseDataLoader', (['test_dataset'], {'batch_size': 'opt.batch_size', 'shuffle': '(False)', 'sampler': 'test_sampler', 'num_workers': 'opt.n_gpus'}), '(test_dataset, batch_size=opt.batch_size, shuffle=False,\n sampler=test_sampler, num_workers=opt.n_gpus)\n', (3599, 3705), False, 'from torch_geometric.loader import DenseDataLoader\n'), ((3773, 3794), 'comm.get_local_rank', 'comm.get_local_rank', ([], {}), '()\n', (3792, 3794), False, 'import comm\n'), ((3800, 3844), 'logging.info', 'logging.info', (['"""===> Loading the network ..."""'], {}), "('===> Loading the network ...')\n", (3812, 3844), False, 'import logging\n'), ((4007, 4051), 'logging.info', 'logging.info', (['"""===> loading pre-trained ..."""'], {}), "('===> loading pre-trained ...')\n", (4019, 4051), False, 'import logging\n'), ((4091, 4153), 'utils.ckpt_util.load_pretrained_models', 'load_pretrained_models', (['model', 'opt.pretrained_model', 'opt.phase'], {}), '(model, opt.pretrained_model, opt.phase)\n', (4113, 4153), False, 'from utils.ckpt_util import load_pretrained_models, load_pretrained_optimizer, save_checkpoint\n'), ((4158, 4177), 'logging.info', 'logging.info', (['model'], {}), '(model)\n', (4170, 4177), False, 'import logging\n'), ((4183, 4226), 'logging.info', 'logging.info', (['"""===> Init the optimizer ..."""'], {}), "('===> Init the optimizer ...')\n", (4195, 4226), False, 'import logging\n'), ((4365, 4451), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimizer', 'opt.lr_adjust_freq', 'opt.lr_decay_rate'], {}), '(optimizer, opt.lr_adjust_freq, opt.\n lr_decay_rate)\n', (4396, 4451), False, 'import torch\n'), ((4482, 4559), 'utils.ckpt_util.load_pretrained_optimizer', 'load_pretrained_optimizer', (['opt.pretrained_model', 'optimizer', 'scheduler', 'opt.lr'], {}), '(opt.pretrained_model, optimizer, scheduler, opt.lr)\n', (4507, 4559), False, 'from utils.ckpt_util import load_pretrained_models, load_pretrained_optimizer, save_checkpoint\n'), ((4565, 4601), 'logging.info', 
'logging.info', (['"""===> Init Metric ..."""'], {}), "('===> Init Metric ...')\n", (4577, 4601), False, 'import logging\n'), ((4619, 4633), 'utils.metrics.AverageMeter', 'AverageMeter', ([], {}), '()\n', (4631, 4633), False, 'from utils.metrics import AverageMeter\n'), ((4663, 4702), 'logging.info', 'logging.info', (['"""===> start training ..."""'], {}), "('===> start training ...')\n", (4675, 4702), False, 'import logging\n'), ((6557, 6670), 'parallel_wrapper.launch', 'launch', (['epochs'], {'num_gpus_per_machine': 'opt.n_gpus', 'num_machines': '(1)', 'machine_rank': '(0)', 'dist_url': '"""auto"""', 'args': '(opt,)'}), "(epochs, num_gpus_per_machine=opt.n_gpus, num_machines=1,\n machine_rank=0, dist_url='auto', args=(opt,))\n", (6563, 6670), False, 'from parallel_wrapper import launch\n'), ((953, 971), 'tqdm.tqdm', 'tqdm', (['train_loader'], {}), '(train_loader)\n', (957, 971), False, 'from tqdm import tqdm\n'), ((1891, 1906), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1904, 1906), False, 'import torch\n'), ((2645, 2658), 'numpy.sum', 'np.sum', (['Is', '(0)'], {}), '(Is, 0)\n', (2651, 2658), True, 'import numpy as np\n'), ((2660, 2673), 'numpy.sum', 'np.sum', (['Us', '(0)'], {}), '(Us, 0)\n', (2666, 2673), True, 'import numpy as np\n'), ((2684, 2698), 'numpy.isnan', 'np.isnan', (['ious'], {}), '(ious)\n', (2692, 2698), True, 'import numpy as np\n'), ((5138, 5160), 'comm.is_main_process', 'comm.is_main_process', ([], {}), '()\n', (5158, 5160), False, 'import comm\n'), ((6213, 6259), 'logging.info', 'logging.info', (['"""Saving the final model.Finish!"""'], {}), "('Saving the final model.Finish!')\n", (6225, 6259), False, 'import logging\n'), ((1941, 1953), 'tqdm.tqdm', 'tqdm', (['loader'], {}), '(loader)\n', (1945, 1953), False, 'from tqdm import tqdm\n'), ((3136, 3154), 'torch_geometric.transforms.NormalizeScale', 'T.NormalizeScale', ([], {}), '()\n', (3152, 3154), True, 'import torch_geometric.transforms as T\n'), ((3464, 3482), 'torch_geometric.transforms.NormalizeScale', 'T.NormalizeScale', ([], {}), '()\n', (3480, 3482), True, 'import torch_geometric.transforms as T\n'), ((4243, 4270), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (4268, 4270), False, 'import torch\n'), ((6314, 6323), 'config.OptInit', 'OptInit', ([], {}), '()\n', (6321, 6323), False, 'from config import OptInit\n'), ((2413, 2455), 'numpy.logical_and', 'np.logical_and', (['cur_pred_mask', 'cur_gt_mask'], {}), '(cur_pred_mask, cur_gt_mask)\n', (2427, 2455), True, 'import numpy as np\n'), ((2502, 2543), 'numpy.logical_or', 'np.logical_or', (['cur_pred_mask', 'cur_gt_mask'], {}), '(cur_pred_mask, cur_gt_mask)\n', (2515, 2543), True, 'import numpy as np\n'), ((3881, 3904), 'architecture.CustomDenseDeepGCN', 'CustomDenseDeepGCN', (['opt'], {}), '(opt)\n', (3899, 3904), False, 'from architecture import DenseDeepGCN, CustomDenseDeepGCN\n')]
|
import torch
import torch.nn as nn
from torchvision.datasets.vision import VisionDataset
from PIL import Image
import os, sys, math
import os.path
import torch
import json
import torch.utils.model_zoo as model_zoo
from Yolo_v2_pytorch.src.utils import *
from Yolo_v2_pytorch.src.yolo_net import Yolo
from Yolo_v2_pytorch.src.yolo_tunning import YoloD
import numpy as np
import torch.nn.functional as F
from Yolo_v2_pytorch.src.rois_utils import anchorboxes
from Yolo_v2_pytorch.src.anotherMissOh_dataset import FaceCLS
from lib.person_model import person_model
label_dict = {'' : 9, 'beach':0, 'cafe':1, 'car':2, 'convenience store':3, 'garden':4, 'home':5, 'hospital':6, 'kitchen':7,
'livingroom':8, 'none':9, 'office':10, 'park':11, 'playground':12, 'pub':13, 'restaurant':14, 'riverside':15, 'road':16,
'rooftop':17, 'room':18, 'studio':19, 'toilet':20, 'wedding hall':21
}
label_dict_wo_none = {'beach':0, 'cafe':1, 'car':2, 'convenience store':3, 'garden':4, 'home':5, 'hospital':6, 'kitchen':7,
'livingroom':8, 'none':9, 'office':10, 'park':11, 'playground':12, 'pub':13, 'restaurant':14, 'riverside':15, 'road':16,
'rooftop':17, 'room':18, 'studio':19, 'toilet':20, 'wedding hall':21
}
def label_mapping(target):
temp = []
for idx in range(len(target)):
if target[idx][0][:3] == 'con':
target[idx][0] = 'convenience store'
temp.append(label_dict[target[idx][0]])
return temp
def label_remapping(target):
inv_label_dict = {v: k for k, v in label_dict_wo_none.items()}
temp = []
for idx in range(len(target)):
temp.append(inv_label_dict[target[idx]])
return temp
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
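# A minimal sketch of calling accuracy() on a made-up batch of 3 samples and
# 4 classes; with these logits the expected results are top-1 ~66.7% and
# top-2 = 100%.
def _demo_topk_accuracy():
    logits = torch.tensor([[0.1, 0.9, 0.0, 0.05],
                          [0.8, 0.05, 0.1, 0.02],
                          [0.2, 0.3, 0.4, 0.1]])
    target = torch.tensor([1, 2, 2])
    top1, top2 = accuracy(logits, target, topk=(1, 2))
    return top1, top2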
def place_buffer(images_norm, buffer_images):
if len(buffer_images) == 0:
buffer_images = images_norm
if len(buffer_images) < 10:
for idx in range(10-len(buffer_images)):
buffer_images = [images_norm[0]] + buffer_images
assert len(buffer_images) == 10, 'Buffer failed'
return buffer_images
class AverageMeter(object):
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
sample_default = [105, 462, 953, 144, 108, 13, 123, 510, 1690, 19914, 1541, 126, 67, 592, 1010, 53, 2087, 0, 1547, 576, 74, 0]
def CB_loss(labels, logits, beta=0.99, gamma=0.5, samples_per_cls=sample_default, no_of_classes=22, loss_type='softmax'):
"""Compute the Class Balanced Loss between `logits` and the ground truth `labels`.
Class Balanced Loss: ((1-beta)/(1-beta^n))*Loss(labels, logits)
where Loss is one of the standard losses used for Neural Networks.
Args:
labels: A int tensor of size [batch].
logits: A float tensor of size [batch, no_of_classes].
samples_per_cls: A python list of size [no_of_classes].
no_of_classes: total number of classes. int
loss_type: string. One of "sigmoid", "focal", "softmax".
beta: float. Hyperparameter for Class balanced loss.
gamma: float. Hyperparameter for Focal loss.
Returns:
cb_loss: A float tensor representing class balanced loss
"""
effective_num = 1.0 - np.power(beta, samples_per_cls)
weights = (1.0 - beta) / np.array(effective_num)
weights = weights / np.sum(weights) * no_of_classes
labels_one_hot = F.one_hot(labels, no_of_classes).cpu().float()
weights = torch.tensor(weights).float()
weights = weights.unsqueeze(0)
weights = weights.repeat(labels_one_hot.shape[0],1) * labels_one_hot
weights = weights.sum(1)
weights = weights.unsqueeze(1)
weights = weights.repeat(1,no_of_classes)
if loss_type == "focal":
cb_loss = focal_loss(labels_one_hot.cuda(), logits, weights.cuda(), gamma)
elif loss_type == "sigmoid":
        cb_loss = F.binary_cross_entropy_with_logits(input = logits, target = labels_one_hot.cuda(), weight = weights.cuda())
elif loss_type == "softmax":
pred = logits.softmax(dim = 1)
cb_loss = F.binary_cross_entropy(input = pred, target = labels_one_hot.cuda(), weight = weights.cuda())
return cb_loss
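# A minimal numeric sketch of the class-balanced re-weighting inside CB_loss:
# the "effective number of samples" per class and the resulting per-class
# weights, for a made-up 3-class count vector (rare classes get the largest
# weights).
def _demo_cb_weights(beta=0.99):
    samples_per_cls = np.array([10, 100, 1000])
    effective_num = 1.0 - np.power(beta, samples_per_cls)
    weights = (1.0 - beta) / effective_num
    weights = weights / np.sum(weights) * len(samples_per_cls)
    return weights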
def focal_loss(labels, logits, alpha, gamma):
"""Compute the focal loss between `logits` and the ground truth `labels`.
Focal loss = -alpha_t * (1-pt)^gamma * log(pt)
where pt is the probability of being classified to the true class.
pt = p (if true class), otherwise pt = 1 - p. p = sigmoid(logit).
Args:
labels: A float tensor of size [batch, num_classes].
logits: A float tensor of size [batch, num_classes].
alpha: A float tensor of size [batch_size]
specifying per-example weight for balanced cross entropy.
gamma: A float scalar modulating loss from hard and easy examples.
Returns:
focal_loss: A float32 scalar representing normalized total loss.
"""
BCLoss = F.binary_cross_entropy_with_logits(input = logits, target = labels,reduction = "none")
if gamma == 0.0:
modulator = 1.0
else:
modulator = torch.exp(-gamma * labels * logits - gamma * torch.log(1 +
torch.exp(-1.0 * logits)))
loss = modulator * BCLoss
weighted_loss = alpha * loss
focal_loss = torch.sum(weighted_loss)
focal_loss /= torch.sum(labels)
return focal_loss
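# A minimal sketch of calling focal_loss on a made-up 2-sample, 3-class batch;
# alpha is simply an all-ones weight tensor here.
def _demo_focal_loss():
    labels = torch.tensor([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    logits = torch.tensor([[2.0, -1.0, -2.0], [-1.0, 0.5, -3.0]])
    alpha = torch.ones_like(labels)
    return focal_loss(labels, logits, alpha, gamma=0.5)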
class place_model(nn.Module):
def __init__(self, num_persons, num_faces, device):
super(place_model, self).__init__()
pre_model = Yolo(num_persons).cuda(device)
num_face_cls = num_faces
self.detector = YoloD(pre_model).cuda(device)
self.place_conv = nn.Sequential(nn.Conv2d(1024, 128, 3, 1, 1, bias=False), nn.BatchNorm2d(128),
nn.LeakyReLU(0.1, inplace=True), nn.MaxPool2d(2, 2))
self.avgpool = nn.AvgPool2d(7, stride=1)
# self.lstm_sc = torch.nn.LSTM(input_size=128, hidden_size=128, num_layers=2, batch_first=True)
# self.bert_fc1 = torch.nn.Linear(128, 768)
# self.bert_fc2 = torch.nn.Linear(768, 128)
self.bert = BERT()
self.fc2 = torch.nn.Linear(128, 1)
self.fc3 = torch.nn.Linear(128, 22)
self.softmax = torch.nn.Softmax(dim=1)
# # define face
# self.face_conv = nn.Conv2d(
# 1024, len(self.detector.anchors) * (5 + num_face_cls), 1, 1, 0, bias=False)
def forward(self, image):
N, T , C, H, W = image.size(0), image.size(1), image.size(2), image.size(3), image.size(4)
image = image.reshape(N*T, C, H, W)
# feature map of backbone
fmap, output_1 = self.detector(image)
fmap = self.place_conv(fmap)
x = self.avgpool(fmap)
x = x.reshape(N, T, -1)
# self.lstm_sc.flatten_parameters()
# N, T = x.size(0), x.size(1)
# x = self.lstm_sc(x)[0]
# x = self.bert_fc1(x)
x = self.bert(x)
# x = self.bert_fc2(x)
change = x.reshape(N*T, -1)
#x = self.fc1(x)
change = self.fc2(change)
change = change.reshape(N, T)
#x = x.reshape(N*T, -1)
M, _ = change.max(1)
w = change - M.view(-1,1)
w = w.exp()
w = w.unsqueeze(1).expand(-1,w.size(1),-1)
w = w.triu(1) - w.tril()
w = w.cumsum(2)
w = w - w.diagonal(dim1=1,dim2=2).unsqueeze(2)
ww = w.new_empty(w.size())
idx = M>=0
ww[idx] = w[idx] + M[idx].neg().exp().view(-1,1,1)
idx = ~idx
ww[idx] = M[idx].exp().view(-1,1,1)*w[idx] + 1
ww = (ww+1e-10).pow(-1)
ww = ww/ww.sum(1,True)
x = ww.transpose(1,2).bmm(x)
x = x.reshape(N*T, -1)
x = self.fc3(x)
x = x.reshape(N*T, -1)
return x
class BERT(nn.Module):
"""
BERT model : Bidirectional Encoder Representations from Transformers.
"""
def __init__(self, vocab_size=0, hidden=128, n_layers=5, attn_heads=8, dropout=0.):
"""
:param vocab_size: vocab_size of total words
:param hidden: BERT model hidden size
:param n_layers: numbers of Transformer blocks(layers)
:param attn_heads: number of attention heads
:param dropout: dropout rate
"""
super(BERT, self).__init__()
self.hidden = hidden
self.n_layers = n_layers
self.attn_heads = attn_heads
# paper noted they used 4*hidden_size for ff_network_hidden_size
self.feed_forward_hidden = hidden * 4
# embedding for BERT, sum of positional, segment, token embeddings
self.embedding = BERTEmbedding(vocab_size=vocab_size, embed_size=hidden)
# multi-layers transformer blocks, deep network
self.transformer_blocks = nn.ModuleList(
[TransformerBlock(hidden, attn_heads, hidden * 4, dropout) for _ in range(n_layers)])
def forward(self, x):
# attention masking for padded token
# torch.ByteTensor([batch_size, 1, seq_len, seq_len])
# mask = (x > 0).unsqueeze(1).repeat(1, x.size(1), 1).unsqueeze(1)
# embedding the indexed sequence to sequence of vectors
x = self.embedding(x)
# running over multiple transformer blocks
for transformer in self.transformer_blocks:
# x = transformer.forward(x, mask)
x = transformer.forward(x, None)
return x
class BERTEmbedding(nn.Module):
"""
BERT Embedding which is consisted with under features
1. TokenEmbedding : normal embedding matrix
2. PositionalEmbedding : adding positional information using sin, cos
2. SegmentEmbedding : adding sentence segment info, (sent_A:1, sent_B:2)
sum of all these features are output of BERTEmbedding
"""
def __init__(self, vocab_size, embed_size, dropout=0.):
"""
:param vocab_size: total vocab size
:param embed_size: embedding size of token embedding
:param dropout: dropout rate
"""
super(BERTEmbedding, self).__init__()
# self.token = TokenEmbedding(vocab_size=vocab_size, embed_size=embed_size)
# self.position = PositionalEmbedding(d_model=self.token.embedding_dim)
# self.segment = SegmentEmbedding(embed_size=self.token.embedding_dim)
self.position = PositionalEmbedding(d_model=embed_size)
self.dropout = nn.Dropout(p=dropout)
self.embed_size = embed_size
def forward(self, sequence):
# x = self.token(sequence) + self.position(sequence) + self.segment(segment_label)
x = sequence + self.position(sequence)
return self.dropout(x)
class PositionalEmbedding(nn.Module):
def __init__(self, d_model, max_len=512):
super(PositionalEmbedding, self).__init__()
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model).float()
        pe.requires_grad = False
position = torch.arange(0, max_len).float().unsqueeze(1)
div_term = (torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model)).exp()
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
return self.pe[:, :x.size(1)]
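# A minimal sketch of PositionalEmbedding's behaviour: the sin/cos table is
# precomputed for max_len positions and sliced to the input sequence length;
# the sizes below are made up for illustration.
def _demo_positional_embedding():
    pos_emb = PositionalEmbedding(d_model=128, max_len=512)
    x = torch.zeros(4, 10, 128)   # (batch, seq_len, d_model)
    pe = pos_emb(x)                # (1, 10, 128), broadcast over the batch
    return pe.shape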
class TransformerBlock(nn.Module):
"""
Bidirectional Encoder = Transformer (self-attention)
Transformer = MultiHead_Attention + Feed_Forward with sublayer connection
"""
def __init__(self, hidden, attn_heads, feed_forward_hidden, dropout):
"""
:param hidden: hidden size of transformer
:param attn_heads: head sizes of multi-head attention
:param feed_forward_hidden: feed_forward_hidden, usually 4*hidden_size
:param dropout: dropout rate
"""
super(TransformerBlock, self).__init__()
self.attention = MultiHeadedAttention(h=attn_heads, d_model=hidden)
self.feed_forward = PositionwiseFeedForward(d_model=hidden, d_ff=feed_forward_hidden, dropout=dropout)
self.input_sublayer = SublayerConnection(size=hidden, dropout=dropout)
self.output_sublayer = SublayerConnection(size=hidden, dropout=dropout)
self.dropout = nn.Dropout(p=dropout)
def forward(self, x, mask):
x = self.input_sublayer(x, lambda _x: self.attention.forward(_x, _x, _x, mask=mask))
x = self.output_sublayer(x, self.feed_forward)
return self.dropout(x)
class MultiHeadedAttention(nn.Module):
"""
Take in model size and number of heads.
"""
def __init__(self, h, d_model, dropout=0.1):
super(MultiHeadedAttention, self).__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_k = d_model // h
self.h = h
self.linear_layers = nn.ModuleList([nn.Linear(d_model, d_model) for _ in range(3)])
self.output_linear = nn.Linear(d_model, d_model)
self.attention = Attention()
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, key, value, mask=None):
batch_size = query.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = [l(x).view(batch_size, -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linear_layers, (query, key, value))]
# 2) Apply attention on all the projected vectors in batch.
x, attn = self.attention(query, key, value, mask=mask, dropout=self.dropout)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous().view(batch_size, -1, self.h * self.d_k)
return self.output_linear(x)
class Attention(nn.Module):
"""
Compute 'Scaled Dot Product Attention'
"""
def __init__(self):
super(Attention, self).__init__()
def forward(self, query, key, value, mask=None, dropout=None):
scores = torch.matmul(query, key.transpose(-2, -1))/math.sqrt(query.size(-1))
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e9)
p_attn = F.softmax(scores, dim=-1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
class PositionwiseFeedForward(nn.Module):
"Implements FFN equation."
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
#self.activation = nn.GELU()
self.activation = nn.ReLU()
def forward(self, x):
return self.w_2(self.dropout(self.activation(self.w_1(x))))
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, size, dropout):
super(SublayerConnection, self).__init__()
self.norm = nn.LayerNorm(size)
self.dropout = nn.Dropout(dropout)
def forward(self, x, sublayer):
"Apply residual connection to any sublayer with the same size."
return x + self.dropout(sublayer(self.norm(x)))
|
[
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.sin",
"torch.exp",
"math.log",
"numpy.array",
"torch.cos",
"torch.sum",
"torch.nn.AvgPool2d",
"torch.nn.functional.softmax",
"torch.arange",
"torch.nn.BatchNorm2d",
"Yolo_v2_pytorch.src.yolo_tunning.YoloD",
"torch.nn.LayerNorm",
"torch.matmul",
"torch.nn.LeakyReLU",
"torch.nn.functional.one_hot",
"torch.nn.Softmax",
"numpy.power",
"torch.nn.Conv2d",
"Yolo_v2_pytorch.src.yolo_net.Yolo",
"numpy.sum",
"torch.tensor",
"torch.nn.MaxPool2d",
"torch.nn.Linear",
"torch.no_grad",
"torch.zeros",
"torch.nn.functional.binary_cross_entropy_with_logits"
] |
[((1750, 1839), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)', 'bias': '(False)'}), '(in_planes, out_planes, kernel_size=3, stride=stride, padding=1,\n bias=False)\n', (1759, 1839), True, 'import torch.nn as nn\n'), ((6545, 6631), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', ([], {'input': 'logits', 'target': 'labels', 'reduction': '"""none"""'}), "(input=logits, target=labels, reduction=\n 'none')\n", (6579, 6631), True, 'import torch.nn.functional as F\n'), ((6889, 6913), 'torch.sum', 'torch.sum', (['weighted_loss'], {}), '(weighted_loss)\n', (6898, 6913), False, 'import torch\n'), ((6933, 6950), 'torch.sum', 'torch.sum', (['labels'], {}), '(labels)\n', (6942, 6950), False, 'import torch\n'), ((1998, 2013), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2011, 2013), False, 'import torch\n'), ((4869, 4900), 'numpy.power', 'np.power', (['beta', 'samples_per_cls'], {}), '(beta, samples_per_cls)\n', (4877, 4900), True, 'import numpy as np\n'), ((4930, 4953), 'numpy.array', 'np.array', (['effective_num'], {}), '(effective_num)\n', (4938, 4953), True, 'import numpy as np\n'), ((7467, 7492), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', (['(7)'], {'stride': '(1)'}), '(7, stride=1)\n', (7479, 7492), True, 'import torch.nn as nn\n'), ((7749, 7772), 'torch.nn.Linear', 'torch.nn.Linear', (['(128)', '(1)'], {}), '(128, 1)\n', (7764, 7772), False, 'import torch\n'), ((7792, 7816), 'torch.nn.Linear', 'torch.nn.Linear', (['(128)', '(22)'], {}), '(128, 22)\n', (7807, 7816), False, 'import torch\n'), ((7840, 7863), 'torch.nn.Softmax', 'torch.nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (7856, 7863), False, 'import torch\n'), ((12019, 12040), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (12029, 12040), True, 'import torch.nn as nn\n'), ((12750, 12780), 'torch.sin', 'torch.sin', (['(position * div_term)'], {}), '(position * div_term)\n', (12759, 12780), False, 'import torch\n'), ((12803, 12833), 'torch.cos', 'torch.cos', (['(position * div_term)'], {}), '(position * div_term)\n', (12812, 12833), False, 'import torch\n'), ((13901, 13922), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (13911, 13922), True, 'import torch.nn as nn\n'), ((14586, 14613), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'd_model'], {}), '(d_model, d_model)\n', (14595, 14613), True, 'import torch.nn as nn\n'), ((14675, 14696), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (14685, 14696), True, 'import torch.nn as nn\n'), ((15790, 15815), 'torch.nn.functional.softmax', 'F.softmax', (['scores'], {'dim': '(-1)'}), '(scores, dim=-1)\n', (15799, 15815), True, 'import torch.nn.functional as F\n'), ((16140, 16164), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'd_ff'], {}), '(d_model, d_ff)\n', (16149, 16164), True, 'import torch.nn as nn\n'), ((16184, 16208), 'torch.nn.Linear', 'nn.Linear', (['d_ff', 'd_model'], {}), '(d_ff, d_model)\n', (16193, 16208), True, 'import torch.nn as nn\n'), ((16232, 16251), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (16242, 16251), True, 'import torch.nn as nn\n'), ((16315, 16324), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (16322, 16324), True, 'import torch.nn as nn\n'), ((16704, 16722), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['size'], {}), '(size)\n', (16716, 16722), True, 'import torch.nn as nn\n'), ((16746, 16765), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), 
'(dropout)\n', (16756, 16765), True, 'import torch.nn as nn\n'), ((4978, 4993), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (4984, 4993), True, 'import numpy as np\n'), ((5094, 5115), 'torch.tensor', 'torch.tensor', (['weights'], {}), '(weights)\n', (5106, 5115), False, 'import torch\n'), ((5506, 5598), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', ([], {'input': 'logits', 'target': 'labels_one_hot', 'weights': 'weights'}), '(input=logits, target=labels_one_hot,\n weights=weights)\n', (5540, 5598), True, 'import torch.nn.functional as F\n'), ((7285, 7326), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1024)', '(128)', '(3)', '(1)', '(1)'], {'bias': '(False)'}), '(1024, 128, 3, 1, 1, bias=False)\n', (7294, 7326), True, 'import torch.nn as nn\n'), ((7328, 7347), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (7342, 7347), True, 'import torch.nn as nn\n'), ((7391, 7422), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (7403, 7422), True, 'import torch.nn as nn\n'), ((7424, 7442), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (7436, 7442), True, 'import torch.nn as nn\n'), ((15902, 15929), 'torch.matmul', 'torch.matmul', (['p_attn', 'value'], {}), '(p_attn, value)\n', (15914, 15929), False, 'import torch\n'), ((7125, 7142), 'Yolo_v2_pytorch.src.yolo_net.Yolo', 'Yolo', (['num_persons'], {}), '(num_persons)\n', (7129, 7142), False, 'from Yolo_v2_pytorch.src.yolo_net import Yolo\n'), ((7215, 7231), 'Yolo_v2_pytorch.src.yolo_tunning.YoloD', 'YoloD', (['pre_model'], {}), '(pre_model)\n', (7220, 7231), False, 'from Yolo_v2_pytorch.src.yolo_tunning import YoloD\n'), ((12495, 12524), 'torch.zeros', 'torch.zeros', (['max_len', 'd_model'], {}), '(max_len, d_model)\n', (12506, 12524), False, 'import torch\n'), ((14509, 14536), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'd_model'], {}), '(d_model, d_model)\n', (14518, 14536), True, 'import torch.nn as nn\n'), ((5032, 5064), 'torch.nn.functional.one_hot', 'F.one_hot', (['labels', 'no_of_classes'], {}), '(labels, no_of_classes)\n', (5041, 5064), True, 'import torch.nn.functional as F\n'), ((12585, 12609), 'torch.arange', 'torch.arange', (['(0)', 'max_len'], {}), '(0, max_len)\n', (12597, 12609), False, 'import torch\n'), ((6780, 6804), 'torch.exp', 'torch.exp', (['(-1.0 * logits)'], {}), '(-1.0 * logits)\n', (6789, 6804), False, 'import torch\n'), ((12651, 12678), 'torch.arange', 'torch.arange', (['(0)', 'd_model', '(2)'], {}), '(0, d_model, 2)\n', (12663, 12678), False, 'import torch\n'), ((12691, 12708), 'math.log', 'math.log', (['(10000.0)'], {}), '(10000.0)\n', (12699, 12708), False, 'import os, sys, math\n')]
|
""" The ARIMA model. """
import torch
import numpy as np
class ARIMA(torch.nn.Module):
"""ARIMA [summary]
"""
def __init__(self,
p: int = 0,
d: int = 0,
q: int = 0) -> None:
"""__init__ General ARIMA model constructor.
Args:
p (int): The number of lag observations included in the model,
also called the lag order.
d (int): The number of times that the raw observations are
differenced, also called the degree of differencing.
q (int): The size of the moving average window,
also called the order of moving average.
"""
super(ARIMA, self).__init__()
self.p = p
self.pWeights = torch.rand(p)
self.pWeights.requires_grad = True
self.q = q
self.qWeights = torch.rand(q)
self.qWeights.requires_grad = True
self.d = d
self.dWeights = torch.rand(d)
self.dWeights.requires_grad = True
self.drift = torch.rand(1)
pass
def forward(self, x: torch.Tensor, err: torch.Tensor) -> torch.Tensor:
"""forward the function that defines the ARIMA(0,1,1) model.
It was written specifically for the case of ARIMA(0,1,1).
Args:
x (torch.Tensor): The input data. All the past observations
err (torch.Tensor): The error term. A normal distribution vector.
Returns:
torch.Tensor: The output of the model. The current prediction.
"""
zData = torch.diff(x)
zPred = self.dWeights*zData[-1] + \
self.qWeights*err[-2] + err[-1] + self.drift
aPred = zPred + x[-1]
return aPred
def generateSample(self, length: int) -> torch.Tensor:
"""generateSample An helper function to generate a sample of data.
Args:
length (int): The length of the sample.
Returns:
torch.Tensor: The generated sample.
"""
sample = torch.zeros(length)
noise = torch.tensor(np.random.normal(
loc=0, scale=1, size=length), dtype=torch.float32)
sample[0] = noise[0]
with torch.no_grad():
for i in range(length-2):
sample[i+2] = self.forward(sample[:i+2], noise[:i+2])
pass
return sample
def fit(self,
trainData: torch.Tensor,
epochs: int,
learningRate: float) -> None:
"""fit A function to fit the model. It is a wrapper of the
Args:
trainData (torch.Tensor): The training data.
epochs (int): The number of epochs.
learningRate (float): The learning rate.
"""
dataLength = len(trainData)
errors = torch.tensor(np.random.normal(
loc=0, scale=1, size=dataLength), dtype=torch.float32)
for epoch in range(epochs):
prediction = torch.zeros(dataLength)
for i in range(dataLength-2):
prediction[i +
2] = self.forward(trainData[0:i+2], errors[0:i+2])
pass
loss = torch.mean(torch.pow(trainData - prediction, 2))
print(f'Epoch {epoch} Loss {loss}')
loss.backward()
self.dWeights.data = self.dWeights.data - \
learningRate * self.dWeights.grad.data
self.dWeights.grad.data.zero_()
self.qWeights.data = self.qWeights.data - \
learningRate * self.qWeights.grad.data
self.qWeights.grad.data.zero_()
pass
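
# --- Illustrative usage sketch (not part of the original module). ---
# Parameter values below are arbitrary examples; forward() above assumes an
# ARIMA(0, 1, 1) configuration, i.e. d = 1 and q = 1.
if __name__ == "__main__":
    model = ARIMA(p=0, d=1, q=1)
    series = model.generateSample(length=100)       # synthetic series drawn from the model itself
    model.fit(series, epochs=5, learningRate=1e-3)   # fit with the manual gradient updates above
    err = torch.tensor(np.random.normal(loc=0, scale=1, size=len(series)), dtype=torch.float32)
    print(model(series, err))                        # one-step-ahead prediction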
|
[
"numpy.random.normal",
"torch.pow",
"torch.diff",
"torch.no_grad",
"torch.zeros",
"torch.rand"
] |
[((789, 802), 'torch.rand', 'torch.rand', (['p'], {}), '(p)\n', (799, 802), False, 'import torch\n'), ((889, 902), 'torch.rand', 'torch.rand', (['q'], {}), '(q)\n', (899, 902), False, 'import torch\n'), ((989, 1002), 'torch.rand', 'torch.rand', (['d'], {}), '(d)\n', (999, 1002), False, 'import torch\n'), ((1067, 1080), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (1077, 1080), False, 'import torch\n'), ((1591, 1604), 'torch.diff', 'torch.diff', (['x'], {}), '(x)\n', (1601, 1604), False, 'import torch\n'), ((2054, 2073), 'torch.zeros', 'torch.zeros', (['length'], {}), '(length)\n', (2065, 2073), False, 'import torch\n'), ((2103, 2148), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': '(1)', 'size': 'length'}), '(loc=0, scale=1, size=length)\n', (2119, 2148), True, 'import numpy as np\n'), ((2226, 2241), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2239, 2241), False, 'import torch\n'), ((2835, 2884), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': '(1)', 'size': 'dataLength'}), '(loc=0, scale=1, size=dataLength)\n', (2851, 2884), True, 'import numpy as np\n'), ((2981, 3004), 'torch.zeros', 'torch.zeros', (['dataLength'], {}), '(dataLength)\n', (2992, 3004), False, 'import torch\n'), ((3207, 3243), 'torch.pow', 'torch.pow', (['(trainData - prediction)', '(2)'], {}), '(trainData - prediction, 2)\n', (3216, 3243), False, 'import torch\n')]
|
import matplotlib.pyplot as plt
import numpy as np
import math
import cv2
kernel = np.ones((3, 3), np.int8)
# Remove noise
def eraseImage (image):
return cv2.erode(image, kernel, iterations = 1)
# Blur the image
def blurImage (image):
return cv2.GaussianBlur(image, (5, 5), 0)
# Sharpen the image (Canny edge detection)
# threshold1, threshold2: the smaller value is the lower bound for edge detection
def edgedImage (image, threshold1 = 30, threshold2 = 150):
return cv2.Canny(image, threshold1, threshold2)
# Dilate the image
def dilateImage (image, level = (3, 3)):
level = np.ones(level, np.int8)
return cv2.dilate(image, level, iterations = 1)
# Get the bounding box of each character
def getCharBox (image, minW = 15, minH = 15):
def setBoundingBox (contours):
box = []
for cnt in contours:
(x, y, w, h) = cv2.boundingRect(cnt)
            # NOTE: characters have a minimum size, so their boxes must exceed basic width/height thresholds
if w > minW and h > minH:
box.append((x, y, w, h))
                # cv2.rectangle(image, (x, y), (x + w, y + h), (127, 255, 0), 2) # draw the boundary for each contour
# cv2.imshow('test', image)
return box
def removeInnerBox (boxes):
        # Sort the character bounding boxes by x coordinate
boxes.sort(key = lambda e: e[0])
results = [boxes[0]]
for i in range(len(boxes) - 1):
x1, y1, w1, h1 = boxes[i]
x2, y2, w2, h2 = boxes[i+1]
if (x2 > x1 and x2 + w2 > x1 + w1):
results.append(boxes[i+1])
return results
contours, hierarchy = cv2.findContours(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
boundingBox = setBoundingBox(contours)
boundingBox = removeInnerBox(boundingBox)
return boundingBox
def showCharBox (image, boxes):
for x, y, w, h in boxes:
        cv2.rectangle(image, (x, y), (x + w, y + h), (127, 255, 0), 2) # draw the boundary for each contour
cv2.imshow('charBox', image)
cv2.waitKey(0)
def showCountour (contours):
row = 2
col = math.ceil(len(contours)/row)
for i, cnt in enumerate(contours, start = 1):
x = []
y = []
# plt.subplot(row, col, i)
for point in cnt:
x.append(point[0][0])
y.append(point[0][1])
plt.plot(x, y)
plt.show()
def resizeImage (image, charBox, size = (50, 50)):
results = []
for (x, y, w, h) in charBox:
char = image[y:y+h, x:x+w]
char = cv2.resize(char, size)
results.append(char)
return results
def diffPictures (picA, picB):
err = np.sum( (picA.astype('float') - picB.astype('float')) ** 2 )
err /= float(picA.shape[0] * picA.shape[1])
return err
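
# --- Illustrative helper (an added sketch, not part of the original script) ---
# One plausible use of diffPictures: match an extracted character image against
# a set of labelled template images. The `templates` dict is hypothetical.
def matchChar (char, templates):
    # templates: dict mapping a label to a 50x50 grayscale template image
    scores = {label: diffPictures(char, template) for label, template in templates.items()}
    return min(scores, key = scores.get)  # label with the smallest mean squared error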
if __name__ == '__main__':
pic = cv2.imread('../captcha_Images/0.png')
print(pic)
cv2.imshow('pic', pic)
cv2.waitKey(0)
erosion = eraseImage(pic)
blured = blurImage(erosion)
edged = edgedImage(blured)
dilated = dilateImage(edged)
charBox = getCharBox(dilated)
showCharBox(dilated, charBox)
dilated = dilateImage(edged, (4, 4))
chars = resizeImage(dilated, charBox)
# input("Press Enter to continue.")
# c = result[0][0][0][0]
# print(c)
# plt.plot(c)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"cv2.rectangle",
"numpy.ones",
"cv2.resize",
"cv2.erode",
"matplotlib.pyplot.plot",
"cv2.boundingRect",
"cv2.imshow",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.findContours",
"cv2.dilate",
"cv2.Canny",
"cv2.imread",
"cv2.GaussianBlur",
"matplotlib.pyplot.show"
] |
[((84, 108), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.int8'], {}), '((3, 3), np.int8)\n', (91, 108), True, 'import numpy as np\n'), ((150, 188), 'cv2.erode', 'cv2.erode', (['image', 'kernel'], {'iterations': '(1)'}), '(image, kernel, iterations=1)\n', (159, 188), False, 'import cv2\n'), ((231, 265), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['image', '(5, 5)', '(0)'], {}), '(image, (5, 5), 0)\n', (247, 265), False, 'import cv2\n'), ((374, 414), 'cv2.Canny', 'cv2.Canny', (['image', 'threshold1', 'threshold2'], {}), '(image, threshold1, threshold2)\n', (383, 414), False, 'import cv2\n'), ((474, 497), 'numpy.ones', 'np.ones', (['level', 'np.int8'], {}), '(level, np.int8)\n', (481, 497), True, 'import numpy as np\n'), ((507, 545), 'cv2.dilate', 'cv2.dilate', (['image', 'level'], {'iterations': '(1)'}), '(image, level, iterations=1)\n', (517, 545), False, 'import cv2\n'), ((1298, 1361), 'cv2.findContours', 'cv2.findContours', (['image', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (1314, 1361), False, 'import cv2\n'), ((1940, 1950), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1948, 1950), True, 'import matplotlib.pyplot as plt\n'), ((2352, 2389), 'cv2.imread', 'cv2.imread', (['"""../captcha_Images/0.png"""'], {}), "('../captcha_Images/0.png')\n", (2362, 2389), False, 'import cv2\n'), ((2405, 2427), 'cv2.imshow', 'cv2.imshow', (['"""pic"""', 'pic'], {}), "('pic', pic)\n", (2415, 2427), False, 'import cv2\n'), ((2430, 2444), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2441, 2444), False, 'import cv2\n'), ((2803, 2817), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2814, 2817), False, 'import cv2\n'), ((2820, 2843), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2841, 2843), False, 'import cv2\n'), ((1532, 1594), 'cv2.rectangle', 'cv2.rectangle', (['image', '(x, y)', '(x + w, y + h)', '(127, 255, 0)', '(2)'], {}), '(image, (x, y), (x + w, y + h), (127, 255, 0), 2)\n', (1545, 1594), False, 'import cv2\n'), ((1615, 1643), 'cv2.imshow', 'cv2.imshow', (['"""charBox"""', 'image'], {}), "('charBox', image)\n", (1625, 1643), False, 'import cv2\n'), ((1648, 1662), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1659, 1662), False, 'import cv2\n'), ((1923, 1937), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (1931, 1937), True, 'import matplotlib.pyplot as plt\n'), ((2091, 2113), 'cv2.resize', 'cv2.resize', (['char', 'size'], {}), '(char, size)\n', (2101, 2113), False, 'import cv2\n'), ((697, 718), 'cv2.boundingRect', 'cv2.boundingRect', (['cnt'], {}), '(cnt)\n', (713, 718), False, 'import cv2\n')]
|
import numpy as np
import tensorflow as tf
import sys, os
sys.path.extend(['alg/', 'models/'])
from visualisation import plot_images
from encoder_no_shared import encoder, recon
from utils import init_variables, save_params, load_params, load_data
from eval_test_ll import construct_eval_func
dimZ = 50
dimH = 500
n_channel = 128
batch_size = 50
lr = 1e-4
K_mc = 10
checkpoint = -1
def main(data_name, method, dimZ, dimH, n_channel, batch_size, K_mc, checkpoint, lbd):
# set up dataset specific stuff
from config import config
labels, n_iter, dimX, shape_high, ll = config(data_name, n_channel)
if data_name == 'mnist':
from mnist import load_mnist
if data_name == 'notmnist':
from notmnist import load_notmnist
# import functionalities
if method == 'onlinevi':
from bayesian_generator import generator_head, generator_shared, \
generator, construct_gen
from onlinevi import construct_optimizer, init_shared_prior, \
update_shared_prior, update_q_sigma
if method in ['ewc', 'noreg', 'laplace', 'si']:
from generator import generator_head, generator_shared, generator, construct_gen
if method in ['ewc', 'noreg']:
from vae_ewc import construct_optimizer, lowerbound
if method == 'ewc': from vae_ewc import update_ewc_loss, compute_fisher
if method == 'laplace':
from vae_laplace import construct_optimizer, lowerbound
from vae_laplace import update_laplace_loss, compute_fisher, init_fisher_accum
if method == 'si':
from vae_si import construct_optimizer, lowerbound, update_si_reg
# then define model
n_layers_shared = 2
batch_size_ph = tf.placeholder(tf.int32, shape=(), name='batch_size')
dec_shared = generator_shared(dimX, dimH, n_layers_shared, 'sigmoid', 'gen')
# initialise sessions
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
string = method
if method in ['ewc', 'laplace', 'si']:
string = string + '_lbd%.1f' % lbd
if method == 'onlinevi' and K_mc > 1:
string = string + '_K%d' % K_mc
path_name = data_name + '_%s/' % string
if not os.path.isdir('save/'):
os.mkdir('save/')
if not os.path.isdir('save/'+path_name):
os.mkdir('save/'+path_name)
print('create path save/' + path_name)
filename = 'save/' + path_name + 'checkpoint'
if checkpoint < 0:
print('training from scratch')
old_var_list = init_variables(sess)
else:
load_params(sess, filename, checkpoint)
checkpoint += 1
# visualise the samples
N_gen = 10**2
path = 'figs/' + path_name
if not os.path.isdir('figs/'):
os.mkdir('figs/')
if not os.path.isdir(path):
os.mkdir(path)
print('create path ' + path)
X_ph = tf.placeholder(tf.float32, shape=(batch_size, dimX), name = 'x_ph')
# now start fitting
N_task = len(labels)
gen_ops = []
X_valid_list = []
X_test_list = []
eval_func_list = []
result_list = []
if method == 'onlinevi':
shared_prior_params = init_shared_prior()
if method in ['ewc', 'noreg']:
ewc_loss = 0.0
if method == 'laplace':
F_accum = init_fisher_accum()
laplace_loss = 0.0
if method == 'si':
old_params_shared = None
si_reg = None
n_layers_head = 2
n_layers_enc = n_layers_shared + n_layers_head - 1
for task in range(1, N_task+1):
# first load data
if data_name == 'mnist':
X_train, X_test, _, _ = load_mnist(digits = labels[task-1], conv = False)
if data_name == 'notmnist':
X_train, X_test, _, _ = load_notmnist(digits = labels[task-1], conv = False)
N_train = int(X_train.shape[0] * 0.9)
X_valid_list.append(X_train[N_train:])
X_train = X_train[:N_train]
X_test_list.append(X_test)
# define the head net and the generator ops
dec = generator(generator_head(dimZ, dimH, n_layers_head, 'gen_%d' % task), dec_shared)
enc = encoder(dimX, dimH, dimZ, n_layers_enc, 'enc_%d' % task)
gen_ops.append(construct_gen(dec, dimZ, sampling=False)(N_gen))
print('construct eval function...')
eval_func_list.append(construct_eval_func(X_ph, enc, dec, ll, \
batch_size_ph, K = 100, sample_W = False))
# then construct loss func and fit func
print('construct fit function...')
if method == 'onlinevi':
fit = construct_optimizer(X_ph, enc, dec, ll, X_train.shape[0], batch_size_ph, \
shared_prior_params, task, K_mc)
if method in ['ewc', 'noreg']:
bound = lowerbound(X_ph, enc, dec, ll)
fit = construct_optimizer(X_ph, batch_size_ph, bound, X_train.shape[0], ewc_loss)
if method == 'ewc':
fisher, var_list = compute_fisher(X_ph, batch_size_ph, bound, X_train.shape[0])
if method == 'laplace':
bound = lowerbound(X_ph, enc, dec, ll)
fit = construct_optimizer(X_ph, batch_size_ph, bound, X_train.shape[0], laplace_loss)
fisher, var_list = compute_fisher(X_ph, batch_size_ph, bound, X_train.shape[0])
if method == 'si':
bound = lowerbound(X_ph, enc, dec, ll)
fit, shared_var_list = construct_optimizer(X_ph, batch_size_ph, bound, X_train.shape[0],
si_reg, old_params_shared, lbd)
if old_params_shared is None:
old_params_shared = sess.run(shared_var_list)
# initialise all the uninitialised stuff
old_var_list = init_variables(sess, old_var_list)
# start training for each task
if method == 'si':
new_params_shared, w_params_shared = fit(sess, X_train, n_iter, lr)
else:
fit(sess, X_train, n_iter, lr)
# plot samples
x_gen_list = sess.run(gen_ops, feed_dict={batch_size_ph: N_gen})
for i in range(len(x_gen_list)):
plot_images(x_gen_list[i], shape_high, path, \
data_name+'_gen_task%d_%d' % (task, i+1))
x_list = [x_gen_list[i][:1] for i in range(len(x_gen_list))]
x_list = np.concatenate(x_list, 0)
tmp = np.zeros([10, dimX])
tmp[:task] = x_list
if task == 1:
x_gen_all = tmp
else:
x_gen_all = np.concatenate([x_gen_all, tmp], 0)
# print test-ll on all tasks
tmp_list = []
for i in range(len(eval_func_list)):
print('task %d' % (i+1), end=' ')
test_ll = eval_func_list[i](sess, X_valid_list[i])
tmp_list.append(test_ll)
result_list.append(tmp_list)
# save param values
save_params(sess, filename, checkpoint)
checkpoint += 1
# update regularisers/priors
if method == 'ewc':
# update EWC loss
print('update ewc loss...')
X_batch = X_train[np.random.permutation(list(range(X_train.shape[0])))[:batch_size]]
ewc_loss = update_ewc_loss(sess, ewc_loss, var_list, fisher, lbd, X_batch)
if method == 'laplace':
# update EWC loss
print('update laplace loss...')
X_batch = X_train[np.random.permutation(list(range(X_train.shape[0])))[:batch_size]]
laplace_loss, F_accum = update_laplace_loss(sess, F_accum, var_list, fisher, lbd, X_batch)
if method == 'onlinevi':
# update prior
print('update prior...')
shared_prior_params = update_shared_prior(sess, shared_prior_params)
# reset the variance of q
update_q_sigma(sess)
if method == 'si':
# update regularisers/priors
print('update SI big omega matrices...')
si_reg, _ = update_si_reg(sess, si_reg, new_params_shared, \
old_params_shared, w_params_shared)
old_params_shared = new_params_shared
plot_images(x_gen_all, shape_high, path, data_name+'_gen_all')
for i in range(len(result_list)):
print(result_list[i])
# save results
if not os.path.isdir("results/"):
os.mkdir("results/")
fname = 'results/' + data_name + '_%s.pkl' % string
import pickle
with open(fname, 'wb') as f:
pickle.dump(result_list, f)
print('test-ll results saved in', fname)
if __name__ == '__main__':
data_name = str(sys.argv[1])
method = str(sys.argv[2])
assert method in ['noreg', 'laplace', 'ewc', 'si', 'onlinevi']
if method == 'onlinevi':
lbd = 1.0 # some placeholder, doesn't matter
else:
lbd = float(sys.argv[3])
main(data_name, method, dimZ, dimH, n_channel, batch_size, K_mc, checkpoint, lbd)
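
# --- Example invocations (added sketch; the script filename below is illustrative) ---
#   python run_vae_continual.py mnist ewc 1.0      # EWC with regularisation weight lbd = 1.0
#   python run_vae_continual.py notmnist onlinevi  # online VI; no lbd argument is needed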
|
[
"eval_test_ll.construct_eval_func",
"generator.construct_gen",
"notmnist.load_notmnist",
"vae_laplace.init_fisher_accum",
"visualisation.plot_images",
"vae_si.update_si_reg",
"tensorflow.placeholder",
"tensorflow.Session",
"utils.load_params",
"os.path.isdir",
"sys.path.extend",
"os.mkdir",
"numpy.concatenate",
"vae_laplace.update_laplace_loss",
"onlinevi.init_shared_prior",
"tensorflow.ConfigProto",
"onlinevi.update_shared_prior",
"vae_ewc.update_ewc_loss",
"config.config",
"vae_si.lowerbound",
"generator.generator_shared",
"vae_laplace.compute_fisher",
"mnist.load_mnist",
"onlinevi.update_q_sigma",
"utils.save_params",
"pickle.dump",
"numpy.zeros",
"vae_si.construct_optimizer",
"generator.generator_head",
"encoder_no_shared.encoder",
"utils.init_variables"
] |
[((58, 94), 'sys.path.extend', 'sys.path.extend', (["['alg/', 'models/']"], {}), "(['alg/', 'models/'])\n", (73, 94), False, 'import sys, os\n'), ((580, 608), 'config.config', 'config', (['data_name', 'n_channel'], {}), '(data_name, n_channel)\n', (586, 608), False, 'from config import config\n'), ((1757, 1810), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '()', 'name': '"""batch_size"""'}), "(tf.int32, shape=(), name='batch_size')\n", (1771, 1810), True, 'import tensorflow as tf\n'), ((1828, 1891), 'generator.generator_shared', 'generator_shared', (['dimX', 'dimH', 'n_layers_shared', '"""sigmoid"""', '"""gen"""'], {}), "(dimX, dimH, n_layers_shared, 'sigmoid', 'gen')\n", (1844, 1891), False, 'from generator import generator_head, generator_shared, generator, construct_gen\n'), ((1932, 1948), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (1946, 1948), True, 'import tensorflow as tf\n'), ((2003, 2028), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (2013, 2028), True, 'import tensorflow as tf\n'), ((2925, 2990), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(batch_size, dimX)', 'name': '"""x_ph"""'}), "(tf.float32, shape=(batch_size, dimX), name='x_ph')\n", (2939, 2990), True, 'import tensorflow as tf\n'), ((8364, 8428), 'visualisation.plot_images', 'plot_images', (['x_gen_all', 'shape_high', 'path', "(data_name + '_gen_all')"], {}), "(x_gen_all, shape_high, path, data_name + '_gen_all')\n", (8375, 8428), False, 'from visualisation import plot_images\n'), ((2272, 2294), 'os.path.isdir', 'os.path.isdir', (['"""save/"""'], {}), "('save/')\n", (2285, 2294), False, 'import sys, os\n'), ((2304, 2321), 'os.mkdir', 'os.mkdir', (['"""save/"""'], {}), "('save/')\n", (2312, 2321), False, 'import sys, os\n'), ((2333, 2367), 'os.path.isdir', 'os.path.isdir', (["('save/' + path_name)"], {}), "('save/' + path_name)\n", (2346, 2367), False, 'import sys, os\n'), ((2375, 2404), 'os.mkdir', 'os.mkdir', (["('save/' + path_name)"], {}), "('save/' + path_name)\n", (2383, 2404), False, 'import sys, os\n'), ((2585, 2605), 'utils.init_variables', 'init_variables', (['sess'], {}), '(sess)\n', (2599, 2605), False, 'from utils import init_variables, save_params, load_params, load_data\n'), ((2624, 2663), 'utils.load_params', 'load_params', (['sess', 'filename', 'checkpoint'], {}), '(sess, filename, checkpoint)\n', (2635, 2663), False, 'from utils import init_variables, save_params, load_params, load_data\n'), ((2772, 2794), 'os.path.isdir', 'os.path.isdir', (['"""figs/"""'], {}), "('figs/')\n", (2785, 2794), False, 'import sys, os\n'), ((2804, 2821), 'os.mkdir', 'os.mkdir', (['"""figs/"""'], {}), "('figs/')\n", (2812, 2821), False, 'import sys, os\n'), ((2833, 2852), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (2846, 2852), False, 'import sys, os\n'), ((2862, 2876), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (2870, 2876), False, 'import sys, os\n'), ((3207, 3226), 'onlinevi.init_shared_prior', 'init_shared_prior', ([], {}), '()\n', (3224, 3226), False, 'from onlinevi import construct_optimizer, init_shared_prior, update_shared_prior, update_q_sigma\n'), ((3331, 3350), 'vae_laplace.init_fisher_accum', 'init_fisher_accum', ([], {}), '()\n', (3348, 3350), False, 'from vae_laplace import update_laplace_loss, compute_fisher, init_fisher_accum\n'), ((4175, 4231), 'encoder_no_shared.encoder', 'encoder', (['dimX', 'dimH', 'dimZ', 'n_layers_enc', "('enc_%d' % task)"], {}), "(dimX, dimH, dimZ, 
n_layers_enc, 'enc_%d' % task)\n", (4182, 4231), False, 'from encoder_no_shared import encoder, recon\n'), ((5870, 5904), 'utils.init_variables', 'init_variables', (['sess', 'old_var_list'], {}), '(sess, old_var_list)\n', (5884, 5904), False, 'from utils import init_variables, save_params, load_params, load_data\n'), ((6499, 6524), 'numpy.concatenate', 'np.concatenate', (['x_list', '(0)'], {}), '(x_list, 0)\n', (6513, 6524), True, 'import numpy as np\n'), ((6539, 6559), 'numpy.zeros', 'np.zeros', (['[10, dimX]'], {}), '([10, dimX])\n', (6547, 6559), True, 'import numpy as np\n'), ((7080, 7119), 'utils.save_params', 'save_params', (['sess', 'filename', 'checkpoint'], {}), '(sess, filename, checkpoint)\n', (7091, 7119), False, 'from utils import init_variables, save_params, load_params, load_data\n'), ((8539, 8564), 'os.path.isdir', 'os.path.isdir', (['"""results/"""'], {}), "('results/')\n", (8552, 8564), False, 'import sys, os\n'), ((8574, 8594), 'os.mkdir', 'os.mkdir', (['"""results/"""'], {}), "('results/')\n", (8582, 8594), False, 'import sys, os\n'), ((8711, 8738), 'pickle.dump', 'pickle.dump', (['result_list', 'f'], {}), '(result_list, f)\n', (8722, 8738), False, 'import pickle\n'), ((3665, 3712), 'mnist.load_mnist', 'load_mnist', ([], {'digits': 'labels[task - 1]', 'conv': '(False)'}), '(digits=labels[task - 1], conv=False)\n', (3675, 3712), False, 'from mnist import load_mnist\n'), ((3787, 3837), 'notmnist.load_notmnist', 'load_notmnist', ([], {'digits': 'labels[task - 1]', 'conv': '(False)'}), '(digits=labels[task - 1], conv=False)\n', (3800, 3837), False, 'from notmnist import load_notmnist\n'), ((4089, 4147), 'generator.generator_head', 'generator_head', (['dimZ', 'dimH', 'n_layers_head', "('gen_%d' % task)"], {}), "(dimZ, dimH, n_layers_head, 'gen_%d' % task)\n", (4103, 4147), False, 'from generator import generator_head, generator_shared, generator, construct_gen\n'), ((4378, 4455), 'eval_test_ll.construct_eval_func', 'construct_eval_func', (['X_ph', 'enc', 'dec', 'll', 'batch_size_ph'], {'K': '(100)', 'sample_W': '(False)'}), '(X_ph, enc, dec, ll, batch_size_ph, K=100, sample_W=False)\n', (4397, 4455), False, 'from eval_test_ll import construct_eval_func\n'), ((4664, 4773), 'vae_si.construct_optimizer', 'construct_optimizer', (['X_ph', 'enc', 'dec', 'll', 'X_train.shape[0]', 'batch_size_ph', 'shared_prior_params', 'task', 'K_mc'], {}), '(X_ph, enc, dec, ll, X_train.shape[0], batch_size_ph,\n shared_prior_params, task, K_mc)\n', (4683, 4773), False, 'from vae_si import construct_optimizer, lowerbound, update_si_reg\n'), ((4881, 4911), 'vae_si.lowerbound', 'lowerbound', (['X_ph', 'enc', 'dec', 'll'], {}), '(X_ph, enc, dec, ll)\n', (4891, 4911), False, 'from vae_si import construct_optimizer, lowerbound, update_si_reg\n'), ((4930, 5005), 'vae_si.construct_optimizer', 'construct_optimizer', (['X_ph', 'batch_size_ph', 'bound', 'X_train.shape[0]', 'ewc_loss'], {}), '(X_ph, batch_size_ph, bound, X_train.shape[0], ewc_loss)\n', (4949, 5005), False, 'from vae_si import construct_optimizer, lowerbound, update_si_reg\n'), ((5188, 5218), 'vae_si.lowerbound', 'lowerbound', (['X_ph', 'enc', 'dec', 'll'], {}), '(X_ph, enc, dec, ll)\n', (5198, 5218), False, 'from vae_si import construct_optimizer, lowerbound, update_si_reg\n'), ((5237, 5316), 'vae_si.construct_optimizer', 'construct_optimizer', (['X_ph', 'batch_size_ph', 'bound', 'X_train.shape[0]', 'laplace_loss'], {}), '(X_ph, batch_size_ph, bound, X_train.shape[0], laplace_loss)\n', (5256, 5316), False, 'from vae_si import 
construct_optimizer, lowerbound, update_si_reg\n'), ((5348, 5408), 'vae_laplace.compute_fisher', 'compute_fisher', (['X_ph', 'batch_size_ph', 'bound', 'X_train.shape[0]'], {}), '(X_ph, batch_size_ph, bound, X_train.shape[0])\n', (5362, 5408), False, 'from vae_laplace import update_laplace_loss, compute_fisher, init_fisher_accum\n'), ((5457, 5487), 'vae_si.lowerbound', 'lowerbound', (['X_ph', 'enc', 'dec', 'll'], {}), '(X_ph, enc, dec, ll)\n', (5467, 5487), False, 'from vae_si import construct_optimizer, lowerbound, update_si_reg\n'), ((5523, 5624), 'vae_si.construct_optimizer', 'construct_optimizer', (['X_ph', 'batch_size_ph', 'bound', 'X_train.shape[0]', 'si_reg', 'old_params_shared', 'lbd'], {}), '(X_ph, batch_size_ph, bound, X_train.shape[0], si_reg,\n old_params_shared, lbd)\n', (5542, 5624), False, 'from vae_si import construct_optimizer, lowerbound, update_si_reg\n'), ((6275, 6369), 'visualisation.plot_images', 'plot_images', (['x_gen_list[i]', 'shape_high', 'path', "(data_name + '_gen_task%d_%d' % (task, i + 1))"], {}), "(x_gen_list[i], shape_high, path, data_name + '_gen_task%d_%d' %\n (task, i + 1))\n", (6286, 6369), False, 'from visualisation import plot_images\n'), ((6687, 6722), 'numpy.concatenate', 'np.concatenate', (['[x_gen_all, tmp]', '(0)'], {}), '([x_gen_all, tmp], 0)\n', (6701, 6722), True, 'import numpy as np\n'), ((7409, 7472), 'vae_ewc.update_ewc_loss', 'update_ewc_loss', (['sess', 'ewc_loss', 'var_list', 'fisher', 'lbd', 'X_batch'], {}), '(sess, ewc_loss, var_list, fisher, lbd, X_batch)\n', (7424, 7472), False, 'from vae_ewc import update_ewc_loss, compute_fisher\n'), ((7712, 7778), 'vae_laplace.update_laplace_loss', 'update_laplace_loss', (['sess', 'F_accum', 'var_list', 'fisher', 'lbd', 'X_batch'], {}), '(sess, F_accum, var_list, fisher, lbd, X_batch)\n', (7731, 7778), False, 'from vae_laplace import update_laplace_loss, compute_fisher, init_fisher_accum\n'), ((7910, 7956), 'onlinevi.update_shared_prior', 'update_shared_prior', (['sess', 'shared_prior_params'], {}), '(sess, shared_prior_params)\n', (7929, 7956), False, 'from onlinevi import construct_optimizer, init_shared_prior, update_shared_prior, update_q_sigma\n'), ((8007, 8027), 'onlinevi.update_q_sigma', 'update_q_sigma', (['sess'], {}), '(sess)\n', (8021, 8027), False, 'from onlinevi import construct_optimizer, init_shared_prior, update_shared_prior, update_q_sigma\n'), ((8182, 8268), 'vae_si.update_si_reg', 'update_si_reg', (['sess', 'si_reg', 'new_params_shared', 'old_params_shared', 'w_params_shared'], {}), '(sess, si_reg, new_params_shared, old_params_shared,\n w_params_shared)\n', (8195, 8268), False, 'from vae_si import construct_optimizer, lowerbound, update_si_reg\n'), ((4255, 4295), 'generator.construct_gen', 'construct_gen', (['dec', 'dimZ'], {'sampling': '(False)'}), '(dec, dimZ, sampling=False)\n', (4268, 4295), False, 'from generator import generator_head, generator_shared, generator, construct_gen\n'), ((5074, 5134), 'vae_laplace.compute_fisher', 'compute_fisher', (['X_ph', 'batch_size_ph', 'bound', 'X_train.shape[0]'], {}), '(X_ph, batch_size_ph, bound, X_train.shape[0])\n', (5088, 5134), False, 'from vae_laplace import update_laplace_loss, compute_fisher, init_fisher_accum\n')]
|
# raise NotImplementedError("Did not check!")
"""MSCOCO Semantic Segmentation pretraining for VOC."""
import os
from tqdm import trange
from PIL import Image, ImageOps, ImageFilter
import numpy as np
import pickle
from gluoncv.data.segbase import SegmentationDataset
class COCOSegmentation(SegmentationDataset):
CAT_LIST = [0, 5, 2, 16, 9, 44, 6, 3, 17, 62, 21, 67, 18, 19, 4,
1, 64, 20, 63, 7, 72]
NUM_CLASS = 21
def __init__(self, root=os.path.expanduser('~/.mxnet/datasets/coco'),
split='train', mode=None, transform=None):
super(COCOSegmentation, self).__init__(root, split, mode, transform)
from pycocotools.coco import COCO
from pycocotools import mask
if split == 'train':
print('train set')
ann_file = os.path.join(root, 'annotations/instances_train2017.json')
ids_file = os.path.join(root, 'annotations/train_ids.mx')
self.root = os.path.join(root, 'train2017')
else:
print('val set')
ann_file = os.path.join(root, 'annotations/instances_val2017.json')
ids_file = os.path.join(root, 'annotations/val_ids.mx')
self.root = os.path.join(root, 'val2017')
self.coco = COCO(ann_file)
self.coco_mask = mask
if os.path.exists(ids_file):
with open(ids_file, 'rb') as f:
self.ids = pickle.load(f)
else:
ids = list(self.coco.imgs.keys())
self.ids = self._preprocess(ids, ids_file)
self.transform = transform
# self.root = os.path.join(root, 'train2017') if split == 'train' else \
# os.path.join(root, 'val2017')
def __getitem__(self, index):
coco = self.coco
img_id = self.ids[index]
img_metadata = coco.loadImgs(img_id)[0]
path = img_metadata['file_name']
img = Image.open(os.path.join(self.root, path)).convert('RGB')
cocotarget = coco.loadAnns(coco.getAnnIds(imgIds=img_id))
mask = Image.fromarray(self._gen_seg_mask(cocotarget, img_metadata['height'],
img_metadata['width']))
# synchrosized transform
if self.mode == 'train':
img, mask = self._sync_transform(img, mask)
elif self.mode == 'val':
img, mask = self._val_sync_transform(img, mask)
else:
assert self.mode == 'testval'
mask = self._mask_transform(mask)
# general resize, normalize and toTensor
if self.transform is not None:
img = self.transform(img)
return img, mask
def __len__(self):
return len(self.ids)
def _gen_seg_mask(self, target, h, w):
mask = np.zeros((h, w), dtype=np.uint8)
coco_mask = self.coco_mask
for instance in target:
rle = coco_mask.frPyObjects(instance['segmentation'], h, w)
m = coco_mask.decode(rle)
cat = instance['category_id']
if cat in self.CAT_LIST:
c = self.CAT_LIST.index(cat)
else:
continue
if len(m.shape) < 3:
mask[:, :] += (mask == 0) * (m * c)
else:
mask[:, :] += (mask == 0) * (((np.sum(m, axis=2)) > 0) * c).astype(np.uint8)
return mask
def _preprocess(self, ids, ids_file):
print("Preprocessing mask, this will take a while." + \
"But don't worry, it only run once for each split.")
tbar = trange(len(ids))
new_ids = []
for i in tbar:
img_id = ids[i]
cocotarget = self.coco.loadAnns(self.coco.getAnnIds(imgIds=img_id))
img_metadata = self.coco.loadImgs(img_id)[0]
mask = self._gen_seg_mask(cocotarget, img_metadata['height'],
img_metadata['width'])
# more than 1k pixels
if (mask > 0).sum() > 1000:
new_ids.append(img_id)
tbar.set_description('Doing: {}/{}, got {} qualified images'. \
format(i, len(ids), len(new_ids)))
print('Found number of qualified images: ', len(new_ids))
with open(ids_file, 'wb') as f:
pickle.dump(new_ids, f)
return new_ids
@property
def classes(self):
"""Category names."""
return ('background', 'airplane', 'bicycle', 'bird', 'boat', 'bottle',
'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
'motorcycle', 'person', 'potted-plant', 'sheep', 'sofa', 'train',
'tv')
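
# --- Illustrative usage sketch (added; not part of the original module) ---
# Assumes MXNet/GluonCV are installed and the MSCOCO 2017 images/annotations are
# already laid out under the default root (~/.mxnet/datasets/coco); the
# normalisation constants are the usual ImageNet values.
if __name__ == '__main__':
    from mxnet.gluon.data.vision import transforms
    input_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    trainset = COCOSegmentation(split='train', mode='train', transform=input_transform)
    img, mask = trainset[0]
    print('sample shapes:', img.shape, mask.shape)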
|
[
"os.path.exists",
"pickle.dump",
"os.path.join",
"pycocotools.coco.COCO",
"pickle.load",
"numpy.sum",
"numpy.zeros",
"os.path.expanduser"
] |
[((471, 515), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.mxnet/datasets/coco"""'], {}), "('~/.mxnet/datasets/coco')\n", (489, 515), False, 'import os\n'), ((1266, 1280), 'pycocotools.coco.COCO', 'COCO', (['ann_file'], {}), '(ann_file)\n', (1270, 1280), False, 'from pycocotools.coco import COCO\n'), ((1322, 1346), 'os.path.exists', 'os.path.exists', (['ids_file'], {}), '(ids_file)\n', (1336, 1346), False, 'import os\n'), ((2767, 2799), 'numpy.zeros', 'np.zeros', (['(h, w)'], {'dtype': 'np.uint8'}), '((h, w), dtype=np.uint8)\n', (2775, 2799), True, 'import numpy as np\n'), ((816, 874), 'os.path.join', 'os.path.join', (['root', '"""annotations/instances_train2017.json"""'], {}), "(root, 'annotations/instances_train2017.json')\n", (828, 874), False, 'import os\n'), ((898, 944), 'os.path.join', 'os.path.join', (['root', '"""annotations/train_ids.mx"""'], {}), "(root, 'annotations/train_ids.mx')\n", (910, 944), False, 'import os\n'), ((969, 1000), 'os.path.join', 'os.path.join', (['root', '"""train2017"""'], {}), "(root, 'train2017')\n", (981, 1000), False, 'import os\n'), ((1067, 1123), 'os.path.join', 'os.path.join', (['root', '"""annotations/instances_val2017.json"""'], {}), "(root, 'annotations/instances_val2017.json')\n", (1079, 1123), False, 'import os\n'), ((1147, 1191), 'os.path.join', 'os.path.join', (['root', '"""annotations/val_ids.mx"""'], {}), "(root, 'annotations/val_ids.mx')\n", (1159, 1191), False, 'import os\n'), ((1216, 1245), 'os.path.join', 'os.path.join', (['root', '"""val2017"""'], {}), "(root, 'val2017')\n", (1228, 1245), False, 'import os\n'), ((4285, 4308), 'pickle.dump', 'pickle.dump', (['new_ids', 'f'], {}), '(new_ids, f)\n', (4296, 4308), False, 'import pickle\n'), ((1419, 1433), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1430, 1433), False, 'import pickle\n'), ((1915, 1944), 'os.path.join', 'os.path.join', (['self.root', 'path'], {}), '(self.root, path)\n', (1927, 1944), False, 'import os\n'), ((3294, 3311), 'numpy.sum', 'np.sum', (['m'], {'axis': '(2)'}), '(m, axis=2)\n', (3300, 3311), True, 'import numpy as np\n')]
|
"""Cell parameter random initializations."""
from typing import Any, Dict
import numpy as np
from ..parameters import (
Height,
NewCellBendLowerLower,
NewCellBendLowerUpper,
NewCellBendOverallLower,
NewCellBendOverallUpper,
NewCellBendUpperLower,
NewCellBendUpperUpper,
NewCellLength1Mean,
NewCellLength1Std,
NewCellLength2Mean,
NewCellLength2Std,
NewCellLengthAbsoluteMax,
NewCellLengthAbsoluteMin,
NewCellRadiusFromCenter,
NewCellWidthAbsoluteMax,
NewCellWidthAbsoluteMin,
NewCellWidthMean,
NewCellWidthStd,
Width,
)
from ..random import RRF, enforce_bounds
RandomSequenceType = Dict[str, Any]
class RandomWidthLength:
"""Random initializations for cell width/lengths."""
@staticmethod
def random_sequences(sequence: RRF) -> RandomSequenceType:
assert NewCellLength1Mean.value > NewCellWidthMean.value
assert NewCellLength2Mean.value > NewCellWidthMean.value
def ensure_length_greater_width(length, width):
for inner_length, inner_width in zip(length, width):
if inner_length > inner_width:
yield [inner_length, inner_width]
return dict(
length__width=RRF.chain(
ensure_length_greater_width,
length=RRF.compose(
lambda raw_lengths, choice: raw_lengths[choice],
raw_lengths=RRF.chain(
enforce_bounds,
iterator=sequence.multivariate_normal(
[NewCellLength1Mean.value, NewCellLength2Mean.value],
[
[NewCellLength1Std.value, 0.0],
[0.0, NewCellLength2Std.value],
],
),
minimum=NewCellLengthAbsoluteMin.value,
maximum=NewCellLengthAbsoluteMax.value,
),
choice=sequence.integers(0, 1),
),
width=RRF.chain(
enforce_bounds,
iterator=sequence.normal(
NewCellWidthMean.value, NewCellWidthStd.value
),
minimum=NewCellWidthAbsoluteMin.value,
maximum=NewCellWidthAbsoluteMax.value,
),
)
)
class RandomBentRod:
"""Random initializations for cell bent radii."""
@staticmethod
def random_sequences(sequence: RRF) -> RandomSequenceType:
return dict(
bend_overall=sequence.uniform(
NewCellBendOverallLower.value,
NewCellBendOverallUpper.value,
),
bend_upper=sequence.uniform(
NewCellBendUpperLower.value, NewCellBendUpperUpper.value
),
bend_lower=sequence.uniform(
NewCellBendLowerLower.value, NewCellBendLowerUpper.value
),
)
class RandomPosition:
"""Random initializations for cell positions."""
@staticmethod
def random_sequences(sequence: RRF) -> RandomSequenceType:
return dict(
position=RRF.compose(
lambda radius, angle: [
float(radius * np.cos(angle) + Width.value / 2),
float(radius * np.sin(angle) + Height.value / 2),
],
radius=sequence.uniform(0, NewCellRadiusFromCenter.value),
angle=RRF.wrap(sequence.uniform(0, 360.0), np.radians),
)
)
class RandomAngle:
"""Random initializations for cell angles."""
@staticmethod
def random_sequences(sequence: RRF) -> RandomSequenceType:
return dict(angle=RRF.wrap(sequence.uniform(0, 360.0), np.radians))
class RandomFluorescence:
"""Random initializations for fluorescences."""
@staticmethod
def random_sequences(sequence: RRF) -> RandomSequenceType:
return dict(fluorescences=sequence.uniform(0, 360.0, (1,)))
|
[
"numpy.sin",
"numpy.cos"
] |
[((3328, 3341), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (3334, 3341), True, 'import numpy as np\n'), ((3397, 3410), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (3403, 3410), True, 'import numpy as np\n')]
|
import qiskit
import numpy as np
import matplotlib.pyplot as plt
import json
from graph import *
# Random comment
P = 1
def makeCircuit(inbits, outbits):
q = qiskit.QuantumRegister(inbits+outbits)
c = qiskit.ClassicalRegister(inbits+outbits)
qc = qiskit.QuantumCircuit(q, c)
q_input = [q[i] for i in range(outbits,outbits+inbits)]
q_output = [q[j] for j in range(outbits)]
return qc, c, q_input, q_output
# measure all qubits in q_input register, return dictionary of samples
def measureInput(qc, q_input, c):
for i in range(len(q_input)):
qc.measure(q_input[i], c[i])
job = qiskit.execute(qc, backend='local_qasm_simulator', shots=1024)
return job.result().get_counts(qc)
def test5(qc, q_input, c):
data = measureInput(qc, q_input, c)
# assemble data from dictionary into list
parsed = []
xticks = []
n = len(q_input)
for i in range(2**n):
bits = np.binary_repr(i, width=n)
xticks.append(bits)
bits += "00"
if bits in data: parsed.append(data[bits])
else: parsed.append(0)
plt.bar(range(2**n), parsed)
plt.xticks(range(2**n),xticks,rotation="vertical")
plt.xlabel('Outcomes')
plt.ylabel('Counts')
plt.title('Measurement Histogram')
plt.show()
def applyQAOA(gamma, beta, graph):
### INIT REGS
qc, c, q_input, q_output = makeCircuit(graph.getNumNodes(), 1);
PENALTY = graph.getMaxEdges()
### H on every input register
for node in q_input:
qc.h(node)
complement = graph.getEdgesComp();
edges = graph.getEdges()
### APPLY V AND W
### APPLY V
# EDGES IN THE GRAPH
for edge in edges:
nodeList = edge.getNodes()
qc.cu1(-gamma, q_input[nodeList[0].name], q_input[nodeList[1].name])
# EDGES NOT IN THE GRAPH
for edge in complement:
nodeList = edge.getNodes()
qc.cu1(PENALTY*gamma, q_input[nodeList[0].name], q_input[nodeList[1].name])
### APPLY W
for node in q_input:
qc.h(node)
qc.u1(2*beta, node)
qc.h(node)
### Measure
results = measureInput(qc, q_input, c)
### Compute the result expectation
### Parse the result list.
# B/c we only care about counts associated with input register
# we combine the counts of states with same input register bits
counts = dict()
for key in results:
if key[1:] not in counts:
counts[key[1:]] = results[key]
else:
counts[key[1:]] += results[key]
#print(counts)
eox = 0
eox2 = 0
for val in counts:
cliqNum = 0
for edge in edges:
nodeList = edge.getNodes()
#print("Node 1:", nodeList[0].name,"Node 2:", nodeList[1].name)
if val[nodeList[0].name] == '1' and val[nodeList[1].name] == '1':
cliqNum += 1
for edge in complement:
nodeList = edge.getNodes()
if val[nodeList[0].name] == '1' and val[nodeList[1].name] == '1':
cliqNum -= PENALTY
eox += counts[val]/1024 * cliqNum
eox2 += (cliqNum**2) * counts[val]/1024
std = np.sqrt((len(counts)/(len(counts) -1))*(eox2 - eox**2))
return eox, std
### gradient ascent optimizer
# graph is graph to optimize over
# epsilon controls how far out the delta is calculated
# eta is learning rate
# threshold: when the mean gradient magnitude (|dgamma| + |dbeta|)/2 drops below it, we treat (gamma, beta) as a max
def optimize(graph, epsilon, eta, threshold):
count = 0
gamma = 2
beta = 2
    dgamma = (applyQAOA(gamma + epsilon, beta, graph)[0] - applyQAOA(gamma - epsilon, beta, graph)[0])/(2*epsilon)
    dbeta = (applyQAOA(gamma, beta + epsilon, graph)[0] - applyQAOA(gamma, beta - epsilon, graph)[0])/(2*epsilon)
flipper = True #Alternate between maxing gamma and maxing beta
while((abs(dgamma) + abs(dbeta))/2 > threshold):
if(flipper):
if (dgamma > 0):
gamma = (gamma + (dgamma * eta)) % (2*np.pi)
elif (dgamma < 0):
gamma = (gamma - (dgamma * eta)) % (2*np.pi)
            dgamma = (applyQAOA(gamma + epsilon, beta, graph)[0] - applyQAOA(gamma - epsilon, beta, graph)[0])/(2*epsilon)
else:
if(dbeta > 0):
beta = (beta + (dbeta * eta)) % np.pi
elif (dbeta < 0):
beta = (beta - (dbeta * eta)) % np.pi
            dbeta = (applyQAOA(gamma, beta + epsilon, graph)[0] - applyQAOA(gamma, beta - epsilon, graph)[0])/(2*epsilon)
count+=1
print("Count", count, "dg", dgamma, "db", dbeta)
flipper = not flipper
print(count)
return gamma, beta
def main():
###TESTING GRAPH
#0---1
#| / |
#3---2
myGraph = Graph(0, 0)
nodes = [Node(i) for i in range(4)]
edges = []
edges.append(Edge(nodes[0], nodes[1]))
edges.append(Edge(nodes[1], nodes[2]))
edges.append(Edge(nodes[2], nodes[3]))
edges.append(Edge(nodes[3], nodes[0]))
edges.append(Edge(nodes[3], nodes[1]))
for n in nodes:
myGraph.addNode(n)
for e in edges:
myGraph.addEdge(e)
### Run the algorithm
#expect = applyQAOA(gamma, beta, myGraph)
#print("Expectation Value:", expect)
### OPTIMIZE
#bestGamma, bestBeta = optimize(myGraph, 0.1, 0.1, 0.05)
#print("BestGamma: ", bestGamma, "bestBeta", bestBeta)
#print("Optimized Expectation value", applyQAOA(bestGamma, bestBeta, myGraph))
#print("Optimal Gamma:", bestGamma, "Optimal Beta:", bestBeta)
#BestGamma: 4.6015625 bestBeta 0.18702062766020688
#Optimized Expectation value -0.3115234375
### Make graphs.
# I'm thinking we hold one variable constant at its maxed value
# and vary the other and vice versa.
# Gamma has a larger range than beta. Do we want more data points for gamma than beta?
# The last page of the worksheet says exactly which graphs we need in our report
# so make sure we have at least those
BestGamma = 4.6015625
BestBeta = 0.18702062766020688
betas = np.linspace(0, np.pi, 10)
gammas = np.linspace(0, 2*np.pi, 100)
varyingBeta = []
varyingGamma = []
betaSTD = []
gammaSTD = []
y = []
std = []
for gammaa in gammas:
e, s = applyQAOA(gammaa, BestBeta, myGraph)
y.append(e)
std.append(s)
with open("varyingGamma.txt", 'w') as f:
json.dump(y, f)
with open("gammaSTD.txt", 'w') as f:
json.dump(std, f)
"""
y = []
std = []
for betaa in betas:
e, s = applyQAOA(BestGamma, betaa, myGraph)
y.append(e)
std.append(s)
with open("varyingBeta.txt", 'w') as f:
json.dump(y, f)
with open("betaSTD.txt", 'w') as f:
json.dump(std, f)
"""
with open("varyingGamma.txt", 'r') as f:
varyingGamma = json.load(f)
#with open("varyingBeta.txt", 'r') as f:
# varyingBeta = json.load(f)
#with open("betaSTD.txt", 'r') as f:
# betaSTD = json.load(f)
with open("gammaSTD.txt", 'r') as f:
gammaSTD = json.load(f)
#betaG = plt.errorbar(betas, varyingBeta, betaSTD, ecolor='black', elinewidth = 0.5, capsize=3)
gammaG = plt.errorbar(gammas, varyingGamma, gammaSTD, ecolor='black', elinewidth = 0.5, capsize=3)
plt.legend(('Gamma Graph',))
plt.xlabel('Gamma values')
plt.ylabel('Expectation Value')
plt.title('Expectation Value vs Gamma holding Beta constant')
plt.show()
main()
|
[
"qiskit.ClassicalRegister",
"matplotlib.pyplot.title",
"qiskit.execute",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"json.dump",
"numpy.binary_repr",
"numpy.linspace",
"matplotlib.pyplot.errorbar",
"json.load",
"qiskit.QuantumCircuit",
"qiskit.QuantumRegister",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((162, 202), 'qiskit.QuantumRegister', 'qiskit.QuantumRegister', (['(inbits + outbits)'], {}), '(inbits + outbits)\n', (184, 202), False, 'import qiskit\n'), ((209, 251), 'qiskit.ClassicalRegister', 'qiskit.ClassicalRegister', (['(inbits + outbits)'], {}), '(inbits + outbits)\n', (233, 251), False, 'import qiskit\n'), ((259, 286), 'qiskit.QuantumCircuit', 'qiskit.QuantumCircuit', (['q', 'c'], {}), '(q, c)\n', (280, 286), False, 'import qiskit\n'), ((619, 681), 'qiskit.execute', 'qiskit.execute', (['qc'], {'backend': '"""local_qasm_simulator"""', 'shots': '(1024)'}), "(qc, backend='local_qasm_simulator', shots=1024)\n", (633, 681), False, 'import qiskit\n'), ((1179, 1201), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Outcomes"""'], {}), "('Outcomes')\n", (1189, 1201), True, 'import matplotlib.pyplot as plt\n'), ((1206, 1226), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts"""'], {}), "('Counts')\n", (1216, 1226), True, 'import matplotlib.pyplot as plt\n'), ((1231, 1265), 'matplotlib.pyplot.title', 'plt.title', (['"""Measurement Histogram"""'], {}), "('Measurement Histogram')\n", (1240, 1265), True, 'import matplotlib.pyplot as plt\n'), ((1270, 1280), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1278, 1280), True, 'import matplotlib.pyplot as plt\n'), ((6086, 6111), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', '(10)'], {}), '(0, np.pi, 10)\n', (6097, 6111), True, 'import numpy as np\n'), ((6125, 6155), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(100)'], {}), '(0, 2 * np.pi, 100)\n', (6136, 6155), True, 'import numpy as np\n'), ((7261, 7352), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['gammas', 'varyingGamma', 'gammaSTD'], {'ecolor': '"""black"""', 'elinewidth': '(0.5)', 'capsize': '(3)'}), "(gammas, varyingGamma, gammaSTD, ecolor='black', elinewidth=0.5,\n capsize=3)\n", (7273, 7352), True, 'import matplotlib.pyplot as plt\n'), ((7355, 7383), 'matplotlib.pyplot.legend', 'plt.legend', (["('Gamma Graph',)"], {}), "(('Gamma Graph',))\n", (7365, 7383), True, 'import matplotlib.pyplot as plt\n'), ((7388, 7414), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Gamma values"""'], {}), "('Gamma values')\n", (7398, 7414), True, 'import matplotlib.pyplot as plt\n'), ((7419, 7450), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Expectation Value"""'], {}), "('Expectation Value')\n", (7429, 7450), True, 'import matplotlib.pyplot as plt\n'), ((7455, 7516), 'matplotlib.pyplot.title', 'plt.title', (['"""Expectation Value vs Gamma holding Beta constant"""'], {}), "('Expectation Value vs Gamma holding Beta constant')\n", (7464, 7516), True, 'import matplotlib.pyplot as plt\n'), ((7521, 7531), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7529, 7531), True, 'import matplotlib.pyplot as plt\n'), ((929, 955), 'numpy.binary_repr', 'np.binary_repr', (['i'], {'width': 'n'}), '(i, width=n)\n', (943, 955), True, 'import numpy as np\n'), ((6440, 6455), 'json.dump', 'json.dump', (['y', 'f'], {}), '(y, f)\n', (6449, 6455), False, 'import json\n'), ((6506, 6523), 'json.dump', 'json.dump', (['std', 'f'], {}), '(std, f)\n', (6515, 6523), False, 'import json\n'), ((6897, 6909), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6906, 6909), False, 'import json\n'), ((7130, 7142), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7139, 7142), False, 'import json\n')]
|
"""Simple code for training an RNN for motion prediction."""
import os
import sys
import time
import numpy as np
import torch
import torch.optim as optim
from torch.autograd import Variable
import mtfixb_model
import mtfixb_model2
import parseopts
def create_model(args, total_num_batches):
"""Create MT model and initialize or load parameters in session."""
if len(args.load) > 0:
print("Loading model")
model = torch.load(args.load, map_location="cpu") if args.use_cpu else torch.load(args.load)
return model
if args.k == 0:
return create_model_k0(args, total_num_batches)
if args.dynamicsdict:
return create_model_DD(args, total_num_batches)
if args.biasonly:
return create_model_BiasOnly(args, total_num_batches)
if args.nobias:
return create_model_NoMTBias(args, total_num_batches)
model = mtfixb_model.MTGRU(
args.seq_length_out,
args.decoder_size,
args.decoder_size2,
args.batch_size,
total_num_batches,
args.k,
args.size_psi_hidden,
args.size_psi_lowrank,
args.bottleneck,
output_dim=args.human_size,
input_dim=args.input_size,
dropout=args.dropout_p,
residual_output=args.residual_velocities,
init_state_noise=args.init_state_noise,
mt_rnn=args.mt_rnn,
psi_affine=args.psi_affine,
)
if len(args.load) <= 0:
if len(args.load_layer1) > 0:
print("Loading GRU2 model")
model = load_layer1(model, args.load_layer1, args.use_cpu)
return model
print("Loading model")
model = torch.load(args.load, map_location="cpu") if args.use_cpu else torch.load(args.load)
return model
def create_model_k0(args, total_num_batches):
"""Create MT model and initialize or load parameters in session."""
model = mtfixb_model.OpenLoopGRU(
args.seq_length_out,
args.decoder_size,
args.batch_size,
args.human_size,
args.input_size,
args.dropout_p,
args.residual_velocities,
args.init_state_noise,
)
return model
def create_model_DD(args, total_num_batches):
"""Create MT model and initialize or load parameters in session."""
if len(args.load_layer1) > 0:
NotImplementedError("Layer 1 load not yet implemented for Dynamics Dict.")
model = mtfixb_model.DynamicsDict(
args.seq_length_out,
args.decoder_size,
total_num_batches,
args.batch_size,
args.k,
args.size_psi_hidden,
args.size_psi_lowrank,
args.human_size,
args.input_size,
args.dropout_p,
args.residual_velocities,
args.init_state_noise,
)
return model
def create_model_BiasOnly(args, total_num_batches):
"""Create MT model and initialize or load parameters in session."""
if len(args.load_layer1) > 0:
NotImplementedError("Layer 1 load not yet implemented for MT Bias Only.")
model = mtfixb_model.MTGRU_BiasOnly(
args.seq_length_out,
args.decoder_size,
args.decoder_size2,
args.batch_size,
total_num_batches,
args.k,
args.size_psi_hidden,
args.size_psi_lowrank,
args.bottleneck,
output_dim=args.human_size,
input_dim=args.input_size,
dropout=args.dropout_p,
residual_output=args.residual_velocities,
init_state_noise=args.init_state_noise,
)
return model
def create_model_NoMTBias(args, total_num_batches):
"""Create MT model and initialize or load parameters in session."""
if len(args.load_layer1) > 0:
NotImplementedError("Layer 1 load not yet implemented for MT Bias Only.")
model = mtfixb_model2.MTGRU_NoBias(
args.seq_length_out,
args.decoder_size,
args.decoder_size2,
args.batch_size,
total_num_batches,
args.k,
args.size_psi_hidden,
args.size_psi_lowrank,
args.bottleneck,
output_dim=args.human_size,
input_dim=args.input_size,
dropout=args.dropout_p,
residual_output=args.residual_velocities,
init_state_noise=args.init_state_noise,
mt_rnn=args.mt_rnn,
psi_affine=args.psi_affine,
)
return model
def train(args):
"""Train a MT model on human motion"""
train_iter = read_all_data(args)
train_iter.shuffle()
total_num_batches = train_iter.total_length()
model = create_model(args, total_num_batches)
model = model if args.use_cpu else model.cuda()
has_weight = not np.isclose(args.first3_prec, 1.0)
is_hard_em = args.hard_em_iters > 0
is_MT = args.k > 0
current_step = 0
previous_losses = []
step_time, loss = 0, 0
mt_lr = args.learning_rate_mt if args.learning_rate_mt >= 0 else args.learning_rate
z_lr = args.learning_rate_z if args.learning_rate_z >= 0 else args.learning_rate
zls_lr = 0 if is_hard_em else z_lr
pars_lrs, zls_ix = model.get_params_optim_dicts(mt_lr, args.learning_rate, z_lr, zls_lr=zls_lr)
if args.optimiser.upper() == "SGD":
optimiser = optim.SGD(pars_lrs, weight_decay=args.weight_decay)
elif args.optimiser.upper() == "NESTEROV":
optimiser = optim.SGD(pars_lrs, momentum=0.8, nesterov=True, weight_decay=args.weight_decay)
elif args.optimiser.upper() == "ADAM":
optimiser = optim.Adam(pars_lrs, betas=(0.9, 0.999), weight_decay=args.weight_decay)
else:
Exception("Unknown optimiser type: {:d}. Try 'SGD', 'Nesterov' or 'Adam'")
has_ar_noise = args.ar_coef > 0
device = "cpu" if args.use_cpu else "cuda"
if has_ar_noise:
assert args.ar_coef < 1, "ar_coef must be in [0, 1)."
# Construct banded AR precision matrix (fn def below)
Prec = ar_prec_matrix(args.ar_coef, args.seq_length_out).float().to(device)
for _ in range(args.iterations):
optimiser.zero_grad()
model.train()
start_time = time.time()
# ------------------------------------------------------- TRAINING
inputs, outputs, c_ids = model.get_batch(train_iter)
inputs, outputs = torchify(inputs, outputs, device=device)
if is_MT:
mu = model.mt_net.Z_mu[c_ids, :]
sd = torch.sigmoid(3 * model.mt_net.Z_logit_s[c_ids, :])
preds, _state = model(inputs, mu, sd)
else:
preds, _state = model(inputs)
err = preds - outputs
if has_weight:
err = err * torch.cat(
(torch.ones(1, 1, 3) * np.sqrt(args.first3_prec), torch.ones(1, 1, args.human_size - 3)), dim=2
).to(err.device)
if not has_ar_noise:
sqerr = err ** 2
else:
sqerr = (Prec @ err) * err
step_loss = args.human_size * args.seq_length_out * sqerr.mean() / 2
# assume \sigma is const. wrt optimisation, and hence normalising constant can be ignored.
# Now for KL term. Since we're descending *negative* L.B., we need to *ADD* KL to loss:
if is_MT:
logstd = torch.log(sd)
KLD = -0.5 * torch.sum(1 + 2 * logstd - mu.pow(2) - torch.exp(2 * logstd))
step_loss = step_loss + KLD
# Actual backpropagation
step_loss.backward()
optimiser.step()
# -------------------------------------------------------
# Reporting / admin
step_loss = step_loss.cpu().data.numpy()
if current_step % 10 == 0:
if is_MT:
KLD_part = KLD.cpu().data.numpy()
print(
"step {0:04d}; step_loss: {1:.4f} ({2:.4f})".format(current_step, step_loss, step_loss - KLD_part)
)
else:
print("step {0:04d}; step_loss: {1:.4f}".format(current_step, step_loss))
step_time += (time.time() - start_time) / args.test_every
loss += step_loss / args.test_every
current_step += 1
if current_step % 20 == 0:
sys.stdout.flush()
# Decay learning rate (if appl.)
if current_step % args.learning_rate_step == 0:
for param_group in optimiser.param_groups:
param_group["lr"] *= args.learning_rate_decay_factor
print("Decay learning rate. New value at " + str(optimiser.param_groups[0]["lr"]))
# remove Hard EM spec (if appl.)
if is_hard_em and zls_ix is not None and current_step == args.hard_em_iters:
optimiser.param_groups[zls_ix]["lr"] = z_lr
model.standardise_aggregate_posterior()
# Once in a while, we save checkpoint, print statistics, and run evals.
if current_step % args.test_every == 0:
model.eval()
# === CANNOT DO TEST SET EVALUATION SINCE DONT KNOW LATENT Z ===
# inputs, outputs = model.get_test_batch(test_set_Y, test_set_U, -1)
# inputs, outputs = torchify(inputs, outputs, device=device)
#
# if is_MT:
# preds, state = model(inputs, mu, sd)
# else:
# preds = model(inputs)
#
# err = (preds - outputs)
# if has_weight:
# err = err * torch.cat((torch.ones(1, 1, 3) * np.sqrt(args.first3_prec),
# torch.ones(1, 1, args.human_size - 3)), dim=2).to(err.device)
#
# if not has_ar_noise:
# sqerr = err ** 2
# else:
# Prec_test = ar_prec_matrix(args.ar_coef, err.size(1)).float().to(device)
# sqerr = (Prec_test @ err) * err
#
# val_loss = args.human_size * args.seq_length_out * sqerr.mean() / 2
#
# if is_MT:
# logstd = torch.log(sd)
# KLD = -0.5 * torch.sum(1 + 2 * logstd - mu.pow(2) - torch.exp(2 * logstd))
# val_loss = val_loss + KLD
#
# print()
# print("{0: <16} |".format("milliseconds"), end="")
# for ms in [60, 240, 480, 750, 990, 1500, 2010]:
# print(" {0:5d} |".format(ms), end="")
# print()
#
# avg_mse_tt = sqerr.detach().cpu().mean(dim=0).numpy().mean(axis=1)
# Pretty print of the results for 60, 240, 480, 750, 990, 1500, 2010 ms
# print("{0: <16} |".format(" "), end="")
# for ms in [1, 7, 15, 24, 32, 49, 66]:
# if args.seq_length_out >= ms + 1:
# print(" {0:.3f} |".format(avg_mse_tt[ms]), end="")
# else:
# print(" n/a |", end="")
# print()
#
# print()
# print("============================\n"
# "Global step: %d\n"
# "Learning rate: %.4f\n"
# "Step-time (ms): %.4f\n"
# "Train loss avg: %.4f\n"
# "--------------------------\n"
# "Test loss: %.4f\n"
# "============================" % (current_step,
# args.learning_rate, step_time * 1000, loss,
# val_loss))
torch.save(model, args.train_dir + "/model_" + str(current_step))
# print()
previous_losses.append(loss)
# Reset global time and loss
step_time, loss = 0, 0
sys.stdout.flush()
def sample(args):
raise NotImplementedError("Sampling not yet implemented: unsure how to deal with unknown latent z.")
train_set_Y, train_set_U, test_set_Y, test_set_U = read_all_data(args)
model = create_model(args)
model.eval()
if not args.use_cpu:
model = model.cuda()
print("Model created")
inputs, outputs = model.get_test_batch(test_set_Y, test_set_U, -1)
inputs = Variable(torch.from_numpy(inputs).float())
outputs = Variable(torch.from_numpy(outputs).float())
if not args.use_cpu:
inputs, outputs, inputs.cuda(), outputs.cuda()
if args.k > 0:
preds, mu, logstd, state = model(inputs, outputs)
else:
preds = model(inputs)
loss = (preds - outputs) ** 2
loss.cpu().data.numpy()
loss = loss.mean()
preds = preds.cpu().data.numpy()
preds = preds.transpose([1, 0, 2])
loss = loss.cpu().data.numpy()
np.savez("mt_predictions_{0}.npz".format(args.style_ix), preds=preds, actual=outputs)
return
def ar_prec_matrix(rho, n):
# Banded covariance construction
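    # Tridiagonal AR(1)-style precision: 1 + rho**2 on the diagonal and -rho on the first off-diagonals.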
Prec = np.zeros((n, n))
i, j = np.indices(Prec.shape)
Prec[i == j] = 1 + rho ** 2
Prec[i == j - 1] = -rho
    Prec[i == j + 1] = -rho
return torch.tensor(Prec)
def load_layer1(model, layer1_filename, use_cpu):
model_gru1 = torch.load(layer1_filename, map_location="cpu") if use_cpu else torch.load(layer1_filename)
if isinstance(model_gru1, mtfixb_model.OpenLoopGRU):
model.layer1_rnn = model_gru1.rnn
# model.layer1_linear = model_gru2.emission
else:
model.layer1_rnn = model_gru1.rnn2
return model
def read_all_data(args):
"""
Loads data for training/testing and normalizes it.
Args
data_dir: directory to load the data from
style_ix: style index of the test set (and leave out from the training set)
njoints: number of joints to model (0 or -1 = all)
Returns
train_set: dictionary with normalized training data
test_set: dictionary with test data
data_mean: d-long vector with the mean of the training data
data_std: d-long vector with the standard dev of the training data
dim_to_ignore: dimensions that are not used becaused stdev is too small
dim_to_use: dimensions that we are actually using in the model
"""
# === Read training data ===
print("Reading training data (test index {0:d}).".format(args.style_ix))
njoints = args.human_size
if not args.train_set_size == -1:
style_lkp = {
str(i): range(1 + args.train_set_size * (i - 1), 1 + args.train_set_size * i) for i in range(1, 8 + 1)
}
else:
style_lkp = np.load(os.path.join(args.data_dir, args.stylelkp_fname))
train_set_Y = np.load(os.path.join(args.data_dir, args.output_fname))
train_set_U = np.load(os.path.join(args.data_dir, args.input_fname))
njoints = train_set_Y[str(0)].shape[1] if njoints <= 0 else njoints
if args.train_set_size != 0:
train_ixs = np.concatenate(
[
style_lkp[str(i)] for i in range(1, len(style_lkp.keys()) + 1) if i != args.style_ix
] # CAREFUL: jl is 1-based!
)
train_set_Y = [train_set_Y[str(i)][:, :njoints] for i in train_ixs]
train_set_U = [train_set_U[str(i)] for i in train_ixs]
else:
assert args.style_ix not in range(1, 9), "no support for LOO experiments with max MTL data yet. Use style_ix=9"
train_set_Y = [train_set_Y[str(i + 1)][:, :njoints] for i in range(len(train_set_Y))]
train_set_U = [train_set_U[str(i + 1)] for i in range(len(train_set_U))]
print("Using files {:s}; {:s}".format(args.input_fname, args.output_fname))
print("done reading data.")
return mtfixb_model.DataIterator(train_set_Y, train_set_U, 64, min_size=64, overlap2=args.overlap_windows)
def torchify(*args, device="cpu"):
return [Variable(torch.from_numpy(arg).float()).to(device) for arg in args]
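# Illustrative usage of torchify (hypothetical values, not from the original file):
#   y, u = torchify(np.zeros((64, 10)), np.ones((64, 3)), device="cpu")
# Variable is kept for parity with the older PyTorch API used throughout this file;
# on PyTorch >= 0.4 it is effectively a no-op wrapper around a plain tensor.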
def main(args=None):
args = parseopts.parse_args(args)
args = parseopts.initial_arg_transform(args)
print(args.train_dir)
os.makedirs(args.train_dir, exist_ok=True)
if args.sample:
sample(args)
else:
train(args)
return args
if __name__ == "__main__":
main()
|
[
"numpy.sqrt",
"torch.from_numpy",
"parseopts.parse_args",
"torch.exp",
"parseopts.initial_arg_transform",
"mtfixb_model2.MTGRU_NoBias",
"mtfixb_model.MTGRU",
"mtfixb_model.DataIterator",
"mtfixb_model.OpenLoopGRU",
"sys.stdout.flush",
"torch.optim.SGD",
"mtfixb_model.DynamicsDict",
"numpy.indices",
"mtfixb_model.MTGRU_BiasOnly",
"time.time",
"torch.optim.Adam",
"numpy.isclose",
"torch.log",
"os.makedirs",
"torch.load",
"torch.sigmoid",
"os.path.join",
"torch.tensor",
"numpy.zeros",
"torch.ones"
] |
[((890, 1307), 'mtfixb_model.MTGRU', 'mtfixb_model.MTGRU', (['args.seq_length_out', 'args.decoder_size', 'args.decoder_size2', 'args.batch_size', 'total_num_batches', 'args.k', 'args.size_psi_hidden', 'args.size_psi_lowrank', 'args.bottleneck'], {'output_dim': 'args.human_size', 'input_dim': 'args.input_size', 'dropout': 'args.dropout_p', 'residual_output': 'args.residual_velocities', 'init_state_noise': 'args.init_state_noise', 'mt_rnn': 'args.mt_rnn', 'psi_affine': 'args.psi_affine'}), '(args.seq_length_out, args.decoder_size, args.\n decoder_size2, args.batch_size, total_num_batches, args.k, args.\n size_psi_hidden, args.size_psi_lowrank, args.bottleneck, output_dim=\n args.human_size, input_dim=args.input_size, dropout=args.dropout_p,\n residual_output=args.residual_velocities, init_state_noise=args.\n init_state_noise, mt_rnn=args.mt_rnn, psi_affine=args.psi_affine)\n', (908, 1307), False, 'import mtfixb_model\n'), ((1893, 2083), 'mtfixb_model.OpenLoopGRU', 'mtfixb_model.OpenLoopGRU', (['args.seq_length_out', 'args.decoder_size', 'args.batch_size', 'args.human_size', 'args.input_size', 'args.dropout_p', 'args.residual_velocities', 'args.init_state_noise'], {}), '(args.seq_length_out, args.decoder_size, args.\n batch_size, args.human_size, args.input_size, args.dropout_p, args.\n residual_velocities, args.init_state_noise)\n', (1917, 2083), False, 'import mtfixb_model\n'), ((2414, 2680), 'mtfixb_model.DynamicsDict', 'mtfixb_model.DynamicsDict', (['args.seq_length_out', 'args.decoder_size', 'total_num_batches', 'args.batch_size', 'args.k', 'args.size_psi_hidden', 'args.size_psi_lowrank', 'args.human_size', 'args.input_size', 'args.dropout_p', 'args.residual_velocities', 'args.init_state_noise'], {}), '(args.seq_length_out, args.decoder_size,\n total_num_batches, args.batch_size, args.k, args.size_psi_hidden, args.\n size_psi_lowrank, args.human_size, args.input_size, args.dropout_p,\n args.residual_velocities, args.init_state_noise)\n', (2439, 2680), False, 'import mtfixb_model\n'), ((3045, 3423), 'mtfixb_model.MTGRU_BiasOnly', 'mtfixb_model.MTGRU_BiasOnly', (['args.seq_length_out', 'args.decoder_size', 'args.decoder_size2', 'args.batch_size', 'total_num_batches', 'args.k', 'args.size_psi_hidden', 'args.size_psi_lowrank', 'args.bottleneck'], {'output_dim': 'args.human_size', 'input_dim': 'args.input_size', 'dropout': 'args.dropout_p', 'residual_output': 'args.residual_velocities', 'init_state_noise': 'args.init_state_noise'}), '(args.seq_length_out, args.decoder_size, args.\n decoder_size2, args.batch_size, total_num_batches, args.k, args.\n size_psi_hidden, args.size_psi_lowrank, args.bottleneck, output_dim=\n args.human_size, input_dim=args.input_size, dropout=args.dropout_p,\n residual_output=args.residual_velocities, init_state_noise=args.\n init_state_noise)\n', (3072, 3423), False, 'import mtfixb_model\n'), ((3793, 4218), 'mtfixb_model2.MTGRU_NoBias', 'mtfixb_model2.MTGRU_NoBias', (['args.seq_length_out', 'args.decoder_size', 'args.decoder_size2', 'args.batch_size', 'total_num_batches', 'args.k', 'args.size_psi_hidden', 'args.size_psi_lowrank', 'args.bottleneck'], {'output_dim': 'args.human_size', 'input_dim': 'args.input_size', 'dropout': 'args.dropout_p', 'residual_output': 'args.residual_velocities', 'init_state_noise': 'args.init_state_noise', 'mt_rnn': 'args.mt_rnn', 'psi_affine': 'args.psi_affine'}), '(args.seq_length_out, args.decoder_size, args.\n decoder_size2, args.batch_size, total_num_batches, args.k, args.\n size_psi_hidden, args.size_psi_lowrank, args.bottleneck, 
output_dim=\n args.human_size, input_dim=args.input_size, dropout=args.dropout_p,\n residual_output=args.residual_velocities, init_state_noise=args.\n init_state_noise, mt_rnn=args.mt_rnn, psi_affine=args.psi_affine)\n', (3819, 4218), False, 'import mtfixb_model2\n'), ((12744, 12760), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (12752, 12760), True, 'import numpy as np\n'), ((12772, 12794), 'numpy.indices', 'np.indices', (['Prec.shape'], {}), '(Prec.shape)\n', (12782, 12794), True, 'import numpy as np\n'), ((12894, 12912), 'torch.tensor', 'torch.tensor', (['Prec'], {}), '(Prec)\n', (12906, 12912), False, 'import torch\n'), ((15429, 15532), 'mtfixb_model.DataIterator', 'mtfixb_model.DataIterator', (['train_set_Y', 'train_set_U', '(64)'], {'min_size': '(64)', 'overlap2': 'args.overlap_windows'}), '(train_set_Y, train_set_U, 64, min_size=64,\n overlap2=args.overlap_windows)\n', (15454, 15532), False, 'import mtfixb_model\n'), ((15680, 15706), 'parseopts.parse_args', 'parseopts.parse_args', (['args'], {}), '(args)\n', (15700, 15706), False, 'import parseopts\n'), ((15718, 15755), 'parseopts.initial_arg_transform', 'parseopts.initial_arg_transform', (['args'], {}), '(args)\n', (15749, 15755), False, 'import parseopts\n'), ((15787, 15829), 'os.makedirs', 'os.makedirs', (['args.train_dir'], {'exist_ok': '(True)'}), '(args.train_dir, exist_ok=True)\n', (15798, 15829), False, 'import os\n'), ((1658, 1699), 'torch.load', 'torch.load', (['args.load'], {'map_location': '"""cpu"""'}), "(args.load, map_location='cpu')\n", (1668, 1699), False, 'import torch\n'), ((1721, 1742), 'torch.load', 'torch.load', (['args.load'], {}), '(args.load)\n', (1731, 1742), False, 'import torch\n'), ((4649, 4682), 'numpy.isclose', 'np.isclose', (['args.first3_prec', '(1.0)'], {}), '(args.first3_prec, 1.0)\n', (4659, 4682), True, 'import numpy as np\n'), ((5194, 5245), 'torch.optim.SGD', 'optim.SGD', (['pars_lrs'], {'weight_decay': 'args.weight_decay'}), '(pars_lrs, weight_decay=args.weight_decay)\n', (5203, 5245), True, 'import torch.optim as optim\n'), ((6048, 6059), 'time.time', 'time.time', ([], {}), '()\n', (6057, 6059), False, 'import time\n'), ((12982, 13029), 'torch.load', 'torch.load', (['layer1_filename'], {'map_location': '"""cpu"""'}), "(layer1_filename, map_location='cpu')\n", (12992, 13029), False, 'import torch\n'), ((13046, 13073), 'torch.load', 'torch.load', (['layer1_filename'], {}), '(layer1_filename)\n', (13056, 13073), False, 'import torch\n'), ((14431, 14477), 'os.path.join', 'os.path.join', (['args.data_dir', 'args.output_fname'], {}), '(args.data_dir, args.output_fname)\n', (14443, 14477), False, 'import os\n'), ((14505, 14550), 'os.path.join', 'os.path.join', (['args.data_dir', 'args.input_fname'], {}), '(args.data_dir, args.input_fname)\n', (14517, 14550), False, 'import os\n'), ((443, 484), 'torch.load', 'torch.load', (['args.load'], {'map_location': '"""cpu"""'}), "(args.load, map_location='cpu')\n", (453, 484), False, 'import torch\n'), ((506, 527), 'torch.load', 'torch.load', (['args.load'], {}), '(args.load)\n', (516, 527), False, 'import torch\n'), ((5313, 5398), 'torch.optim.SGD', 'optim.SGD', (['pars_lrs'], {'momentum': '(0.8)', 'nesterov': '(True)', 'weight_decay': 'args.weight_decay'}), '(pars_lrs, momentum=0.8, nesterov=True, weight_decay=args.weight_decay\n )\n', (5322, 5398), True, 'import torch.optim as optim\n'), ((6345, 6396), 'torch.sigmoid', 'torch.sigmoid', (['(3 * model.mt_net.Z_logit_s[c_ids, :])'], {}), '(3 * model.mt_net.Z_logit_s[c_ids, :])\n', (6358, 6396), 
False, 'import torch\n'), ((7157, 7170), 'torch.log', 'torch.log', (['sd'], {}), '(sd)\n', (7166, 7170), False, 'import torch\n'), ((8091, 8109), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (8107, 8109), False, 'import sys\n'), ((11630, 11648), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (11646, 11648), False, 'import sys\n'), ((14354, 14402), 'os.path.join', 'os.path.join', (['args.data_dir', 'args.stylelkp_fname'], {}), '(args.data_dir, args.stylelkp_fname)\n', (14366, 14402), False, 'import os\n'), ((5457, 5529), 'torch.optim.Adam', 'optim.Adam', (['pars_lrs'], {'betas': '(0.9, 0.999)', 'weight_decay': 'args.weight_decay'}), '(pars_lrs, betas=(0.9, 0.999), weight_decay=args.weight_decay)\n', (5467, 5529), True, 'import torch.optim as optim\n'), ((7929, 7940), 'time.time', 'time.time', ([], {}), '()\n', (7938, 7940), False, 'import time\n'), ((12074, 12098), 'torch.from_numpy', 'torch.from_numpy', (['inputs'], {}), '(inputs)\n', (12090, 12098), False, 'import torch\n'), ((12131, 12156), 'torch.from_numpy', 'torch.from_numpy', (['outputs'], {}), '(outputs)\n', (12147, 12156), False, 'import torch\n'), ((7235, 7256), 'torch.exp', 'torch.exp', (['(2 * logstd)'], {}), '(2 * logstd)\n', (7244, 7256), False, 'import torch\n'), ((15587, 15608), 'torch.from_numpy', 'torch.from_numpy', (['arg'], {}), '(arg)\n', (15603, 15608), False, 'import torch\n'), ((6658, 6695), 'torch.ones', 'torch.ones', (['(1)', '(1)', '(args.human_size - 3)'], {}), '(1, 1, args.human_size - 3)\n', (6668, 6695), False, 'import torch\n'), ((6609, 6628), 'torch.ones', 'torch.ones', (['(1)', '(1)', '(3)'], {}), '(1, 1, 3)\n', (6619, 6628), False, 'import torch\n'), ((6631, 6656), 'numpy.sqrt', 'np.sqrt', (['args.first3_prec'], {}), '(args.first3_prec)\n', (6638, 6656), True, 'import numpy as np\n')]
|
import time
import numpy as np
import vtk
from vtk.util import numpy_support
from svtk.lib.toolbox.integer import minmax
from svtk.lib.toolbox.idarray import IdArray
from svtk.lib.toolbox.numpy_helpers import normalize
import math as m
class VTKAnimationTimerCallback(object):
"""This class is called every few milliseconds by VTK based on the set frame rate. This allows for animation.
I've added several modification functions, such as adding and deleting lines/points, changing colors, etc."""
__slots__ = ["points", "point_colors", "timer_count", "points_poly",
"lines", "lines_poly", "line_colors", "line_id_array"
"last_velocity_update", "unused_locations",
"last_color_velocity_update", "renderer", "last_bg_color_velocity_update",
"last_velocity_update", "_loop_time", "remaining_lerp_fade_time", "lerp_multiplier",
"line_id_array", "point_id_array", "point_vertices", "interactor_style", "renderer",
"interactive_renderer", "_started"
]
def __init__(self):
self.timer_count = 0
self.last_velocity_update = time.clock()
self.last_color_velocity_update = time.clock()
self.last_bg_color_velocity_update = time.clock()
self._loop_time = time.clock()
self.unused_locations = []
self.remaining_lerp_fade_time = 0
self.lerp_multiplier = 1
self.line_id_array = IdArray()
self.point_id_array = IdArray()
self._started=False
def add_lines(self, lines, line_colors):
"""
Adds multiple lines between any sets of points.
Args:
lines (list, tuple, np.ndarray, np.generic):
An array in the format of [2, point_a, point_b, 2, point_c, point_d, ...]. The two is needed for VTK's
lines.
line_colors (list, tuple, np.ndarray, np.generic):
An array in the format of [[r1, g1, b1], [r2, g2, b2], ...], with the same length as the number of
lines.
Returns:
list: An array containing the memory locations of each of the newly inserted lines.
"""
assert (isinstance(lines, (list, tuple, np.ndarray, np.generic)))
assert (isinstance(line_colors, (list, tuple, np.ndarray, np.generic)))
np_line_data = numpy_support.vtk_to_numpy(self.lines.GetData())
np_line_color_data = numpy_support.vtk_to_numpy(self.line_colors)
#todo: add lines in unused locations if possible
mem_locations = range(int(len(np_line_data) / 3), int((len(np_line_data) + len(lines)) / 3))
np_line_data = np.append(np_line_data, lines)
if len(np_line_color_data) > 0:
np_line_color_data = np.append(np_line_color_data, line_colors, axis=0)
else:
np_line_color_data = line_colors
vtk_line_data = numpy_support.numpy_to_vtkIdTypeArray(np_line_data, deep=True)
self.lines.SetCells(int(len(np_line_data) / 3), vtk_line_data)
vtk_line_color_data = numpy_support.numpy_to_vtk(num_array=np_line_color_data,
deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.line_colors.DeepCopy(vtk_line_color_data)
self.lines_poly.Modified()
self.line_id_array.add_ids(mem_locations)
return mem_locations
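    # Illustrative usage of add_lines (hypothetical instance and point ids, assuming
    # points 0 and 1 were previously created via add_points): one red line between them,
    #   line_ids = callback.add_lines([2, 0, 1], [[255, 0, 0]])
    # where the leading "2" is the VTK cell size, i.e. the number of point ids per line.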
def del_all_lines(self):
"""
Deletes all lines.
"""
vtk_data = numpy_support.numpy_to_vtkIdTypeArray(np.array([], dtype=np.int64), deep=True)
self.lines.SetCells(0, vtk_data)
vtk_data = numpy_support.numpy_to_vtk(num_array=np.array([]), deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.line_colors.DeepCopy(vtk_data)
self.lines_poly.Modified()
def del_lines(self, line_indices):
#todo: change idarray to use tuples of (start,end) locations and set this to delete those partitions
"""
Delete specific lines.
Args:
line_indices (tuple, list, np.ndarray, np.generic):
An array of integers or a single integer representing line memory locations(s) to delete.
"""
np_data = numpy_support.vtk_to_numpy(self.lines.GetData())
np_color_data = numpy_support.vtk_to_numpy(self.line_colors)
if isinstance(line_indices, (tuple, list, np.ndarray, np.generic)):
last_loc = -1
loc = 0
np_new_data = []
np_new_color_data = []
for i in range(len(line_indices)):
loc = self.line_id_array.pop_id(line_indices[i])
if loc==None:
#todo: put warning here
continue
if len(np_new_data) > 0:
np_new_data = np.append(np_new_data, np_data[(last_loc + 1) * 3:loc * 3], axis=0)
else:
np_new_data = np_data[(last_loc + 1) * 3:loc * 3]
if len(np_new_color_data) > 0:
np_new_color_data = np.append(np_new_color_data, np_color_data[(last_loc + 1):loc], axis=0)
else:
np_new_color_data = np_color_data[(last_loc + 1):loc]
last_loc = loc
last_loc = loc
            loc = len(np_data) // 3  # integer index for the slices below
np_data = np.append(np_new_data, np_data[(last_loc + 1) * 3:loc * 3], axis=0)
np_data = np_data.astype(np.int64)
np_color_data = np.append(np_new_color_data, np_color_data[(last_loc + 1):loc], axis=0)
else:
raise TypeError("Deletion list should be tuple, list, np.ndarray, or np.generic")
vtk_data = numpy_support.numpy_to_vtkIdTypeArray(np_data, deep=True)
self.lines.SetCells(int(len(np_data) / 3), vtk_data)
vtk_data = numpy_support.numpy_to_vtk(num_array=np_color_data, deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.line_colors.DeepCopy(vtk_data)
self.lines_poly.Modified()
def del_points(self, point_indices):
"""
Delete specific points.
Args:
point_indices (tuple, list, np.ndarray, np.generic):
An array of integers or a single integer representing point memory locations(s) to delete.
"""
np_point_data = numpy_support.vtk_to_numpy(self.points.GetData())
np_point_color_data = numpy_support.vtk_to_numpy(self.point_colors)
np_vert_data = numpy_support.vtk_to_numpy(self.point_vertices.GetData())#1,1,1,2,1,3,1,4,1,5,1,6...
print(len(np_vert_data), len(np_point_data), len(np_point_color_data))
if isinstance(point_indices, (tuple, list, np.ndarray, np.generic)):
last_loc = -1
loc = 0
subtractor = 0
np_new_data = []
np_new_color_data = []
np_new_verts = []
for i in range(len(point_indices)):
loc = self.point_id_array.pop_id(point_indices[i])
if loc == None:
# todo: put warning here
continue
subtractor+=1
#I could just remove the end of the array, but this keeps the lines attached to the same points
if len(np_new_verts) >0:
np_new_verts = np.append(np_new_verts, np_vert_data[(last_loc+1)*2:loc*2], axis = 0)
else:
np_new_verts = np_vert_data[(last_loc+1)*2: loc*2]
if len(np_new_data) > 0:
np_new_data = np.append(np_new_data, np_point_data[(last_loc + 1):loc], axis=0)
else:
np_new_data = np_point_data[(last_loc + 1):loc]
if len(np_new_color_data) > 0:
np_new_color_data = np.append(np_new_color_data, np_point_color_data[(last_loc + 1)*3:loc*3], axis=0)
else:
np_new_color_data = np_point_color_data[(last_loc + 1):loc]
last_loc = loc
if loc == None:
return
last_loc = loc
loc = len(np_point_data)
np_point_data = np.append(np_new_data, np_point_data[(last_loc + 1):loc], axis=0)
np_point_color_data = np.append(np_new_color_data, np_point_color_data[(last_loc + 1):loc], axis=0)
np_vert_data = np.append(np_new_verts, np_vert_data[(last_loc + 1)*2:loc*2], axis = 0)
else:
raise TypeError("Deletion list should be tuple, list, np.ndarray, or np.generic")
vtk_data = numpy_support.numpy_to_vtk(np_point_data, deep=True)
self.points.SetData(vtk_data)
vtk_data = numpy_support.numpy_to_vtk(num_array=np_point_color_data, deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.point_colors.DeepCopy(vtk_data)
vtk_data = numpy_support.numpy_to_vtkIdTypeArray(np_vert_data, deep=True)
self.point_vertices.SetCells(int(len(np_vert_data) / 2), vtk_data)
self.lines_poly.Modified()
def add_points(self, points, point_colors):
"""
Adds points in 3d space.
Args:
points (tuple, list, np.ndarray, np.generic):
An array in the format of [[x1,y1,z1], [x2,y2,x2], ..., [xn,yn,zn]]
point_colors (tuple, list, np.ndarray, np.generic):
An array in the format of [[r1, g1, b1], [r2, g2, b2], ...], with the same length as the number of
points to be added.
Returns:
"""
assert (isinstance(points, (list, tuple, np.ndarray, np.generic)))
assert (isinstance(point_colors, (list, tuple, np.ndarray, np.generic)))
np_point_data = numpy_support.vtk_to_numpy(self.points.GetData())
np_point_color_data = numpy_support.vtk_to_numpy(self.point_colors)
np_vert_data = numpy_support.vtk_to_numpy(self.point_vertices.GetData())
print(np_vert_data)
for i in range(len(points)):
#todo: modify pointer_id_array to set free pointers to deleted data, not deleted data locations
if len(self.point_id_array.free_pointers)>0:
np_vert_data = np.append(np_vert_data, [1,self.point_id_array.free_pointers.pop()])
else:
np_vert_data = np.append(np_vert_data,[1, len(np_vert_data)/2])
mem_locations = range(int(len(np_point_data)), int((len(np_point_data) + len(points))))
if len(np_point_data) > 0:
np_point_data = np.append(np_point_data, points, axis=0)
else:
np_point_data = points
if len(point_colors) ==1:
points = np.array(points)
point_colors = np.tile(point_colors, (points.shape[0], 1))
if len(np_point_color_data) > 0:
np_point_color_data = np.append(np_point_color_data, point_colors, axis=0)
else:
np_point_color_data = point_colors
vtk_point_data = numpy_support.numpy_to_vtk(num_array=np_point_data, deep=True, array_type=vtk.VTK_FLOAT)
self.points.SetData(vtk_point_data)
vtk_data = numpy_support.numpy_to_vtkIdTypeArray(np_vert_data.astype(np.int64), deep=True)
self.point_vertices.SetCells(int(len(np_vert_data) / 2), vtk_data)
vtk_point_color_data = numpy_support.numpy_to_vtk(num_array=np_point_color_data,
deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.point_colors.DeepCopy(vtk_point_color_data)
self.points_poly.Modified()
self.point_id_array.add_ids(mem_locations)
#print(self.point_id_array)
return mem_locations
def add_point_field(self, widths, normal, center, color):
"""
Adds a rectangular field of points.
Args:
widths (tuple, list, np.ndarray, np.generic): an array defining the widths of each dimension of the field.
normal (tuple, list, np.ndarray, np.generic): an array defining the normal to the field. Specifies angle.
center (tuple, list, np.ndarray, np.generic): an array defining the central position of the field.
color (tuple, list, np.ndarray, np.generic):
An array in the format of [[r1, g1, b1], [r2, g2, b2], ...], with the same length as the number of
points to be added, or a single color in the form of [[r1, g1, b1]].
Returns:
A list of integers representing the memory locations where the points were added.
"""
true_normal = normalize(normal)
if not np.allclose(true_normal, [1, 0, 0]):
zn = np.cross(true_normal, [1, 0, 0])
xn = np.cross(true_normal, zn)
else:
xn = [1, 0, 0]
zn = [0, 0, 1]
point_field = np.array([])
#todo: replace for loops with numpy or gpu ops
for z in range(-int(m.floor(widths[2] / 2.0)), int(m.ceil(widths[2] / 2.0))):
for y in range(-int(m.floor(widths[1] / 2.0)), int(m.ceil(widths[1] / 2.0))):
for x in range(-int(m.floor(widths[0] / 2.0)), int(m.ceil(widths[0] / 2.0))):
vector_space_matrix = np.column_stack(
(np.transpose(xn), np.transpose(true_normal), np.transpose(zn)))
translation = np.matmul([x, y, z], vector_space_matrix)
point_location = [center[0], center[1], center[2]] + translation
point_location = [point_location]
if len(point_field)>0:
point_field = np.append(point_field, point_location, axis = 0)
else:
point_field = point_location
return self.add_points(point_field, color) #returns ids
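    # Illustrative usage of add_point_field (hypothetical instance and values): a flat
    # 5x5 grid of white points centred at the origin and facing along +z,
    #   ids = callback.add_point_field([5, 5, 1], [0, 0, 1], [0, 0, 0], [[255, 255, 255]])
    # The returned ids can later be passed to set_point_colors or del_points.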
def set_bg_color(self, color):
"""
Sets the background color of the viewport.
Args:
color (tuple, list, np.ndarray, np.generic): a single rgb color in the form of [[int, int, int]]
"""
r, g, b = color[0]
r,g,b = (r/255.,g/255.,b/255.)
self.renderer.SetBackground((minmax(r, 0, 1), minmax(g, 0, 1), minmax(b, 0, 1)))
self.renderer.Modified()
def set_all_point_colors(self, color):
"""
Sets the color of every point.
Args:
color (tuple, list, np.ndarray, np.generic): a single rgb color in the form of [[int, int, int]]
"""
np_color_data = numpy_support.vtk_to_numpy(self.point_colors)
np_color_data = np.tile(color, (np_color_data.shape[0], 1))
vtk_data = numpy_support.numpy_to_vtk(num_array=np_color_data, deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.point_colors.DeepCopy(vtk_data)
def set_point_colors(self, colors, point_indices=None):
if point_indices is None:
if isinstance(colors, (list, tuple, np.ndarray, np.generic)):
vtk_data = numpy_support.numpy_to_vtk(num_array=colors, deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.point_colors.DeepCopy(vtk_data)
elif isinstance(point_indices, (list, tuple, np.ndarray, np.generic)):
np_color_data = numpy_support.vtk_to_numpy(self.point_colors)
np_color_data[point_indices] = colors
vtk_data = numpy_support.numpy_to_vtk(num_array=np_color_data, deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.point_colors.DeepCopy(vtk_data)
# self.points_poly.GetPointData().GetScalars().Modified()
self.points_poly.Modified()
def setup_lerp_all_point_colors(self, color, fade_time):
"""
Sets all points to the same color, but uses lerping to slowly change the colors.
Args:
color ():
fade_time ():
"""
np_color_data = numpy_support.vtk_to_numpy(self.point_colors)
self.next_colors = np.tile(color, (np_color_data.shape[0], 1))
self.prev_colors = numpy_support.vtk_to_numpy(self.point_colors)
self.lerp_fade_time = fade_time
self.remaining_lerp_fade_time = fade_time
def lerp_point_colors(self, colors, fade_time, point_indices=None):
"""
Sets colors for specific points, but uses lerping to slowly change those colors.
Args:
colors ():
fade_time ():
point_indices ():
"""
if isinstance(self.next_colors, (np.ndarray, np.generic)):
if isinstance(point_indices, (list, tuple, np.ndarray, np.generic)):
self.next_colors[point_indices] = colors
else:
self.next_colors = colors
self.next_color_indices = None
elif isinstance(point_indices, (list, tuple, np.ndarray, np.generic)) or isinstance(colors, (list, tuple)):
if self.lerp_fade_time > 0:
self.next_colors = np.append(self.next_colors, colors)
if point_indices is not None:
self.next_color_indices = np.append(self.next_color_indices, point_indices)
else:
self.next_colors = colors
self.next_color_indices = point_indices
        # must not already be lerping
self.prev_colors = numpy_support.vtk_to_numpy(self.point_colors)
# fade time in seconds, float
self.lerp_fade_time = fade_time
self.remaining_lerp_fade_time = fade_time
def set_lerp_remainder(self, lerp_remainder):
"""
Sets the portion of color from the previous color set remains after the lerp has been fully run.
Args:
lerp_remainder ():
"""
self.lerp_multiplier = 1 - lerp_remainder
def _calculate_point_color_lerp(self):
"""
Linearly interpolates colors. In addition to making animation look smoother, it helps prevent seizures a little.
Only a little though, and it has to be used correctly. Still, using it at all helps.
"""
if self.remaining_lerp_fade_time > 0:
# print(self.lerp_fade_time, self.remaining_lerp_fade_time)
lerp_val = self.lerp_multiplier * (
self.lerp_fade_time - self.remaining_lerp_fade_time) / self.lerp_fade_time
# print(lerp_val)
diff_array = (self.prev_colors - self.next_colors)
lerp_diff_array = diff_array * lerp_val
# print(lerp_diff_array)
lerp_colors = self.prev_colors - lerp_diff_array
# print(lerp_colors)
if isinstance(lerp_colors, (np.ndarray, np.generic)):
vtk_data = numpy_support.numpy_to_vtk(num_array=lerp_colors, deep=True,
array_type=vtk.VTK_UNSIGNED_CHAR)
self.point_colors.DeepCopy(vtk_data)
# self.points_poly.GetPointData().GetScalars().Modified()
self.points_poly.Modified()
self.remaining_lerp_fade_time -= self.loop_change_in_time
# print(self.remaining_lerp_fade_time)
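    # Worked example of the interpolation above (illustrative only): with
    # lerp_multiplier = 1, prev_colors = [[0, 0, 0]] and next_colors = [[255, 255, 255]],
    # the displayed colour moves linearly from prev to next as remaining_lerp_fade_time
    # falls from lerp_fade_time to 0, because
    #   colour = prev - (prev - next) * (lerp_fade_time - remaining) / lerp_fade_time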
def position_points(self, positions, point_indices=None):
#todo:unit test
"""
Untested with most recent changes.
Sets the positions of specific points, all points, or one point.
Args:
positions ():
point_indices ():
"""
if point_indices == None:
vtk_data = numpy_support.numpy_to_vtk(num_array=positions, deep=True, array_type=vtk.VTK_FLOAT)
self.points.DeepCopy(vtk_data)
elif isinstance(point_indices, (list, tuple)):
if isinstance(positions, (list, tuple)):
for i in range(len(point_indices)):
x, y, z = positions[i % len(positions)]
self.points.SetPoint(point_indices[i], (x, y, z))
else:
for i in range(len(point_indices)):
x, y, z = positions
self.points.SetPoint(point_indices[i], (x, y, z))
else:
x, y, z = positions
self.points.SetPoint(point_indices, (x, y, z))
self.points_poly.Modified()
def add_key_input_functions(self, keydic):
"""
Sets functions to be called when specific keys are pressed, in order from shallowest to deepest dictionaries.
If a key is already in the dictionary, it will be replaced.
Args:
keydic ():
"""
self.interactor_style.append_input_combinations(keydic)
def at_start(self):
"""
Function to be run after class instantiation and vtk start up. Useful for setting things that can only be set
after VTK is running.
"""
pass
def loop(self, obj, event):
"""
Function called every few milliseconds when VTK is set to call. Variables that need updating like change_in_time
can be set here.
Args:
obj ():
event ():
"""
self.loop_change_in_time = time.clock() - self._loop_time
self._loop_time = time.clock()
self._calculate_point_color_lerp()
pass
def at_end(self):
"""
Function called when animation is ended.
"""
self.interactive_renderer.RemoveAllObservers()
def exit(self):
# needed to stop previous setups from being run on next class call
# proper cleanup
self.interactive_renderer.TerminateApp()
def execute(self, obj, event):
"""
Function called to start animation.
Args:
obj ():
event ():
"""
if not self._started:
self.at_start()
self._started = True
self.loop(obj, event)
self.points_poly.GetPointData().GetScalars().Modified()
self.points_poly.Modified()
self.interactive_renderer = obj
self.interactive_renderer.GetRenderWindow().Render()
|
[
"numpy.tile",
"numpy.allclose",
"math.ceil",
"numpy.cross",
"time.clock",
"math.floor",
"svtk.lib.toolbox.idarray.IdArray",
"vtk.util.numpy_support.numpy_to_vtkIdTypeArray",
"svtk.lib.toolbox.integer.minmax",
"numpy.append",
"numpy.array",
"numpy.matmul",
"vtk.util.numpy_support.numpy_to_vtk",
"svtk.lib.toolbox.numpy_helpers.normalize",
"numpy.transpose",
"vtk.util.numpy_support.vtk_to_numpy"
] |
[((1209, 1221), 'time.clock', 'time.clock', ([], {}), '()\n', (1219, 1221), False, 'import time\n'), ((1264, 1276), 'time.clock', 'time.clock', ([], {}), '()\n', (1274, 1276), False, 'import time\n'), ((1322, 1334), 'time.clock', 'time.clock', ([], {}), '()\n', (1332, 1334), False, 'import time\n'), ((1361, 1373), 'time.clock', 'time.clock', ([], {}), '()\n', (1371, 1373), False, 'import time\n'), ((1513, 1522), 'svtk.lib.toolbox.idarray.IdArray', 'IdArray', ([], {}), '()\n', (1520, 1522), False, 'from svtk.lib.toolbox.idarray import IdArray\n'), ((1553, 1562), 'svtk.lib.toolbox.idarray.IdArray', 'IdArray', ([], {}), '()\n', (1560, 1562), False, 'from svtk.lib.toolbox.idarray import IdArray\n'), ((2506, 2550), 'vtk.util.numpy_support.vtk_to_numpy', 'numpy_support.vtk_to_numpy', (['self.line_colors'], {}), '(self.line_colors)\n', (2532, 2550), False, 'from vtk.util import numpy_support\n'), ((2734, 2764), 'numpy.append', 'np.append', (['np_line_data', 'lines'], {}), '(np_line_data, lines)\n', (2743, 2764), True, 'import numpy as np\n'), ((2974, 3036), 'vtk.util.numpy_support.numpy_to_vtkIdTypeArray', 'numpy_support.numpy_to_vtkIdTypeArray', (['np_line_data'], {'deep': '(True)'}), '(np_line_data, deep=True)\n', (3011, 3036), False, 'from vtk.util import numpy_support\n'), ((3139, 3244), 'vtk.util.numpy_support.numpy_to_vtk', 'numpy_support.numpy_to_vtk', ([], {'num_array': 'np_line_color_data', 'deep': '(True)', 'array_type': 'vtk.VTK_UNSIGNED_CHAR'}), '(num_array=np_line_color_data, deep=True,\n array_type=vtk.VTK_UNSIGNED_CHAR)\n', (3165, 3244), False, 'from vtk.util import numpy_support\n'), ((4367, 4411), 'vtk.util.numpy_support.vtk_to_numpy', 'numpy_support.vtk_to_numpy', (['self.line_colors'], {}), '(self.line_colors)\n', (4393, 4411), False, 'from vtk.util import numpy_support\n'), ((5769, 5826), 'vtk.util.numpy_support.numpy_to_vtkIdTypeArray', 'numpy_support.numpy_to_vtkIdTypeArray', (['np_data'], {'deep': '(True)'}), '(np_data, deep=True)\n', (5806, 5826), False, 'from vtk.util import numpy_support\n'), ((5908, 6009), 'vtk.util.numpy_support.numpy_to_vtk', 'numpy_support.numpy_to_vtk', ([], {'num_array': 'np_color_data', 'deep': '(True)', 'array_type': 'vtk.VTK_UNSIGNED_CHAR'}), '(num_array=np_color_data, deep=True, array_type=\n vtk.VTK_UNSIGNED_CHAR)\n', (5934, 6009), False, 'from vtk.util import numpy_support\n'), ((6474, 6519), 'vtk.util.numpy_support.vtk_to_numpy', 'numpy_support.vtk_to_numpy', (['self.point_colors'], {}), '(self.point_colors)\n', (6500, 6519), False, 'from vtk.util import numpy_support\n'), ((8646, 8698), 'vtk.util.numpy_support.numpy_to_vtk', 'numpy_support.numpy_to_vtk', (['np_point_data'], {'deep': '(True)'}), '(np_point_data, deep=True)\n', (8672, 8698), False, 'from vtk.util import numpy_support\n'), ((8757, 8863), 'vtk.util.numpy_support.numpy_to_vtk', 'numpy_support.numpy_to_vtk', ([], {'num_array': 'np_point_color_data', 'deep': '(True)', 'array_type': 'vtk.VTK_UNSIGNED_CHAR'}), '(num_array=np_point_color_data, deep=True,\n array_type=vtk.VTK_UNSIGNED_CHAR)\n', (8783, 8863), False, 'from vtk.util import numpy_support\n'), ((8925, 8987), 'vtk.util.numpy_support.numpy_to_vtkIdTypeArray', 'numpy_support.numpy_to_vtkIdTypeArray', (['np_vert_data'], {'deep': '(True)'}), '(np_vert_data, deep=True)\n', (8962, 8987), False, 'from vtk.util import numpy_support\n'), ((9859, 9904), 'vtk.util.numpy_support.vtk_to_numpy', 'numpy_support.vtk_to_numpy', (['self.point_colors'], {}), '(self.point_colors)\n', (9885, 9904), False, 'from vtk.util import 
numpy_support\n'), ((11027, 11120), 'vtk.util.numpy_support.numpy_to_vtk', 'numpy_support.numpy_to_vtk', ([], {'num_array': 'np_point_data', 'deep': '(True)', 'array_type': 'vtk.VTK_FLOAT'}), '(num_array=np_point_data, deep=True, array_type=\n vtk.VTK_FLOAT)\n', (11053, 11120), False, 'from vtk.util import numpy_support\n'), ((11369, 11475), 'vtk.util.numpy_support.numpy_to_vtk', 'numpy_support.numpy_to_vtk', ([], {'num_array': 'np_point_color_data', 'deep': '(True)', 'array_type': 'vtk.VTK_UNSIGNED_CHAR'}), '(num_array=np_point_color_data, deep=True,\n array_type=vtk.VTK_UNSIGNED_CHAR)\n', (11395, 11475), False, 'from vtk.util import numpy_support\n'), ((12629, 12646), 'svtk.lib.toolbox.numpy_helpers.normalize', 'normalize', (['normal'], {}), '(normal)\n', (12638, 12646), False, 'from svtk.lib.toolbox.numpy_helpers import normalize\n'), ((12882, 12894), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (12890, 12894), True, 'import numpy as np\n'), ((14534, 14579), 'vtk.util.numpy_support.vtk_to_numpy', 'numpy_support.vtk_to_numpy', (['self.point_colors'], {}), '(self.point_colors)\n', (14560, 14579), False, 'from vtk.util import numpy_support\n'), ((14604, 14647), 'numpy.tile', 'np.tile', (['color', '(np_color_data.shape[0], 1)'], {}), '(color, (np_color_data.shape[0], 1))\n', (14611, 14647), True, 'import numpy as np\n'), ((14667, 14768), 'vtk.util.numpy_support.numpy_to_vtk', 'numpy_support.numpy_to_vtk', ([], {'num_array': 'np_color_data', 'deep': '(True)', 'array_type': 'vtk.VTK_UNSIGNED_CHAR'}), '(num_array=np_color_data, deep=True, array_type=\n vtk.VTK_UNSIGNED_CHAR)\n', (14693, 14768), False, 'from vtk.util import numpy_support\n'), ((15885, 15930), 'vtk.util.numpy_support.vtk_to_numpy', 'numpy_support.vtk_to_numpy', (['self.point_colors'], {}), '(self.point_colors)\n', (15911, 15930), False, 'from vtk.util import numpy_support\n'), ((15958, 16001), 'numpy.tile', 'np.tile', (['color', '(np_color_data.shape[0], 1)'], {}), '(color, (np_color_data.shape[0], 1))\n', (15965, 16001), True, 'import numpy as np\n'), ((16029, 16074), 'vtk.util.numpy_support.vtk_to_numpy', 'numpy_support.vtk_to_numpy', (['self.point_colors'], {}), '(self.point_colors)\n', (16055, 16074), False, 'from vtk.util import numpy_support\n'), ((21145, 21157), 'time.clock', 'time.clock', ([], {}), '()\n', (21155, 21157), False, 'import time\n'), ((2839, 2889), 'numpy.append', 'np.append', (['np_line_color_data', 'line_colors'], {'axis': '(0)'}), '(np_line_color_data, line_colors, axis=0)\n', (2848, 2889), True, 'import numpy as np\n'), ((3608, 3636), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int64'}), '([], dtype=np.int64)\n', (3616, 3636), True, 'import numpy as np\n'), ((5424, 5491), 'numpy.append', 'np.append', (['np_new_data', 'np_data[(last_loc + 1) * 3:loc * 3]'], {'axis': '(0)'}), '(np_new_data, np_data[(last_loc + 1) * 3:loc * 3], axis=0)\n', (5433, 5491), True, 'import numpy as np\n'), ((5569, 5638), 'numpy.append', 'np.append', (['np_new_color_data', 'np_color_data[last_loc + 1:loc]'], {'axis': '(0)'}), '(np_new_color_data, np_color_data[last_loc + 1:loc], axis=0)\n', (5578, 5638), True, 'import numpy as np\n'), ((8239, 8302), 'numpy.append', 'np.append', (['np_new_data', 'np_point_data[last_loc + 1:loc]'], {'axis': '(0)'}), '(np_new_data, np_point_data[last_loc + 1:loc], axis=0)\n', (8248, 8302), True, 'import numpy as np\n'), ((8340, 8415), 'numpy.append', 'np.append', (['np_new_color_data', 'np_point_color_data[last_loc + 1:loc]'], {'axis': '(0)'}), '(np_new_color_data, 
np_point_color_data[last_loc + 1:loc], axis=0)\n', (8349, 8415), True, 'import numpy as np\n'), ((8446, 8519), 'numpy.append', 'np.append', (['np_new_verts', 'np_vert_data[(last_loc + 1) * 2:loc * 2]'], {'axis': '(0)'}), '(np_new_verts, np_vert_data[(last_loc + 1) * 2:loc * 2], axis=0)\n', (8455, 8519), True, 'import numpy as np\n'), ((10577, 10617), 'numpy.append', 'np.append', (['np_point_data', 'points'], {'axis': '(0)'}), '(np_point_data, points, axis=0)\n', (10586, 10617), True, 'import numpy as np\n'), ((10723, 10739), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (10731, 10739), True, 'import numpy as np\n'), ((10767, 10810), 'numpy.tile', 'np.tile', (['point_colors', '(points.shape[0], 1)'], {}), '(point_colors, (points.shape[0], 1))\n', (10774, 10810), True, 'import numpy as np\n'), ((10887, 10939), 'numpy.append', 'np.append', (['np_point_color_data', 'point_colors'], {'axis': '(0)'}), '(np_point_color_data, point_colors, axis=0)\n', (10896, 10939), True, 'import numpy as np\n'), ((12662, 12697), 'numpy.allclose', 'np.allclose', (['true_normal', '[1, 0, 0]'], {}), '(true_normal, [1, 0, 0])\n', (12673, 12697), True, 'import numpy as np\n'), ((12716, 12748), 'numpy.cross', 'np.cross', (['true_normal', '[1, 0, 0]'], {}), '(true_normal, [1, 0, 0])\n', (12724, 12748), True, 'import numpy as np\n'), ((12766, 12791), 'numpy.cross', 'np.cross', (['true_normal', 'zn'], {}), '(true_normal, zn)\n', (12774, 12791), True, 'import numpy as np\n'), ((19482, 19571), 'vtk.util.numpy_support.numpy_to_vtk', 'numpy_support.numpy_to_vtk', ([], {'num_array': 'positions', 'deep': '(True)', 'array_type': 'vtk.VTK_FLOAT'}), '(num_array=positions, deep=True, array_type=vtk.\n VTK_FLOAT)\n', (19508, 19571), False, 'from vtk.util import numpy_support\n'), ((21088, 21100), 'time.clock', 'time.clock', ([], {}), '()\n', (21098, 21100), False, 'import time\n'), ((3747, 3759), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3755, 3759), True, 'import numpy as np\n'), ((13009, 13032), 'math.ceil', 'm.ceil', (['(widths[2] / 2.0)'], {}), '(widths[2] / 2.0)\n', (13015, 13032), True, 'import math as m\n'), ((14194, 14209), 'svtk.lib.toolbox.integer.minmax', 'minmax', (['r', '(0)', '(1)'], {}), '(r, 0, 1)\n', (14200, 14209), False, 'from svtk.lib.toolbox.integer import minmax\n'), ((14211, 14226), 'svtk.lib.toolbox.integer.minmax', 'minmax', (['g', '(0)', '(1)'], {}), '(g, 0, 1)\n', (14217, 14226), False, 'from svtk.lib.toolbox.integer import minmax\n'), ((14228, 14243), 'svtk.lib.toolbox.integer.minmax', 'minmax', (['b', '(0)', '(1)'], {}), '(b, 0, 1)\n', (14234, 14243), False, 'from svtk.lib.toolbox.integer import minmax\n'), ((15005, 15099), 'vtk.util.numpy_support.numpy_to_vtk', 'numpy_support.numpy_to_vtk', ([], {'num_array': 'colors', 'deep': '(True)', 'array_type': 'vtk.VTK_UNSIGNED_CHAR'}), '(num_array=colors, deep=True, array_type=vtk.\n VTK_UNSIGNED_CHAR)\n', (15031, 15099), False, 'from vtk.util import numpy_support\n'), ((15255, 15300), 'vtk.util.numpy_support.vtk_to_numpy', 'numpy_support.vtk_to_numpy', (['self.point_colors'], {}), '(self.point_colors)\n', (15281, 15300), False, 'from vtk.util import numpy_support\n'), ((15374, 15475), 'vtk.util.numpy_support.numpy_to_vtk', 'numpy_support.numpy_to_vtk', ([], {'num_array': 'np_color_data', 'deep': '(True)', 'array_type': 'vtk.VTK_UNSIGNED_CHAR'}), '(num_array=np_color_data, deep=True, array_type=\n vtk.VTK_UNSIGNED_CHAR)\n', (15400, 15475), False, 'from vtk.util import numpy_support\n'), ((18690, 18789), 
'vtk.util.numpy_support.numpy_to_vtk', 'numpy_support.numpy_to_vtk', ([], {'num_array': 'lerp_colors', 'deep': '(True)', 'array_type': 'vtk.VTK_UNSIGNED_CHAR'}), '(num_array=lerp_colors, deep=True, array_type=vtk\n .VTK_UNSIGNED_CHAR)\n', (18716, 18789), False, 'from vtk.util import numpy_support\n'), ((4890, 4957), 'numpy.append', 'np.append', (['np_new_data', 'np_data[(last_loc + 1) * 3:loc * 3]'], {'axis': '(0)'}), '(np_new_data, np_data[(last_loc + 1) * 3:loc * 3], axis=0)\n', (4899, 4957), True, 'import numpy as np\n'), ((5138, 5207), 'numpy.append', 'np.append', (['np_new_color_data', 'np_color_data[last_loc + 1:loc]'], {'axis': '(0)'}), '(np_new_color_data, np_color_data[last_loc + 1:loc], axis=0)\n', (5147, 5207), True, 'import numpy as np\n'), ((7394, 7467), 'numpy.append', 'np.append', (['np_new_verts', 'np_vert_data[(last_loc + 1) * 2:loc * 2]'], {'axis': '(0)'}), '(np_new_verts, np_vert_data[(last_loc + 1) * 2:loc * 2], axis=0)\n', (7403, 7467), True, 'import numpy as np\n'), ((7633, 7696), 'numpy.append', 'np.append', (['np_new_data', 'np_point_data[last_loc + 1:loc]'], {'axis': '(0)'}), '(np_new_data, np_point_data[last_loc + 1:loc], axis=0)\n', (7642, 7696), True, 'import numpy as np\n'), ((7877, 7967), 'numpy.append', 'np.append', (['np_new_color_data', 'np_point_color_data[(last_loc + 1) * 3:loc * 3]'], {'axis': '(0)'}), '(np_new_color_data, np_point_color_data[(last_loc + 1) * 3:loc * 3\n ], axis=0)\n', (7886, 7967), True, 'import numpy as np\n'), ((12978, 13002), 'math.floor', 'm.floor', (['(widths[2] / 2.0)'], {}), '(widths[2] / 2.0)\n', (12985, 13002), True, 'import math as m\n'), ((13099, 13122), 'math.ceil', 'm.ceil', (['(widths[1] / 2.0)'], {}), '(widths[1] / 2.0)\n', (13105, 13122), True, 'import math as m\n'), ((13402, 13443), 'numpy.matmul', 'np.matmul', (['[x, y, z]', 'vector_space_matrix'], {}), '([x, y, z], vector_space_matrix)\n', (13411, 13443), True, 'import numpy as np\n'), ((16945, 16980), 'numpy.append', 'np.append', (['self.next_colors', 'colors'], {}), '(self.next_colors, colors)\n', (16954, 16980), True, 'import numpy as np\n'), ((17327, 17372), 'vtk.util.numpy_support.vtk_to_numpy', 'numpy_support.vtk_to_numpy', (['self.point_colors'], {}), '(self.point_colors)\n', (17353, 17372), False, 'from vtk.util import numpy_support\n'), ((13068, 13092), 'math.floor', 'm.floor', (['(widths[1] / 2.0)'], {}), '(widths[1] / 2.0)\n', (13075, 13092), True, 'import math as m\n'), ((13193, 13216), 'math.ceil', 'm.ceil', (['(widths[0] / 2.0)'], {}), '(widths[0] / 2.0)\n', (13199, 13216), True, 'import math as m\n'), ((13664, 13710), 'numpy.append', 'np.append', (['point_field', 'point_location'], {'axis': '(0)'}), '(point_field, point_location, axis=0)\n', (13673, 13710), True, 'import numpy as np\n'), ((17073, 17122), 'numpy.append', 'np.append', (['self.next_color_indices', 'point_indices'], {}), '(self.next_color_indices, point_indices)\n', (17082, 17122), True, 'import numpy as np\n'), ((13162, 13186), 'math.floor', 'm.floor', (['(widths[0] / 2.0)'], {}), '(widths[0] / 2.0)\n', (13169, 13186), True, 'import math as m\n'), ((13304, 13320), 'numpy.transpose', 'np.transpose', (['xn'], {}), '(xn)\n', (13316, 13320), True, 'import numpy as np\n'), ((13322, 13347), 'numpy.transpose', 'np.transpose', (['true_normal'], {}), '(true_normal)\n', (13334, 13347), True, 'import numpy as np\n'), ((13349, 13365), 'numpy.transpose', 'np.transpose', (['zn'], {}), '(zn)\n', (13361, 13365), True, 'import numpy as np\n')]
|
import time
import numpy as np
from tqdm import tqdm
from utils import RandomCNOT, RandomCNOTs
def SimulatedAnnealing(quantum_count, layer_count, solver, epochs=100, save_path=None, global_best_score=0):
#TODO:
best_score = 0
cnot = RandomCNOTs(quantum_count, layer_count)
sc, model = solver(cnot)
if sc>best_score:
best_score = sc
cnot_seed = cnot
best_model = model
best_cnot = cnot
if save_path is not None and best_score>global_best_score:
with open(save_path, 'w') as f:
f.write(best_model)
start_time = time.time()
for epoch in range(epochs):
for i in range(layer_count):
cnot_layers = cnot_seed.copy()
cnot_layers[i] = RandomCNOT(quantum_count)
sc, model = solver(cnot_layers)
if sc>best_score or np.random.randint(epochs)>epoch:
cnot_seed = cnot_layers
if sc>best_score:
best_score = sc
best_model = model
best_cnot = cnot_layers
if save_path is not None and best_score>global_best_score:
with open(save_path, 'w') as f:
f.write(best_model)
print('epoch %d, iter %d, Score = %g, best_score = %g, global_best_score = %g, time = %gs'%(epoch, i, sc, best_score, global_best_score, time.time()-start_time))
# print(best_model)
return best_score, best_model, best_cnot
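# Note on the acceptance rule above (added illustration, not from the original author):
# a worse layout is still accepted when np.random.randint(epochs) > epoch, i.e. with
# probability roughly (epochs - epoch) / epochs, so exploration cools off linearly over
# epochs rather than following the classical exp(-delta/T) Metropolis criterion.
# Hypothetical usage with a dummy solver that scores circuits at random:
#   toy_solver = lambda cnot: (np.random.rand(), "placeholder-model")
#   best_sc, best_model, best_cnot = SimulatedAnnealing(4, 8, toy_solver, epochs=5)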
def SequenceJitter(quantum_count, layer_count, solver, init_epochs=10, epochs=100, save_path=None, global_best_score=0):
#TODO:
best_score = 0
print('Init cnot seed.')
for _ in tqdm(range(init_epochs)):
cnot = RandomCNOTs(quantum_count, layer_count)
sc, model = solver(cnot)
if sc>best_score:
best_score = sc
cnot_seed = cnot
best_model = model
best_cnot = cnot
if save_path is not None and best_score>global_best_score:
with open(save_path, 'w') as f:
f.write(best_model)
start_time = time.time()
for epoch in range(epochs):
for i in range(layer_count):
cnot_layers = cnot_seed.copy()
cnot_layers[i] = RandomCNOT(quantum_count)
sc, model = solver(cnot_layers)
if sc>best_score:
best_score = sc
cnot_seed = cnot_layers
best_model = model
best_cnot = cnot_layers
if save_path is not None and best_score>global_best_score:
with open(save_path, 'w') as f:
f.write(best_model)
print('Score = %g, best_score = %g, global_best_score = %g, time = %gs'%(sc, best_score, global_best_score, time.time()-start_time))
# print(best_model)
return best_score, best_model, best_cnot
def RandomSearch(cnot_creater, solver, epochs=100, save_path=None):
'''
    Random search.
    Parameters:
        cnot_creater: a callable that generates the CNOT layers
        solver: a callable that, given the network structure, solves for the network parameters
        epochs: number of random-search rounds
        save_path: path where the best result is saved
'''
best_score = 0
start_time = time.time()
for epoch in range(epochs):
cnot_layers = cnot_creater()
sc, model = solver(cnot_layers)
if sc>best_score:
best_score = sc
best_model = model
if save_path is not None:
with open(save_path, 'w') as f:
f.write(best_model)
print('No_%d: score = %g, best_score = %g, time = %gs'%(epoch, sc, best_score, time.time()-start_time))
# print(best_model)
return best_score, best_model
|
[
"utils.RandomCNOTs",
"numpy.random.randint",
"time.time",
"utils.RandomCNOT"
] |
[((247, 286), 'utils.RandomCNOTs', 'RandomCNOTs', (['quantum_count', 'layer_count'], {}), '(quantum_count, layer_count)\n', (258, 286), False, 'from utils import RandomCNOT, RandomCNOTs\n'), ((591, 602), 'time.time', 'time.time', ([], {}), '()\n', (600, 602), False, 'import time\n'), ((2114, 2125), 'time.time', 'time.time', ([], {}), '()\n', (2123, 2125), False, 'import time\n'), ((3177, 3188), 'time.time', 'time.time', ([], {}), '()\n', (3186, 3188), False, 'import time\n'), ((1734, 1773), 'utils.RandomCNOTs', 'RandomCNOTs', (['quantum_count', 'layer_count'], {}), '(quantum_count, layer_count)\n', (1745, 1773), False, 'from utils import RandomCNOT, RandomCNOTs\n'), ((744, 769), 'utils.RandomCNOT', 'RandomCNOT', (['quantum_count'], {}), '(quantum_count)\n', (754, 769), False, 'from utils import RandomCNOT, RandomCNOTs\n'), ((2267, 2292), 'utils.RandomCNOT', 'RandomCNOT', (['quantum_count'], {}), '(quantum_count)\n', (2277, 2292), False, 'from utils import RandomCNOT, RandomCNOTs\n'), ((846, 871), 'numpy.random.randint', 'np.random.randint', (['epochs'], {}), '(epochs)\n', (863, 871), True, 'import numpy as np\n'), ((3596, 3607), 'time.time', 'time.time', ([], {}), '()\n', (3605, 3607), False, 'import time\n'), ((1404, 1415), 'time.time', 'time.time', ([], {}), '()\n', (1413, 1415), False, 'import time\n'), ((2805, 2816), 'time.time', 'time.time', ([], {}), '()\n', (2814, 2816), False, 'import time\n')]
|
from __future__ import print_function, division
import numpy as np
from numpy import identity, dot, zeros, zeros_like
def rf_den_via_rf0(self, rf0, v):
""" Whole matrix of the interacting response via non-interacting response and interaction"""
rf = zeros_like(rf0)
I = identity(rf0.shape[1])
for ir,r in enumerate(rf0):
rf[ir] = dot(np.linalg.inv(I-dot(r,v)), r)
return rf
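# The loop above solves the Dyson-type relation chi(w) = [1 - chi0(w) v]^(-1) chi0(w)
# one frequency block at a time.  A minimal self-check (illustrative only; `self` is
# unused by this helper, so None may be passed):
#   chi0 = np.array([[[0.2, 0.0], [0.0, 0.1]]]); v = np.eye(2)
#   chi = rf_den_via_rf0(None, chi0, v)
#   assert np.allclose(chi[0], np.linalg.inv(np.eye(2) - chi0[0].dot(v)).dot(chi0[0]))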
def rf_den(self, ww):
""" Full matrix interacting response from NAO GW class"""
rf0 = self.rf0(ww)
return rf_den_via_rf0(self, rf0, self.kernel_sq)
|
[
"numpy.identity",
"numpy.dot",
"numpy.zeros_like"
] |
[((255, 270), 'numpy.zeros_like', 'zeros_like', (['rf0'], {}), '(rf0)\n', (265, 270), False, 'from numpy import identity, dot, zeros, zeros_like\n'), ((278, 300), 'numpy.identity', 'identity', (['rf0.shape[1]'], {}), '(rf0.shape[1])\n', (286, 300), False, 'from numpy import identity, dot, zeros, zeros_like\n'), ((364, 373), 'numpy.dot', 'dot', (['r', 'v'], {}), '(r, v)\n', (367, 373), False, 'from numpy import identity, dot, zeros, zeros_like\n')]
|
import os
import logging
import numpy as np
from typing import Optional
import torch
from torch.utils.data import DataLoader
from ..eval import Metric
from .dataset import CHMMBaseDataset
from .dataset import collate_fn as default_collate_fn
logger = logging.getLogger(__name__)
OUT_RECALL = 0.9
OUT_PRECISION = 0.8
class CHMMBaseTrainer:
def __init__(self,
config,
collate_fn=None,
training_dataset=None,
valid_dataset=None,
test_dataset=None,
pretrain_optimizer=None,
optimizer=None):
self._model = None
self._config = config
self._training_dataset = training_dataset
self._valid_dataset = valid_dataset
self._test_dataset = test_dataset
self._collate_fn = collate_fn
self._pretrain_optimizer = pretrain_optimizer
self._optimizer = optimizer
self._init_state_prior = None
self._init_trans_mat = None
self._init_emiss_mat = None
@property
def config(self):
return self._config
@config.setter
def config(self, x):
logger.warning("Updating DirCHMMTrainer.config")
self._config = x
@property
def model(self):
return self._model
def initialize_trainer(self):
"""
Initialize necessary components for training
Note: Better not change the order
Returns
-------
the initialized trainer
"""
self.initialize_matrices()
self.initialize_model()
self.initialize_optimizers()
return self
def initialize_model(self):
raise NotImplementedError
def initialize_matrices(self):
"""
Initialize <HMM> transition and emission matrices
Returns
-------
self
"""
assert self._training_dataset and self._valid_dataset
# inject prior knowledge about transition and emission
self._init_state_prior = torch.zeros(self._config.d_hidden, device=self._config.device) + 1e-2
self._init_state_prior[0] += 1 - self._init_state_prior.sum()
intg_obs = list(map(np.array, self._training_dataset.obs + self._valid_dataset.obs))
# construct/load initial transition matrix
dataset_dir = os.path.split(self._config.train_path)[0]
transmat_path = os.path.join(dataset_dir, "init_transmat.pt")
if getattr(self._config, "load_init_mat", False):
if os.path.isfile(transmat_path):
logger.info("Loading initial transition matrix from disk")
self._init_trans_mat = torch.load(transmat_path)
# if the loaded transmat does not have the proper shape, re-calculate it.
s0_transmat, s1_transmat = self._init_trans_mat.shape
if not (s0_transmat == s1_transmat == self.config.d_obs):
self._init_trans_mat = None
if self._init_trans_mat is None:
self._init_trans_mat = torch.tensor(initialise_transmat(
observations=intg_obs, label_set=self._config.bio_label_types
)[0], dtype=torch.float)
if getattr(self._config, "save_init_mat", False):
logger.info("Saving initial transition matrix")
torch.save(self._init_trans_mat, transmat_path)
# construct/load initial emission matrix
emissmat_path = os.path.join(dataset_dir, "init_emissmat.pt")
if getattr(self._config, "load_init_mat", False):
if os.path.isfile(emissmat_path):
logger.info("Loading initial emission matrix from disk")
self._init_emiss_mat = torch.load(emissmat_path)
# if the loaded emissmat does not have the proper shape, re-calculate it.
s0_emissmat, s1_emissmat, s2_emissmat = self._init_emiss_mat.shape
                if not (s0_emissmat == self.config.n_src and s1_emissmat == s2_emissmat == self.config.d_obs):
self._init_emiss_mat = None
if self._init_emiss_mat is None:
self._init_emiss_mat = torch.tensor(initialise_emissions(
observations=intg_obs, label_set=self._config.bio_label_types,
sources=self._config.sources, src_priors=self._config.src_priors
)[0], dtype=torch.float)
if getattr(self._config, "save_init_mat", False):
logger.info("Saving initial emission matrix")
torch.save(self._init_emiss_mat, emissmat_path)
return self
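    # Shape summary for the matrices initialised above (added note): the loading checks
    # expect
    #   self._init_trans_mat : (d_obs, d_obs)         label-to-label transition probabilities
    #   self._init_emiss_mat : (n_src, d_obs, d_obs)  one emission block per labelling source
    # with d_obs = len(config.bio_label_types) and n_src the number of labelling sources.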
def initialize_optimizers(self, optimizer=None, pretrain_optimizer=None):
self._optimizer = self.get_optimizer() if optimizer is None else optimizer
self._pretrain_optimizer = self.get_pretrain_optimizer() if pretrain_optimizer is None else pretrain_optimizer
def get_dataloader(self, dataset, shuffle=False):
if dataset is not None:
dataloader = DataLoader(
dataset=dataset,
batch_size=self._config.lm_batch_size,
collate_fn=self._collate_fn if self._collate_fn is not None else default_collate_fn,
shuffle=shuffle,
drop_last=False
)
return dataloader
else:
logger.error('Dataset is not defined')
raise ValueError("Dataset is not defined!")
def pretrain_step(self, data_loader, optimizer, trans_, emiss_):
raise NotImplementedError
def training_step(self, data_loader, optimizer):
raise NotImplementedError
def train(self):
raise NotImplementedError
def valid(self) -> Metric:
self._model.to(self._config.device)
valid_metrics = self.evaluate(self._valid_dataset)
logger.info("Validation results:")
for k, v in valid_metrics.items():
logger.info(f" {k}: {v:.4f}")
return valid_metrics
def test(self) -> Metric:
self._model.to(self._config.device)
test_metrics = self.evaluate(self._test_dataset)
logger.info("Test results:")
for k, v in test_metrics.items():
logger.info(f" {k}: {v:.4f}")
return test_metrics
def evaluate(self, dataset: CHMMBaseDataset):
raise NotImplementedError
def predict(self, dataset: CHMMBaseDataset):
raise NotImplementedError
def get_pretrain_optimizer(self):
raise NotImplementedError
def get_optimizer(self):
# ----- initialize optimizer -----
raise NotImplementedError
def save(self,
output_dir: Optional[str] = None,
save_optimizer: Optional[bool] = False,
model_name: Optional[str] = 'chmm',
optimizer_name: Optional[str] = 'chmm-optimizer',
pretrain_optimizer_name: Optional[str] = 'chmm-pretrain-optimizer'):
"""
Save model parameters as well as trainer parameters
Parameters
----------
output_dir: model directory
save_optimizer: whether to save optimizer
model_name: model name (suffix free)
optimizer_name: optimizer name (suffix free)
pretrain_optimizer_name: pretrain optimizer name (suffix free)
Returns
-------
None
"""
output_dir = output_dir if output_dir is not None else self._config.output_dir
logger.info(f"Saving model to {output_dir}")
model_state_dict = self._model.state_dict()
torch.save(model_state_dict, os.path.join(output_dir, f'{model_name}.bin'))
self._config.save(output_dir)
if save_optimizer:
logger.info("Saving optimizer and scheduler")
torch.save(self._optimizer.state_dict(),
os.path.join(output_dir, f"{optimizer_name}.bin"))
torch.save(self._pretrain_optimizer.state_dict(),
os.path.join(output_dir, f"{pretrain_optimizer_name}.bin"))
return None
def load(self,
input_dir: Optional[str] = None,
load_optimizer: Optional[bool] = False,
model_name: Optional[str] = 'chmm',
optimizer_name: Optional[str] = 'chmm-optimizer',
pretrain_optimizer_name: Optional[str] = 'chmm-pretrain-optimizer'):
"""
Load model parameters.
Parameters
----------
input_dir: model directory
load_optimizer: whether load other trainer parameters
model_name: model name (suffix free)
optimizer_name: optimizer name (suffix free)
pretrain_optimizer_name: pretrain optimizer name (suffix free)
Returns
-------
self
"""
input_dir = input_dir if input_dir is not None else self._config.output_dir
if self._model is not None:
logger.warning(f"The original model {type(self._model)} in {type(self)} is not None. "
f"It will be overwritten by the loaded model!")
logger.info(f"Loading model from {input_dir}")
self.initialize_model()
self._model.load_state_dict(torch.load(os.path.join(input_dir, f'{model_name}.bin')))
self._model.to(self.config.device)
if load_optimizer:
logger.info("Loading optimizer and scheduler")
if self._optimizer is None:
self.initialize_optimizers()
if os.path.isfile(os.path.join(input_dir, f"{optimizer_name}.bin")):
self._optimizer.load_state_dict(
torch.load(os.path.join(input_dir, f"{optimizer_name}.bin"), map_location=self.config.device)
)
else:
logger.warning("Optimizer file does not exist!")
if os.path.isfile(os.path.join(input_dir, f"{pretrain_optimizer_name}.bin")):
self._pretrain_optimizer.load_state_dict(
torch.load(os.path.join(input_dir, f"{pretrain_optimizer_name}.bin"))
)
else:
logger.warning("Pretrain optimizer file does not exist!")
return self
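    # Hypothetical round trip using the two methods above (illustrative only):
    #   trainer.save(output_dir="out", save_optimizer=True)
    #   trainer.load(input_dir="out", load_optimizer=True)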
def save_results(self,
output_dir: str,
valid_results: Optional[Metric] = None,
file_name: Optional[str] = 'results',
disable_final_valid: Optional[bool] = False,
disable_test: Optional[bool] = False,
disable_inter_results: Optional[bool] = False) -> None:
"""
Save training (validation) results
Parameters
----------
output_dir: output directory, should be a folder
valid_results: validation results during the training process
file_name: file name
disable_final_valid: disable final validation process (getting validation results of the trained model)
disable_test: disable test process
disable_inter_results: do not save inter-results
Returns
-------
None
"""
if not disable_final_valid:
logger.info("Getting final validation metrics")
valid_metrics = self.valid()
else:
valid_metrics = None
if not disable_test:
logger.info("Getting test metrics.")
test_metrics = self.test()
else:
test_metrics = None
# write validation and test results
result_file = os.path.join(output_dir, f'{file_name}.txt')
logger.info(f"Writing results to {result_file}")
self.write_result(file_path=result_file,
valid_results=valid_results,
final_valid_metrics=valid_metrics,
test_metrics=test_metrics)
if not disable_inter_results:
# save validation inter results
logger.info(f"Saving inter results")
inter_result_file = os.path.join(output_dir, f'{file_name}-inter.pt')
torch.save(valid_results.__dict__, inter_result_file)
return None
@staticmethod
def write_result(file_path: str,
valid_results: Optional[Metric] = None,
final_valid_metrics: Optional[Metric] = None,
test_metrics: Optional[Metric] = None) -> None:
"""
Support functions for saving training results
Parameters
----------
file_path: where to save results
valid_results: validation results during the training process
final_valid_metrics: validation results of the trained model
test_metrics
Returns
-------
"""
with open(file_path, 'w') as f:
if valid_results is not None:
for i in range(len(valid_results)):
f.write(f"[Epoch {i + 1}]\n")
for k in ['precision', 'recall', 'f1']:
f.write(f" {k}: {valid_results[k][i]:.4f}")
f.write("\n")
if final_valid_metrics is not None:
f.write(f"[Best Validation]\n")
for k in ['precision', 'recall', 'f1']:
f.write(f" {k}: {final_valid_metrics[k]:.4f}")
f.write("\n")
if test_metrics is not None:
f.write(f"[Test]\n")
for k in ['precision', 'recall', 'f1']:
f.write(f" {k}: {test_metrics[k]:.4f}")
f.write("\n")
return None
def initialise_startprob(observations,
label_set,
src_idx=None):
"""
    calculate initial hidden states (not used in our setup, since our sequences all begin with
    [CLS], which corresponds to hidden state "O").
:param src_idx: source index
:param label_set: a set of all possible label_set
:param observations: n_instances X seq_len X n_src X d_obs
:return: probabilities for the initial hidden states
"""
n_src = observations[0].shape[1]
logger.info("Constructing start distribution prior...")
init_counts = np.zeros((len(label_set),))
if src_idx is not None:
for obs in observations:
init_counts[obs[0, src_idx].argmax()] += 1
else:
for obs in observations:
for z in range(n_src):
init_counts[obs[0, z].argmax()] += 1
for i, label in enumerate(label_set):
if i == 0 or label.startswith("B-"):
init_counts[i] += 1
startprob_prior = init_counts + 1
startprob_ = np.random.dirichlet(init_counts + 1E-10)
return startprob_, startprob_prior
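# --- Hedged illustration (added sketch, not part of the original module) ----
# The initialisers here share one recipe: accumulate counts from the weak
# supervision, smooth them, and draw a valid probability vector from a
# Dirichlet over those counts. A self-contained toy version (counts invented):
def _demo_dirichlet_init():
    toy_counts = np.array([5.0, 1.0, 0.0])               # e.g. ["O", "B-X", "I-X"]
    toy_startprob = np.random.dirichlet(toy_counts + 1e-10)
    assert np.isclose(toy_startprob.sum(), 1.0)         # a proper distribution
    return toy_startprob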
# TODO: try to use a more reliable source to start the transition and emission
def initialise_transmat(observations,
label_set,
src_idx=None):
"""
initialize transition matrix
    :param src_idx: the index of the source from which the transition statistics are computed.
                    If None, use all sources
:param label_set: a set of all possible label_set
:param observations: n_instances X seq_len X n_src X d_obs
:return: initial transition matrix and transition counts
"""
logger.info("Constructing transition matrix prior...")
n_src = observations[0].shape[1]
trans_counts = np.zeros((len(label_set), len(label_set)))
if src_idx is not None:
for obs in observations:
for k in range(0, len(obs) - 1):
trans_counts[obs[k, src_idx].argmax(), obs[k + 1, src_idx].argmax()] += 1
else:
for obs in observations:
for k in range(0, len(obs) - 1):
for z in range(n_src):
trans_counts[obs[k, z].argmax(), obs[k + 1, z].argmax()] += 1
# update transition matrix with prior knowledge
for i, label in enumerate(label_set):
if label.startswith("B-") or label.startswith("I-"):
trans_counts[i, label_set.index("I-" + label[2:])] += 1
elif i == 0 or label.startswith("I-"):
for j, label2 in enumerate(label_set):
if j == 0 or label2.startswith("B-"):
trans_counts[i, j] += 1
transmat_prior = trans_counts + 1
# initialize transition matrix with dirichlet distribution
transmat_ = np.vstack([np.random.dirichlet(trans_counts2 + 1E-10)
for trans_counts2 in trans_counts])
return transmat_, transmat_prior
def initialise_emissions(observations,
label_set,
sources,
src_priors,
strength=1000):
"""
initialize emission matrices
:param sources: source names
:param src_priors: source priors
:param label_set: a set of all possible label_set
:param observations: n_instances X seq_len X n_src X d_obs
    :param strength: scaling factor applied to the emission matrix to form the (pseudo-count) emission priors
    :return: initial emission matrices and emission priors
"""
logger.info("Constructing emission probabilities...")
obs_counts = np.zeros((len(sources), len(label_set)), dtype=np.float64)
# extract the total number of observations for each prior
for obs in observations:
obs_counts += obs.sum(axis=0)
for source_index, source in enumerate(sources):
# increase p(O)
obs_counts[source_index, 0] += 1
# increase the "reasonable" observations
for pos_index, pos_label in enumerate(label_set[1:]):
if pos_label[2:] in src_priors[source]:
obs_counts[source_index, pos_index] += 1
# construct probability distribution from counts
obs_probs = obs_counts / (obs_counts.sum(axis=1, keepdims=True) + 1E-3)
# initialize emission matrix
matrix = np.zeros((len(sources), len(label_set), len(label_set)))
for source_index, source in enumerate(sources):
for pos_index, pos_label in enumerate(label_set):
# Simple case: set P(O=x|Y=x) to be the recall
recall = 0
if pos_index == 0:
recall = OUT_RECALL
elif pos_label[2:] in src_priors[source]:
_, recall = src_priors[source][pos_label[2:]]
matrix[source_index, pos_index, pos_index] = recall
for pos_index2, pos_label2 in enumerate(label_set):
if pos_index2 == pos_index:
continue
elif pos_index2 == 0:
precision = OUT_PRECISION
elif pos_label2[2:] in src_priors[source]:
precision, _ = src_priors[source][pos_label2[2:]]
else:
precision = 1.0
# Otherwise, we set the probability to be inversely proportional to the precision
# and the (unconditional) probability of the observation
error_prob = (1 - recall) * (1 - precision) * (0.001 + obs_probs[source_index, pos_index2])
# We increase the probability for boundary errors (i.e. I-ORG -> B-ORG)
if pos_index > 0 and pos_index2 > 0 and pos_label[2:] == pos_label2[2:]:
error_prob *= 5
# We increase the probability for errors with same boundary (i.e. I-ORG -> I-GPE)
if pos_index > 0 and pos_index2 > 0 and pos_label[0] == pos_label2[0]:
error_prob *= 2
matrix[source_index, pos_index, pos_index2] = error_prob
error_indices = [i for i in range(len(label_set)) if i != pos_index]
error_sum = matrix[source_index, pos_index, error_indices].sum()
matrix[source_index, pos_index, error_indices] /= (error_sum / (1 - recall) + 1E-5)
emission_priors = matrix * strength
emission_probs = matrix
return emission_probs, emission_priors
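# --- Hedged illustration (added sketch, not part of the original module) ----
# How a single emission row above ends up shaped: the diagonal entry carries
# the source's recall for that label, and the off-diagonal error mass is
# rescaled so it sums to roughly (1 - recall). The numbers are invented.
def _demo_emission_row(recall=0.8):
    row = np.array([0.02, recall, 0.05, 0.10])        # index 1 is the true label
    err = [i for i in range(len(row)) if i != 1]
    row[err] /= (row[err].sum() / (1 - recall) + 1e-5)
    return row                                        # row.sum() is ~1.0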
|
[
"logging.getLogger",
"torch.load",
"os.path.join",
"os.path.split",
"os.path.isfile",
"numpy.random.dirichlet",
"torch.save",
"torch.utils.data.DataLoader",
"torch.zeros"
] |
[((254, 281), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (271, 281), False, 'import logging\n'), ((14610, 14650), 'numpy.random.dirichlet', 'np.random.dirichlet', (['(init_counts + 1e-10)'], {}), '(init_counts + 1e-10)\n', (14629, 14650), True, 'import numpy as np\n'), ((2409, 2454), 'os.path.join', 'os.path.join', (['dataset_dir', '"""init_transmat.pt"""'], {}), "(dataset_dir, 'init_transmat.pt')\n", (2421, 2454), False, 'import os\n'), ((3473, 3518), 'os.path.join', 'os.path.join', (['dataset_dir', '"""init_emissmat.pt"""'], {}), "(dataset_dir, 'init_emissmat.pt')\n", (3485, 3518), False, 'import os\n'), ((11491, 11535), 'os.path.join', 'os.path.join', (['output_dir', 'f"""{file_name}.txt"""'], {}), "(output_dir, f'{file_name}.txt')\n", (11503, 11535), False, 'import os\n'), ((2035, 2097), 'torch.zeros', 'torch.zeros', (['self._config.d_hidden'], {'device': 'self._config.device'}), '(self._config.d_hidden, device=self._config.device)\n', (2046, 2097), False, 'import torch\n'), ((2343, 2381), 'os.path.split', 'os.path.split', (['self._config.train_path'], {}), '(self._config.train_path)\n', (2356, 2381), False, 'import os\n'), ((2528, 2557), 'os.path.isfile', 'os.path.isfile', (['transmat_path'], {}), '(transmat_path)\n', (2542, 2557), False, 'import os\n'), ((3592, 3621), 'os.path.isfile', 'os.path.isfile', (['emissmat_path'], {}), '(emissmat_path)\n', (3606, 3621), False, 'import os\n'), ((5008, 5201), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'dataset', 'batch_size': 'self._config.lm_batch_size', 'collate_fn': '(self._collate_fn if self._collate_fn is not None else default_collate_fn)', 'shuffle': 'shuffle', 'drop_last': '(False)'}), '(dataset=dataset, batch_size=self._config.lm_batch_size,\n collate_fn=self._collate_fn if self._collate_fn is not None else\n default_collate_fn, shuffle=shuffle, drop_last=False)\n', (5018, 5201), False, 'from torch.utils.data import DataLoader\n'), ((7581, 7626), 'os.path.join', 'os.path.join', (['output_dir', 'f"""{model_name}.bin"""'], {}), "(output_dir, f'{model_name}.bin')\n", (7593, 7626), False, 'import os\n'), ((11975, 12024), 'os.path.join', 'os.path.join', (['output_dir', 'f"""{file_name}-inter.pt"""'], {}), "(output_dir, f'{file_name}-inter.pt')\n", (11987, 12024), False, 'import os\n'), ((12037, 12090), 'torch.save', 'torch.save', (['valid_results.__dict__', 'inter_result_file'], {}), '(valid_results.__dict__, inter_result_file)\n', (12047, 12090), False, 'import torch\n'), ((16361, 16403), 'numpy.random.dirichlet', 'np.random.dirichlet', (['(trans_counts2 + 1e-10)'], {}), '(trans_counts2 + 1e-10)\n', (16380, 16403), True, 'import numpy as np\n'), ((2673, 2698), 'torch.load', 'torch.load', (['transmat_path'], {}), '(transmat_path)\n', (2683, 2698), False, 'import torch\n'), ((3351, 3398), 'torch.save', 'torch.save', (['self._init_trans_mat', 'transmat_path'], {}), '(self._init_trans_mat, transmat_path)\n', (3361, 3398), False, 'import torch\n'), ((3735, 3760), 'torch.load', 'torch.load', (['emissmat_path'], {}), '(emissmat_path)\n', (3745, 3760), False, 'import torch\n'), ((4546, 4593), 'torch.save', 'torch.save', (['self._init_emiss_mat', 'emissmat_path'], {}), '(self._init_emiss_mat, emissmat_path)\n', (4556, 4593), False, 'import torch\n'), ((7829, 7878), 'os.path.join', 'os.path.join', (['output_dir', 'f"""{optimizer_name}.bin"""'], {}), "(output_dir, f'{optimizer_name}.bin')\n", (7841, 7878), False, 'import os\n'), ((7965, 8023), 'os.path.join', 'os.path.join', (['output_dir', 
'f"""{pretrain_optimizer_name}.bin"""'], {}), "(output_dir, f'{pretrain_optimizer_name}.bin')\n", (7977, 8023), False, 'import os\n'), ((9193, 9237), 'os.path.join', 'os.path.join', (['input_dir', 'f"""{model_name}.bin"""'], {}), "(input_dir, f'{model_name}.bin')\n", (9205, 9237), False, 'import os\n'), ((9485, 9533), 'os.path.join', 'os.path.join', (['input_dir', 'f"""{optimizer_name}.bin"""'], {}), "(input_dir, f'{optimizer_name}.bin')\n", (9497, 9533), False, 'import os\n'), ((9830, 9887), 'os.path.join', 'os.path.join', (['input_dir', 'f"""{pretrain_optimizer_name}.bin"""'], {}), "(input_dir, f'{pretrain_optimizer_name}.bin')\n", (9842, 9887), False, 'import os\n'), ((9616, 9664), 'os.path.join', 'os.path.join', (['input_dir', 'f"""{optimizer_name}.bin"""'], {}), "(input_dir, f'{optimizer_name}.bin')\n", (9628, 9664), False, 'import os\n'), ((9979, 10036), 'os.path.join', 'os.path.join', (['input_dir', 'f"""{pretrain_optimizer_name}.bin"""'], {}), "(input_dir, f'{pretrain_optimizer_name}.bin')\n", (9991, 10036), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 28 13:03:05 2017
@author: <NAME>
"""
import cntk as C
import _cntk_py
import cntk.layers
import cntk.initializer
import cntk.losses
import cntk.metrics
import cntk.logging
import cntk.io.transforms as xforms
import cntk.io
import cntk.train
import os
import numpy as np
import yolo2
import CloneModel
# Default paths relative to the current Python file.
abs_path = os.path.dirname(os.path.abspath(__file__))
model_path = os.path.join(abs_path, "Models")
# model dimensions
image_height = 416
image_width = 416
num_channels = 3 # RGB
num_truth_boxes = 14
box_dim = 5 # centerX, centerY, Width, Height, class_type
num_classes = 3 # object type count. i.e. tomato, flower, stem, et, al.
num_anchors = 5
model_name = "Yolo2Net.model"
# Create a minibatch source.
def create_image_mb_source(image_file, rois_file, is_training, total_number_of_samples):
if not os.path.exists(image_file):
raise RuntimeError("File '%s' does not exist." %image_file)
if not os.path.exists(rois_file):
raise RuntimeError("File '%s' does not exist." %rois_file)
# transformation pipeline for the features has jitter/crop only when training
transforms = [xforms.scale(width=image_width, height=image_height,
channels=num_channels, interpolations='linear')]
if is_training:
transforms += [
xforms.color(brightness_radius=0.2, contrast_radius=0.2, saturation_radius=0.2)
]
# deserializer
imageReader = cntk.io.ImageDeserializer(image_file,
cntk.io.StreamDefs(
features=cntk.io.StreamDef(field='image', transforms=transforms),
ignored=cntk.io.StreamDef(field='label', shape=1)))
txtReader = cntk.io.CTFDeserializer(rois_file,
cntk.io.StreamDefs(
rois=cntk.io.StreamDef(field='rois',shape=num_truth_boxes*box_dim)))
return cntk.io.MinibatchSource([imageReader, txtReader],
randomize=is_training,
max_samples=total_number_of_samples,
multithreaded_deserializer=True)
# Create the network.
def create_yolo2net(anchor_dims = None):
# Input variables denoting the features and label data
feature_var = C.input_variable((num_channels, image_height, image_width))
label_var = C.input_variable((num_truth_boxes, box_dim))
net = CloneModel.CloneModel('Models/DarkNet.model', 'mean_removed_input', 'bn6e',
cntk.ops.functions.CloneMethod.clone, feature_var)
det1 = cntk.layers.layers.Convolution2D((3,3), 1024,
init=cntk.initializer.he_normal(), pad=True,
activation=cntk.ops.leaky_relu,
name='det1')(net)
detbn1 = cntk.layers.BatchNormalization(map_rank=1, name='detbn1')(det1)
det2 = cntk.layers.layers.Convolution2D((3,3), 1024,
init=cntk.initializer.he_normal(), pad=True,
activation=cntk.ops.leaky_relu,
name='det2')(detbn1)
detbn2 = cntk.layers.BatchNormalization(map_rank=1, name='detbn2')(det2)
det3 = cntk.layers.layers.Convolution2D((3,3), 1024,
init=cntk.initializer.he_normal(), pad = True,
activation=cntk.ops.leaky_relu,
name='det3')(detbn2)
detbn3 = cntk.layers.BatchNormalization(map_rank=1, name='detbn3')(det3)
z = cntk.layers.layers.Convolution2D((1,1), (5+num_classes) * num_anchors,
init=cntk.initializer.normal(0.01), pad = True,
name='output')(detbn3)
# loss and metric
ce = C.user_function(yolo2.Yolo2Error(z, label_var, class_size = num_classes, priors = anchor_dims))
pe = C.user_function(yolo2.Yolo2Metric(z, label_var, class_size = num_classes, priors = anchor_dims,
metricMethod = yolo2.Yolo2MetricMethod.Avg_iou))
cntk.logging.log_number_of_parameters(z) ; print()
return {
'feature': feature_var,
'label': label_var,
'ce' : ce,
'pe' : pe,
'output': z
}
# Create trainer
def create_trainer(network, epoch_size, num_quantization_bits, printer, block_size, warm_up):
# Set learning parameters
lr_per_mb = [0.001]*25 + [0.0001]*25 + [0.00001]*25 + [0.000001]*25 + [0.0000001]
lr_schedule = C.learning_rate_schedule(lr_per_mb, unit=C.learners.UnitType.minibatch, epoch_size=epoch_size)
mm_schedule = C.learners.momentum_schedule(0.9)
l2_reg_weight = 0.0005 # CNTK L2 regularization is per sample, thus same as Caffe
if block_size != None and num_quantization_bits != 32:
raise RuntimeError("Block momentum cannot be used with quantization, please remove quantized_bits option.")
# Create learner
local_learner = C.learners.momentum_sgd(network['output'].parameters, lr_schedule, mm_schedule, unit_gain=False, l2_regularization_weight=l2_reg_weight)
# Since we reuse parameter settings (learning rate, momentum) from Caffe, we set unit_gain to False to ensure consistency
# Create trainer
if block_size != None:
parameter_learner = cntk.train.distributed.block_momentum_distributed_learner(local_learner, block_size=block_size)
else:
parameter_learner = cntk.train.distributed.data_parallel_distributed_learner(local_learner, num_quantization_bits=num_quantization_bits, distributed_after=warm_up)
return C.Trainer(network['output'], (network['ce'], network['pe']), parameter_learner, printer)
# Train and test
def train_and_test(network, trainer, train_source, test_source, minibatch_size, epoch_size, restore):
# define mapping from intput streams to network inputs
input_map = {
network['feature']: train_source.streams.features,
network['label']: train_source.streams.rois
}
# Train all minibatches
cntk.train.training_session(
trainer=trainer, mb_source = train_source,
model_inputs_to_streams = input_map,
mb_size = minibatch_size,
progress_frequency=epoch_size,
checkpoint_config = C.CheckpointConfig(filename=os.path.join(model_path, model_name), restore=restore),
test_config= C.TestConfig(test_source, minibatch_size=minibatch_size)
).train()
# Train and evaluate the network.
def net_train_and_eval(train_data, train_rois, test_data, test_rois,
priors = None,
num_quantization_bits=32,
block_size=3200, warm_up=0,
minibatch_size=1,
epoch_size = 1281167,
max_epochs=1,
restore=True,
log_to_file=None,
num_mbs_per_log=None,
gen_heartbeat=True):
_cntk_py.set_computation_network_trace_level(0)
log_printer = cntk.logging.progress_print.ProgressPrinter(
freq=1,
tag='Training',
log_to_file = os.path.join(model_path, log_to_file),
num_epochs=max_epochs)
progress_printer = cntk.logging.progress_print.ProgressPrinter(freq=1, tag='Training',
num_epochs=max_epochs,test_freq=1)
network = create_yolo2net(priors)
trainer = create_trainer(network, epoch_size, num_quantization_bits,
[progress_printer, log_printer], block_size, warm_up)
train_source = create_image_mb_source(train_data, train_rois, True,
total_number_of_samples=max_epochs * epoch_size)
    test_source = create_image_mb_source(test_data, test_rois, False,
total_number_of_samples=cntk.io.FULL_DATA_SWEEP)
train_and_test(network,
trainer,
train_source,
test_source,
minibatch_size,
epoch_size,
restore)
#
# get train sample size evaluate sample size
#
def get_sample_counts(train_file, test_file):
counts = [0, 0]
if os.path.exists(train_file):
ff = open(train_file)
counts[0] = len(ff.readlines())
ff.close()
if os.path.exists(test_file):
ff = open(test_file)
counts[1] = len(ff.readlines())
ff.close()
return counts
def open_anchor_file(anchor_file):
anchors = []
file = open(anchor_file)
lines = file.readlines()
for line in lines:
if len(line.strip()) > 0:
dims = line.strip().split("\t")
anchors.append([float(dims[0]), float(dims[1])])
file.close()
return np.array(anchors).astype(np.float32)
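# --- Hedged usage sketch (added, not part of the original script) -----------
# open_anchor_file expects one tab-separated "width<TAB>height" pair per line.
# The file name below is hypothetical and only demonstrates the format.
def _demo_anchor_roundtrip(path='anchor_demo.txt'):
    with open(path, 'w') as f:
        f.write("1.08\t1.19\n3.42\t4.41\n")
    dims = open_anchor_file(path)
    os.remove(path)
    return dims.shape, dims.dtype                    # (2, 2), float32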
if __name__=='__main__':
anchor_data = 'anchor.txt'
if not os.path.exists(anchor_data):
raise RuntimeError("File '%s' does not exist." %anchor_data)
anchors = open_anchor_file(anchor_data)
if anchors.shape[0] < num_anchors:
raise RuntimeError("Anchor dimension is less than %s" %num_anchors)
# network = create_yolo2net(anchors)
# cntk.logging.graph.plot(network['output'], 'yolo2.png')
train_data = 'train.txt'
train_rois = 'train.rois.txt'
test_data = 'train.txt'
test_rois = 'train.rois.txt'
sample_size = get_sample_counts(train_data, test_data)
net_train_and_eval(train_data, train_rois, test_data, test_rois,
priors = anchors,
epoch_size=sample_size[0],
block_size = None,
minibatch_size = 32,
max_epochs = 130,
log_to_file = 'Yolo2Net.log')
# Must call MPI finalize when process exit without exceptions
cntk.train.distributed.Communicator.finalize()
|
[
"os.path.exists",
"yolo2.Yolo2Metric",
"_cntk_py.set_computation_network_trace_level",
"cntk.Trainer",
"os.path.join",
"cntk.learners.momentum_sgd",
"yolo2.Yolo2Error",
"cntk.io.transforms.color",
"cntk.learners.momentum_schedule",
"cntk.input_variable",
"cntk.io.transforms.scale",
"numpy.array",
"os.path.abspath",
"CloneModel.CloneModel",
"cntk.learning_rate_schedule",
"cntk.TestConfig"
] |
[((469, 501), 'os.path.join', 'os.path.join', (['abs_path', '"""Models"""'], {}), "(abs_path, 'Models')\n", (481, 501), False, 'import os\n'), ((429, 454), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (444, 454), False, 'import os\n'), ((2267, 2326), 'cntk.input_variable', 'C.input_variable', (['(num_channels, image_height, image_width)'], {}), '((num_channels, image_height, image_width))\n', (2283, 2326), True, 'import cntk as C\n'), ((2343, 2387), 'cntk.input_variable', 'C.input_variable', (['(num_truth_boxes, box_dim)'], {}), '((num_truth_boxes, box_dim))\n', (2359, 2387), True, 'import cntk as C\n'), ((2399, 2529), 'CloneModel.CloneModel', 'CloneModel.CloneModel', (['"""Models/DarkNet.model"""', '"""mean_removed_input"""', '"""bn6e"""', 'cntk.ops.functions.CloneMethod.clone', 'feature_var'], {}), "('Models/DarkNet.model', 'mean_removed_input', 'bn6e',\n cntk.ops.functions.CloneMethod.clone, feature_var)\n", (2420, 2529), False, 'import CloneModel\n'), ((4334, 4432), 'cntk.learning_rate_schedule', 'C.learning_rate_schedule', (['lr_per_mb'], {'unit': 'C.learners.UnitType.minibatch', 'epoch_size': 'epoch_size'}), '(lr_per_mb, unit=C.learners.UnitType.minibatch,\n epoch_size=epoch_size)\n', (4358, 4432), True, 'import cntk as C\n'), ((4453, 4486), 'cntk.learners.momentum_schedule', 'C.learners.momentum_schedule', (['(0.9)'], {}), '(0.9)\n', (4481, 4486), True, 'import cntk as C\n'), ((4795, 4935), 'cntk.learners.momentum_sgd', 'C.learners.momentum_sgd', (["network['output'].parameters", 'lr_schedule', 'mm_schedule'], {'unit_gain': '(False)', 'l2_regularization_weight': 'l2_reg_weight'}), "(network['output'].parameters, lr_schedule,\n mm_schedule, unit_gain=False, l2_regularization_weight=l2_reg_weight)\n", (4818, 4935), True, 'import cntk as C\n'), ((5425, 5517), 'cntk.Trainer', 'C.Trainer', (["network['output']", "(network['ce'], network['pe'])", 'parameter_learner', 'printer'], {}), "(network['output'], (network['ce'], network['pe']),\n parameter_learner, printer)\n", (5434, 5517), True, 'import cntk as C\n'), ((6851, 6898), '_cntk_py.set_computation_network_trace_level', '_cntk_py.set_computation_network_trace_level', (['(0)'], {}), '(0)\n', (6895, 6898), False, 'import _cntk_py\n'), ((8143, 8169), 'os.path.exists', 'os.path.exists', (['train_file'], {}), '(train_file)\n', (8157, 8169), False, 'import os\n'), ((8267, 8292), 'os.path.exists', 'os.path.exists', (['test_file'], {}), '(test_file)\n', (8281, 8292), False, 'import os\n'), ((937, 963), 'os.path.exists', 'os.path.exists', (['image_file'], {}), '(image_file)\n', (951, 963), False, 'import os\n'), ((1044, 1069), 'os.path.exists', 'os.path.exists', (['rois_file'], {}), '(rois_file)\n', (1058, 1069), False, 'import os\n'), ((1239, 1343), 'cntk.io.transforms.scale', 'xforms.scale', ([], {'width': 'image_width', 'height': 'image_height', 'channels': 'num_channels', 'interpolations': '"""linear"""'}), "(width=image_width, height=image_height, channels=num_channels,\n interpolations='linear')\n", (1251, 1343), True, 'import cntk.io.transforms as xforms\n'), ((3614, 3688), 'yolo2.Yolo2Error', 'yolo2.Yolo2Error', (['z', 'label_var'], {'class_size': 'num_classes', 'priors': 'anchor_dims'}), '(z, label_var, class_size=num_classes, priors=anchor_dims)\n', (3630, 3688), False, 'import yolo2\n'), ((3719, 3844), 'yolo2.Yolo2Metric', 'yolo2.Yolo2Metric', (['z', 'label_var'], {'class_size': 'num_classes', 'priors': 'anchor_dims', 'metricMethod': 'yolo2.Yolo2MetricMethod.Avg_iou'}), '(z, label_var, 
class_size=num_classes, priors=anchor_dims,\n metricMethod=yolo2.Yolo2MetricMethod.Avg_iou)\n', (3736, 3844), False, 'import yolo2\n'), ((8811, 8838), 'os.path.exists', 'os.path.exists', (['anchor_data'], {}), '(anchor_data)\n', (8825, 8838), False, 'import os\n'), ((1433, 1512), 'cntk.io.transforms.color', 'xforms.color', ([], {'brightness_radius': '(0.2)', 'contrast_radius': '(0.2)', 'saturation_radius': '(0.2)'}), '(brightness_radius=0.2, contrast_radius=0.2, saturation_radius=0.2)\n', (1445, 1512), True, 'import cntk.io.transforms as xforms\n'), ((7025, 7062), 'os.path.join', 'os.path.join', (['model_path', 'log_to_file'], {}), '(model_path, log_to_file)\n', (7037, 7062), False, 'import os\n'), ((8701, 8718), 'numpy.array', 'np.array', (['anchors'], {}), '(anchors)\n', (8709, 8718), True, 'import numpy as np\n'), ((6197, 6253), 'cntk.TestConfig', 'C.TestConfig', (['test_source'], {'minibatch_size': 'minibatch_size'}), '(test_source, minibatch_size=minibatch_size)\n', (6209, 6253), True, 'import cntk as C\n'), ((6120, 6156), 'os.path.join', 'os.path.join', (['model_path', 'model_name'], {}), '(model_path, model_name)\n', (6132, 6156), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 13 09:52:47 2015
@author: wirkert
"""
import unittest
import os
import numpy as np
import msi.msimanipulations as msimani
from msi.io.nrrdreader import NrrdReader
from msi.io.nrrdwriter import NrrdWriter
from msi.test import helpers
class TestNrrdWriter(unittest.TestCase):
def setUp(self):
# setup file and the path where it shall be written to
self.msi = helpers.getFakeMsi()
self.fileUriToWrite = "testfile.nrrd"
def tearDown(self):
# remove the hopefully written file
os.remove(self.fileUriToWrite)
def test_imageWriterCreatesFile(self):
writer = NrrdWriter(self.msi)
writer.write(self.fileUriToWrite)
self.assertTrue(os.path.isfile(self.fileUriToWrite),
"file was written to disk")
def test_imageWriterCreatesCorrectFile(self):
writer = NrrdWriter(self.msi)
writer.write(self.fileUriToWrite)
reader = NrrdReader()
msi = reader.read(self.fileUriToWrite)
self.assertTrue(msi == helpers.getFakeMsi(),
"image correctly written and read")
def test_write_one_d_image_works(self):
writer = NrrdWriter(self.msi)
msimani.calculate_mean_spectrum(self.msi)
writer.write(self.fileUriToWrite)
reader = NrrdReader()
msi = reader.read(self.fileUriToWrite)
np.testing.assert_array_equal(msi.get_image(),
np.array([1, 2, 3, 4, 5]),
"1d image correctly written and read")
|
[
"msi.io.nrrdreader.NrrdReader",
"os.remove",
"os.path.isfile",
"numpy.array",
"msi.msimanipulations.calculate_mean_spectrum",
"msi.test.helpers.getFakeMsi",
"msi.io.nrrdwriter.NrrdWriter"
] |
[((430, 450), 'msi.test.helpers.getFakeMsi', 'helpers.getFakeMsi', ([], {}), '()\n', (448, 450), False, 'from msi.test import helpers\n'), ((574, 604), 'os.remove', 'os.remove', (['self.fileUriToWrite'], {}), '(self.fileUriToWrite)\n', (583, 604), False, 'import os\n'), ((666, 686), 'msi.io.nrrdwriter.NrrdWriter', 'NrrdWriter', (['self.msi'], {}), '(self.msi)\n', (676, 686), False, 'from msi.io.nrrdwriter import NrrdWriter\n'), ((911, 931), 'msi.io.nrrdwriter.NrrdWriter', 'NrrdWriter', (['self.msi'], {}), '(self.msi)\n', (921, 931), False, 'from msi.io.nrrdwriter import NrrdWriter\n'), ((992, 1004), 'msi.io.nrrdreader.NrrdReader', 'NrrdReader', ([], {}), '()\n', (1002, 1004), False, 'from msi.io.nrrdreader import NrrdReader\n'), ((1242, 1262), 'msi.io.nrrdwriter.NrrdWriter', 'NrrdWriter', (['self.msi'], {}), '(self.msi)\n', (1252, 1262), False, 'from msi.io.nrrdwriter import NrrdWriter\n'), ((1271, 1312), 'msi.msimanipulations.calculate_mean_spectrum', 'msimani.calculate_mean_spectrum', (['self.msi'], {}), '(self.msi)\n', (1302, 1312), True, 'import msi.msimanipulations as msimani\n'), ((1373, 1385), 'msi.io.nrrdreader.NrrdReader', 'NrrdReader', ([], {}), '()\n', (1383, 1385), False, 'from msi.io.nrrdreader import NrrdReader\n'), ((753, 788), 'os.path.isfile', 'os.path.isfile', (['self.fileUriToWrite'], {}), '(self.fileUriToWrite)\n', (767, 788), False, 'import os\n'), ((1526, 1551), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (1534, 1551), True, 'import numpy as np\n'), ((1083, 1103), 'msi.test.helpers.getFakeMsi', 'helpers.getFakeMsi', ([], {}), '()\n', (1101, 1103), False, 'from msi.test import helpers\n')]
|
import matplotlib.pyplot as pl
import os
import numpy as np
from ticle.data.dataHandler import normalizeData,load_file
from ticle.analysis.analysis import get_phases,normalize_phase
pl.rc('xtick', labelsize='x-small')
pl.rc('ytick', labelsize='x-small')
pl.rc('font', family='serif')
pl.rcParams.update({'font.size': 20})
pl.tight_layout()
path = os.getcwd()
phase_dir = f"{path}/results/phase_plots"
try:
os.makedirs(phase_dir)
except FileExistsError:
pass
data_dir = f"{path}/data/"
data_list_file = f"{data_dir}/dataList.txt"
data_list = np.loadtxt(data_list_file)
for data in data_list:
star = f"0{int(data[0])}"
file_name = f"{data_dir}/{star}/{star}_LC_destepped.txt"
res_dir = f"{phase_dir}/{star}"
try:
os.mkdir(res_dir)
except FileExistsError:
pass
t_series = load_file(file_name)
t_series = normalizeData(t_series)
p = [(f"Phaseplot {star} - literature","literature",data[2]),
(f"Phaseplot {star} - P={data[1]} days",f"result",data[1])]
for title,save_text,period in p:
masks = get_phases(t_series,period)
fig_phase = pl.figure(figsize=(10,7))
for i in masks:
plot_data = normalize_phase(np.array((t_series[0][i],t_series[1][i])))
pl.plot(plot_data[0],plot_data[1],linewidth = 1)
pl.xlabel("Phase")
pl.ylabel("Flux")
pl.title(title)
fig_phase.savefig(f"{res_dir}/{star}_{save_text}_phase_.pdf")
fig_lightcurve = pl.figure(figsize=(10,7))
for i in masks:
pl.plot(t_series[0][i],t_series[1][i],linewidth = 1)
pl.xlabel("Period(days)")
pl.ylabel("Flux")
pl.title(f"{star} Lightcurve {save_text}")
fig_lightcurve.savefig(f"{res_dir}/{star}_{save_text}_lightcurve.pdf")
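# --- Hedged illustration (added, not part of the original script) -----------
# Generic phase folding for reference only; ticle's get_phases/normalize_phase
# may differ in detail. For times t and a trial period P, the phase is the
# fractional part of t / P:
def _fold_phase(t, period):
    return (np.asarray(t) % period) / period
# e.g. _fold_phase([0.0, 1.5, 3.2], 2.0) -> array([0.  , 0.75, 0.6 ])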
|
[
"os.makedirs",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"ticle.data.dataHandler.load_file",
"os.getcwd",
"ticle.data.dataHandler.normalizeData",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.figure",
"ticle.analysis.analysis.get_phases",
"numpy.array",
"os.mkdir",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"numpy.loadtxt",
"matplotlib.pyplot.rc"
] |
[((184, 219), 'matplotlib.pyplot.rc', 'pl.rc', (['"""xtick"""'], {'labelsize': '"""x-small"""'}), "('xtick', labelsize='x-small')\n", (189, 219), True, 'import matplotlib.pyplot as pl\n'), ((220, 255), 'matplotlib.pyplot.rc', 'pl.rc', (['"""ytick"""'], {'labelsize': '"""x-small"""'}), "('ytick', labelsize='x-small')\n", (225, 255), True, 'import matplotlib.pyplot as pl\n'), ((256, 285), 'matplotlib.pyplot.rc', 'pl.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (261, 285), True, 'import matplotlib.pyplot as pl\n'), ((286, 323), 'matplotlib.pyplot.rcParams.update', 'pl.rcParams.update', (["{'font.size': 20}"], {}), "({'font.size': 20})\n", (304, 323), True, 'import matplotlib.pyplot as pl\n'), ((324, 341), 'matplotlib.pyplot.tight_layout', 'pl.tight_layout', ([], {}), '()\n', (339, 341), True, 'import matplotlib.pyplot as pl\n'), ((350, 361), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (359, 361), False, 'import os\n'), ((554, 580), 'numpy.loadtxt', 'np.loadtxt', (['data_list_file'], {}), '(data_list_file)\n', (564, 580), True, 'import numpy as np\n'), ((414, 436), 'os.makedirs', 'os.makedirs', (['phase_dir'], {}), '(phase_dir)\n', (425, 436), False, 'import os\n'), ((825, 845), 'ticle.data.dataHandler.load_file', 'load_file', (['file_name'], {}), '(file_name)\n', (834, 845), False, 'from ticle.data.dataHandler import normalizeData, load_file\n'), ((861, 884), 'ticle.data.dataHandler.normalizeData', 'normalizeData', (['t_series'], {}), '(t_series)\n', (874, 884), False, 'from ticle.data.dataHandler import normalizeData, load_file\n'), ((750, 767), 'os.mkdir', 'os.mkdir', (['res_dir'], {}), '(res_dir)\n', (758, 767), False, 'import os\n'), ((1076, 1104), 'ticle.analysis.analysis.get_phases', 'get_phases', (['t_series', 'period'], {}), '(t_series, period)\n', (1086, 1104), False, 'from ticle.analysis.analysis import get_phases, normalize_phase\n'), ((1125, 1151), 'matplotlib.pyplot.figure', 'pl.figure', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (1134, 1151), True, 'import matplotlib.pyplot as pl\n'), ((1329, 1347), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['"""Phase"""'], {}), "('Phase')\n", (1338, 1347), True, 'import matplotlib.pyplot as pl\n'), ((1356, 1373), 'matplotlib.pyplot.ylabel', 'pl.ylabel', (['"""Flux"""'], {}), "('Flux')\n", (1365, 1373), True, 'import matplotlib.pyplot as pl\n'), ((1382, 1397), 'matplotlib.pyplot.title', 'pl.title', (['title'], {}), '(title)\n', (1390, 1397), True, 'import matplotlib.pyplot as pl\n'), ((1494, 1520), 'matplotlib.pyplot.figure', 'pl.figure', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (1503, 1520), True, 'import matplotlib.pyplot as pl\n'), ((1619, 1644), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['"""Period(days)"""'], {}), "('Period(days)')\n", (1628, 1644), True, 'import matplotlib.pyplot as pl\n'), ((1653, 1670), 'matplotlib.pyplot.ylabel', 'pl.ylabel', (['"""Flux"""'], {}), "('Flux')\n", (1662, 1670), True, 'import matplotlib.pyplot as pl\n'), ((1679, 1721), 'matplotlib.pyplot.title', 'pl.title', (['f"""{star} Lightcurve {save_text}"""'], {}), "(f'{star} Lightcurve {save_text}')\n", (1687, 1721), True, 'import matplotlib.pyplot as pl\n'), ((1271, 1319), 'matplotlib.pyplot.plot', 'pl.plot', (['plot_data[0]', 'plot_data[1]'], {'linewidth': '(1)'}), '(plot_data[0], plot_data[1], linewidth=1)\n', (1278, 1319), True, 'import matplotlib.pyplot as pl\n'), ((1557, 1609), 'matplotlib.pyplot.plot', 'pl.plot', (['t_series[0][i]', 't_series[1][i]'], {'linewidth': '(1)'}), '(t_series[0][i], 
t_series[1][i], linewidth=1)\n', (1564, 1609), True, 'import matplotlib.pyplot as pl\n'), ((1216, 1258), 'numpy.array', 'np.array', (['(t_series[0][i], t_series[1][i])'], {}), '((t_series[0][i], t_series[1][i]))\n', (1224, 1258), True, 'import numpy as np\n')]
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# Extended by <NAME>
# --------------------------------------------------------
import os
import cv2
import numpy as np
import torch
import torch.utils.data as data
import xml.etree.ElementTree as ET
from utils.bbox import quad_2_rbox
class VOCDataset(data.Dataset):
""""""
def __init__(self,
dataset='trainval.txt',
augment = False,
level = 1,
random_flip=True):
self.image_set = dataset
        # str.strip() removes characters, not a suffix; take everything before '/ImageSets/' as the VOC root
        self.data_path = self.image_set.split('/ImageSets/')[0]
self.image_ext = [".jpg"]
self.image_list = self._load_image_names()
self.classes = ('__background__', 'aeroplane','bicycle','bird','boat',
'bottle','bus','car','cat','chair','cow','diningtable',
'dog','horse','motorbike','person','pottedplant',
'sheep','sofa','train','tvmonitor')
self.num_classes = len(self.classes)
self.class_to_ind = dict(zip(self.classes, range(self.num_classes)))
self.random_flip = random_flip
def __len__(self):
return len(self.image_list)
def __getitem__(self, index):
im_path = self._image_path_from_index(self.image_list[index])
im = cv2.cvtColor(cv2.imread(im_path, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)
roidb = self._load_pascal_annotation(self.image_list[index])
gt_inds = np.where(roidb['gt_classes'] != 0)[0]
bboxes = roidb['boxes'][gt_inds, :]
classes = roidb['gt_classes'][gt_inds]
if self.random_flip and np.random.rand() >= 0.5:
im = cv2.flip(im, 1, None)
oldxs = bboxes[:, 0::2].copy()
bboxes[:, 0::2] = im.shape[1] - oldxs - 1
gt_boxes = np.empty((len(gt_inds), 6), dtype=np.float32)
for i, bbox in enumerate(bboxes):
gt_boxes[i, :5] = quad_2_rbox(np.array(bbox))
gt_boxes[i, 5] = classes[i]
return {'image': im, 'boxes': gt_boxes}
def _load_image_names(self):
"""
Load the names listed in this dataset's image set file.
"""
image_set_file = self.image_set
if not os.path.exists(image_set_file):
'Path does not exist: {}'.format(image_set_file)
image_names = []
else:
with open(image_set_file) as f:
image_names = [x.strip() for x in f.readlines()]
return image_names
def _image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
image_path = None
image_exist = False
for image_ext in self.image_ext:
image_path = os.path.join(self.data_path, 'JPEGImages', index + image_ext)
if os.path.exists(image_path):
image_exist = True
break
if not image_exist:
raise Exception('Image path does not exist: {}'.format(
os.path.join(self.data_path, 'JPEGImages', index))
)
return image_path
def _load_pascal_annotation(self, index):
"""
Load image and bounding boxes info from XML file in the PASCAL VOC format.
"""
filename = os.path.join(self.data_path, 'Annotations', index + '.xml')
tree = ET.parse(filename)
objs = tree.findall('object')
boxes, gt_classes = [], []
for _, obj in enumerate(objs):
difficult = int(obj.find('difficult').text)
is_latin = obj.find('language') is None or obj.find('language').text == 'Latin'
bnd_box = obj.find('bndbox')
box = [
float(bnd_box.find('xmin').text),
float(bnd_box.find('ymin').text),
float(bnd_box.find('xmax').text),
float(bnd_box.find('ymin').text),
float(bnd_box.find('xmax').text),
float(bnd_box.find('ymax').text),
float(bnd_box.find('xmin').text),
float(bnd_box.find('ymax').text),
]
label = self.class_to_ind[obj.find('name').text.lower().strip()]
if difficult:
continue
# if self.only_latin and not is_latin:
# continue
boxes.append(box)
gt_classes.append(label)
return {'boxes': np.array(boxes, dtype=np.int32), 'gt_classes': np.array(gt_classes)}
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self._image_path_from_index(self.image_list[i])
def return_class(self, id):
id = int(id)
return self.classes[id]
if __name__ == '__main__':
pass
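    # --- Hedged usage sketch (added; the path below is hypothetical) --------
    # Each sample is a dict with an RGB image and an (N, 6) array holding a
    # 5-parameter rotated box (from quad_2_rbox) plus the class index.
    _demo_list = 'VOCdevkit/VOC2007/ImageSets/Main/trainval.txt'
    if os.path.exists(_demo_list):
        demo_ds = VOCDataset(dataset=_demo_list)
        demo_sample = demo_ds[0]
        print(demo_sample['image'].shape, demo_sample['boxes'].shape)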
|
[
"os.path.exists",
"xml.etree.ElementTree.parse",
"cv2.flip",
"numpy.random.rand",
"numpy.where",
"os.path.join",
"numpy.array",
"cv2.imread"
] |
[((3434, 3493), 'os.path.join', 'os.path.join', (['self.data_path', '"""Annotations"""', "(index + '.xml')"], {}), "(self.data_path, 'Annotations', index + '.xml')\n", (3446, 3493), False, 'import os\n'), ((3509, 3527), 'xml.etree.ElementTree.parse', 'ET.parse', (['filename'], {}), '(filename)\n', (3517, 3527), True, 'import xml.etree.ElementTree as ET\n'), ((1466, 1503), 'cv2.imread', 'cv2.imread', (['im_path', 'cv2.IMREAD_COLOR'], {}), '(im_path, cv2.IMREAD_COLOR)\n', (1476, 1503), False, 'import cv2\n'), ((1611, 1645), 'numpy.where', 'np.where', (["(roidb['gt_classes'] != 0)"], {}), "(roidb['gt_classes'] != 0)\n", (1619, 1645), True, 'import numpy as np\n'), ((1815, 1836), 'cv2.flip', 'cv2.flip', (['im', '(1)', 'None'], {}), '(im, 1, None)\n', (1823, 1836), False, 'import cv2\n'), ((2365, 2395), 'os.path.exists', 'os.path.exists', (['image_set_file'], {}), '(image_set_file)\n', (2379, 2395), False, 'import os\n'), ((2896, 2957), 'os.path.join', 'os.path.join', (['self.data_path', '"""JPEGImages"""', '(index + image_ext)'], {}), "(self.data_path, 'JPEGImages', index + image_ext)\n", (2908, 2957), False, 'import os\n'), ((2973, 2999), 'os.path.exists', 'os.path.exists', (['image_path'], {}), '(image_path)\n', (2987, 2999), False, 'import os\n'), ((4563, 4594), 'numpy.array', 'np.array', (['boxes'], {'dtype': 'np.int32'}), '(boxes, dtype=np.int32)\n', (4571, 4594), True, 'import numpy as np\n'), ((4610, 4630), 'numpy.array', 'np.array', (['gt_classes'], {}), '(gt_classes)\n', (4618, 4630), True, 'import numpy as np\n'), ((1773, 1789), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1787, 1789), True, 'import numpy as np\n'), ((2084, 2098), 'numpy.array', 'np.array', (['bbox'], {}), '(bbox)\n', (2092, 2098), True, 'import numpy as np\n'), ((3170, 3219), 'os.path.join', 'os.path.join', (['self.data_path', '"""JPEGImages"""', 'index'], {}), "(self.data_path, 'JPEGImages', index)\n", (3182, 3219), False, 'import os\n')]
|
# Machine Learning Online Class - Exercise 2: Logistic Regression
#
# Instructions
# ------------
#
# This file contains code that helps you get started on the logistic
# regression exercise. You will need to complete the following functions
# in this exericse:
#
# sigmoid.py
# costFunction.py
# predict.py
# costFunctionReg.py
#
# For this exercise, you will not need to change any code in this file,
# or any other files other than those mentioned above.
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt
from plotData import *
import costFunction as cf
import plotDecisionBoundary as pdb
import predict as predict
from sigmoid import *
plt.ion()
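# --- Hedged reference sketch (added, not part of the original exercise) -----
# One possible vectorised implementation of the sigmoid that sigmoid.py is
# expected to provide, g(z) = 1 / (1 + e^(-z)); renamed here to avoid
# shadowing the imported function.
def _reference_sigmoid(z):
    return 1.0 / (1.0 + np.exp(-np.asarray(z)))
# _reference_sigmoid(0.0) == 0.5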
# Load data
# The first two columns contain the exam scores and the third column contains the label.
data = np.loadtxt('ex2data1.txt', delimiter=',')
print('plot_decision_boundary data[0, 0:1] = \n{}'.format(data[0, 0:1]))
print('plot_decision_boundary data[0, 0:2] = \n{}'.format(data[0, 0:2]))
print('plot_decision_boundary data[0, 0:3] = \n{}'.format(data[0, 0:3]))
print('plot_decision_boundary data[0, 1:1] = \n{}'.format(data[0, 1:1]))
print('plot_decision_boundary data[0, 1:2] = \n{}'.format(data[0, 1:2]))
print('plot_decision_boundary data[0, 1:3] = \n{}'.format(data[0, 1:3]))
print('plot_decision_boundary data[0, 2:1] = \n{}'.format(data[0, 2:1]))
print('plot_decision_boundary data[0, 2:2] = \n{}'.format(data[0, 2:2]))
print('plot_decision_boundary data[0, 2:3] = \n{}'.format(data[0, 2:3]))
X = data[:, 0:2]
y = data[:, 2]
# ===================== Part 1: Plotting =====================
# We start the exercise by first plotting the data to understand the
# the problem we are working with.
print('Plotting Data with + indicating (y = 1) examples and o indicating (y = 0) examples.')
plot_data(X, y)
plt.axis([30, 100, 30, 100])
# Specified in plot order.
plt.legend(['Admitted', 'Not admitted'], loc=1)
plt.xlabel('Exam 1 score')
plt.ylabel('Exam 2 score')
input('Program paused. Press ENTER to continue')
# ===================== Part 2: Compute Cost and Gradient =====================
# In this part of the exercise, you will implement the cost and gradient
# for logistic regression. You need to complete the code in
# costFunction.py
# Setup the data array appropriately, and add ones for the intercept term
(m, n) = X.shape
# Add intercept term
X = np.c_[np.ones(m), X]
# Initialize fitting parameters
initial_theta = np.zeros(n + 1)  # initialize the weight vector theta
# Compute and display initial cost and gradient
cost, grad = cf.cost_function(initial_theta, X, y)
np.set_printoptions(formatter={'float': '{: 0.4f}\n'.format})
print('Cost at initial theta (zeros): {:0.3f}'.format(cost))
print('Expected cost (approx): 0.693')
print('Gradient at initial theta (zeros): \n{}'.format(grad))
print('Expected gradients (approx): \n-0.1000\n-12.0092\n-11.2628')
# Compute and display cost and gradient with non-zero theta
test_theta = np.array([-24, 0.2, 0.2])
cost, grad = cf.cost_function(test_theta, X, y)
print('Cost at test theta (zeros): {:0.3f}'.format(cost))
print('Expected cost (approx): 0.218')
print('Gradient at test theta: \n{}'.format(grad))
print('Expected gradients (approx): \n0.043\n2.566\n2.647')
input('Program paused. Press ENTER to continue')
# ===================== Part 3: Optimizing using fmin_bfgs =====================
# In this exercise, you will use a built-in function (opt.fmin_bfgs) to find the
# optimal parameters theta
def cost_func(t):
return cf.cost_function(t, X, y)[0]
def grad_func(t):
return cf.cost_function(t, X, y)[1]
# Run fmin_bfgs to obtain the optimal theta
theta, cost, *unused = opt.fmin_bfgs(f=cost_func, fprime=grad_func, x0=initial_theta, maxiter=400, full_output=True, disp=False)
print('Cost at theta found by fmin: {:0.4f}'.format(cost))
print('Expected cost (approx): 0.203')
print('theta: \n{}'.format(theta))
print('Expected Theta (approx): \n-25.161\n0.206\n0.201')
# Plot the decision boundary
pdb.plot_decision_boundary(theta, X, y)
plt.xlabel('Exam 1 score')
plt.ylabel('Exam 2 score')
input('Program paused. Press ENTER to continue')
# ===================== Part 4: Predict and Accuracies =====================
# After learning the parameters, you'll like to use it to predict the outcomes
# on unseen data. In this part, you will use the logistic regression model
# to predict the probability that a student with score 45 on exam 1 and
# score 85 on exam 2 will be admitted
#
# Furthermore, you will compute the training and test set accuracies of our model.
#
# Your task is to complete the code in predict.py
# Predict probability for a student with score 45 on exam 1
# and score 85 on exam 2
prob = sigmoid(np.array([1, 45, 85]).dot(theta))
print('For a student with scores 45 and 85, we predict an admission probability of {:0.4f}'.format(prob))
print('Expected value : 0.775 +/- 0.002')
# Compute the accuracy on our training set
p = predict.predict(theta, X)
print('Train accuracy: {}'.format(np.mean(y == p) * 100))
print('Expected accuracy (approx): 89.0')
input('ex2 Finished. Press ENTER to exit')
|
[
"numpy.mean",
"predict.predict",
"numpy.ones",
"matplotlib.pyplot.ylabel",
"scipy.optimize.fmin_bfgs",
"matplotlib.pyplot.xlabel",
"plotDecisionBoundary.plot_decision_boundary",
"numpy.array",
"numpy.zeros",
"costFunction.cost_function",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.axis",
"numpy.loadtxt",
"matplotlib.pyplot.legend",
"numpy.set_printoptions"
] |
[((696, 705), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (703, 705), True, 'import matplotlib.pyplot as plt\n'), ((814, 855), 'numpy.loadtxt', 'np.loadtxt', (['"""ex2data1.txt"""'], {'delimiter': '""","""'}), "('ex2data1.txt', delimiter=',')\n", (824, 855), True, 'import numpy as np\n'), ((1827, 1855), 'matplotlib.pyplot.axis', 'plt.axis', (['[30, 100, 30, 100]'], {}), '([30, 100, 30, 100])\n', (1835, 1855), True, 'import matplotlib.pyplot as plt\n'), ((1892, 1939), 'matplotlib.pyplot.legend', 'plt.legend', (["['Admitted', 'Not admitted']"], {'loc': '(1)'}), "(['Admitted', 'Not admitted'], loc=1)\n", (1902, 1939), True, 'import matplotlib.pyplot as plt\n'), ((1940, 1966), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Exam 1 score"""'], {}), "('Exam 1 score')\n", (1950, 1966), True, 'import matplotlib.pyplot as plt\n'), ((1967, 1993), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Exam 2 score"""'], {}), "('Exam 2 score')\n", (1977, 1993), True, 'import matplotlib.pyplot as plt\n'), ((2464, 2479), 'numpy.zeros', 'np.zeros', (['(n + 1)'], {}), '(n + 1)\n', (2472, 2479), True, 'import numpy as np\n'), ((2555, 2592), 'costFunction.cost_function', 'cf.cost_function', (['initial_theta', 'X', 'y'], {}), '(initial_theta, X, y)\n', (2571, 2592), True, 'import costFunction as cf\n'), ((2594, 2655), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'formatter': "{'float': '{: 0.4f}\\n'.format}"}), "(formatter={'float': '{: 0.4f}\\n'.format})\n", (2613, 2655), True, 'import numpy as np\n'), ((2961, 2986), 'numpy.array', 'np.array', (['[-24, 0.2, 0.2]'], {}), '([-24, 0.2, 0.2])\n', (2969, 2986), True, 'import numpy as np\n'), ((3000, 3034), 'costFunction.cost_function', 'cf.cost_function', (['test_theta', 'X', 'y'], {}), '(test_theta, X, y)\n', (3016, 3034), True, 'import costFunction as cf\n'), ((3673, 3782), 'scipy.optimize.fmin_bfgs', 'opt.fmin_bfgs', ([], {'f': 'cost_func', 'fprime': 'grad_func', 'x0': 'initial_theta', 'maxiter': '(400)', 'full_output': '(True)', 'disp': '(False)'}), '(f=cost_func, fprime=grad_func, x0=initial_theta, maxiter=400,\n full_output=True, disp=False)\n', (3686, 3782), True, 'import scipy.optimize as opt\n'), ((3995, 4034), 'plotDecisionBoundary.plot_decision_boundary', 'pdb.plot_decision_boundary', (['theta', 'X', 'y'], {}), '(theta, X, y)\n', (4021, 4034), True, 'import plotDecisionBoundary as pdb\n'), ((4036, 4062), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Exam 1 score"""'], {}), "('Exam 1 score')\n", (4046, 4062), True, 'import matplotlib.pyplot as plt\n'), ((4063, 4089), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Exam 2 score"""'], {}), "('Exam 2 score')\n", (4073, 4089), True, 'import matplotlib.pyplot as plt\n'), ((4951, 4976), 'predict.predict', 'predict.predict', (['theta', 'X'], {}), '(theta, X)\n', (4966, 4976), True, 'import predict as predict\n'), ((2400, 2410), 'numpy.ones', 'np.ones', (['m'], {}), '(m)\n', (2407, 2410), True, 'import numpy as np\n'), ((3515, 3540), 'costFunction.cost_function', 'cf.cost_function', (['t', 'X', 'y'], {}), '(t, X, y)\n', (3531, 3540), True, 'import costFunction as cf\n'), ((3575, 3600), 'costFunction.cost_function', 'cf.cost_function', (['t', 'X', 'y'], {}), '(t, X, y)\n', (3591, 3600), True, 'import costFunction as cf\n'), ((4721, 4742), 'numpy.array', 'np.array', (['[1, 45, 85]'], {}), '([1, 45, 85])\n', (4729, 4742), True, 'import numpy as np\n'), ((5012, 5027), 'numpy.mean', 'np.mean', (['(y == p)'], {}), '(y == p)\n', (5019, 5027), True, 'import numpy as np\n')]
|
#poly_gauss_coil model
#conversion of Poly_GaussCoil.py
#converted by <NAME>, Mar 2016
r"""
This empirical model describes the scattering from *polydisperse* polymer
chains in theta solvents or polymer melts, assuming a Schulz-Zimm type
molecular weight distribution.
To describe the scattering from *monodisperse* polymer chains, see the
:ref:`mono-gauss-coil` model.
Definition
----------
.. math::
I(q) = \text{scale} \cdot I_0 \cdot P(q) + \text{background}
where
.. math::
I_0 &= \phi_\text{poly} \cdot V \cdot (\rho_\text{poly}-\rho_\text{solv})^2 \\
P(q) &= 2 [(1 + UZ)^{-1/U} + Z - 1] / [(1 + U) Z^2] \\
Z &= [(q R_g)^2] / (1 + 2U) \\
U &= (Mw / Mn) - 1 = \text{polydispersity ratio} - 1 \\
V &= M / (N_A \delta)
Here, $\phi_\text{poly}$, is the volume fraction of polymer, $V$ is the
volume of a polymer coil, $M$ is the molecular weight of the polymer,
$N_A$ is Avogadro's Number, $\delta$ is the bulk density of the polymer,
$\rho_\text{poly}$ is the sld of the polymer, $\rho_\text{solv}$ is the
sld of the solvent, and $R_g$ is the radius of gyration of the polymer coil.
The 2D scattering intensity is calculated in the same way as the 1D,
but where the $q$ vector is redefined as
.. math::
q = \sqrt{q_x^2 + q_y^2}
References
----------
.. [#] O Glatter and O Kratky (editors), *Small Angle X-ray Scattering*, Academic Press, (1982) Page 404
.. [#] <NAME>, <NAME>, *Polymers and Neutron Scattering*, Oxford Science Publications, (1996)
.. [#] <NAME>, *Small Angle Neutron Scattering* in *Modern Techniques for Polymer Characterisation*, Wiley, (1999)
.. [#] http://www.ncnr.nist.gov/staff/hammouda/distance_learning/chapter_28.pdf
Authorship and Verification
----------------------------
* **Author:**
* **Last Modified by:**
* **Last Reviewed by:**
"""
import numpy as np
from numpy import inf, expm1, power
name = "poly_gauss_coil"
title = "Scattering from polydisperse polymer coils"
description = """
Evaluates the scattering from
polydisperse polymer chains.
"""
category = "shape-independent"
# pylint: disable=bad-whitespace, line-too-long
# ["name", "units", default, [lower, upper], "type", "description"],
parameters = [
["i_zero", "1/cm", 70.0, [0.0, inf], "", "Intensity at q=0"],
["rg", "Ang", 75.0, [0.0, inf], "", "Radius of gyration"],
["polydispersity", "None", 2.0, [1.0, inf], "", "Polymer Mw/Mn"],
]
# pylint: enable=bad-whitespace, line-too-long
# NB: Scale and Background are implicit parameters on every model
def Iq(q, i_zero, rg, polydispersity):
# pylint: disable = missing-docstring
u = polydispersity - 1.0
z = q**2 * (rg**2 / (1.0 + 2.0*u))
# need to trap the case of the polydispersity being 1 (ie, monodisperse!)
if polydispersity == 1.0:
result = 2.0 * (expm1(-z) + z)
index = q != 0.
result[index] /= z[index]**2
result[~index] = 1.0
else:
# Taylor series around z=0 of (2*(1+uz)^(-1/u) + z - 1) / (z^2(u+1))
p = [
#(-1 - 20*u - 155*u**2 - 580*u**3 - 1044*u**4 - 720*u**5) / 2520.,
#(+1 + 14*u + 71*u**2 + 154*u**3 + 120*u**4) / 360.,
#(-1 - 9*u - 26*u**2 - 24*u**3) / 60.,
(+1 + 5*u + 6*u**2) / 12.,
(-1 - 2*u) / 3.,
(+1),
]
result = 2.0 * (power(1.0 + u*z, -1.0/u) + z - 1.0) / (1.0 + u)
index = z > 1e-4
result[index] /= z[index]**2
result[~index] = np.polyval(p, z[~index])
return i_zero * result
Iq.vectorized = True # Iq accepts an array of q values
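# --- Hedged usage sketch (added, not part of the original model file) -------
# Evaluating Iq at the unit-test q values listed further below reproduces the
# SasView 3.1.2 reference intensities (~57.6405 and ~0.169016).
if __name__ == "__main__":
    _q_demo = np.array([0.0106939, 0.469418])
    print(Iq(_q_demo, i_zero=70.0, rg=75.0, polydispersity=2.0))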
def random():
"""Return a random parameter set for the model."""
rg = 10**np.random.uniform(0, 4)
#rg = 1e3
polydispersity = 10**np.random.uniform(0, 3)
pars = dict(
#scale=1, background=0,
i_zero=1e7, # i_zero is a simple scale
rg=rg,
polydispersity=polydispersity,
)
return pars
demo = dict(scale=1.0,
i_zero=70.0,
rg=75.0,
polydispersity=2.0,
background=0.0)
# these unit test values taken from SasView 3.1.2
tests = [
[{'scale': 1.0, 'i_zero': 70.0, 'rg': 75.0,
'polydispersity': 2.0, 'background': 0.0},
[0.0106939, 0.469418], [57.6405, 0.169016]],
]
|
[
"numpy.expm1",
"numpy.polyval",
"numpy.power",
"numpy.random.uniform"
] |
[((3486, 3510), 'numpy.polyval', 'np.polyval', (['p', 'z[~index]'], {}), '(p, z[~index])\n', (3496, 3510), True, 'import numpy as np\n'), ((3677, 3700), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(4)'], {}), '(0, 4)\n', (3694, 3700), True, 'import numpy as np\n'), ((3740, 3763), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(3)'], {}), '(0, 3)\n', (3757, 3763), True, 'import numpy as np\n'), ((2826, 2835), 'numpy.expm1', 'expm1', (['(-z)'], {}), '(-z)\n', (2831, 2835), False, 'from numpy import inf, expm1, power\n'), ((3351, 3379), 'numpy.power', 'power', (['(1.0 + u * z)', '(-1.0 / u)'], {}), '(1.0 + u * z, -1.0 / u)\n', (3356, 3379), False, 'from numpy import inf, expm1, power\n')]
|
import os
from itertools import product
from concurrent import futures
from contextlib import closing
from datetime import datetime
import numpy as np
from . import _z5py
from .file import File, S3File
from .dataset import Dataset
from .shape_utils import normalize_slices
def product1d(inrange):
for ii in inrange:
yield ii
def blocking(shape, block_shape, roi=None, center_blocks_at_roi=False):
""" Generator for nd blocking.
Args:
shape (tuple): nd shape
block_shape (tuple): nd block shape
roi (tuple[slice]): region of interest (default: None)
center_blocks_at_roi (bool): if given a roi,
whether to center the blocks being generated
at the roi's origin (default: False)
"""
assert len(shape) == len(block_shape), "Invalid number of dimensions."
if roi is None:
# compute the ranges for the full shape
ranges = [range(sha // bsha if sha % bsha == 0 else sha // bsha + 1)
for sha, bsha in zip(shape, block_shape)]
min_coords = [0] * len(shape)
max_coords = shape
else:
# make sure that the roi is valid
roi, _ = normalize_slices(roi, shape)
ranges = [range(rr.start // bsha,
rr.stop // bsha if rr.stop % bsha == 0 else rr.stop // bsha + 1)
for rr, bsha in zip(roi, block_shape)]
min_coords = [rr.start for rr in roi]
max_coords = [rr.stop for rr in roi]
need_shift = False
if roi is not None and center_blocks_at_roi:
shift = [rr.start % bsha for rr, bsha in zip(roi, block_shape)]
need_shift = sum(shift) > 0
# product raises memory error for too large ranges,
# because input iterators are cast to tuple
# so far I have only seen this for 1d "open-ended" datasets
# and hence just implemented a workaround for this case,
# but it should be fairly easy to implement an nd version of product
# without casting to tuple for our use case using the imglib loop trick, see also
# https://stackoverflow.com/questions/8695422/why-do-i-get-a-memoryerror-with-itertools-product
try:
start_points = product(*ranges)
except MemoryError:
assert len(ranges) == 1
start_points = product1d(ranges)
for start_point in start_points:
positions = [sp * bshape for sp, bshape in zip(start_point, block_shape)]
if need_shift:
positions = [pos + sh for pos, sh in zip(positions, shift)]
if any(pos > maxc for pos, maxc in zip(positions, max_coords)):
continue
yield tuple(slice(max(pos, minc), min(pos + bsha, maxc))
for pos, bsha, minc, maxc in zip(positions, block_shape,
min_coords, max_coords))
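# --- Hedged usage sketch (added, not part of the original module) -----------
# blocking() yields tuples of slices covering `shape`; blocks at the boundary
# are clipped. The tiny shapes below are for illustration only.
def _demo_blocking():
    blocks = list(blocking((5, 4), (3, 3)))
    assert blocks[0] == (slice(0, 3), slice(0, 3))      # full interior block
    assert blocks[-1] == (slice(3, 5), slice(3, 4))     # clipped boundary block
    return blocks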
def copy_dataset_impl(f_in, f_out, in_path_in_file, out_path_in_file,
n_threads, chunks=None, block_shape=None, dtype=None,
roi=None, fit_to_roi=False, **new_compression):
""" Implementation of copy dataset.
Used to implement `copy_dataset`, `convert_to_h5` and `convert_from_h5`.
Can also be used for more flexible use cases, like copying from a zarr/n5
cloud dataset to a filesytem dataset.
Args:
f_in (File): input file object.
f_out (File): output file object.
in_path_in_file (str): name of input dataset.
out_path_in_file (str): name of output dataset.
n_threads (int): number of threads used for copying.
chunks (tuple): chunks of the output dataset.
By default same as input dataset's chunks. (default: None)
block_shape (tuple): block shape used for copying. Must be a multiple
of ``chunks``, which are used by default (default: None)
dtype (str): datatype of the output dataset, default does not change datatype (default: None).
roi (tuple[slice]): region of interest that will be copied. (default: None)
fit_to_roi (bool): if given a roi, whether to set the shape of
the output dataset to the roi's shape
and align chunks with the roi's origin. (default: False)
**new_compression: compression library and options for output dataset. If not given,
the same compression as in the input is used.
"""
ds_in = f_in[in_path_in_file]
# check if we can copy chunk by chunk
in_is_z5 = isinstance(f_in, (File, S3File))
out_is_z5 = isinstance(f_out, (File, S3File))
copy_chunks = (in_is_z5 and out_is_z5) and (chunks is None or chunks == ds_in.chunks) and (roi is None)
# get dataset metadata from input dataset if defaults were given
chunks = ds_in.chunks if chunks is None else chunks
dtype = ds_in.dtype if dtype is None else dtype
# zarr objects may not have compression attribute. if so set it to the settings sent to this function
if not hasattr(ds_in, "compression"):
ds_in.compression = new_compression
compression = new_compression.pop("compression", ds_in.compression)
compression_opts = new_compression
same_lib = in_is_z5 == out_is_z5
if same_lib and compression == ds_in.compression:
compression_opts = compression_opts if compression_opts else ds_in.compression_opts
if out_is_z5:
compression = None if compression == 'raw' else compression
compression_opts = {} if compression_opts is None else compression_opts
else:
compression_opts = {'compression_opts': None} if compression_opts is None else compression_opts
    # if we don't have a block-shape explicitly given, use the chunk size
# otherwise check that it's a multiple of chunks
if block_shape is None:
block_shape = chunks
else:
assert all(bs % ch == 0 for bs, ch in zip(block_shape, chunks)),\
"block_shape must be a multiple of chunks"
shape = ds_in.shape
# we need to create the blocking here, before the shape is potentially altered
# if fit_to_roi == True
blocks = blocking(shape, block_shape, roi, fit_to_roi)
if roi is not None:
roi, _ = normalize_slices(roi, shape)
if fit_to_roi:
shape = tuple(rr.stop - rr.start for rr in roi)
ds_out = f_out.require_dataset(out_path_in_file,
dtype=dtype,
shape=shape,
chunks=chunks,
compression=compression,
**compression_opts)
def write_single_block(bb):
data_in = ds_in[bb].astype(dtype, copy=False)
if np.sum(data_in) == 0:
return
if fit_to_roi and roi is not None:
bb = tuple(slice(b.start - rr.start, b.stop - rr.start)
for b, rr in zip(bb, roi))
ds_out[bb] = data_in
def write_single_chunk(bb):
chunk_id = tuple(b.start // ch for b, ch in zip(bb, chunks))
chunk_in = ds_in.read_chunk(chunk_id)
if chunk_in is None:
return
# check if this is a varlen chunk
varlen = tuple(chunk_in.shape) != tuple(b.stop - b.start for b in bb)
ds_out.write_chunk(chunk_id, chunk_in.astype(dtype, copy=False), varlen)
write_single = write_single_chunk if copy_chunks else write_single_block
with futures.ThreadPoolExecutor(max_workers=n_threads) as tp:
tasks = [tp.submit(write_single, bb) for bb in blocks]
[t.result() for t in tasks]
# copy attributes
in_attrs = ds_in.attrs
out_attrs = ds_out.attrs
for key, val in in_attrs.items():
out_attrs[key] = val
def copy_dataset(in_path, out_path,
in_path_in_file, out_path_in_file,
n_threads, chunks=None,
block_shape=None, dtype=None,
use_zarr_format=None, roi=None,
fit_to_roi=False, **new_compression):
""" Copy dataset, optionally change metadata.
The input dataset will be copied to the output dataset chunk by chunk.
    Allows changing chunks, datatype, file format and compression.
Can also just copy a roi.
Args:
in_path (str): path to the input file.
out_path (str): path to the output file.
in_path_in_file (str): name of input dataset.
out_path_in_file (str): name of output dataset.
n_threads (int): number of threads used for copying.
chunks (tuple): chunks of the output dataset.
By default same as input dataset's chunks. (default: None)
block_shape (tuple): block shape used for copying. Must be a multiple
of ``chunks``, which are used by default (default: None)
dtype (str): datatype of the output dataset, default does not change datatype (default: None).
use_zarr_format (bool): file format of the output file,
default does not change format (default: None).
roi (tuple[slice]): region of interest that will be copied. (default: None)
fit_to_roi (bool): if given a roi, whether to set the shape of
the output dataset to the roi's shape
and align chunks with the roi's origin. (default: False)
**new_compression: compression library and options for output dataset. If not given,
the same compression as in the input is used.
"""
f_in = File(in_path)
# check if the file format was specified
# if not, keep the format of the input file
# otherwise set the file format
is_zarr = f_in.is_zarr if use_zarr_format is None else use_zarr_format
f_out = File(out_path, use_zarr_format=is_zarr)
copy_dataset_impl(f_in, f_out, in_path_in_file, out_path_in_file,
n_threads, chunks=chunks, block_shape=block_shape,
dtype=dtype, roi=roi, fit_to_roi=fit_to_roi,
**new_compression)
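# Illustrative sketch, not part of the original module: a typical call to
# `copy_dataset`, re-chunking a dataset into a gzip-compressed copy. The file
# and dataset names are hypothetical placeholders.
def _example_copy_dataset():
    copy_dataset('input.n5', 'output.n5',
                 'volumes/raw', 'volumes/raw',
                 n_threads=4, chunks=(64, 64, 64),
                 compression='gzip')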
def copy_group(in_path, out_path, in_path_in_file, out_path_in_file, n_threads):
""" Copy group recursively.
Copy the group recursively, using copy_dataset. Metadata of datasets that
are copied cannot be changed and rois cannot be applied.
Args:
in_path (str): path to the input file.
out_path (str): path to the output file.
in_path_in_file (str): name of input group.
out_path_in_file (str): name of output group.
n_threads (int): number of threads used to copy datasets.
"""
f_in = File(in_path)
f_out = File(out_path)
def copy_attrs(gin, gout):
in_attrs = gin.attrs
out_attrs = gout.attrs
for key, val in in_attrs.items():
out_attrs[key] = val
g_in = f_in[in_path_in_file]
g_out = f_out.require_group(out_path_in_file)
copy_attrs(g_in, g_out)
def copy_object(name, obj):
abs_in_key = os.path.join(in_path_in_file, name)
abs_out_key = os.path.join(out_path_in_file, name)
if isinstance(obj, Dataset):
copy_dataset(in_path, out_path,
abs_in_key, abs_out_key, n_threads)
else:
g = f_out.require_group(abs_out_key)
copy_attrs(obj, g)
g_in.visititems(copy_object)
class Timer:
def __init__(self):
self.start_time = None
self.stop_time = None
@property
def elapsed(self):
try:
return (self.stop_time - self.start_time).total_seconds()
except TypeError as e:
if "'NoneType'" in str(e):
raise RuntimeError("{} either not started, or not stopped".format(self))
def start(self):
self.start_time = datetime.utcnow()
def stop(self):
self.stop_time = datetime.utcnow()
return self.elapsed
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
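# Illustrative sketch, not part of the original module: Timer is meant to be
# used as a context manager; `elapsed` is available once the block exits.
def _example_timer():
    with Timer() as t:
        sum(range(10 ** 6))  # some work to time
    print("took %.4f s" % t.elapsed)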
def fetch_test_data_stent():
from imageio import volread
data_i16 = volread('imageio:stent.npz')
return (data_i16 / data_i16.max() * 255).astype(np.uint8)
def fetch_test_data():
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
try:
from io import BytesIO as Buffer
except ImportError:
from StringIO import StringIO as Buffer
import zipfile
from imageio import volread
im_url = "https://imagej.nih.gov/ij/images/t1-head-raw.zip"
with closing(urlopen(im_url)) as response:
if response.status != 200:
raise RuntimeError("Test data could not be found at {}, status code {}".format(
im_url, response.status
))
zip_buffer = Buffer(response.read())
with zipfile.ZipFile(zip_buffer) as zf:
tif_buffer = Buffer(zf.read('JeffT1_le.tif'))
return np.asarray(volread(tif_buffer, format='tif'), dtype=np.uint8)
def remove_trivial_chunks(dataset, n_threads,
remove_specific_value=None):
""" Remove chunks that only contain a single value.
    The dataset is modified in place: every chunk whose entries all share a
    single value (or, if ``remove_specific_value`` is given, only chunks filled
    with that value) is deleted.
Args:
dataset (z5py.Dataset)
n_threads (int): number of threads
remove_specific_value (int or float): only remove chunks that contain (only) this specific value (default: None)
"""
dtype = dataset.dtype
function = getattr(_z5py, 'remove_trivial_chunks_%s' % dtype)
remove_specific = remove_specific_value is not None
value = remove_specific_value if remove_specific else 0
function(dataset._impl, n_threads, remove_specific, value)
def remove_dataset(dataset, n_threads):
""" Remvoe dataset multi-threaded.
"""
_z5py.remove_dataset(dataset._impl, n_threads)
def remove_chunk(dataset, chunk_id):
""" Remove a chunk
"""
dataset._impl.remove_chunk(dataset._impl, chunk_id)
def remove_chunks(dataset, bounding_box):
""" Remove all chunks overlapping the bounding box
"""
shape = dataset.shape
chunks = dataset.chunks
blocks = blocking(shape, chunks, roi=bounding_box)
for block in blocks:
chunk_id = tuple(b.start // ch for b, ch in zip(block, chunks))
remove_chunk(dataset, chunk_id)
def unique(dataset, n_threads, return_counts=False):
""" Find unique values in dataset.
Args:
dataset (z5py.Dataset)
n_threads (int): number of threads
return_counts (bool): return counts of unique values (default: False)
"""
dtype = dataset.dtype
if return_counts:
function = getattr(_z5py, 'unique_with_counts_%s' % dtype)
else:
function = getattr(_z5py, 'unique_%s' % dtype)
return function(dataset._impl, n_threads)
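# Illustrative sketch, not part of the original module: computing the unique
# values of a label dataset with 8 threads. The file and dataset names are
# hypothetical placeholders.
def _example_unique():
    f = File('labels.n5')
    ds = f['segmentation']
    # with return_counts=True the bound function also returns the counts
    return unique(ds, n_threads=8, return_counts=True)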
|
[
"urllib2.urlopen",
"zipfile.ZipFile",
"datetime.datetime.utcnow",
"concurrent.futures.ThreadPoolExecutor",
"itertools.product",
"os.path.join",
"imageio.volread",
"numpy.sum"
] |
[((11994, 12022), 'imageio.volread', 'volread', (['"""imageio:stent.npz"""'], {}), "('imageio:stent.npz')\n", (12001, 12022), False, 'from imageio import volread\n'), ((2191, 2207), 'itertools.product', 'product', (['*ranges'], {}), '(*ranges)\n', (2198, 2207), False, 'from itertools import product\n'), ((7395, 7444), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', ([], {'max_workers': 'n_threads'}), '(max_workers=n_threads)\n', (7421, 7444), False, 'from concurrent import futures\n'), ((10873, 10908), 'os.path.join', 'os.path.join', (['in_path_in_file', 'name'], {}), '(in_path_in_file, name)\n', (10885, 10908), False, 'import os\n'), ((10931, 10967), 'os.path.join', 'os.path.join', (['out_path_in_file', 'name'], {}), '(out_path_in_file, name)\n', (10943, 10967), False, 'import os\n'), ((11667, 11684), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (11682, 11684), False, 'from datetime import datetime\n'), ((11731, 11748), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (11746, 11748), False, 'from datetime import datetime\n'), ((12747, 12774), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zip_buffer'], {}), '(zip_buffer)\n', (12762, 12774), False, 'import zipfile\n'), ((6679, 6694), 'numpy.sum', 'np.sum', (['data_in'], {}), '(data_in)\n', (6685, 6694), True, 'import numpy as np\n'), ((12480, 12495), 'urllib2.urlopen', 'urlopen', (['im_url'], {}), '(im_url)\n', (12487, 12495), False, 'from urllib2 import urlopen\n'), ((12862, 12895), 'imageio.volread', 'volread', (['tif_buffer'], {'format': '"""tif"""'}), "(tif_buffer, format='tif')\n", (12869, 12895), False, 'from imageio import volread\n')]
|
"""
Data creation:
Load the data, normalize it, and split into train and test.
"""
'''
Adds the capability of loading pre-split UCI train/test data
via the function LoadData_Splitted_UCI
'''
import numpy as np
import os
import pandas as pd
import tensorflow as tf
DATA_PATH = "../UCI_Datasets"
class DataGenerator:
def __init__(self, dataset_name):
self.dataset_name = dataset_name
# used for metrics calculation
self.scale_c = None # std
self.shift_c = None # mean
def create_cubic_10D_data(self):
Npar = 10
Ntrain = 5000
Nout = 1
Ntest = 1000
# x_train = tf.random.uniform(shape=(Ntrain, Npar))*4.0-2.0
x_train = tf.random.normal(shape=(Ntrain, Npar))
y_train = x_train ** 3
y_train = tf.reduce_sum(y_train, axis=1, keepdims=True)/10.0 + 1.0*tf.random.normal([x_train.shape[0], 1])
# x_test = tf.random.uniform(shape=(Ntest, Npar))
# x_test[:,1] = x_test[:,1] + 4.0
# x_test = np.random.uniform(size=(Ntest,Npar))
# x_test[:,1] = x_test[:,1] + 4.0
x_test = np.random.normal(size=(Ntest,Npar)) + 2.0
x_test = tf.convert_to_tensor(x_test, dtype=tf.float32)
scale_c = np.std(x_test.eval(session=tf.compat.v1.Session()))
y_test = x_test ** 3
y_test = tf.reduce_sum(y_test, axis=1, keepdims=True)/10.0 + 1.0*tf.random.normal([x_test.shape[0], 1])
        ### convert to NumPy arrays via a TF1-compat session while running TF2
x_train = x_train.eval(session=tf.compat.v1.Session())
y_train = y_train.eval(session=tf.compat.v1.Session())
x_test = x_test.eval(session=tf.compat.v1.Session())
y_test = y_test.eval(session=tf.compat.v1.Session())
### normalization
x_mean = np.mean(x_train, axis=0)
x_std = np.std(x_train,axis=0)
xtrain_normal = (x_train - x_mean)/x_std
y_mean = np.mean(y_train,axis=0)
y_std = np.std(y_train,axis=0)
ytrain_normal = (y_train - y_mean)/y_std
xvalid_normal = (x_test - x_mean) / x_std
yvalid_normal = (y_test - y_mean) / y_std
X_train = xtrain_normal
y_train = ytrain_normal
X_val = xvalid_normal
y_val = yvalid_normal
self.scale_c = scale_c
return X_train, y_train, X_val, y_val
def create_data(self, seed_in=5, train_prop=0.9):
"""
@param seed_in: seed for numpy random seed
@param train_prop: train proportion
"""
np.random.seed(seed_in)
# load UCI data
dataset = self.dataset_name
dataset_path = f"{DATA_PATH}/{dataset}.txt"
if dataset == 'YearPredictionMSD':
data = np.loadtxt(dataset_path, delimiter=',')
elif dataset == 'naval':
data = np.loadtxt(dataset_path)
            data = data[:, :-1]  # naval has 2 ground-truth target columns; ignore the last one
else:
data = np.loadtxt(dataset_path)
# save normalization constants (used for calculating results)
if dataset == 'YearPredictionMSD':
scale_c = np.std(data[:, 0]) # in YearPredictionMSD, label's index = 0
shift_c = np.mean(data[:, 0])
else:
scale_c = np.std(data[:, -1])
shift_c = np.mean(data[:, -1])
# normalize data
for i in range(data.shape[1]):
sdev_norm = np.std(data[:, i])
sdev_norm = 0.001 if sdev_norm == 0 else sdev_norm # avoid zero variance features
data[:, i] = (data[:, i] - np.mean(data[:, i])) / sdev_norm
# split train test
if dataset == 'YearPredictionMSD':
# train: first 463,715 examples
# test: last 51,630 examples
train = data[:463715, :]
test = data[-51630:, :]
else:
# split into train/test in random
perm = np.random.permutation(data.shape[0])
train_size = int(round(train_prop * data.shape[0]))
train = data[perm[:train_size], :]
test = data[perm[train_size:], :]
# split to target and data
if dataset == 'YearPredictionMSD':
y_train = train[:, 0].reshape(-1, 1)
X_train = train[:, 1:]
y_val = test[:, 0].reshape(-1, 1)
X_val = test[:, 1:]
else:
y_train = train[:, -1].reshape(-1, 1)
X_train = train[:, :-1]
y_val = test[:, -1].reshape(-1, 1)
X_val = test[:, :-1]
self.scale_c = scale_c
self.shift_c = shift_c
return X_train, y_train, X_val, y_val
def LoadData_Splitted_UCI(self, loadCSVName, original_data_path, splitted_data_path, split_seed, **kwargs):
## (1) Load the original data for the normalization purpose
# current_dir = os.path.dirname(__file__)
# uci_dir = os.path.join(current_dir, 'UCI_datasets')
uci_dir = original_data_path
if loadCSVName == 'boston':
data = np.loadtxt(os.path.join(uci_dir, 'boston-housing/boston_housing.txt'))
if loadCSVName == 'concrete':
data_df = pd.read_excel(os.path.join(uci_dir, 'concrete/Concrete_Data.xls'))
data = data_df.values
if loadCSVName == 'energy':
data_df = pd.read_excel(os.path.join(uci_dir, 'energy-efficiency/ENB2012_data.xlsx'), engine='openpyxl')
data_df = data_df.dropna(how='all', axis='columns')
data_df = data_df.dropna(how='all', axis='rows')
data = data_df.values
if loadCSVName == 'kin8nm':
data_df = pd.read_csv(os.path.join(uci_dir, 'kin8nm/dataset_2175_kin8nm.csv'), sep=',')
data = data_df.values
if loadCSVName == 'naval':
data = np.loadtxt(os.path.join(uci_dir, 'naval/data.txt'))
if loadCSVName == 'power':
data_df = pd.read_excel(os.path.join(uci_dir, 'power-plant/Folds5x2_pp.xlsx'), engine='openpyxl')
data = data_df.values
if loadCSVName == 'protein':
data_df = pd.read_csv(os.path.join(uci_dir, 'protein/CASP.csv'), sep=',')
# print(data_df)
            '''Move the Y data (originally located in the first column) to the last column in order to keep consistency
            with the normalization process'''
col_names = data_df.columns.tolist()
col_names.append(col_names[0])
del col_names[col_names.index(col_names[0])]
# print(col_names)
data_df = data_df[col_names]
# print(data_df)
data = data_df.values
if loadCSVName == 'wine':
data_df = pd.read_csv(os.path.join(uci_dir, 'wine-quality/winequality-red.csv'), sep=';')
data = data_df.values
if loadCSVName == 'yacht':
data = np.loadtxt(os.path.join(uci_dir, 'yacht/yacht_hydrodynamics.data'))
if loadCSVName == 'MSD':
with open(os.path.join(uci_dir, 'song/YearPredictionMSD.npy'), 'rb') as f:
data = np.load(f)
## (2) Load the pre-splitted train/test data
##
xyTrain_load = np.loadtxt(splitted_data_path+'xyTrain_'+loadCSVName+'_seed_'+str(split_seed)+'.csv', delimiter=',')
xyTest_load = np.loadtxt(splitted_data_path+'xyTest_'+loadCSVName+'_seed_'+str(split_seed)+'.csv', delimiter=',')
xyTrain_load = xyTrain_load.astype(np.float32)
# xyValid_load = xyValid_load.astype(np.float32)
xyTest_load = xyTest_load.astype(np.float32)
# original normalization functions
        # work out normalisation constants (needed when unnormalising later)
scale_c = np.std(data[:, -1])
shift_c = np.mean(data[:, -1])
# normalise data
num_cols = xyTrain_load.shape[1]
print('num cols: {}'.format(num_cols))
for i in range(0, num_cols):
# get the sdev_norm from original data
sdev_norm = np.std(data[:, i])
sdev_norm = 0.001 if sdev_norm == 0 else sdev_norm
# apply on the pre-splitted data
xyTrain_load[:, i] = (xyTrain_load[:, i] - np.mean(data[:, i]) )/sdev_norm
xyTest_load[:, i] = (xyTest_load[:, i] - np.mean(data[:, i]) )/sdev_norm
# xyValid_load[:, i] = (xyValid_load[:, i] - np.mean(data[:, i]) )/sdev_norm
if loadCSVName == 'energy' or loadCSVName == 'naval':
xTrain = xyTrain_load[:, :-2] ## all columns except last two columns as inputs
yTrain = xyTrain_load[:, -1] ## last column as output
xTest = xyTest_load[:, :-2]
yTest = xyTest_load[:, -1]
else:
xTrain = xyTrain_load[:, :-1]
yTrain = xyTrain_load[:, -1]
xTest = xyTest_load[:, :-1]
yTest = xyTest_load[:, -1]
self.scale_c = scale_c
self.shift_c = shift_c
return xTrain, yTrain, xTest, yTest
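# Illustrative sketch, not part of the original module: typical usage of
# DataGenerator on one of the UCI datasets. The dataset name must match a txt
# file under DATA_PATH; 'bostonHousing' is a hypothetical placeholder.
def _example_data_generator():
    gen = DataGenerator('bostonHousing')
    X_train, y_train, X_val, y_val = gen.create_data(seed_in=5, train_prop=0.9)
    print(X_train.shape, y_train.shape, X_val.shape, y_val.shape)
    print('scale:', gen.scale_c, 'shift:', gen.shift_c)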
|
[
"numpy.random.normal",
"tensorflow.random.normal",
"numpy.mean",
"tensorflow.reduce_sum",
"os.path.join",
"numpy.random.seed",
"numpy.std",
"tensorflow.convert_to_tensor",
"numpy.loadtxt",
"numpy.load",
"tensorflow.compat.v1.Session",
"numpy.random.permutation"
] |
[((714, 752), 'tensorflow.random.normal', 'tf.random.normal', ([], {'shape': '(Ntrain, Npar)'}), '(shape=(Ntrain, Npar))\n', (730, 752), True, 'import tensorflow as tf\n'), ((1174, 1220), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['x_test'], {'dtype': 'tf.float32'}), '(x_test, dtype=tf.float32)\n', (1194, 1220), True, 'import tensorflow as tf\n'), ((1789, 1813), 'numpy.mean', 'np.mean', (['x_train'], {'axis': '(0)'}), '(x_train, axis=0)\n', (1796, 1813), True, 'import numpy as np\n'), ((1830, 1853), 'numpy.std', 'np.std', (['x_train'], {'axis': '(0)'}), '(x_train, axis=0)\n', (1836, 1853), True, 'import numpy as np\n'), ((1920, 1944), 'numpy.mean', 'np.mean', (['y_train'], {'axis': '(0)'}), '(y_train, axis=0)\n', (1927, 1944), True, 'import numpy as np\n'), ((1960, 1983), 'numpy.std', 'np.std', (['y_train'], {'axis': '(0)'}), '(y_train, axis=0)\n', (1966, 1983), True, 'import numpy as np\n'), ((2528, 2551), 'numpy.random.seed', 'np.random.seed', (['seed_in'], {}), '(seed_in)\n', (2542, 2551), True, 'import numpy as np\n'), ((7671, 7690), 'numpy.std', 'np.std', (['data[:, -1]'], {}), '(data[:, -1])\n', (7677, 7690), True, 'import numpy as np\n'), ((7709, 7729), 'numpy.mean', 'np.mean', (['data[:, -1]'], {}), '(data[:, -1])\n', (7716, 7729), True, 'import numpy as np\n'), ((1115, 1151), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(Ntest, Npar)'}), '(size=(Ntest, Npar))\n', (1131, 1151), True, 'import numpy as np\n'), ((2728, 2767), 'numpy.loadtxt', 'np.loadtxt', (['dataset_path'], {'delimiter': '""","""'}), "(dataset_path, delimiter=',')\n", (2738, 2767), True, 'import numpy as np\n'), ((3102, 3120), 'numpy.std', 'np.std', (['data[:, 0]'], {}), '(data[:, 0])\n', (3108, 3120), True, 'import numpy as np\n'), ((3186, 3205), 'numpy.mean', 'np.mean', (['data[:, 0]'], {}), '(data[:, 0])\n', (3193, 3205), True, 'import numpy as np\n'), ((3242, 3261), 'numpy.std', 'np.std', (['data[:, -1]'], {}), '(data[:, -1])\n', (3248, 3261), True, 'import numpy as np\n'), ((3284, 3304), 'numpy.mean', 'np.mean', (['data[:, -1]'], {}), '(data[:, -1])\n', (3291, 3304), True, 'import numpy as np\n'), ((3394, 3412), 'numpy.std', 'np.std', (['data[:, i]'], {}), '(data[:, i])\n', (3400, 3412), True, 'import numpy as np\n'), ((3889, 3925), 'numpy.random.permutation', 'np.random.permutation', (['data.shape[0]'], {}), '(data.shape[0])\n', (3910, 3925), True, 'import numpy as np\n'), ((7957, 7975), 'numpy.std', 'np.std', (['data[:, i]'], {}), '(data[:, i])\n', (7963, 7975), True, 'import numpy as np\n'), ((802, 847), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['y_train'], {'axis': '(1)', 'keepdims': '(True)'}), '(y_train, axis=1, keepdims=True)\n', (815, 847), True, 'import tensorflow as tf\n'), ((859, 898), 'tensorflow.random.normal', 'tf.random.normal', (['[x_train.shape[0], 1]'], {}), '([x_train.shape[0], 1])\n', (875, 898), True, 'import tensorflow as tf\n'), ((1337, 1381), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['y_test'], {'axis': '(1)', 'keepdims': '(True)'}), '(y_test, axis=1, keepdims=True)\n', (1350, 1381), True, 'import tensorflow as tf\n'), ((1393, 1431), 'tensorflow.random.normal', 'tf.random.normal', (['[x_test.shape[0], 1]'], {}), '([x_test.shape[0], 1])\n', (1409, 1431), True, 'import tensorflow as tf\n'), ((1536, 1558), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (1556, 1558), True, 'import tensorflow as tf\n'), ((1599, 1621), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (1619, 1621), True, 'import tensorflow as 
tf\n'), ((1660, 1682), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (1680, 1682), True, 'import tensorflow as tf\n'), ((1721, 1743), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (1741, 1743), True, 'import tensorflow as tf\n'), ((2820, 2844), 'numpy.loadtxt', 'np.loadtxt', (['dataset_path'], {}), '(dataset_path)\n', (2830, 2844), True, 'import numpy as np\n'), ((2941, 2965), 'numpy.loadtxt', 'np.loadtxt', (['dataset_path'], {}), '(dataset_path)\n', (2951, 2965), True, 'import numpy as np\n'), ((5013, 5071), 'os.path.join', 'os.path.join', (['uci_dir', '"""boston-housing/boston_housing.txt"""'], {}), "(uci_dir, 'boston-housing/boston_housing.txt')\n", (5025, 5071), False, 'import os\n'), ((5148, 5199), 'os.path.join', 'os.path.join', (['uci_dir', '"""concrete/Concrete_Data.xls"""'], {}), "(uci_dir, 'concrete/Concrete_Data.xls')\n", (5160, 5199), False, 'import os\n'), ((5308, 5368), 'os.path.join', 'os.path.join', (['uci_dir', '"""energy-efficiency/ENB2012_data.xlsx"""'], {}), "(uci_dir, 'energy-efficiency/ENB2012_data.xlsx')\n", (5320, 5368), False, 'import os\n'), ((5620, 5675), 'os.path.join', 'os.path.join', (['uci_dir', '"""kin8nm/dataset_2175_kin8nm.csv"""'], {}), "(uci_dir, 'kin8nm/dataset_2175_kin8nm.csv')\n", (5632, 5675), False, 'import os\n'), ((5786, 5825), 'os.path.join', 'os.path.join', (['uci_dir', '"""naval/data.txt"""'], {}), "(uci_dir, 'naval/data.txt')\n", (5798, 5825), False, 'import os\n'), ((5899, 5952), 'os.path.join', 'os.path.join', (['uci_dir', '"""power-plant/Folds5x2_pp.xlsx"""'], {}), "(uci_dir, 'power-plant/Folds5x2_pp.xlsx')\n", (5911, 5952), False, 'import os\n'), ((6079, 6120), 'os.path.join', 'os.path.join', (['uci_dir', '"""protein/CASP.csv"""'], {}), "(uci_dir, 'protein/CASP.csv')\n", (6091, 6120), False, 'import os\n'), ((6675, 6732), 'os.path.join', 'os.path.join', (['uci_dir', '"""wine-quality/winequality-red.csv"""'], {}), "(uci_dir, 'wine-quality/winequality-red.csv')\n", (6687, 6732), False, 'import os\n'), ((6843, 6898), 'os.path.join', 'os.path.join', (['uci_dir', '"""yacht/yacht_hydrodynamics.data"""'], {}), "(uci_dir, 'yacht/yacht_hydrodynamics.data')\n", (6855, 6898), False, 'import os\n'), ((7044, 7054), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (7051, 7054), True, 'import numpy as np\n'), ((1266, 1288), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (1286, 1288), True, 'import tensorflow as tf\n'), ((3547, 3566), 'numpy.mean', 'np.mean', (['data[:, i]'], {}), '(data[:, i])\n', (3554, 3566), True, 'import numpy as np\n'), ((6956, 7007), 'os.path.join', 'os.path.join', (['uci_dir', '"""song/YearPredictionMSD.npy"""'], {}), "(uci_dir, 'song/YearPredictionMSD.npy')\n", (6968, 7007), False, 'import os\n'), ((8139, 8158), 'numpy.mean', 'np.mean', (['data[:, i]'], {}), '(data[:, i])\n', (8146, 8158), True, 'import numpy as np\n'), ((8225, 8244), 'numpy.mean', 'np.mean', (['data[:, i]'], {}), '(data[:, i])\n', (8232, 8244), True, 'import numpy as np\n')]
|
from __future__ import print_function
import emcee
from multiprocessing import Pool
import numpy as np
import corner
import matplotlib.pyplot as plt
import sys
import scipy.optimize as op
from rbvfit.rb_vfit import rb_veldiff as rb_veldiff
from rbvfit import rb_setline as rb
import pdb
def plot_model(wave_obs,fnorm,enorm,fit,model,outfile= False,xlim=[-600.,600.],verbose=False):
    # This model only works if there are no nuisance parameters
theta_prime=fit.best_theta
value1=fit.low_theta
value2=fit.high_theta
n_clump=model.nclump
n_clump_total=np.int(len(theta_prime)/3)
ntransition=model.ntransition
zabs=model.zabs
samples=fit.samples
model_mcmc=fit.model
wave_list=np.zeros( len(model.lambda_rest_original),)
# Use the input lambda rest list to plot correctly
for i in range(0,len(wave_list)):
s=rb.rb_setline(model.lambda_rest_original[i],'closest')
wave_list[i]=s['wave']
wave_rest=wave_obs/(1+zabs[0])
best_N = theta_prime[0:n_clump_total]
best_b = theta_prime[n_clump_total:2 * n_clump_total]
best_v = theta_prime[2 * n_clump_total:3 * n_clump_total]
low_N = value1[0:n_clump_total]
low_b = value1[n_clump_total:2 * n_clump_total]
low_v = value1[2 * n_clump_total:3 * n_clump_total]
high_N = value2[0:n_clump_total]
high_b = value2[n_clump_total:2 * n_clump_total]
high_v = value2[2 * n_clump_total:3 * n_clump_total]
#Now extracting individual fitted components
best_fit, f1 = model.model_fit(theta_prime, wave_obs)
fig, axs = plt.subplots(ntransition, sharex=True, sharey=False,figsize=(12,18 ),gridspec_kw={'hspace': 0})
BIGGER_SIZE = 18
plt.rc('font', size=BIGGER_SIZE) # controls default text sizes
plt.rc('axes', titlesize=BIGGER_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=BIGGER_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=BIGGER_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=BIGGER_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=BIGGER_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
index = np.random.randint(0, high=len(samples), size=100)
if ntransition == 1:
        # When there are no nuisance parameters
#Now loop through each transition and plot them in velocity space
vel=rb_veldiff(wave_list[0],wave_rest)
axs.step(vel, fnorm, 'k-', linewidth=1.)
axs.step(vel, enorm, color='r', linewidth=1.)
        # Plotting a random sample of outputs extracted from the posterior distribution
for ind in range(len(index)):
axs.plot(vel, model_mcmc(samples[index[ind], :], wave_obs), color="k", alpha=0.1)
axs.set_ylim([0, 1.6])
axs.set_xlim(xlim)
axs.plot(vel, best_fit, color='b', linewidth=3)
axs.plot([0., 0.], [-0.2, 2.5], 'k:', lw=0.5)
# plot individual components
for dex in range(0,np.shape(f1)[1]):
axs.plot(vel, f1[:, dex], 'g:', linewidth=3)
for iclump in range(0,n_clump):
axs.plot([best_v[iclump],best_v[iclump]],[1.05,1.15],'k--',lw=4)
text1=r'$logN \;= '+ np.str('%.2f' % best_N[iclump]) +'^{ + ' + np.str('%.2f' % (best_N[iclump]-low_N[iclump]))+'}'+ '_{ -' + np.str('%.2f' % (high_N[iclump]-best_N[iclump]))+'}$'
axs.text(best_v[iclump],1.2,text1,
fontsize=14,rotation=90, rotation_mode='anchor')
text2=r'$b ='+np.str('%.0f' % best_b[iclump]) +'^{ + ' + np.str('%.0f' % (best_b[iclump]-low_b[iclump]))+'}'+ '_{ -' + np.str('%.0f' % (high_b[iclump]-best_b[iclump]))+'}$'
axs.text(best_v[iclump]+30,1.2, text2,fontsize=14,rotation=90, rotation_mode='anchor')
else:
#Now loop through each transition and plot them in velocity space
for i in range(0,ntransition):
print(wave_list[i])
vel=rb_veldiff(wave_list[i],wave_rest)
axs[i].step(vel, fnorm, 'k-', linewidth=1.)
axs[i].step(vel, enorm, color='r', linewidth=1.)
#pdb.set_trace()
# Plotting a random sample of outputs extracted from posterior distribution
for ind in range(len(index)):
axs[i].plot(vel, model_mcmc(samples[index[ind], :], wave_obs), color="k", alpha=0.1)
axs[i].set_ylim([0, 1.6])
axs[i].set_xlim(xlim)
axs[i].plot(vel, best_fit, color='b', linewidth=3)
axs[i].plot([0., 0.], [-0.2, 2.5], 'k:', lw=0.5)
# plot individual components
for dex in range(0,np.shape(f1)[1]):
axs[i].plot(vel, f1[:, dex], 'g:', linewidth=3)
for iclump in range(0,n_clump):
axs[i].plot([best_v[iclump],best_v[iclump]],[1.05,1.15],'k--',lw=4)
if i ==0:
text1=r'$logN \;= '+ np.str('%.2f' % best_N[iclump]) +'^{ + ' + np.str('%.2f' % (best_N[iclump]-low_N[iclump]))+'}'+ '_{ -' + np.str('%.2f' % (high_N[iclump]-best_N[iclump]))+'}$'
axs[i].text(best_v[iclump],1.2,text1,
fontsize=14,rotation=90, rotation_mode='anchor')
text2=r'$b ='+np.str('%.0f' % best_b[iclump]) +'^{ + ' + np.str('%.0f' % (best_b[iclump]-low_b[iclump]))+'}'+ '_{ -' + np.str('%.0f' % (high_b[iclump]-best_b[iclump]))+'}$'
axs[i].text(best_v[iclump]+30,1.2, text2,
fontsize=14,rotation=90, rotation_mode='anchor')
if verbose==True:
from IPython.display import display, Math
samples = fit.sampler.get_chain(discard=100, thin=15, flat=True)
nfit = int(fit.ndim / 3)
N_tile = np.tile("logN", nfit)
b_tile = np.tile("b", nfit)
v_tile = np.tile("v", nfit)
tmp = np.append(N_tile, b_tile)
text_label = np.append(tmp, v_tile)
for i in range(len(text_label)):
mcmc = np.percentile(samples[:, i], [16, 50, 84])
q = np.diff(mcmc)
txt = "\mathrm{{{3}}} = {0:.2f}_{{-{1:.2f}}}^{{{2:.2f}}}"
txt = txt.format(mcmc[1], q[0], q[1], text_label[i])
display(Math(txt))
if outfile==False:
plt.show()
else:
outfile_fig =outfile
fig.savefig(outfile_fig, bbox_inches='tight')
######## Computing Likelihoods######
def lnprior(theta, lb, ub):
for index in range(0, len(lb)):
if (lb[index] > theta[index]) or (ub[index] < theta[index]):
return -np.inf
break
return 0.0
def lnlike(theta, model, x, y, yerr):
model = model(theta, x)
inv_sigma2 = 1.0 / (yerr ** 2)
return -0.5 * (np.sum((y - model) ** 2 * inv_sigma2 - np.log(inv_sigma2)))
def lnprob(theta, lb, ub, model, x, y, yerr):
lp = lnprior(theta, lb, ub)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, model, x, y, yerr)
def optimize_guess(model, theta, lb, ub, x, y, yerr):
nll = lambda *args: -lnprob(*args)
result = op.minimize(nll, [theta], args=(lb, ub, model, x, y, yerr))
p = result["x"]
return p
def set_bounds(nguess,bguess,vguess):
Nlow=np.zeros((len(nguess,)))
blow=np.zeros((len(nguess,)))
vlow=np.zeros((len(nguess,)))
NHI=np.zeros((len(nguess,)))
bHI=np.zeros((len(nguess,)))
vHI=np.zeros((len(nguess,)))
for i in range(0,len(nguess)):
Nlow[i]=nguess[i]-2.
blow[i]=bguess[i]-40.
if blow[i] < 2.:
blow[i] = 2.
vlow[i]=vguess[i]-50.
NHI[i]=nguess[i]+2.
bHI[i]=bguess[i]+40.
if bHI[i] > 200.:
bHI[i] = 150.
vHI[i]=vguess[i]+50.
lb=np.concatenate((Nlow,blow,vlow))
ub=np.concatenate((NHI,bHI,vHI))
bounds=[lb,ub]
return bounds, lb, ub
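# Illustrative sketch, not part of the original module: building bounds for a
# two-component fit. The guess values below are hypothetical.
def _example_set_bounds():
    nguess = [13.5, 14.0]    # log column densities
    bguess = [20., 30.]      # Doppler b parameters [km/s]
    vguess = [-50., 40.]     # velocity centroids [km/s]
    bounds, lb, ub = set_bounds(nguess, bguess, vguess)
    theta = np.concatenate((nguess, bguess, vguess))  # initial guess for vfit
    return theta, lb, ub, bounds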
class vfit(object):
def __init__(self, model, theta, lb, ub, wave_obs, fnorm, enorm, no_of_Chain=50, no_of_steps=1000,
perturbation=1e-6):
# Main class that performs all the fitting
self.wave_obs = wave_obs
self.fnorm = fnorm
self.enorm = enorm
self.model = model
self.lb = lb
self.ub = ub
self.theta = theta
self.no_of_Chain = no_of_Chain
self.no_of_steps = no_of_steps
self.perturbation = perturbation
def runmcmc(self, optimize=True,verbose=False):
model = self.model
theta = self.theta
lb = self.lb
ub = self.ub
wave_obs = self.wave_obs
fnorm = self.fnorm
enorm = self.enorm
no_of_Chain = self.no_of_Chain
no_of_steps = self.no_of_steps
perturbation = self.perturbation
if optimize == True:
print('Optimizing Guess ***********')
# Now make a better guess
popt = optimize_guess(model, theta, lb, ub, wave_obs, fnorm, enorm)
print('Done ***********')
else:
print('Skipping Optimizing Guess ***********')
print('Using input guess for mcmc ***********')
popt = theta
print('Preparing emcee ***********')
###### Define a lot of walkers
length_of_lb = len(lb)
ndim, nwalkers = length_of_lb, no_of_Chain
guesses = [popt + perturbation * np.random.randn(ndim) for i in range(nwalkers)]
print("Starting emcee ***********")
burntime = np.round(no_of_steps * .2)
with Pool() as pool:
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, pool=pool, args=(lb, ub, model, wave_obs, fnorm, enorm))
pos, prob, state = sampler.run_mcmc(guesses, no_of_steps,progress=True)
#sampler.reset()
print("Done!")
#print("Now starting the Final Calculations:")
print("*****************")
#width = 30
# Now Running mcmc
#for i, result in enumerate(sampler.sample(pos, iterations=no_of_steps)):
# n = int((width + 1) * float(i) / no_of_steps)
#sys.stdout.write("\r[{0}{1}]".format('#' * n, ' ' * (width - n)))
#sys.stdout.write("\n")
if verbose==True:
from IPython.display import display, Math
samples = sampler.get_chain(discard=100, thin=15, flat=True)
nfit = int(ndim / 3)
N_tile = np.tile("logN", nfit)
b_tile = np.tile("b", nfit)
v_tile = np.tile("v", nfit)
tmp = np.append(N_tile, b_tile)
text_label = np.append(tmp, v_tile)
for i in range(len(text_label)):
mcmc = np.percentile(samples[:, i], [16, 50, 84])
q = np.diff(mcmc)
txt = "\mathrm{{{3}}} = {0:.2f}_{{-{1:.2f}}}^{{{2:.2f}}}"
txt = txt.format(mcmc[1], q[0], q[1], text_label[i])
display(Math(txt))
self.sampler = sampler
self.ndim = ndim
self.nwalkers = nwalkers
def plot_corner(self,outfile=False):
ndim=self.ndim
#samples = self.sampler.chain[:, 100:, :].reshape((-1, ndim)) # sampler.flatchain
samples = self.sampler.get_chain(discard=100, thin=15, flat=True)
st = np.percentile(samples, 50, axis=0) # =np.median(samples,axis=0)#np.median(sampler.flatchain, axis=0)
# df = pd.DataFrame(samples)
# temp=df.mode()
# st=temp.values[0]
nfit = int(ndim / 3)
N_tile = np.tile("logN", nfit)
b_tile = np.tile("b", nfit)
v_tile = np.tile("v", nfit)
tmp = np.append(N_tile, b_tile)
text_label = np.append(tmp, v_tile)
figure = corner.corner(samples, labels=text_label, truths=st)
theta_prime = st
value1 = np.percentile(samples, 10, axis=0)
# This is the empirical mean of the sample:
value2 = np.percentile(samples, 90, axis=0)
# Extract the axes
axes = np.array(figure.axes).reshape((ndim, ndim))
# Loop over the diagonal
for i in range(ndim):
ax = axes[i, i]
ax.axvline(value1[i], color="aqua")
ax.axvline(value2[i], color="aqua")
# Loop over the histograms
for yi in range(ndim):
for xi in range(yi):
ax = axes[yi, xi]
ax.axvline(value1[xi], color="aqua")
ax.axvline(value2[xi], color="aqua")
# ax.axhline(value1[yi], color="g")
# ax.axhline(value2[yi], color="r")
# ax.plot(value1[xi], value1[yi], "sg")
# ax.plot(value2[xi], value2[yi], "sr")
self.best_theta=theta_prime
self.low_theta=value1
self.high_theta=value2
self.samples=samples
if outfile==False:
plt.show()
else:
outfile_fig =outfile
figure.savefig(outfile_fig, bbox_inches='tight')
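# Illustrative sketch, not part of the original module: the intended workflow
# with vfit. `model`, `theta`, `lb`, `ub` and the spectrum arrays are assumed
# to come from an rb_vfit model setup and set_bounds above.
def _example_vfit(model, theta, lb, ub, wave_obs, fnorm, enorm):
    fit = vfit(model, theta, lb, ub, wave_obs, fnorm, enorm,
               no_of_Chain=50, no_of_steps=1000)
    fit.runmcmc(optimize=True, verbose=False)
    fit.plot_corner()   # also stores best/low/high theta and the flat samples
    plot_model(wave_obs, fnorm, enorm, fit, model)
    return fit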
|
[
"numpy.log",
"emcee.EnsembleSampler",
"numpy.array",
"numpy.isfinite",
"rbvfit.rb_vfit.rb_veldiff",
"corner.corner",
"numpy.diff",
"numpy.concatenate",
"numpy.round",
"numpy.tile",
"scipy.optimize.minimize",
"IPython.display.Math",
"numpy.shape",
"numpy.random.randn",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.show",
"numpy.str",
"rbvfit.rb_setline.rb_setline",
"numpy.append",
"multiprocessing.Pool",
"numpy.percentile",
"matplotlib.pyplot.subplots"
] |
[((1731, 1832), 'matplotlib.pyplot.subplots', 'plt.subplots', (['ntransition'], {'sharex': '(True)', 'sharey': '(False)', 'figsize': '(12, 18)', 'gridspec_kw': "{'hspace': 0}"}), "(ntransition, sharex=True, sharey=False, figsize=(12, 18),\n gridspec_kw={'hspace': 0})\n", (1743, 1832), True, 'import matplotlib.pyplot as plt\n'), ((1878, 1910), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': 'BIGGER_SIZE'}), "('font', size=BIGGER_SIZE)\n", (1884, 1910), True, 'import matplotlib.pyplot as plt\n'), ((1958, 1995), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'titlesize': 'BIGGER_SIZE'}), "('axes', titlesize=BIGGER_SIZE)\n", (1964, 1995), True, 'import matplotlib.pyplot as plt\n'), ((2037, 2074), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'labelsize': 'BIGGER_SIZE'}), "('axes', labelsize=BIGGER_SIZE)\n", (2043, 2074), True, 'import matplotlib.pyplot as plt\n'), ((2119, 2157), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': 'BIGGER_SIZE'}), "('xtick', labelsize=BIGGER_SIZE)\n", (2125, 2157), True, 'import matplotlib.pyplot as plt\n'), ((2199, 2237), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': 'BIGGER_SIZE'}), "('ytick', labelsize=BIGGER_SIZE)\n", (2205, 2237), True, 'import matplotlib.pyplot as plt\n'), ((2279, 2317), 'matplotlib.pyplot.rc', 'plt.rc', (['"""legend"""'], {'fontsize': 'BIGGER_SIZE'}), "('legend', fontsize=BIGGER_SIZE)\n", (2285, 2317), True, 'import matplotlib.pyplot as plt\n'), ((2347, 2386), 'matplotlib.pyplot.rc', 'plt.rc', (['"""figure"""'], {'titlesize': 'BIGGER_SIZE'}), "('figure', titlesize=BIGGER_SIZE)\n", (2353, 2386), True, 'import matplotlib.pyplot as plt\n'), ((7768, 7827), 'scipy.optimize.minimize', 'op.minimize', (['nll', '[theta]'], {'args': '(lb, ub, model, x, y, yerr)'}), '(nll, [theta], args=(lb, ub, model, x, y, yerr))\n', (7779, 7827), True, 'import scipy.optimize as op\n'), ((8431, 8465), 'numpy.concatenate', 'np.concatenate', (['(Nlow, blow, vlow)'], {}), '((Nlow, blow, vlow))\n', (8445, 8465), True, 'import numpy as np\n'), ((8471, 8502), 'numpy.concatenate', 'np.concatenate', (['(NHI, bHI, vHI)'], {}), '((NHI, bHI, vHI))\n', (8485, 8502), True, 'import numpy as np\n'), ((934, 989), 'rbvfit.rb_setline.rb_setline', 'rb.rb_setline', (['model.lambda_rest_original[i]', '"""closest"""'], {}), "(model.lambda_rest_original[i], 'closest')\n", (947, 989), True, 'from rbvfit import rb_setline as rb\n'), ((2678, 2713), 'rbvfit.rb_vfit.rb_veldiff', 'rb_veldiff', (['wave_list[0]', 'wave_rest'], {}), '(wave_list[0], wave_rest)\n', (2688, 2713), True, 'from rbvfit.rb_vfit import rb_veldiff as rb_veldiff\n'), ((6363, 6384), 'numpy.tile', 'np.tile', (['"""logN"""', 'nfit'], {}), "('logN', nfit)\n", (6370, 6384), True, 'import numpy as np\n'), ((6406, 6424), 'numpy.tile', 'np.tile', (['"""b"""', 'nfit'], {}), "('b', nfit)\n", (6413, 6424), True, 'import numpy as np\n'), ((6446, 6464), 'numpy.tile', 'np.tile', (['"""v"""', 'nfit'], {}), "('v', nfit)\n", (6453, 6464), True, 'import numpy as np\n'), ((6483, 6508), 'numpy.append', 'np.append', (['N_tile', 'b_tile'], {}), '(N_tile, b_tile)\n', (6492, 6508), True, 'import numpy as np\n'), ((6534, 6556), 'numpy.append', 'np.append', (['tmp', 'v_tile'], {}), '(tmp, v_tile)\n', (6543, 6556), True, 'import numpy as np\n'), ((6948, 6958), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6956, 6958), True, 'import matplotlib.pyplot as plt\n'), ((7571, 7586), 'numpy.isfinite', 'np.isfinite', (['lp'], {}), '(lp)\n', (7582, 7586), True, 'import numpy as np\n'), 
((10125, 10152), 'numpy.round', 'np.round', (['(no_of_steps * 0.2)'], {}), '(no_of_steps * 0.2)\n', (10133, 10152), True, 'import numpy as np\n'), ((11889, 11923), 'numpy.percentile', 'np.percentile', (['samples', '(50)'], {'axis': '(0)'}), '(samples, 50, axis=0)\n', (11902, 11923), True, 'import numpy as np\n'), ((12128, 12149), 'numpy.tile', 'np.tile', (['"""logN"""', 'nfit'], {}), "('logN', nfit)\n", (12135, 12149), True, 'import numpy as np\n'), ((12167, 12185), 'numpy.tile', 'np.tile', (['"""b"""', 'nfit'], {}), "('b', nfit)\n", (12174, 12185), True, 'import numpy as np\n'), ((12203, 12221), 'numpy.tile', 'np.tile', (['"""v"""', 'nfit'], {}), "('v', nfit)\n", (12210, 12221), True, 'import numpy as np\n'), ((12237, 12262), 'numpy.append', 'np.append', (['N_tile', 'b_tile'], {}), '(N_tile, b_tile)\n', (12246, 12262), True, 'import numpy as np\n'), ((12284, 12306), 'numpy.append', 'np.append', (['tmp', 'v_tile'], {}), '(tmp, v_tile)\n', (12293, 12306), True, 'import numpy as np\n'), ((12325, 12377), 'corner.corner', 'corner.corner', (['samples'], {'labels': 'text_label', 'truths': 'st'}), '(samples, labels=text_label, truths=st)\n', (12338, 12377), False, 'import corner\n'), ((12421, 12455), 'numpy.percentile', 'np.percentile', (['samples', '(10)'], {'axis': '(0)'}), '(samples, 10, axis=0)\n', (12434, 12455), True, 'import numpy as np\n'), ((12526, 12560), 'numpy.percentile', 'np.percentile', (['samples', '(90)'], {'axis': '(0)'}), '(samples, 90, axis=0)\n', (12539, 12560), True, 'import numpy as np\n'), ((4373, 4408), 'rbvfit.rb_vfit.rb_veldiff', 'rb_veldiff', (['wave_list[i]', 'wave_rest'], {}), '(wave_list[i], wave_rest)\n', (4383, 4408), True, 'from rbvfit.rb_vfit import rb_veldiff as rb_veldiff\n'), ((6625, 6667), 'numpy.percentile', 'np.percentile', (['samples[:, i]', '[16, 50, 84]'], {}), '(samples[:, i], [16, 50, 84])\n', (6638, 6667), True, 'import numpy as np\n'), ((6688, 6701), 'numpy.diff', 'np.diff', (['mcmc'], {}), '(mcmc)\n', (6695, 6701), True, 'import numpy as np\n'), ((10165, 10171), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (10169, 10171), False, 'from multiprocessing import Pool\n'), ((10203, 10309), 'emcee.EnsembleSampler', 'emcee.EnsembleSampler', (['nwalkers', 'ndim', 'lnprob'], {'pool': 'pool', 'args': '(lb, ub, model, wave_obs, fnorm, enorm)'}), '(nwalkers, ndim, lnprob, pool=pool, args=(lb, ub,\n model, wave_obs, fnorm, enorm))\n', (10224, 10309), False, 'import emcee\n'), ((11035, 11056), 'numpy.tile', 'np.tile', (['"""logN"""', 'nfit'], {}), "('logN', nfit)\n", (11042, 11056), True, 'import numpy as np\n'), ((11078, 11096), 'numpy.tile', 'np.tile', (['"""b"""', 'nfit'], {}), "('b', nfit)\n", (11085, 11096), True, 'import numpy as np\n'), ((11118, 11136), 'numpy.tile', 'np.tile', (['"""v"""', 'nfit'], {}), "('v', nfit)\n", (11125, 11136), True, 'import numpy as np\n'), ((11156, 11181), 'numpy.append', 'np.append', (['N_tile', 'b_tile'], {}), '(N_tile, b_tile)\n', (11165, 11181), True, 'import numpy as np\n'), ((11207, 11229), 'numpy.append', 'np.append', (['tmp', 'v_tile'], {}), '(tmp, v_tile)\n', (11216, 11229), True, 'import numpy as np\n'), ((13426, 13436), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13434, 13436), True, 'import matplotlib.pyplot as plt\n'), ((3299, 3311), 'numpy.shape', 'np.shape', (['f1'], {}), '(f1)\n', (3307, 3311), True, 'import numpy as np\n'), ((6887, 6896), 'IPython.display.Math', 'Math', (['txt'], {}), '(txt)\n', (6891, 6896), False, 'from IPython.display import display, Math\n'), ((7459, 7477), 'numpy.log', 
'np.log', (['inv_sigma2'], {}), '(inv_sigma2)\n', (7465, 7477), True, 'import numpy as np\n'), ((11299, 11341), 'numpy.percentile', 'np.percentile', (['samples[:, i]', '[16, 50, 84]'], {}), '(samples[:, i], [16, 50, 84])\n', (11312, 11341), True, 'import numpy as np\n'), ((11362, 11375), 'numpy.diff', 'np.diff', (['mcmc'], {}), '(mcmc)\n', (11369, 11375), True, 'import numpy as np\n'), ((12603, 12624), 'numpy.array', 'np.array', (['figure.axes'], {}), '(figure.axes)\n', (12611, 12624), True, 'import numpy as np\n'), ((3651, 3701), 'numpy.str', 'np.str', (["('%.2f' % (high_N[iclump] - best_N[iclump]))"], {}), "('%.2f' % (high_N[iclump] - best_N[iclump]))\n", (3657, 3701), True, 'import numpy as np\n'), ((3962, 4012), 'numpy.str', 'np.str', (["('%.0f' % (high_b[iclump] - best_b[iclump]))"], {}), "('%.0f' % (high_b[iclump] - best_b[iclump]))\n", (3968, 4012), True, 'import numpy as np\n'), ((5153, 5165), 'numpy.shape', 'np.shape', (['f1'], {}), '(f1)\n', (5161, 5165), True, 'import numpy as np\n'), ((10014, 10035), 'numpy.random.randn', 'np.random.randn', (['ndim'], {}), '(ndim)\n', (10029, 10035), True, 'import numpy as np\n'), ((11543, 11552), 'IPython.display.Math', 'Math', (['txt'], {}), '(txt)\n', (11547, 11552), False, 'from IPython.display import display, Math\n'), ((5573, 5623), 'numpy.str', 'np.str', (["('%.2f' % (high_N[iclump] - best_N[iclump]))"], {}), "('%.2f' % (high_N[iclump] - best_N[iclump]))\n", (5579, 5623), True, 'import numpy as np\n'), ((5915, 5965), 'numpy.str', 'np.str', (["('%.0f' % (high_b[iclump] - best_b[iclump]))"], {}), "('%.0f' % (high_b[iclump] - best_b[iclump]))\n", (5921, 5965), True, 'import numpy as np\n'), ((3588, 3637), 'numpy.str', 'np.str', (["('%.2f' % (best_N[iclump] - low_N[iclump]))"], {}), "('%.2f' % (best_N[iclump] - low_N[iclump]))\n", (3594, 3637), True, 'import numpy as np\n'), ((3899, 3948), 'numpy.str', 'np.str', (["('%.0f' % (best_b[iclump] - low_b[iclump]))"], {}), "('%.0f' % (best_b[iclump] - low_b[iclump]))\n", (3905, 3948), True, 'import numpy as np\n'), ((3545, 3576), 'numpy.str', 'np.str', (["('%.2f' % best_N[iclump])"], {}), "('%.2f' % best_N[iclump])\n", (3551, 3576), True, 'import numpy as np\n'), ((3856, 3887), 'numpy.str', 'np.str', (["('%.0f' % best_b[iclump])"], {}), "('%.0f' % best_b[iclump])\n", (3862, 3887), True, 'import numpy as np\n'), ((5510, 5559), 'numpy.str', 'np.str', (["('%.2f' % (best_N[iclump] - low_N[iclump]))"], {}), "('%.2f' % (best_N[iclump] - low_N[iclump]))\n", (5516, 5559), True, 'import numpy as np\n'), ((5852, 5901), 'numpy.str', 'np.str', (["('%.0f' % (best_b[iclump] - low_b[iclump]))"], {}), "('%.0f' % (best_b[iclump] - low_b[iclump]))\n", (5858, 5901), True, 'import numpy as np\n'), ((5467, 5498), 'numpy.str', 'np.str', (["('%.2f' % best_N[iclump])"], {}), "('%.2f' % best_N[iclump])\n", (5473, 5498), True, 'import numpy as np\n'), ((5809, 5840), 'numpy.str', 'np.str', (["('%.0f' % best_b[iclump])"], {}), "('%.0f' % best_b[iclump])\n", (5815, 5840), True, 'import numpy as np\n')]
|
from numpy import logspace
from sys import path as sysPath
sysPath.append('../../src')
#load the module
from interfacePy import Cosmo
cosmo=Cosmo('../../src/data/eos2020.dat',0,1e5)
for T in logspace(-5,5,50):
print(
'T=',T,'GeV\t',
'H=',cosmo.Hubble(T),'GeV\t',
'h_eff=',cosmo.heff(T),'\t',
'g_eff=',cosmo.geff(T),'\t',
's=',cosmo.s(T),'GeV^3\t',
)
if False:
import matplotlib.pyplot as plt
#########-----g_eff and h_eff-----#########
fig=plt.figure(figsize=(9,4))
fig.subplots_adjust(bottom=0.15, left=0.15, top = 0.95, right=0.9,wspace=0.0,hspace=0.0)
fig.suptitle('')
sub = fig.add_subplot(1,1,1)
T=logspace(-5,5,500)
gt=[cosmo.geff(i) for i in T]
ht=[cosmo.heff(i) for i in T]
sub.plot(T,gt,linestyle='--',c='xkcd:red',label=r"$g_{\rm eff} (T)$")
sub.plot(T,ht,linestyle=':',c='xkcd:black',label=r"$h_{\rm eff} (T)$")
sub.set_xlabel(r'$T ~ [{\rm GeV}]$')
sub.set_ylabel(r'rel. dof')
sub.legend(bbox_to_anchor=(1, 0.0),borderaxespad=0.,
borderpad=0.05,ncol=1,loc='lower right',fontsize=14,framealpha=0)
sub.set_yscale('log')
sub.set_xscale('log')
fig.savefig('rdofs-T_examplePlot.pdf',bbox_inches='tight')
#########-----dg_effdT and dh_effdT-----#########
fig=plt.figure(figsize=(9,4))
fig.subplots_adjust(bottom=0.15, left=0.15, top = 0.95, right=0.9,wspace=0.0,hspace=0.0)
fig.suptitle('')
sub = fig.add_subplot(1,1,1)
T=logspace(-5,5,500)
dg=[cosmo.dgeffdT (i) for i in T]
dh=[cosmo.dheffdT(i) for i in T]
sub.plot(T,dg,linestyle='--',c='xkcd:red',label=r"$\dfrac{d g_{\rm eff}}{dT} (T)$")
sub.plot(T,dh,linestyle=':',c='xkcd:black',label=r"$\dfrac{d h_{\rm eff}}{dT} (T)$")
sub.set_xlabel(r'$T ~ [{\rm GeV}]$')
sub.legend(bbox_to_anchor=(1, 0.5),borderaxespad=0.,
borderpad=0.05,ncol=1,loc='lower right',fontsize=14,framealpha=0)
sub.set_yscale('symlog')
sub.set_xscale('log')
fig.savefig('drdofsdT-T_examplePlot.pdf',bbox_inches='tight')
#########-----dh-----#########
fig=plt.figure(figsize=(9,4))
fig.subplots_adjust(bottom=0.15, left=0.15, top = 0.95, right=0.9,wspace=0.0,hspace=0.0)
fig.suptitle('')
sub = fig.add_subplot(1,1,1)
T=logspace(-5,5,500)
dht=[cosmo.dh(i) for i in T]
sub.plot(T,dht,linestyle='-',c='xkcd:black')
sub.set_xlabel(r'$T ~ [{\rm GeV}]$')
sub.set_ylabel(r'$\delta_h = 1 + \dfrac{1}{3} \dfrac{d \log h_{\rm eff} }{d \log T}$')
sub.set_yscale('linear')
sub.set_xscale('log')
fig.savefig('dh-T_examplePlot.pdf',bbox_inches='tight')
|
[
"matplotlib.pyplot.figure",
"numpy.logspace",
"sys.path.append",
"interfacePy.Cosmo"
] |
[((61, 88), 'sys.path.append', 'sysPath.append', (['"""../../src"""'], {}), "('../../src')\n", (75, 88), True, 'from sys import path as sysPath\n'), ((145, 193), 'interfacePy.Cosmo', 'Cosmo', (['"""../../src/data/eos2020.dat"""', '(0)', '(100000.0)'], {}), "('../../src/data/eos2020.dat', 0, 100000.0)\n", (150, 193), False, 'from interfacePy import Cosmo\n'), ((197, 216), 'numpy.logspace', 'logspace', (['(-5)', '(5)', '(50)'], {}), '(-5, 5, 50)\n', (205, 216), False, 'from numpy import logspace\n'), ((510, 536), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 4)'}), '(figsize=(9, 4))\n', (520, 536), True, 'import matplotlib.pyplot as plt\n'), ((691, 711), 'numpy.logspace', 'logspace', (['(-5)', '(5)', '(500)'], {}), '(-5, 5, 500)\n', (699, 711), False, 'from numpy import logspace\n'), ((1321, 1347), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 4)'}), '(figsize=(9, 4))\n', (1331, 1347), True, 'import matplotlib.pyplot as plt\n'), ((1502, 1522), 'numpy.logspace', 'logspace', (['(-5)', '(5)', '(500)'], {}), '(-5, 5, 500)\n', (1510, 1522), False, 'from numpy import logspace\n'), ((2121, 2147), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 4)'}), '(figsize=(9, 4))\n', (2131, 2147), True, 'import matplotlib.pyplot as plt\n'), ((2302, 2322), 'numpy.logspace', 'logspace', (['(-5)', '(5)', '(500)'], {}), '(-5, 5, 500)\n', (2310, 2322), False, 'from numpy import logspace\n')]
|
import numpy as np
from pyquil.gate_matrices import X, Y, Z, H
from forest.benchmarking.operator_tools.superoperator_transformations import *
# Test philosophy:
# Using the by hand calculations found in the docs we check conversion
# between one qubit channels with one Kraus operator (Hadamard) and two
# Kraus operators (the amplitude damping channel). Additionally we check
# a few two qubit channel conversions to get additional confidence.
def amplitude_damping_kraus(p):
Ad0 = np.asarray([[1, 0], [0, np.sqrt(1 - p)]])
Ad1 = np.asarray([[0, np.sqrt(p)], [0, 0]])
return [Ad0, Ad1]
def amplitude_damping_chi(p):
poly1 = (1 + np.sqrt(1 - p)) ** 2
poly2 = (-1 + np.sqrt(1 - p)) ** 2
ad_pro = 0.25 * np.asarray([[poly1, 0, 0, p],
[0, p, -1j * p, 0],
[0, 1j * p, p, 0],
[p, 0, 0, poly2]])
return ad_pro
def amplitude_damping_pauli(p):
poly1 = np.sqrt(1 - p)
ad_pau = np.asarray([[1, 0, 0, 0],
[0, poly1, 0, 0],
[0, 0, poly1, 0],
[p, 0, 0, 1 - p]])
return ad_pau
def amplitude_damping_super(p):
poly1 = np.sqrt(1 - p)
ad_sup = np.asarray([[1, 0, 0, p],
[0, poly1, 0, 0],
[0, 0, poly1, 0],
[0, 0, 0, 1 - p]])
return ad_sup
def amplitude_damping_choi(p):
poly1 = np.sqrt(1 - p)
ad_choi = np.asarray([[1, 0, 0, poly1],
[0, 0, 0, 0],
[0, 0, p, 0],
[poly1, 0, 0, 1 - p]])
return ad_choi
HADChi = 0.5 * np.asarray([[0, 0, 0, 0],
[0, 1, 0, 1],
[0, 0, 0, 0],
[0, 1, 0, 1]])
HADPauli = 1.0 * np.asarray([[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, -1, 0],
[0, 1, 0, 0]])
HADSuper = 0.5 * np.asarray([[1, 1, 1, 1],
[1, -1, 1, -1],
[1, 1, -1, -1],
[1, -1, -1, 1]])
HADChoi = 0.5 * np.asarray([[1, 1, 1, -1],
[1, 1, 1, -1],
[1, 1, 1, -1],
[-1, -1, -1, 1]])
# Single Qubit Pauli Channel
def one_q_pauli_channel_chi(px, py, pz):
p = (px + py + pz)
pp_chi = np.asarray([[1 - p, 0, 0, 0],
[0, px, 0, 0],
[0, 0, py, 0],
[0, 0, 0, pz]])
return pp_chi
# Pauli twirled Amplitude damping channel
def analytical_pauli_twirl_of_AD_chi(p):
# see equation 7 of https://arxiv.org/pdf/1701.03708.pdf
poly1 = (2 + 2 * np.sqrt(1 - p) - p) / 4
poly2 = p / 4
poly3 = (2 - 2 * np.sqrt(1 - p) - p) / 4
pp_chi = np.asarray([[poly1, 0, 0, 0],
[0, poly2, 0, 0],
[0, 0, poly2, 0],
[0, 0, 0, poly3]])
return pp_chi
# I \otimes Z channel or gate (two qubits)
two_qubit_paulis = n_qubit_pauli_basis(2)
IZKraus = two_qubit_paulis.ops_by_label['IZ']
IZSuper = np.diag([1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1])
# one and zero states as density matrices
ONE_STATE = np.asarray([[0, 0], [0, 1]])
ZERO_STATE = np.asarray([[1, 0], [0, 0]])
# Amplitude damping Kraus operators with p = 0.1
AdKrausOps = amplitude_damping_kraus(.1)
# Use Kraus operators to find output of channel i.e.
# rho_out = A_0 rho A_0^\dag + A_1 rho A_1^\dag.
rho_out = np.matmul(np.matmul(AdKrausOps[0], ONE_STATE), AdKrausOps[0].transpose().conj()) + \
np.matmul(np.matmul(AdKrausOps[1], ONE_STATE), AdKrausOps[1].transpose().conj())
def test_vec():
A = np.asarray([[1, 2], [3, 4]])
B = np.asarray([[1, 2, 5], [3, 4, 6]])
np.testing.assert_array_equal(np.array([[1], [3], [2], [4]]), vec(A))
np.testing.assert_array_equal(np.array([[1], [3], [2], [4], [5], [6]]), vec(B))
def test_unvec():
A = np.asarray([[1, 2], [3, 4]])
C = np.asarray([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
np.testing.assert_array_equal(A, unvec(vec(A)))
np.testing.assert_array_equal(C, unvec(vec(C)))
def test_kraus_ops_sum_to_identity():
# Check kraus ops sum to identity
p = np.random.rand()
Ad0, Ad1 = amplitude_damping_kraus(p)
np.testing.assert_array_almost_equal_nulp(np.matmul(Ad0.transpose().conj(), Ad0)
+ np.matmul(Ad1.transpose().conj(), Ad1), np.eye(2))
def test_kraus2chi():
assert np.allclose(HADChi, kraus2chi(H))
p = np.random.rand()
AdKraus = amplitude_damping_kraus(p)
AdChi = amplitude_damping_chi(p)
assert np.allclose(AdChi, kraus2chi(AdKraus))
assert np.allclose(superop2chi(IZSuper), kraus2chi(IZKraus))
def test_kraus2pauli_liouville():
p = np.random.rand()
AdKraus = amplitude_damping_kraus(p)
AdPauli = amplitude_damping_pauli(p)
assert np.allclose(kraus2pauli_liouville(AdKraus), AdPauli)
assert np.allclose(kraus2pauli_liouville(H), HADPauli)
def test_kraus2superop():
p = np.random.rand()
AdKraus = amplitude_damping_kraus(p)
AdSuper = amplitude_damping_super(p)
np.testing.assert_array_almost_equal_nulp(kraus2superop(AdKraus), AdSuper)
# test application of super operator is the same as application of Kraus ops
ONE_STATE_VEC = vec(ONE_STATE)
np.testing.assert_array_almost_equal_nulp(unvec(np.matmul(kraus2superop(AdKrausOps),
ONE_STATE_VEC)), rho_out)
assert np.allclose(kraus2superop(H), HADSuper)
assert np.allclose(kraus2superop(IZKraus), IZSuper)
    # Below here tests non-square Kraus operators
    # In this example the Kraus operator is M_0 = I \otimes <0| where <0| = (1,0)
Idd = np.asarray([[1, 0], [0, 1]])
M0 = np.kron(Idd, np.asarray([[1, 0]]))
attempt = kraus2superop(M0)
answer = np.kron(M0.conj(), M0)
assert np.allclose(answer, attempt)
def test_kraus2choi():
p = np.random.rand()
AdKraus = amplitude_damping_kraus(p)
AdChoi = amplitude_damping_choi(p)
assert np.allclose(kraus2choi(AdKraus), AdChoi)
assert np.allclose(kraus2choi(H), HADChoi)
def test_chi2pauli_liouville():
p = np.random.rand()
AdChi = amplitude_damping_chi(p)
AdPauli = amplitude_damping_pauli(p)
assert np.allclose(AdPauli, chi2pauli_liouville(AdChi))
assert np.allclose(HADPauli, chi2pauli_liouville(HADChi))
def test_basis_transform_p_to_c():
xz_pauli_basis = np.zeros((16, 1))
xz_pauli_basis[7] = [1.]
assert np.allclose(unvec(pauli2computational_basis_matrix(4) @ xz_pauli_basis), np.kron(X, Z))
def test_basis_transform_c_to_p():
xz_pauli_basis = np.zeros((16, 1))
xz_pauli_basis[7] = [1.]
assert np.allclose(computational2pauli_basis_matrix(4) @ vec(np.kron(X, Z)), xz_pauli_basis)
def test_pl_to_choi():
for i, pauli in enumerate(n_qubit_pauli_basis(2)):
pl = kraus2pauli_liouville(pauli[1])
choi = kraus2choi(pauli[1])
assert np.allclose(choi, pauli_liouville2choi(pl))
pl = kraus2pauli_liouville(H)
choi = kraus2choi(H)
assert np.allclose(choi, pauli_liouville2choi(pl))
def test_superop_to_kraus():
assert np.allclose(superop2kraus(IZSuper), IZKraus)
p = np.random.rand()
AdSuper = amplitude_damping_super(p)
AdKraus = amplitude_damping_kraus(p)
kraus_ops = superop2kraus(AdSuper)
# the order of the Kraus ops matters
# TODO: fix the sign problem in Kraus operators
assert np.allclose([np.abs(kraus_ops[1]), np.abs(kraus_ops[0])], AdKraus)
def test_superop_to_choi():
for i, pauli in enumerate(n_qubit_pauli_basis(2)):
superop = kraus2superop(pauli[1])
choi = kraus2choi(pauli[1])
assert np.allclose(choi, superop2choi(superop))
p = np.random.rand()
AdSuper = amplitude_damping_super(p)
AdChoi = amplitude_damping_choi(p)
assert np.allclose(AdChoi, superop2choi(AdSuper))
superop = kraus2superop(H)
choi = kraus2choi(H)
assert np.allclose(choi, superop2choi(superop))
def test_superop_to_pl():
p = np.random.rand()
AdSuper = amplitude_damping_super(p)
AdPauli = amplitude_damping_pauli(p)
assert np.allclose(AdPauli, superop2pauli_liouville(AdSuper))
AdKraus = amplitude_damping_kraus(p)
superop = kraus2superop(AdKraus)
pauli = kraus2pauli_liouville(AdKraus)
assert np.allclose(pauli, superop2pauli_liouville(superop))
def test_pauli_liouville_to_superop():
p = np.random.rand()
AdSuper = amplitude_damping_super(p)
AdPauli = amplitude_damping_pauli(p)
assert np.allclose(AdSuper, pauli_liouville2superop(AdPauli))
AdKraus = amplitude_damping_kraus(p)
superop = kraus2superop(AdKraus)
pauli = kraus2pauli_liouville(AdKraus)
assert np.allclose(superop, pauli_liouville2superop(pauli))
def test_choi_to_kraus():
for i, pauli in enumerate(n_qubit_pauli_basis(2)):
choi = kraus2choi(pauli[1])
kraus = choi2kraus(choi)
assert np.allclose(choi, kraus2choi(kraus))
id_choi = np.array([[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]])
assert np.allclose(kraus2choi(choi2kraus(id_choi)), id_choi)
for kraus in choi2kraus(id_choi):
assert np.allclose(abs(kraus), np.eye(2)) or np.allclose(kraus, np.zeros((2, 2)))
def test_choi_to_super():
p = np.random.rand()
AdSuper = amplitude_damping_super(p)
AdChoi = amplitude_damping_choi(p)
assert np.allclose(AdSuper, choi2superop(AdChoi))
def test_choi_pl_bijectivity():
assert np.allclose(choi2superop(choi2superop(np.eye(4))), np.eye(4))
assert np.allclose(superop2choi(superop2choi(np.eye(4))), np.eye(4))
h_choi = kraus2choi(H)
h_superop = kraus2superop(H)
assert np.allclose(choi2superop(choi2superop(h_choi)), h_choi)
assert np.allclose(superop2choi(superop2choi(h_superop)), h_superop)
|
[
"numpy.abs",
"numpy.eye",
"numpy.allclose",
"numpy.sqrt",
"numpy.random.rand",
"numpy.asarray",
"numpy.diag",
"numpy.kron",
"numpy.array",
"numpy.zeros",
"numpy.matmul"
] |
[((3245, 3310), 'numpy.diag', 'np.diag', (['[1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1]'], {}), '([1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1])\n', (3252, 3310), True, 'import numpy as np\n'), ((3365, 3393), 'numpy.asarray', 'np.asarray', (['[[0, 0], [0, 1]]'], {}), '([[0, 0], [0, 1]])\n', (3375, 3393), True, 'import numpy as np\n'), ((3407, 3435), 'numpy.asarray', 'np.asarray', (['[[1, 0], [0, 0]]'], {}), '([[1, 0], [0, 0]])\n', (3417, 3435), True, 'import numpy as np\n'), ((979, 993), 'numpy.sqrt', 'np.sqrt', (['(1 - p)'], {}), '(1 - p)\n', (986, 993), True, 'import numpy as np\n'), ((1007, 1092), 'numpy.asarray', 'np.asarray', (['[[1, 0, 0, 0], [0, poly1, 0, 0], [0, 0, poly1, 0], [p, 0, 0, 1 - p]]'], {}), '([[1, 0, 0, 0], [0, poly1, 0, 0], [0, 0, poly1, 0], [p, 0, 0, 1 - p]]\n )\n', (1017, 1092), True, 'import numpy as np\n'), ((1227, 1241), 'numpy.sqrt', 'np.sqrt', (['(1 - p)'], {}), '(1 - p)\n', (1234, 1241), True, 'import numpy as np\n'), ((1255, 1340), 'numpy.asarray', 'np.asarray', (['[[1, 0, 0, p], [0, poly1, 0, 0], [0, 0, poly1, 0], [0, 0, 0, 1 - p]]'], {}), '([[1, 0, 0, p], [0, poly1, 0, 0], [0, 0, poly1, 0], [0, 0, 0, 1 - p]]\n )\n', (1265, 1340), True, 'import numpy as np\n'), ((1474, 1488), 'numpy.sqrt', 'np.sqrt', (['(1 - p)'], {}), '(1 - p)\n', (1481, 1488), True, 'import numpy as np\n'), ((1503, 1588), 'numpy.asarray', 'np.asarray', (['[[1, 0, 0, poly1], [0, 0, 0, 0], [0, 0, p, 0], [poly1, 0, 0, 1 - p]]'], {}), '([[1, 0, 0, poly1], [0, 0, 0, 0], [0, 0, p, 0], [poly1, 0, 0, 1 - p]]\n )\n', (1513, 1588), True, 'import numpy as np\n'), ((1698, 1766), 'numpy.asarray', 'np.asarray', (['[[0, 0, 0, 0], [0, 1, 0, 1], [0, 0, 0, 0], [0, 1, 0, 1]]'], {}), '([[0, 0, 0, 0], [0, 1, 0, 1], [0, 0, 0, 0], [0, 1, 0, 1]])\n', (1708, 1766), True, 'import numpy as np\n'), ((1866, 1935), 'numpy.asarray', 'np.asarray', (['[[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, -1, 0], [0, 1, 0, 0]]'], {}), '([[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, -1, 0], [0, 1, 0, 0]])\n', (1876, 1935), True, 'import numpy as np\n'), ((2041, 2115), 'numpy.asarray', 'np.asarray', (['[[1, 1, 1, 1], [1, -1, 1, -1], [1, 1, -1, -1], [1, -1, -1, 1]]'], {}), '([[1, 1, 1, 1], [1, -1, 1, -1], [1, 1, -1, -1], [1, -1, -1, 1]])\n', (2051, 2115), True, 'import numpy as np\n'), ((2220, 2294), 'numpy.asarray', 'np.asarray', (['[[1, 1, 1, -1], [1, 1, 1, -1], [1, 1, 1, -1], [-1, -1, -1, 1]]'], {}), '([[1, 1, 1, -1], [1, 1, 1, -1], [1, 1, 1, -1], [-1, -1, -1, 1]])\n', (2230, 2294), True, 'import numpy as np\n'), ((2487, 2562), 'numpy.asarray', 'np.asarray', (['[[1 - p, 0, 0, 0], [0, px, 0, 0], [0, 0, py, 0], [0, 0, 0, pz]]'], {}), '([[1 - p, 0, 0, 0], [0, px, 0, 0], [0, 0, py, 0], [0, 0, 0, pz]])\n', (2497, 2562), True, 'import numpy as np\n'), ((2924, 3012), 'numpy.asarray', 'np.asarray', (['[[poly1, 0, 0, 0], [0, poly2, 0, 0], [0, 0, poly2, 0], [0, 0, 0, poly3]]'], {}), '([[poly1, 0, 0, 0], [0, poly2, 0, 0], [0, 0, poly2, 0], [0, 0, 0,\n poly3]])\n', (2934, 3012), True, 'import numpy as np\n'), ((3845, 3873), 'numpy.asarray', 'np.asarray', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (3855, 3873), True, 'import numpy as np\n'), ((3882, 3916), 'numpy.asarray', 'np.asarray', (['[[1, 2, 5], [3, 4, 6]]'], {}), '([[1, 2, 5], [3, 4, 6]])\n', (3892, 3916), True, 'import numpy as np\n'), ((4103, 4131), 'numpy.asarray', 'np.asarray', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (4113, 4131), True, 'import numpy as np\n'), ((4140, 4185), 'numpy.asarray', 'np.asarray', (['[[1, 2, 3], [4, 5, 6], [7, 8, 9]]'], {}), '([[1, 2, 
3], [4, 5, 6], [7, 8, 9]])\n', (4150, 4185), True, 'import numpy as np\n'), ((4376, 4392), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4390, 4392), True, 'import numpy as np\n'), ((4696, 4712), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4710, 4712), True, 'import numpy as np\n'), ((4950, 4966), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4964, 4966), True, 'import numpy as np\n'), ((5208, 5224), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (5222, 5224), True, 'import numpy as np\n'), ((5929, 5957), 'numpy.asarray', 'np.asarray', (['[[1, 0], [0, 1]]'], {}), '([[1, 0], [0, 1]])\n', (5939, 5957), True, 'import numpy as np\n'), ((6081, 6109), 'numpy.allclose', 'np.allclose', (['answer', 'attempt'], {}), '(answer, attempt)\n', (6092, 6109), True, 'import numpy as np\n'), ((6143, 6159), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (6157, 6159), True, 'import numpy as np\n'), ((6381, 6397), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (6395, 6397), True, 'import numpy as np\n'), ((6656, 6673), 'numpy.zeros', 'np.zeros', (['(16, 1)'], {}), '((16, 1))\n', (6664, 6673), True, 'import numpy as np\n'), ((6860, 6877), 'numpy.zeros', 'np.zeros', (['(16, 1)'], {}), '((16, 1))\n', (6868, 6877), True, 'import numpy as np\n'), ((7434, 7450), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (7448, 7450), True, 'import numpy as np\n'), ((7971, 7987), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (7985, 7987), True, 'import numpy as np\n'), ((8266, 8282), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (8280, 8282), True, 'import numpy as np\n'), ((8665, 8681), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (8679, 8681), True, 'import numpy as np\n'), ((9233, 9299), 'numpy.array', 'np.array', (['[[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]]'], {}), '([[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]])\n', (9241, 9299), True, 'import numpy as np\n'), ((9529, 9545), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (9543, 9545), True, 'import numpy as np\n'), ((731, 826), 'numpy.asarray', 'np.asarray', (['[[poly1, 0, 0, p], [0, p, -1.0j * p, 0], [0, 1.0j * p, p, 0], [p, 0, 0, poly2]]'], {}), '([[poly1, 0, 0, p], [0, p, -1.0j * p, 0], [0, 1.0j * p, p, 0], [p,\n 0, 0, poly2]])\n', (741, 826), True, 'import numpy as np\n'), ((3653, 3688), 'numpy.matmul', 'np.matmul', (['AdKrausOps[0]', 'ONE_STATE'], {}), '(AdKrausOps[0], ONE_STATE)\n', (3662, 3688), True, 'import numpy as np\n'), ((3748, 3783), 'numpy.matmul', 'np.matmul', (['AdKrausOps[1]', 'ONE_STATE'], {}), '(AdKrausOps[1], ONE_STATE)\n', (3757, 3783), True, 'import numpy as np\n'), ((3951, 3981), 'numpy.array', 'np.array', (['[[1], [3], [2], [4]]'], {}), '([[1], [3], [2], [4]])\n', (3959, 3981), True, 'import numpy as np\n'), ((4025, 4065), 'numpy.array', 'np.array', (['[[1], [3], [2], [4], [5], [6]]'], {}), '([[1], [3], [2], [4], [5], [6]])\n', (4033, 4065), True, 'import numpy as np\n'), ((4608, 4617), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (4614, 4617), True, 'import numpy as np\n'), ((5980, 6000), 'numpy.asarray', 'np.asarray', (['[[1, 0]]'], {}), '([[1, 0]])\n', (5990, 6000), True, 'import numpy as np\n'), ((6787, 6800), 'numpy.kron', 'np.kron', (['X', 'Z'], {}), '(X, Z)\n', (6794, 6800), True, 'import numpy as np\n'), ((9776, 9785), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (9782, 9785), True, 'import numpy as np\n'), ((9849, 9858), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (9855, 9858), True, 
'import numpy as np\n'), ((651, 665), 'numpy.sqrt', 'np.sqrt', (['(1 - p)'], {}), '(1 - p)\n', (658, 665), True, 'import numpy as np\n'), ((690, 704), 'numpy.sqrt', 'np.sqrt', (['(1 - p)'], {}), '(1 - p)\n', (697, 704), True, 'import numpy as np\n'), ((7690, 7710), 'numpy.abs', 'np.abs', (['kraus_ops[1]'], {}), '(kraus_ops[1])\n', (7696, 7710), True, 'import numpy as np\n'), ((7712, 7732), 'numpy.abs', 'np.abs', (['kraus_ops[0]'], {}), '(kraus_ops[0])\n', (7718, 7732), True, 'import numpy as np\n'), ((514, 528), 'numpy.sqrt', 'np.sqrt', (['(1 - p)'], {}), '(1 - p)\n', (521, 528), True, 'import numpy as np\n'), ((558, 568), 'numpy.sqrt', 'np.sqrt', (['p'], {}), '(p)\n', (565, 568), True, 'import numpy as np\n'), ((6972, 6985), 'numpy.kron', 'np.kron', (['X', 'Z'], {}), '(X, Z)\n', (6979, 6985), True, 'import numpy as np\n'), ((9442, 9451), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (9448, 9451), True, 'import numpy as np\n'), ((9475, 9491), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (9483, 9491), True, 'import numpy as np\n'), ((9763, 9772), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (9769, 9772), True, 'import numpy as np\n'), ((9836, 9845), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (9842, 9845), True, 'import numpy as np\n'), ((2824, 2838), 'numpy.sqrt', 'np.sqrt', (['(1 - p)'], {}), '(1 - p)\n', (2831, 2838), True, 'import numpy as np\n'), ((2887, 2901), 'numpy.sqrt', 'np.sqrt', (['(1 - p)'], {}), '(1 - p)\n', (2894, 2901), True, 'import numpy as np\n')]
|
import numpy as np
import imageio
from PoissonTemperature import FiniteDifferenceMatrixConstruction
def ind_sub_conversion(img, ind2sub_fn, sub2ind_fn):
rows, cols = img.shape[:2]
num = rows*cols
arange = np.arange(rows*cols, dtype=np.int32)
ind2sub = np.empty((num, 2), dtype=np.int32)
ind2sub[:, 0] = np.floor(arange/cols)
ind2sub[:, 1] = np.remainder(arange, cols)
sub2ind = arange.reshape((rows, cols))
np.save(ind2sub_fn, ind2sub)
np.save(sub2ind_fn, sub2ind)
def pie(FDMC, mask, background, foreground):
    # Poisson image editing: pixels flagged as unknown by the mask are solved for so that their
    # Laplacian matches the foreground, with boundary values supplied by the background.
    Lap, Lap_Solver_Array, Rhs, is_unknown, _, _ = \
        FDMC.laplacian_matrix_construction(mask.ravel())
bg = background.reshape((-1, 3))
fg = foreground.reshape((-1, 3))
result = bg.copy()
lap = Lap.dot(fg[is_unknown, :])
lap_rhs = Rhs.dot(fg)
lap_unknown = lap - lap_rhs
poisson_sol = Lap_Solver_Array[0](lap_unknown+Rhs.dot(bg))
result[is_unknown, :] = poisson_sol
result = result.reshape(background.shape)
result[result < 0] = 0.0
result[result > 1] = 1.0
return (result*255).astype(np.uint8)
if __name__ == '__main__':
folder = './data/pie/'
mask = imageio.imread(folder+'mask.png')[:, :, 0].astype(np.float32)
background = imageio.imread(folder+'mona.png')[:, :, :3]/255
foreground = imageio.imread(folder+'gine.png')[:, :, :3]/255
mask[mask > 0] = np.nan
ind2sub_fn = folder+'ind2sub.npy'
sub2ind_fn = folder+'sub2ind.npy'
ind_sub_conversion(mask, ind2sub_fn, sub2ind_fn)
FDMC = FiniteDifferenceMatrixConstruction(ind2sub_fn, sub2ind_fn)
    result = pie(FDMC, mask, background, foreground)
imageio.imwrite(folder+'result.png', result)
|
[
"numpy.arange",
"imageio.imwrite",
"numpy.floor",
"PoissonTemperature.FiniteDifferenceMatrixConstruction",
"numpy.remainder",
"numpy.empty",
"imageio.imread",
"numpy.save"
] |
[((219, 257), 'numpy.arange', 'np.arange', (['(rows * cols)'], {'dtype': 'np.int32'}), '(rows * cols, dtype=np.int32)\n', (228, 257), True, 'import numpy as np\n'), ((270, 304), 'numpy.empty', 'np.empty', (['(num, 2)'], {'dtype': 'np.int32'}), '((num, 2), dtype=np.int32)\n', (278, 304), True, 'import numpy as np\n'), ((325, 348), 'numpy.floor', 'np.floor', (['(arange / cols)'], {}), '(arange / cols)\n', (333, 348), True, 'import numpy as np\n'), ((367, 393), 'numpy.remainder', 'np.remainder', (['arange', 'cols'], {}), '(arange, cols)\n', (379, 393), True, 'import numpy as np\n'), ((442, 470), 'numpy.save', 'np.save', (['ind2sub_fn', 'ind2sub'], {}), '(ind2sub_fn, ind2sub)\n', (449, 470), True, 'import numpy as np\n'), ((475, 503), 'numpy.save', 'np.save', (['sub2ind_fn', 'sub2ind'], {}), '(sub2ind_fn, sub2ind)\n', (482, 503), True, 'import numpy as np\n'), ((1525, 1583), 'PoissonTemperature.FiniteDifferenceMatrixConstruction', 'FiniteDifferenceMatrixConstruction', (['ind2sub_fn', 'sub2ind_fn'], {}), '(ind2sub_fn, sub2ind_fn)\n', (1559, 1583), False, 'from PoissonTemperature import FiniteDifferenceMatrixConstruction\n'), ((1635, 1681), 'imageio.imwrite', 'imageio.imwrite', (["(folder + 'result.png')", 'result'], {}), "(folder + 'result.png', result)\n", (1650, 1681), False, 'import imageio\n'), ((1242, 1277), 'imageio.imread', 'imageio.imread', (["(folder + 'mona.png')"], {}), "(folder + 'mona.png')\n", (1256, 1277), False, 'import imageio\n'), ((1307, 1342), 'imageio.imread', 'imageio.imread', (["(folder + 'gine.png')"], {}), "(folder + 'gine.png')\n", (1321, 1342), False, 'import imageio\n'), ((1163, 1198), 'imageio.imread', 'imageio.imread', (["(folder + 'mask.png')"], {}), "(folder + 'mask.png')\n", (1177, 1198), False, 'import imageio\n')]
|
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
def convert_to_sqft(value):
    # 'total_sqft' entries are either a single number or a range such as "2100 - 2850";
    # a range is replaced by its midpoint and anything unparseable becomes NaN
    tokens = value.split(' - ')
    if len(tokens) == 2:
        return (float(tokens[0]) + float(tokens[1])) / 2
    try:
        return float(tokens[0])
    except Exception:
        return np.nan
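# Illustrative example (not part of the original script): convert_to_sqft("2100 - 2850")
# returns the midpoint 2475.0, while convert_to_sqft("1200") returns 1200.0.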
def convert_to_num(num):
tokens = str(num).split(' ')
return float(tokens[0])
def train_model(X, Y):
regression = LinearRegression()
regression.fit(X, Y)
return regression
def get_training_data():
dataframe = pd.read_csv("./Bengaluru_House_Data.csv")
df = dataframe.drop(columns=["area_type", "balcony", "society", "availability"], axis='columns')
df['total_sqft'] = df['total_sqft'].apply(convert_to_sqft)
df['size'] = df['size'].apply(convert_to_num)
locations = pd.get_dummies(df["location"])
df_merge = pd.concat([df.drop(columns=["location"]), locations], axis='columns')
df_merge = df_merge.drop(columns=["Unnamed: 9"], axis='columns')
df_merge = df_merge.dropna()
X = df_merge.drop(['price'], axis='columns')
Y = df_merge['price']
return X, Y
def predict_price(regression, X, location, bhk, total_sqft, bath):
    # guard against unknown locations: np.where returns an empty index array in that case
    matches = np.where(X.columns == location)[0]
    location_index = matches[0] if matches.size > 0 else -1
x = np.zeros(len(X.columns))
x[0] = bhk
x[1] = total_sqft
x[2] = bath
if location_index >= 0:
x[location_index] = 1
return regression.predict([x])[0]
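# Minimal usage sketch (illustrative only; assumes the CSV above is available and that
# 'some_location' is one of the one-hot location columns produced by get_training_data):
# X, Y = get_training_data()
# regression = train_model(X, Y)
# price = predict_price(regression, X, 'some_location', bhk=2, total_sqft=1000, bath=2)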
|
[
"pandas.get_dummies",
"numpy.where",
"sklearn.linear_model.LinearRegression",
"pandas.read_csv"
] |
[((441, 459), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (457, 459), False, 'from sklearn.linear_model import LinearRegression\n'), ((549, 590), 'pandas.read_csv', 'pd.read_csv', (['"""./Bengaluru_House_Data.csv"""'], {}), "('./Bengaluru_House_Data.csv')\n", (560, 590), True, 'import pandas as pd\n'), ((821, 851), 'pandas.get_dummies', 'pd.get_dummies', (["df['location']"], {}), "(df['location'])\n", (835, 851), True, 'import pandas as pd\n'), ((1219, 1250), 'numpy.where', 'np.where', (['(X.columns == location)'], {}), '(X.columns == location)\n', (1227, 1250), True, 'import numpy as np\n')]
|
import numpy as np
import numpy.testing as npt
import slippy
import slippy.core as core
"""
If you add a material, you need to add the properties it will be tested with to the material_parameters dict.
The key should be the name of the class (whatever it is declared as after the class keyword).
The value should be a tuple of dicts:
The first dict in the tuple will be unpacked to instantiate the class,
The second will be used with the displacement from loads method,
The third will be used with the loads from displacement method to ensure that the methods are inverses of each other.
If there is a limit to the applicability of the displacements from loads method (such as for a perfectly plastic
material), the _max_load keyword should be set in the second dict; a commented hypothetical example is given below
the material_parameters dict.
For more complex behaviour please also implement your own tests.
"""
material_parameters = {
'Elastic': ({'name': 'steel_5', 'properties': {'E': 200e9, 'v': 0.3}},
{'grid_spacing': 0.01, 'simple': True},
{'grid_spacing': 0.01, 'simple': True, 'tol': 1e-9}),
'Rigid': ({}, {}, {})
}
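# Hypothetical illustration of the convention described in the docstring above (this is not a
# real slippy material; the name and values are made up):
# 'PerfectlyPlastic': ({'name': 'pp_1', 'properties': {'E': 200e9, 'v': 0.3, 'yield_stress': 1e9}},
#                     {'grid_spacing': 0.01, '_max_load': 1e9},
#                     {'grid_spacing': 0.01, 'tol': 1e-9}),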
exceptions = [core.Rigid]
def test_materials_basic():
    # for every registered influence-matrix material, check that test parameters are specified
    # and that the displacement-from-loads and loads-from-displacement methods are inverses
for material in core.materials._IMMaterial._subclass_registry:
if material in exceptions:
continue
try:
mat_params = material_parameters[material.material_type]
except KeyError:
raise AssertionError(f"Material test parameters are not specified, for material {material.material_type}")
mat_instance = material(**mat_params[0])
max_load = mat_params[1].pop('_max_load', 1)
np.random.seed(0)
loads = np.random.rand(16, 16) * max_load
# check that the loads and displacement functions are inverse of each other
for direction in {'x', 'y', 'z'}:
load_in_direction = {direction: loads}
displacement = mat_instance.displacement_from_surface_loads(load_in_direction, **mat_params[1])
set_disp = displacement[direction]
loads_calc = mat_instance.loads_from_surface_displacement(displacements={direction: set_disp},
**mat_params[2])
npt.assert_allclose(loads, slippy.asnumpy(loads_calc[direction]), atol=max_load * 0.02)
def test_elastic_coupled():
mat = core.Elastic('steel_6', {'E': 200e9, 'v': 0.3})
np.random.seed(0)
loads1 = np.random.rand(16, 16)
loads2 = np.random.rand(16, 16)
directions = 'xyzx'
for i in range(3):
dir_1 = directions[i]
dir_2 = directions[i+1]
loads_in_direction = {dir_1: loads1, dir_2: loads2}
displacement = mat.displacement_from_surface_loads(loads_in_direction, grid_spacing=0.01, simple=True)
loads_calc = mat.loads_from_surface_displacement(displacements=displacement,
grid_spacing=0.01, simple=True)
for direction in [dir_1, dir_2]:
npt.assert_allclose(loads_in_direction[direction], slippy.asnumpy(loads_calc[direction]), atol=0.02)
displacement = mat.displacement_from_surface_loads(loads_in_direction, grid_spacing=0.01, simple=False)
loads_calc = mat.loads_from_surface_displacement(displacements=displacement,
grid_spacing=0.01, simple=False)
for direction in [dir_1, dir_2]:
npt.assert_allclose(loads_in_direction[direction], slippy.asnumpy(loads_calc[direction]), atol=0.02)
|
[
"slippy.asnumpy",
"slippy.core.Elastic",
"numpy.random.rand",
"numpy.random.seed"
] |
[((2428, 2484), 'slippy.core.Elastic', 'core.Elastic', (['"""steel_6"""', "{'E': 200000000000.0, 'v': 0.3}"], {}), "('steel_6', {'E': 200000000000.0, 'v': 0.3})\n", (2440, 2484), True, 'import slippy.core as core\n'), ((2480, 2497), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2494, 2497), True, 'import numpy as np\n'), ((2512, 2534), 'numpy.random.rand', 'np.random.rand', (['(16)', '(16)'], {}), '(16, 16)\n', (2526, 2534), True, 'import numpy as np\n'), ((2548, 2570), 'numpy.random.rand', 'np.random.rand', (['(16)', '(16)'], {}), '(16, 16)\n', (2562, 2570), True, 'import numpy as np\n'), ((1689, 1706), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1703, 1706), True, 'import numpy as np\n'), ((1724, 1746), 'numpy.random.rand', 'np.random.rand', (['(16)', '(16)'], {}), '(16, 16)\n', (1738, 1746), True, 'import numpy as np\n'), ((2327, 2364), 'slippy.asnumpy', 'slippy.asnumpy', (['loads_calc[direction]'], {}), '(loads_calc[direction])\n', (2341, 2364), False, 'import slippy\n'), ((3131, 3168), 'slippy.asnumpy', 'slippy.asnumpy', (['loads_calc[direction]'], {}), '(loads_calc[direction])\n', (3145, 3168), False, 'import slippy\n'), ((3573, 3610), 'slippy.asnumpy', 'slippy.asnumpy', (['loads_calc[direction]'], {}), '(loads_calc[direction])\n', (3587, 3610), False, 'import slippy\n')]
|
# Copyright 2020-2021 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pyglet
import numpy as np
import sklearn.preprocessing
class Joint_extractor:
def __init__(self, num_of_joints=18):
self.num_of_joints = num_of_joints
self.start_points = []
self.end_points = []
for j in range(18):
self.start_points.append([])
self.end_points.append([])
def compute_rays(self, cv_kps, image_width, image_height):
pmat = (pyglet.gl.GLdouble * 16)()
mvmat = (pyglet.gl.GLdouble * 16)()
view = (pyglet.gl.GLint * 4)()
pyglet.gl.glGetDoublev(pyglet.gl.GL_MODELVIEW_MATRIX, mvmat)
pyglet.gl.glGetDoublev(pyglet.gl.GL_PROJECTION_MATRIX, pmat)
pyglet.gl.glGetIntegerv(pyglet.gl.GL_VIEWPORT, view)
if cv_kps.size != 0:
for i, cv_kp in enumerate(cv_kps):
                if cv_kp[0] != -1 and cv_kp[1] != -1:  # skip keypoints that were not detected
start_x = pyglet.gl.GLdouble()
start_y = pyglet.gl.GLdouble()
start_z = pyglet.gl.GLdouble()
end_x = pyglet.gl.GLdouble()
end_y = pyglet.gl.GLdouble()
end_z = pyglet.gl.GLdouble()
pyglet.gl.gluUnProject(cv_kp[0], image_height - cv_kp[1], 0, mvmat, pmat, view, start_x,
start_y, start_z)
pyglet.gl.gluUnProject(cv_kp[0], image_height - cv_kp[1], 1, mvmat, pmat, view, end_x, end_y,
end_z)
self.start_points[i].append(np.asarray([start_x.value, start_y.value, start_z.value]))
self.end_points[i].append(np.asarray([end_x.value, end_y.value, end_z.value]))
@property
def compute_3D_positions(self):
for i in range(self.num_of_joints):
if len(self.start_points[i]) == 0 or len(self.end_points[i]) == 0:
print("Failed to estimate the position of the joints...")
return [[], []]
points_3D = []
dists_3D = []
inds_sorted = None
for i in range(self.num_of_joints):
d = 100
first_time = True
while d > 0.05:
if first_time:
s = np.asarray(self.start_points[i])
e = np.asarray(self.end_points[i])
else:
s = s[inds_sorted[:-1]]
e = e[inds_sorted[:-1]]
v = e - s
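                # Added explanatory note: the block below finds, in the least-squares sense, the
                # single 3D point closest to all rays (origin s, direction n) for this joint by
                # assembling the normal equations S @ p = C, then measures each ray's distance to
                # the solution so that the worst outlier ray can be dropped on later iterations.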
ni = sklearn.preprocessing.normalize(v, norm="l2")
nx = ni[:, 0]
ny = ni[:, 1]
nz = ni[:, 2]
sxx = np.sum(nx * nx - 1)
syy = np.sum(ny * ny - 1)
szz = np.sum(nz * nz - 1)
sxy = np.sum(nx * ny)
sxz = np.sum(nx * nz)
syz = np.sum(ny * nz)
S = np.asarray([np.asarray([sxx, sxy, sxz]), np.asarray([sxy, syy, syz]), np.asarray([sxz, syz, szz])])
cx = np.sum(s[:, 0] * (nx * nx - 1) + s[:, 1] * (nx * ny) + s[:, 2] * (nx * nz))
cy = np.sum(s[:, 0] * (nx * ny) + s[:, 1] * (ny * ny - 1) + s[:, 2] * (ny * nz))
cz = np.sum(s[:, 0] * (nx * nz) + s[:, 1] * (ny * nz) + s[:, 2] * (nz * nz - 1))
C = np.asarray([cx, cy, cz])
p_intersect = np.linalg.inv(np.asarray(S)).dot(C)
N = s.shape[0]
distances = np.zeros(N, dtype=np.float32)
for j in range(N):
ui = ((p_intersect - s[j, :]).dot(np.transpose(v[j, :]))) / (v[j, :].dot(v[j, :]))
distances[j] = np.linalg.norm(p_intersect - s[j, :] - ui * v[j, :])
# for i=1:N %http://mathworld.wolfram.com/Point-LineDistance3-Dimensional.html:
# distances(i) = norm(cross(p_intersect-PA(i,:),p_intersect-PB(i,:))) / norm(Si(i,:));
inds_sorted = np.argsort(distances)
d = distances[inds_sorted[-1]]
first_time = False
points_3D.append(p_intersect)
dists_3D.append(distances)
points_3D = np.asarray(points_3D, dtype=np.float32)
dists_3D = np.asarray(dists_3D, dtype=object)
return points_3D, dists_3D
|
[
"pyglet.gl.glGetDoublev",
"numpy.asarray",
"numpy.argsort",
"numpy.sum",
"numpy.zeros",
"pyglet.gl.glGetIntegerv",
"numpy.linalg.norm",
"numpy.transpose",
"pyglet.gl.GLdouble",
"pyglet.gl.gluUnProject"
] |
[((1131, 1191), 'pyglet.gl.glGetDoublev', 'pyglet.gl.glGetDoublev', (['pyglet.gl.GL_MODELVIEW_MATRIX', 'mvmat'], {}), '(pyglet.gl.GL_MODELVIEW_MATRIX, mvmat)\n', (1153, 1191), False, 'import pyglet\n'), ((1200, 1260), 'pyglet.gl.glGetDoublev', 'pyglet.gl.glGetDoublev', (['pyglet.gl.GL_PROJECTION_MATRIX', 'pmat'], {}), '(pyglet.gl.GL_PROJECTION_MATRIX, pmat)\n', (1222, 1260), False, 'import pyglet\n'), ((1269, 1321), 'pyglet.gl.glGetIntegerv', 'pyglet.gl.glGetIntegerv', (['pyglet.gl.GL_VIEWPORT', 'view'], {}), '(pyglet.gl.GL_VIEWPORT, view)\n', (1292, 1321), False, 'import pyglet\n'), ((4722, 4761), 'numpy.asarray', 'np.asarray', (['points_3D'], {'dtype': 'np.float32'}), '(points_3D, dtype=np.float32)\n', (4732, 4761), True, 'import numpy as np\n'), ((4781, 4815), 'numpy.asarray', 'np.asarray', (['dists_3D'], {'dtype': 'object'}), '(dists_3D, dtype=object)\n', (4791, 4815), True, 'import numpy as np\n'), ((3225, 3244), 'numpy.sum', 'np.sum', (['(nx * nx - 1)'], {}), '(nx * nx - 1)\n', (3231, 3244), True, 'import numpy as np\n'), ((3267, 3286), 'numpy.sum', 'np.sum', (['(ny * ny - 1)'], {}), '(ny * ny - 1)\n', (3273, 3286), True, 'import numpy as np\n'), ((3309, 3328), 'numpy.sum', 'np.sum', (['(nz * nz - 1)'], {}), '(nz * nz - 1)\n', (3315, 3328), True, 'import numpy as np\n'), ((3351, 3366), 'numpy.sum', 'np.sum', (['(nx * ny)'], {}), '(nx * ny)\n', (3357, 3366), True, 'import numpy as np\n'), ((3389, 3404), 'numpy.sum', 'np.sum', (['(nx * nz)'], {}), '(nx * nz)\n', (3395, 3404), True, 'import numpy as np\n'), ((3427, 3442), 'numpy.sum', 'np.sum', (['(ny * nz)'], {}), '(ny * nz)\n', (3433, 3442), True, 'import numpy as np\n'), ((3584, 3659), 'numpy.sum', 'np.sum', (['(s[:, 0] * (nx * nx - 1) + s[:, 1] * (nx * ny) + s[:, 2] * (nx * nz))'], {}), '(s[:, 0] * (nx * nx - 1) + s[:, 1] * (nx * ny) + s[:, 2] * (nx * nz))\n', (3590, 3659), True, 'import numpy as np\n'), ((3681, 3756), 'numpy.sum', 'np.sum', (['(s[:, 0] * (nx * ny) + s[:, 1] * (ny * ny - 1) + s[:, 2] * (ny * nz))'], {}), '(s[:, 0] * (nx * ny) + s[:, 1] * (ny * ny - 1) + s[:, 2] * (ny * nz))\n', (3687, 3756), True, 'import numpy as np\n'), ((3778, 3853), 'numpy.sum', 'np.sum', (['(s[:, 0] * (nx * nz) + s[:, 1] * (ny * nz) + s[:, 2] * (nz * nz - 1))'], {}), '(s[:, 0] * (nx * nz) + s[:, 1] * (ny * nz) + s[:, 2] * (nz * nz - 1))\n', (3784, 3853), True, 'import numpy as np\n'), ((3874, 3898), 'numpy.asarray', 'np.asarray', (['[cx, cy, cz]'], {}), '([cx, cy, cz])\n', (3884, 3898), True, 'import numpy as np\n'), ((4024, 4053), 'numpy.zeros', 'np.zeros', (['N'], {'dtype': 'np.float32'}), '(N, dtype=np.float32)\n', (4032, 4053), True, 'import numpy as np\n'), ((4517, 4538), 'numpy.argsort', 'np.argsort', (['distances'], {}), '(distances)\n', (4527, 4538), True, 'import numpy as np\n'), ((1483, 1503), 'pyglet.gl.GLdouble', 'pyglet.gl.GLdouble', ([], {}), '()\n', (1501, 1503), False, 'import pyglet\n'), ((1534, 1554), 'pyglet.gl.GLdouble', 'pyglet.gl.GLdouble', ([], {}), '()\n', (1552, 1554), False, 'import pyglet\n'), ((1585, 1605), 'pyglet.gl.GLdouble', 'pyglet.gl.GLdouble', ([], {}), '()\n', (1603, 1605), False, 'import pyglet\n'), ((1634, 1654), 'pyglet.gl.GLdouble', 'pyglet.gl.GLdouble', ([], {}), '()\n', (1652, 1654), False, 'import pyglet\n'), ((1683, 1703), 'pyglet.gl.GLdouble', 'pyglet.gl.GLdouble', ([], {}), '()\n', (1701, 1703), False, 'import pyglet\n'), ((1732, 1752), 'pyglet.gl.GLdouble', 'pyglet.gl.GLdouble', ([], {}), '()\n', (1750, 1752), False, 'import pyglet\n'), ((1773, 1883), 'pyglet.gl.gluUnProject', 
'pyglet.gl.gluUnProject', (['cv_kp[0]', '(image_height - cv_kp[1])', '(0)', 'mvmat', 'pmat', 'view', 'start_x', 'start_y', 'start_z'], {}), '(cv_kp[0], image_height - cv_kp[1], 0, mvmat, pmat,\n view, start_x, start_y, start_z)\n', (1795, 1883), False, 'import pyglet\n'), ((1943, 2047), 'pyglet.gl.gluUnProject', 'pyglet.gl.gluUnProject', (['cv_kp[0]', '(image_height - cv_kp[1])', '(1)', 'mvmat', 'pmat', 'view', 'end_x', 'end_y', 'end_z'], {}), '(cv_kp[0], image_height - cv_kp[1], 1, mvmat, pmat,\n view, end_x, end_y, end_z)\n', (1965, 2047), False, 'import pyglet\n'), ((2822, 2854), 'numpy.asarray', 'np.asarray', (['self.start_points[i]'], {}), '(self.start_points[i])\n', (2832, 2854), True, 'import numpy as np\n'), ((2879, 2909), 'numpy.asarray', 'np.asarray', (['self.end_points[i]'], {}), '(self.end_points[i])\n', (2889, 2909), True, 'import numpy as np\n'), ((4227, 4279), 'numpy.linalg.norm', 'np.linalg.norm', (['(p_intersect - s[j, :] - ui * v[j, :])'], {}), '(p_intersect - s[j, :] - ui * v[j, :])\n', (4241, 4279), True, 'import numpy as np\n'), ((2135, 2192), 'numpy.asarray', 'np.asarray', (['[start_x.value, start_y.value, start_z.value]'], {}), '([start_x.value, start_y.value, start_z.value])\n', (2145, 2192), True, 'import numpy as np\n'), ((2240, 2291), 'numpy.asarray', 'np.asarray', (['[end_x.value, end_y.value, end_z.value]'], {}), '([end_x.value, end_y.value, end_z.value])\n', (2250, 2291), True, 'import numpy as np\n'), ((3475, 3502), 'numpy.asarray', 'np.asarray', (['[sxx, sxy, sxz]'], {}), '([sxx, sxy, sxz])\n', (3485, 3502), True, 'import numpy as np\n'), ((3504, 3531), 'numpy.asarray', 'np.asarray', (['[sxy, syy, syz]'], {}), '([sxy, syy, syz])\n', (3514, 3531), True, 'import numpy as np\n'), ((3533, 3560), 'numpy.asarray', 'np.asarray', (['[sxz, syz, szz]'], {}), '([sxz, syz, szz])\n', (3543, 3560), True, 'import numpy as np\n'), ((3943, 3956), 'numpy.asarray', 'np.asarray', (['S'], {}), '(S)\n', (3953, 3956), True, 'import numpy as np\n'), ((4143, 4164), 'numpy.transpose', 'np.transpose', (['v[j, :]'], {}), '(v[j, :])\n', (4155, 4164), True, 'import numpy as np\n')]
|
from tensorflow.keras.models import Model
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
import cv2
import pandas as pd
from tensorflow.keras.models import load_model
import os
#---------------------------------------------------------------------------------
# get data
#---------------------------------------------------------------------------------
def data(input_channel, i, val_save_dir, test_save_dir):
    ### load the evaluation data (val/test/exval) based on the number of input channels;
    ### note: run_type, pro_data_dir and exval_save_dir are read from module-level globals
if run_type == 'val':
if input_channel == 1:
fn = 'val_arr_1ch.npy'
elif input_channel == 3:
fn = 'val_arr_3ch.npy'
data = np.load(os.path.join(pro_data_dir, fn))
df = pd.read_csv(os.path.join(val_save_dir, 'val_pred_df.csv'))
elif run_type == 'test':
if input_channel == 1:
fn = 'test_arr_1ch.npy'
elif input_channel == 3:
fn = 'test_arr_3ch.npy'
data = np.load(os.path.join(pro_data_dir, fn))
df = pd.read_csv(os.path.join(test_save_dir, 'test_pred_df.csv'))
elif run_type == 'exval':
if input_channel == 1:
fn = 'exval_arr_1ch.npy'
elif input_channel == 3:
fn = 'exval_arr_3ch.npy'
data = np.load(os.path.join(pro_data_dir, fn))
df = pd.read_csv(os.path.join(exval_save_dir, 'exval_pred_df.csv'))
### load label
y_true = df['label']
y_pred_class = df['y_pred_class']
y_pred = df['y_pred']
ID = df['fn']
### find the ith image to show grad-cam map
img = data[i, :, :, :]
img = img.reshape((1, 192, 192, 3))
label = y_true[i]
pred_index = y_pred_class[i]
y_pred = y_pred[i]
ID = ID[i]
return img, label, pred_index, y_pred, ID
#------------------------------------------------------------------------------------
# find last conv layer
#-----------------------------------------------------------------------------------
def find_target_layer(model, saved_model):
# find the final conv layer by looping layers in reverse order
for layer in reversed(model.layers):
# check to see if the layer has a 4D output
if len(layer.output_shape) == 4:
return layer.name
raise ValueError("Could not find 4D layer. Cannot apply GradCAM.")
#----------------------------------------------------------------------------------
# calculate gradient class actiavtion map
#----------------------------------------------------------------------------------
def compute_heatmap(model, saved_model, image, pred_index, last_conv_layer):
"""
construct our gradient model by supplying (1) the inputs
to our pre-trained model, (2) the output of the (presumably)
final 4D layer in the network, and (3) the output of the
softmax activations from the model
"""
gradModel = Model(
inputs=[model.inputs],
outputs=[model.get_layer(last_conv_layer).output, model.output]
)
# record operations for automatic differentiation
with tf.GradientTape() as tape:
"""
cast the image tensor to a float-32 data type, pass the
image through the gradient model, and grab the loss
associated with the specific class index
"""
print(pred_index)
inputs = tf.cast(image, tf.float32)
print(image.shape)
last_conv_layer_output, preds = gradModel(inputs)
print(preds)
print(preds.shape)
# class_channel = preds[:, pred_index]
class_channel = preds
# use automatic differentiation to compute the gradients
grads = tape.gradient(class_channel, last_conv_layer_output)
"""
This is a vector where each entry is the mean intensity of the gradient
over a specific feature map channel
"""
pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))
"""
We multiply each channel in the feature map array
by "how important this channel is" with regard to the top predicted class
then sum all the channels to obtain the heatmap class activation
"""
last_conv_layer_output = last_conv_layer_output[0]
heatmap = last_conv_layer_output @ pooled_grads[..., tf.newaxis]
heatmap = tf.squeeze(heatmap)
# For visualization purpose, we will also normalize the heatmap between 0 & 1
heatmap = tf.maximum(heatmap, 0) / tf.math.reduce_max(heatmap)
heatmap = heatmap.numpy()
return heatmap
#------------------------------------------------------------------------------------
# save gradcam heat map
#-----------------------------------------------------------------------------------
def save_gradcam(image, heatmap, val_gradcam_dir, test_gradcam_dir, alpha, i):
# print('heatmap:', heatmap.shape)
# Rescale heatmap to a range 0-255
heatmap = np.uint8(255 * heatmap)
# Use jet colormap to colorize heatmap
jet = cm.get_cmap("jet")
# Use RGB values of the colormap
jet_colors = jet(np.arange(256))[:, :3]
jet_heatmap = jet_colors[heatmap]
# resize heatmap
jet_heatmap = keras.preprocessing.image.array_to_img(jet_heatmap)
jet_heatmap0 = jet_heatmap.resize(re_size)
jet_heatmap1 = keras.preprocessing.image.img_to_array(jet_heatmap0)
# print('jet_heatmap:', jet_heatmap1.shape)
# resize background CT image
img = image.reshape((192, 192, 3))
img = keras.preprocessing.image.array_to_img(img)
img0 = img.resize(re_size)
img1 = keras.preprocessing.image.img_to_array(img0)
# print('img shape:', img1.shape)
# Superimpose the heatmap on original image
superimposed_img = jet_heatmap1 * alpha + img1
superimposed_img = keras.preprocessing.image.array_to_img(superimposed_img)
# Save the superimposed image
if run_type == 'val':
save_dir = val_gradcam_dir
elif run_type == 'test':
save_dir = test_gradcam_dir
elif run_type == 'exval':
save_dir = exval_gradcam_dir
fn1 = str(conv_n) + '_' + str(i) + '_' + 'gradcam.png'
fn2 = str(conv_n) + '_' + str(i) + '_' + 'heatmap.png'
fn3 = str(conv_n) + '_' + str(i) + '_' + 'heatmap_raw.png'
fn4 = str(i) + '_' + 'CT.png'
superimposed_img.save(os.path.join(save_dir, fn1))
# jet_heatmap0.save(os.path.join(save_dir, fn2))
# jet_heatmap.save(os.path.join(save_dir, fn3))
# img0.save(os.path.join(save_dir, fn4))
if __name__ == '__main__':
train_img_dir = '/media/bhkann/HN_RES1/HN_CONTRAST/train_img_dir'
val_save_dir = '/mnt/aertslab/USERS/Zezhong/constrast_detection/val'
test_save_dir = '/mnt/aertslab/USERS/Zezhong/constrast_detection/test'
exval_save_dir = '/mnt/aertslab/USERS/Zezhong/constrast_detection/exval'
val_gradcam_dir = '/mnt/aertslab/USERS/Zezhong/constrast_detection/val/gradcam'
test_gradcam_dir = '/mnt/aertslab/USERS/Zezhong/constrast_detection/test/gradcam'
exval_gradcam_dir = '/mnt/aertslab/USERS/Zezhong/constrast_detection/test/gradcam'
pro_data_dir = '/home/bhkann/zezhong/git_repo/IV-Contrast-CNN-Project/pro_data'
model_dir = '/mnt/aertslab/USERS/Zezhong/contrast_detection/model'
input_channel = 3
re_size = (192, 192)
i = 72
crop = True
alpha = 0.9
saved_model = 'ResNet_2021_07_18_06_28_40'
show_network = False
conv_n = 'conv5'
run_type = 'val'
#---------------------------------------------------------
# run main function
#--------------------------------------------------------
if run_type == 'val':
save_dir = val_save_dir
elif run_type == 'test':
save_dir = test_save_dir
## load model and find conv layers
model = load_model(os.path.join(model_dir, saved_model))
# model.summary()
list_i = [100, 105, 110, 115, 120, 125]
for i in list_i:
image, label, pred_index, y_pred, ID = data(
input_channel=input_channel,
i=i,
val_save_dir=val_save_dir,
test_save_dir=test_save_dir
)
conv_list = ['conv2', 'conv3', 'conv4', 'conv5']
conv_list = ['conv4']
for conv_n in conv_list:
if conv_n == 'conv2':
last_conv_layer = 'conv2_block3_1_conv'
elif conv_n == 'conv3':
last_conv_layer = 'conv3_block4_1_conv'
elif conv_n == 'conv4':
last_conv_layer = 'conv4_block6_1_conv'
elif conv_n == 'conv5':
last_conv_layer = 'conv5_block3_out'
heatmap = compute_heatmap(
model=model,
saved_model=saved_model,
image=image,
pred_index=pred_index,
last_conv_layer=last_conv_layer
)
save_gradcam(
image=image,
heatmap=heatmap,
val_gradcam_dir=val_gradcam_dir,
test_gradcam_dir=test_gradcam_dir,
alpha=alpha,
i=i
)
print('label:', label)
print('ID:', ID)
print('y_pred:', y_pred)
print('prediction:', pred_index)
print('conv layer:', conv_n)
# if last_conv_layer is None:
# last_conv_layer = find_target_layer(
# model=model,
# saved_model=saved_model
# )
# print(last_conv_layer)
#
# if show_network == True:
# for idx in range(len(model.layers)):
# print(model.get_layer(index = idx).name)
# # compute the guided gradients
# castConvOutputs = tf.cast(convOutputs > 0, "float32")
# castGrads = tf.cast(grads > 0, "float32")
# guidedGrads = castConvOutputs * castGrads * grads
# # the convolution and guided gradients have a batch dimension
# # (which we don't need) so let's grab the volume itself and
# # discard the batch
# convOutputs = convOutputs[0]
# guidedGrads = guidedGrads[0]
#
# # compute the average of the gradient values, and using them
# # as weights, compute the ponderation of the filters with
# # respect to the weights
# weights = tf.reduce_mean(guidedGrads, axis=(0, 1))
# cam = tf.reduce_sum(tf.multiply(weights, convOutputs), axis=-1)
#
# # grab the spatial dimensions of the input image and resize
# # the output class activation map to match the input image
# # dimensions
## (w, h) = (image.shape[2], image.shape[1])
## heatmap = cv2.resize(cam.numpy(), (w, h))
# heatmap = cv2.resize(heatmap.numpy(), (64, 64))
# # normalize the heatmap such that all values lie in the range
## # [0, 1], scale the resulting values to the range [0, 255],
## # and then convert to an unsigned 8-bit integer
# numer = heatmap - np.min(heatmap)
# eps = 1e-8
# denom = (heatmap.max() - heatmap.min()) + eps
# heatmap = numer / denom
# heatmap = (heatmap * 255).astype("uint8")
# colormap=cv2.COLORMAP_VIRIDIS
# heatmap = cv2.applyColorMap(heatmap, colormap)
# print('heatmap shape:', heatmap.shape)
## img = image[:, :, :, 0]
## print('img shape:', img.shape)
# img = image.reshape((64, 64, 3))
# print(img.shape)
# output = cv2.addWeighted(img, 0.5, heatmap, 0.5, 0)
#
#
# return heatmap, output
|
[
"numpy.uint8",
"os.path.join",
"tensorflow.keras.preprocessing.image.array_to_img",
"tensorflow.GradientTape",
"tensorflow.squeeze",
"tensorflow.math.reduce_max",
"tensorflow.maximum",
"tensorflow.reduce_mean",
"tensorflow.keras.preprocessing.image.img_to_array",
"tensorflow.cast",
"matplotlib.cm.get_cmap",
"numpy.arange"
] |
[((3903, 3940), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['grads'], {'axis': '(0, 1, 2)'}), '(grads, axis=(0, 1, 2))\n', (3917, 3940), True, 'import tensorflow as tf\n'), ((4296, 4315), 'tensorflow.squeeze', 'tf.squeeze', (['heatmap'], {}), '(heatmap)\n', (4306, 4315), True, 'import tensorflow as tf\n'), ((4888, 4911), 'numpy.uint8', 'np.uint8', (['(255 * heatmap)'], {}), '(255 * heatmap)\n', (4896, 4911), True, 'import numpy as np\n'), ((4965, 4983), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""jet"""'], {}), "('jet')\n", (4976, 4983), True, 'import matplotlib.cm as cm\n'), ((5143, 5194), 'tensorflow.keras.preprocessing.image.array_to_img', 'keras.preprocessing.image.array_to_img', (['jet_heatmap'], {}), '(jet_heatmap)\n', (5181, 5194), False, 'from tensorflow import keras\n'), ((5261, 5313), 'tensorflow.keras.preprocessing.image.img_to_array', 'keras.preprocessing.image.img_to_array', (['jet_heatmap0'], {}), '(jet_heatmap0)\n', (5299, 5313), False, 'from tensorflow import keras\n'), ((5444, 5487), 'tensorflow.keras.preprocessing.image.array_to_img', 'keras.preprocessing.image.array_to_img', (['img'], {}), '(img)\n', (5482, 5487), False, 'from tensorflow import keras\n'), ((5530, 5574), 'tensorflow.keras.preprocessing.image.img_to_array', 'keras.preprocessing.image.img_to_array', (['img0'], {}), '(img0)\n', (5568, 5574), False, 'from tensorflow import keras\n'), ((5735, 5791), 'tensorflow.keras.preprocessing.image.array_to_img', 'keras.preprocessing.image.array_to_img', (['superimposed_img'], {}), '(superimposed_img)\n', (5773, 5791), False, 'from tensorflow import keras\n'), ((3126, 3143), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (3141, 3143), True, 'import tensorflow as tf\n'), ((3393, 3419), 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (3400, 3419), True, 'import tensorflow as tf\n'), ((4413, 4435), 'tensorflow.maximum', 'tf.maximum', (['heatmap', '(0)'], {}), '(heatmap, 0)\n', (4423, 4435), True, 'import tensorflow as tf\n'), ((4438, 4465), 'tensorflow.math.reduce_max', 'tf.math.reduce_max', (['heatmap'], {}), '(heatmap)\n', (4456, 4465), True, 'import tensorflow as tf\n'), ((6262, 6289), 'os.path.join', 'os.path.join', (['save_dir', 'fn1'], {}), '(save_dir, fn1)\n', (6274, 6289), False, 'import os\n'), ((7719, 7755), 'os.path.join', 'os.path.join', (['model_dir', 'saved_model'], {}), '(model_dir, saved_model)\n', (7731, 7755), False, 'import os\n'), ((771, 801), 'os.path.join', 'os.path.join', (['pro_data_dir', 'fn'], {}), '(pro_data_dir, fn)\n', (783, 801), False, 'import os\n'), ((828, 873), 'os.path.join', 'os.path.join', (['val_save_dir', '"""val_pred_df.csv"""'], {}), "(val_save_dir, 'val_pred_df.csv')\n", (840, 873), False, 'import os\n'), ((5042, 5056), 'numpy.arange', 'np.arange', (['(256)'], {}), '(256)\n', (5051, 5056), True, 'import numpy as np\n'), ((1063, 1093), 'os.path.join', 'os.path.join', (['pro_data_dir', 'fn'], {}), '(pro_data_dir, fn)\n', (1075, 1093), False, 'import os\n'), ((1120, 1167), 'os.path.join', 'os.path.join', (['test_save_dir', '"""test_pred_df.csv"""'], {}), "(test_save_dir, 'test_pred_df.csv')\n", (1132, 1167), False, 'import os\n'), ((1360, 1390), 'os.path.join', 'os.path.join', (['pro_data_dir', 'fn'], {}), '(pro_data_dir, fn)\n', (1372, 1390), False, 'import os\n'), ((1417, 1466), 'os.path.join', 'os.path.join', (['exval_save_dir', '"""exval_pred_df.csv"""'], {}), "(exval_save_dir, 'exval_pred_df.csv')\n", (1429, 1466), False, 'import os\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Common utility functions
Created on Sun May 27 16:37:42 2018
@author: chen
"""
import math
import cv2
import os
from imutils import paths
import numpy as np
import scipy.ndimage
def rotate_cooridinate(cooridinate_og,rotate_angle,rotate_center):
"""
calculate the coordinates after rotation
"""
rotate_angle = rotate_angle*(math.pi/180)
rotated_x = (cooridinate_og[0]-rotate_center[0])*math.cos(rotate_angle)\
-(cooridinate_og[1]-rotate_center[1])*math.sin(rotate_angle)+rotate_center[0]
rotated_y = (cooridinate_og[0]-rotate_center[0])*math.sin(rotate_angle)\
+(cooridinate_og[1]-rotate_center[1])*math.cos(rotate_angle)+rotate_center[1]
rotated_coordinate = np.array([rotated_x,rotated_y])
rotated_coordinate = np.round(rotated_coordinate).astype(np.int)
return rotated_coordinate
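# Illustrative example (added, not in the original module): rotating the point (1, 0) by 90
# degrees about the origin, rotate_cooridinate(np.array([1, 0]), 90, np.array([0, 0])),
# returns the integer coordinates [0, 1].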
def mkdir(path):
"""
    create a new folder if it does not already exist
"""
folder = os.path.exists(path)
if not folder:
os.makedirs(path)
def load_data(path):
"""
load data from specified folder
"""
print("[INFO] loading images...")
imgs = []
# grab the image paths and randomly shuffle them
imagePaths = sorted(list(paths.list_images(path)))
for imagePath in imagePaths:
# load the image, pre-process it, and store it in the data list
image = cv2.imread(imagePath,cv2.IMREAD_GRAYSCALE)
imgs.append(image)
return imgs
def sigmoid(x):
return 1 / (1 + math.exp(-x))
def normfun(x,sigma):
"""
    probability density function of a normal distribution with fixed mean mu = 45
"""
mu = 45
pdf = np.exp(-((x - mu)**2)/(2*sigma**2)) / (sigma * np.sqrt(2*np.pi))
return pdf
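# Sanity check (added, not in the original module): at x == mu the density reduces to
# 1 / (sigma * sqrt(2 * pi)), so normfun(45, 10) should return roughly 0.0399.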
def calc_box(box,x_gap,y_gap,rotate_angle,center):
"""
calculate the size of the required surrounding environment for doorway segmentation
box: four corners' coordinates of doorway
    x_gap: remaining space in the vertical direction
    y_gap: remaining space in the horizontal direction
"""
door_box = np.array([box[0][::-1]+[y_gap,x_gap],box[1][::-1]+[y_gap,-x_gap],
box[2][::-1]-[y_gap,x_gap],box[3][::-1]-[y_gap,-x_gap]])
rotated_box = []
for coordinate in door_box:
box_coordinate = rotate_cooridinate(coordinate,rotate_angle,center)
rotated_box.append(box_coordinate)
rotated_box = np.array(rotated_box)
box = [np.min(rotated_box[:,0]),np.min(rotated_box[:,1]),np.max(rotated_box[:,0]),np.max(rotated_box[:,1])]
return box
def calc_IoU(candidateBound, groundTruthBounds):
"""
calculate the intersection over union
"""
cx1 = candidateBound[0]
cy1 = candidateBound[1]
cx2 = candidateBound[2]
cy2 = candidateBound[3]
gx1 = groundTruthBounds[:,0]
gy1 = groundTruthBounds[:,1]
gx2 = groundTruthBounds[:,2]
gy2 = groundTruthBounds[:,3]
carea = (cx2 - cx1) * (cy2 - cy1)
garea = (gx2 - gx1) * (gy2 - gy1)
x1 = np.maximum(cx1, gx1)
y1 = np.maximum(cy1, gy1)
x2 = np.minimum(cx2, gx2)
y2 = np.minimum(cy2, gy2)
w = np.maximum(0, x2 - x1)
h = np.maximum(0, y2 - y1)
area = w * h
ious = area / (carea + garea - area)
return ious
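# Worked example (added for illustration): calc_IoU([0, 0, 2, 2], np.array([[1, 1, 3, 3]]))
# gives an intersection of 1, a union of 4 + 4 - 1 = 7 and therefore an IoU of 1/7 (about 0.143).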
def overlapp(candidateBound, groundTruthBounds):
"""
    calculate the proportion of each ground-truth box that is covered by the prediction
"""
cx1 = candidateBound[0]
cy1 = candidateBound[1]
cx2 = candidateBound[2]
cy2 = candidateBound[3]
gx1 = groundTruthBounds[:,0]
gy1 = groundTruthBounds[:,1]
gx2 = groundTruthBounds[:,2]
gy2 = groundTruthBounds[:,3]
garea = (gx2 - gx1) * (gy2 - gy1)
x1 = np.maximum(cx1, gx1)
y1 = np.maximum(cy1, gy1)
x2 = np.minimum(cx2, gx2)
y2 = np.minimum(cy2, gy2)
w = np.maximum(0, x2 - x1)
h = np.maximum(0, y2 - y1)
area = w * h
reious = area / garea
return reious
def calc_corner(door_center,door_size,door_depth,side):
"""
calculate the corners' coordinates from the centroid, size and depth of doorway
door_corners_inside is a list of coordinates of corners close to the corridor
door_corners_outside is a list of coordinates of corners close to the room
"""
door_corners_inside = [door_center-np.array([np.int(door_size/2),0]),
door_center+np.array([door_size-np.int(door_size/2),0])]
door_corners_outside = [x-np.array([0,np.power(-1,side)*door_depth[side]])
for x in door_corners_inside]
door_corners_outside = np.array(door_corners_outside)
return door_corners_inside,door_corners_outside
def draw_door(mask,complete_map,door,door_depth,side):
"""
label the doorway on the mask and add some error inside the doorway region
"""
door_size = abs(door[1,0]-door[0,0])
door_area_inside = door+np.array([0,np.power(-1,side)*door_depth[side]])
# label the doorway on the mask
cv2.rectangle(mask,tuple(door[0][::-1]),tuple(door_area_inside[1][::-1]),255,-1)
# add a small point to emulate the error in the doorway region
if door_size>20:
if np.random.randint(4)==0:
if side ==0:
pt_center = [np.random.randint(door[0,0]+4,door[1,0]-3),np.random.randint(door[0,1],door_area_inside[0,1])]
else:
pt_center = [np.random.randint(door[0,0]+3,door[1,0]-2),np.random.randint(door_area_inside[0,1],door[0,1])]
cv2.circle(complete_map,tuple(pt_center[::-1]),np.random.choice([1,2,3]),0,-1)
return door_size
def room_division(room_space,num_room):
"""
assign the lengths of rooms according to the length of corridor and number of rooms
room_space: coordinates of corridor's side
num_room: the number of rooms on one side
rooms: a list of the coordinates belonging to different rooms
    rooms_corners: a list of only the top and bottom coordinates of different rooms
"""
rooms = []
rooms_corners=[]
a = num_room
thickness = np.random.randint(2,5)
length = room_space.shape[0]-(num_room-1)*thickness
start_point = 0
for i in range(num_room-1):
room_size = np.random.randint(length/(a+0.7),length/(a-0.7))
room = room_space[start_point:start_point+room_size,:]
rooms.append(room)
start_point +=room_size+thickness
room = room_space[start_point:,:]
rooms.append(room)
rooms = [room.astype(np.int) for room in rooms]
for x in rooms:
rooms_corner = np.concatenate((x[0,:][np.newaxis,:],x[-1,:][np.newaxis,:]),axis = 0)
rooms_corners.append(rooms_corner)
return rooms,rooms_corners
def calc_gradient(gmap):
"""
    calculate the gradient of the image to find the contour
"""
kernel = np.array([[1,1,1],[1,-8,1],[1,1,1]])
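    # Added note: this is the 8-connected Laplacian kernel; the correlation below responds
    # wherever occupancy values change, which is what marks the contour cells.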
img = gmap.astype(np.int16)
gradient = scipy.ndimage.correlate(img,kernel,mode = 'constant',cval =127)
return gradient
|
[
"numpy.sqrt",
"math.cos",
"numpy.array",
"imutils.paths.list_images",
"math.exp",
"os.path.exists",
"numpy.max",
"numpy.exp",
"numpy.concatenate",
"numpy.min",
"numpy.maximum",
"numpy.round",
"numpy.random.choice",
"numpy.int",
"cv2.imread",
"numpy.minimum",
"os.makedirs",
"numpy.power",
"numpy.random.randint",
"math.sin"
] |
[((774, 806), 'numpy.array', 'np.array', (['[rotated_x, rotated_y]'], {}), '([rotated_x, rotated_y])\n', (782, 806), True, 'import numpy as np\n'), ((988, 1008), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1002, 1008), False, 'import os\n'), ((2068, 2209), 'numpy.array', 'np.array', (['[box[0][::-1] + [y_gap, x_gap], box[1][::-1] + [y_gap, -x_gap], box[2][::-1\n ] - [y_gap, x_gap], box[3][::-1] - [y_gap, -x_gap]]'], {}), '([box[0][::-1] + [y_gap, x_gap], box[1][::-1] + [y_gap, -x_gap], \n box[2][::-1] - [y_gap, x_gap], box[3][::-1] - [y_gap, -x_gap]])\n', (2076, 2209), True, 'import numpy as np\n'), ((2407, 2428), 'numpy.array', 'np.array', (['rotated_box'], {}), '(rotated_box)\n', (2415, 2428), True, 'import numpy as np\n'), ((2998, 3018), 'numpy.maximum', 'np.maximum', (['cx1', 'gx1'], {}), '(cx1, gx1)\n', (3008, 3018), True, 'import numpy as np\n'), ((3028, 3048), 'numpy.maximum', 'np.maximum', (['cy1', 'gy1'], {}), '(cy1, gy1)\n', (3038, 3048), True, 'import numpy as np\n'), ((3058, 3078), 'numpy.minimum', 'np.minimum', (['cx2', 'gx2'], {}), '(cx2, gx2)\n', (3068, 3078), True, 'import numpy as np\n'), ((3088, 3108), 'numpy.minimum', 'np.minimum', (['cy2', 'gy2'], {}), '(cy2, gy2)\n', (3098, 3108), True, 'import numpy as np\n'), ((3117, 3139), 'numpy.maximum', 'np.maximum', (['(0)', '(x2 - x1)'], {}), '(0, x2 - x1)\n', (3127, 3139), True, 'import numpy as np\n'), ((3148, 3170), 'numpy.maximum', 'np.maximum', (['(0)', '(y2 - y1)'], {}), '(0, y2 - y1)\n', (3158, 3170), True, 'import numpy as np\n'), ((3667, 3687), 'numpy.maximum', 'np.maximum', (['cx1', 'gx1'], {}), '(cx1, gx1)\n', (3677, 3687), True, 'import numpy as np\n'), ((3697, 3717), 'numpy.maximum', 'np.maximum', (['cy1', 'gy1'], {}), '(cy1, gy1)\n', (3707, 3717), True, 'import numpy as np\n'), ((3727, 3747), 'numpy.minimum', 'np.minimum', (['cx2', 'gx2'], {}), '(cx2, gx2)\n', (3737, 3747), True, 'import numpy as np\n'), ((3757, 3777), 'numpy.minimum', 'np.minimum', (['cy2', 'gy2'], {}), '(cy2, gy2)\n', (3767, 3777), True, 'import numpy as np\n'), ((3786, 3808), 'numpy.maximum', 'np.maximum', (['(0)', '(x2 - x1)'], {}), '(0, x2 - x1)\n', (3796, 3808), True, 'import numpy as np\n'), ((3817, 3839), 'numpy.maximum', 'np.maximum', (['(0)', '(y2 - y1)'], {}), '(0, y2 - y1)\n', (3827, 3839), True, 'import numpy as np\n'), ((4545, 4575), 'numpy.array', 'np.array', (['door_corners_outside'], {}), '(door_corners_outside)\n', (4553, 4575), True, 'import numpy as np\n'), ((6006, 6029), 'numpy.random.randint', 'np.random.randint', (['(2)', '(5)'], {}), '(2, 5)\n', (6023, 6029), True, 'import numpy as np\n'), ((6766, 6810), 'numpy.array', 'np.array', (['[[1, 1, 1], [1, -8, 1], [1, 1, 1]]'], {}), '([[1, 1, 1], [1, -8, 1], [1, 1, 1]])\n', (6774, 6810), True, 'import numpy as np\n'), ((1055, 1072), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (1066, 1072), False, 'import os\n'), ((1441, 1484), 'cv2.imread', 'cv2.imread', (['imagePath', 'cv2.IMREAD_GRAYSCALE'], {}), '(imagePath, cv2.IMREAD_GRAYSCALE)\n', (1451, 1484), False, 'import cv2\n'), ((1677, 1718), 'numpy.exp', 'np.exp', (['(-(x - mu) ** 2 / (2 * sigma ** 2))'], {}), '(-(x - mu) ** 2 / (2 * sigma ** 2))\n', (1683, 1718), True, 'import numpy as np\n'), ((2440, 2465), 'numpy.min', 'np.min', (['rotated_box[:, 0]'], {}), '(rotated_box[:, 0])\n', (2446, 2465), True, 'import numpy as np\n'), ((2465, 2490), 'numpy.min', 'np.min', (['rotated_box[:, 1]'], {}), '(rotated_box[:, 1])\n', (2471, 2490), True, 'import numpy as np\n'), ((2490, 2515), 'numpy.max', 
'np.max', (['rotated_box[:, 0]'], {}), '(rotated_box[:, 0])\n', (2496, 2515), True, 'import numpy as np\n'), ((2515, 2540), 'numpy.max', 'np.max', (['rotated_box[:, 1]'], {}), '(rotated_box[:, 1])\n', (2521, 2540), True, 'import numpy as np\n'), ((6157, 6214), 'numpy.random.randint', 'np.random.randint', (['(length / (a + 0.7))', '(length / (a - 0.7))'], {}), '(length / (a + 0.7), length / (a - 0.7))\n', (6174, 6214), True, 'import numpy as np\n'), ((6494, 6567), 'numpy.concatenate', 'np.concatenate', (['(x[0, :][np.newaxis, :], x[-1, :][np.newaxis, :])'], {'axis': '(0)'}), '((x[0, :][np.newaxis, :], x[-1, :][np.newaxis, :]), axis=0)\n', (6508, 6567), True, 'import numpy as np\n'), ((831, 859), 'numpy.round', 'np.round', (['rotated_coordinate'], {}), '(rotated_coordinate)\n', (839, 859), True, 'import numpy as np\n'), ((1293, 1316), 'imutils.paths.list_images', 'paths.list_images', (['path'], {}), '(path)\n', (1310, 1316), False, 'from imutils import paths\n'), ((1562, 1574), 'math.exp', 'math.exp', (['(-x)'], {}), '(-x)\n', (1570, 1574), False, 'import math\n'), ((1724, 1742), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (1731, 1742), True, 'import numpy as np\n'), ((5118, 5138), 'numpy.random.randint', 'np.random.randint', (['(4)'], {}), '(4)\n', (5135, 5138), True, 'import numpy as np\n'), ((460, 482), 'math.cos', 'math.cos', (['rotate_angle'], {}), '(rotate_angle)\n', (468, 482), False, 'import math\n'), ((538, 560), 'math.sin', 'math.sin', (['rotate_angle'], {}), '(rotate_angle)\n', (546, 560), False, 'import math\n'), ((631, 653), 'math.sin', 'math.sin', (['rotate_angle'], {}), '(rotate_angle)\n', (639, 653), False, 'import math\n'), ((709, 731), 'math.cos', 'math.cos', (['rotate_angle'], {}), '(rotate_angle)\n', (717, 731), False, 'import math\n'), ((5493, 5520), 'numpy.random.choice', 'np.random.choice', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (5509, 5520), True, 'import numpy as np\n'), ((4271, 4292), 'numpy.int', 'np.int', (['(door_size / 2)'], {}), '(door_size / 2)\n', (4277, 4292), True, 'import numpy as np\n'), ((4861, 4879), 'numpy.power', 'np.power', (['(-1)', 'side'], {}), '(-1, side)\n', (4869, 4879), True, 'import numpy as np\n'), ((5197, 5246), 'numpy.random.randint', 'np.random.randint', (['(door[0, 0] + 4)', '(door[1, 0] - 3)'], {}), '(door[0, 0] + 4, door[1, 0] - 3)\n', (5214, 5246), True, 'import numpy as np\n'), ((5240, 5293), 'numpy.random.randint', 'np.random.randint', (['door[0, 1]', 'door_area_inside[0, 1]'], {}), '(door[0, 1], door_area_inside[0, 1])\n', (5257, 5293), True, 'import numpy as np\n'), ((5339, 5388), 'numpy.random.randint', 'np.random.randint', (['(door[0, 0] + 3)', '(door[1, 0] - 2)'], {}), '(door[0, 0] + 3, door[1, 0] - 2)\n', (5356, 5388), True, 'import numpy as np\n'), ((5382, 5435), 'numpy.random.randint', 'np.random.randint', (['door_area_inside[0, 1]', 'door[0, 1]'], {}), '(door_area_inside[0, 1], door[0, 1])\n', (5399, 5435), True, 'import numpy as np\n'), ((4355, 4376), 'numpy.int', 'np.int', (['(door_size / 2)'], {}), '(door_size / 2)\n', (4361, 4376), True, 'import numpy as np\n'), ((4422, 4440), 'numpy.power', 'np.power', (['(-1)', 'side'], {}), '(-1, side)\n', (4430, 4440), True, 'import numpy as np\n')]
|
from typing import Dict, Any, Optional, List
import gym
import numpy as np
from collections import defaultdict
from flatland.core.grid.grid4_utils import get_new_position
from flatland.envs.agent_utils import EnvAgent, RailAgentStatus
from flatland.envs.rail_env import RailEnv, RailEnvActions
from envs.flatland.observations.segment_graph import Graph
from envs.flatland.utils.gym_env import StepOutput
def available_actions(env: RailEnv, agent: EnvAgent, allow_noop=False) -> List[int]:
if agent.position is None:
return [0, 1, 0, 1]
else:
possible_transitions = env.rail.get_transitions(*agent.position, agent.direction)
# some actions are always available:
available_acts = [0] * len(RailEnvActions)
available_acts[RailEnvActions.MOVE_FORWARD] = 1
available_acts[RailEnvActions.STOP_MOVING] = 1
if allow_noop:
available_acts[RailEnvActions.DO_NOTHING] = 1
# check if turn left/right are available:
for movement in range(4):
if possible_transitions[movement]:
if movement == (agent.direction + 1) % 4:
available_acts[RailEnvActions.MOVE_RIGHT] = 1
elif movement == (agent.direction - 1) % 4:
available_acts[RailEnvActions.MOVE_LEFT] = 1
return available_acts[1:]
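# Added note: with flatland's standard RailEnvActions ordering the leading DO_NOTHING entry is
# sliced off above, so the returned mask covers [MOVE_LEFT, MOVE_FORWARD, MOVE_RIGHT, STOP_MOVING];
# the early return [0, 1, 0, 1] for agents not yet placed on the grid therefore allows only
# MOVE_FORWARD and STOP_MOVING.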
def potential_deadlock_action_masking(env: RailEnv, potential_deadlock: List) -> List[int]:
    available_acts = [0, 0, 0, 1]
    available_acts[0] = 0 if potential_deadlock[0] != 1 and potential_deadlock[0] != -1 else 1
    available_acts[1] = 0 if potential_deadlock[1] != 1 and potential_deadlock[1] != -1 else 1
    available_acts[2] = 0 if potential_deadlock[2] != 1 and potential_deadlock[2] != -1 else 1
    return available_acts
def priority_dist_action_masking(dist_ind, priority) -> List[int]:
available_actions = [0, 0, 0, 0]
if priority == 0:
return [0, 0, 0, 1]
else:
available_actions[dist_ind] = 1
return available_actions
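# Editor's note: tiny illustrative check, not part of the original source, showing the
# intended behaviour of the helper above: an agent without priority may only stop,
# otherwise only the action selected by `dist_ind` remains available.
assert priority_dist_action_masking(dist_ind=1, priority=0) == [0, 0, 0, 1]
assert priority_dist_action_masking(dist_ind=1, priority=1) == [0, 1, 0, 0]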
class AvailableActionsWrapper(gym.Wrapper):
def __init__(self, env, allow_noop=False, potential_deadlock_masking=False) -> None:
super().__init__(env)
self._allow_noop = allow_noop
self._potential_deadlock_masking = potential_deadlock_masking
self.observation_space = gym.spaces.Dict({
'obs': self.env.observation_space,
'available_actions': gym.spaces.Box(low=0, high=1, shape=(self.action_space.n,), dtype=np.int32)
})
def step(self, action_dict: Dict[int, RailEnvActions]) -> StepOutput:
obs, reward, done, info = self.env.step(action_dict)
return StepOutput(self._transform_obs(obs), reward, done, info)
def reset(self, random_seed: Optional[int] = None) -> Dict[int, Any]:
return self._transform_obs(self.env.reset(random_seed))
def _transform_obs(self, obs):
rail_env = self.unwrapped.rail_env
if not self._potential_deadlock_masking:
return {
agent_id: {
'obs': agent_obs,
'available_actions': np.asarray(
available_actions(rail_env, rail_env.agents[agent_id], self._allow_noop))
} for agent_id, agent_obs in obs.items()
}
else:
return {
agent_id: {
'obs': agent_obs,
'available_actions': np.asarray(
priority_dist_action_masking(agent_obs[0], agent_obs[1]))
} for agent_id, agent_obs in obs.items()
}
def find_all_cells_where_agent_can_choose(rail_env: RailEnv):
switches = []
switches_neighbors = []
directions = list(range(4))
for h in range(rail_env.height):
for w in range(rail_env.width):
pos = (w, h)
is_switch = False
# Check for switch: if there is more than one outgoing transition
for orientation in directions:
possible_transitions = rail_env.rail.get_transitions(*pos, orientation)
num_transitions = np.count_nonzero(possible_transitions)
if num_transitions > 1:
switches.append(pos)
is_switch = True
break
if is_switch:
# Add all neighbouring rails, if pos is a switch
for orientation in directions:
possible_transitions = rail_env.rail.get_transitions(*pos, orientation)
for movement in directions:
if possible_transitions[movement]:
switches_neighbors.append(get_new_position(pos, movement))
decision_cells = switches + switches_neighbors
return tuple(map(set, (switches, switches_neighbors, decision_cells)))
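# Editor's note: minimal usage sketch, not part of the original source. It shows how the
# three sets returned above are typically consumed; `rail_env` is assumed to be an
# existing RailEnv instance and `agent` one of its agents.
def _agent_on_decision_cell(rail_env: RailEnv, agent: EnvAgent) -> bool:
    switches, switches_neighbors, decision_cells = \
        find_all_cells_where_agent_can_choose(rail_env)
    # Agents that have not departed yet (position is None) count as being on a
    # decision cell, mirroring SkipNoChoiceCellsWrapper._on_decision_cell below.
    return agent.position is None or agent.position in decision_cells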
class SkipNoChoiceCellsWrapper(gym.Wrapper):
def __init__(self, env, accumulate_skipped_rewards: bool, discounting: float) -> None:
super().__init__(env)
self._switches = None
self._switches_neighbors = None
self._decision_cells = None
self._accumulate_skipped_rewards = accumulate_skipped_rewards
self._discounting = discounting
self._skipped_rewards = defaultdict(list)
def _on_decision_cell(self, agent: EnvAgent):
return agent.position is None \
or agent.position == agent.initial_position \
or agent.position in self._decision_cells
def _on_switch(self, agent: EnvAgent):
return agent.position in self._switches
def _next_to_switch(self, agent: EnvAgent):
return agent.position in self._switches_neighbors
def step(self, action_dict: Dict[int, RailEnvActions]) -> StepOutput:
o, r, d, i = {}, {}, {}, {}
while len(o) == 0:
obs, reward, done, info = self.env.step(action_dict)
for agent_id, agent_obs in obs.items():
if done[agent_id] or self._on_decision_cell(self.unwrapped.rail_env.agents[agent_id]):
o[agent_id] = agent_obs
r[agent_id] = reward[agent_id]
d[agent_id] = done[agent_id]
i[agent_id] = info[agent_id]
if self._accumulate_skipped_rewards:
discounted_skipped_reward = r[agent_id]
for skipped_reward in reversed(self._skipped_rewards[agent_id]):
discounted_skipped_reward = self._discounting * discounted_skipped_reward + skipped_reward
r[agent_id] = discounted_skipped_reward
self._skipped_rewards[agent_id] = []
elif self._accumulate_skipped_rewards:
self._skipped_rewards[agent_id].append(reward[agent_id])
d['__all__'] = done['__all__']
action_dict = {}
return StepOutput(o, r, d, i)
def reset(self, random_seed: Optional[int] = None) -> Dict[int, Any]:
obs = self.env.reset(random_seed)
self._switches, self._switches_neighbors, self._decision_cells = \
find_all_cells_where_agent_can_choose(self.unwrapped.rail_env)
return obs
class RewardWrapperShortestPathObs(gym.Wrapper):
def __init__(self, env, rewards) -> None:
super().__init__(env)
self._finished_reward = rewards['finished_reward']
self._invalid_action_reward = rewards['invalid_action_reward']
self._not_finished_reward = rewards['not_finished_reward']
self._step_reward = rewards['step_reward']
self._step_shortest_path = rewards['step_shortest_path']
self._step_second_shortest_path = rewards['step_second_shortest_path']
self._deadlock_reward = rewards['deadlock_reward']
self._dont_move_reward = rewards['dont_move_reward']
self._deadlock_avoidance_reward = rewards['deadlock_avoidance_reward']
self._stop_on_switch_reward = rewards['stop_on_switch_reward']
self._stop_potential_deadlock_reward = rewards['stop_potential_deadlock_reward']
self._deadlock_unusable_switch_avoidance_reward = rewards['deadlock_unusable_switch_avoidance']
self._priority_reward = rewards['priority_reward']
self._priority_reward_shortest_path = rewards['priority_reward_shortest_path']
self._priority_reward_alternative_path = rewards['priority_reward_alternative_path']
self._priority_penalty = rewards['priority_penalty']
self._priority_no_path_penalty = rewards['priority_no_path_penalty']
rail_env: RailEnv = self.unwrapped.rail_env
self._prev_dist = {agent.handle: [-1, -1] for agent in rail_env.agents}
self._prev_action_mask = {agent.handle: available_actions(rail_env, agent, False) for agent in rail_env.agents}
self._prev_pos = {agent.handle: Graph.get_virtual_position(agent.handle) for agent in rail_env.agents}
self._prev_potential_deadlock = {agent.handle: (0, 0, 0) for agent in rail_env.agents}
self._prev_on_switch = {agent.handle: 0 for agent in rail_env.agents}
@staticmethod
def reward_function(handle, agent_obs, agent_action, agent_done, agent_status, agent_virtual_pos,
_prev_potential_deadlock, _prev_dist, _prev_action_mask, _prev_pos, _prev_on_switch,
_finished_reward, _invalid_action_reward, _not_finished_reward, _step_reward,
_step_shortest_path, _step_second_shortest_path, _deadlock_reward, _dont_move_reward,
_deadlock_avoidance_reward, _stop_on_switch_reward, _stop_potential_deadlock_reward,
_deadlock_unusable_switch_avoidance_reward, _priority_reward, _priority_reward_shortest_path,
_priority_reward_alternative_path, _priority_penalty, _priority_no_path_penalty):
if agent_done: # done
if agent_status in [RailAgentStatus.DONE, RailAgentStatus.DONE_REMOVED]:
reward = _finished_reward
elif agent_obs[7] == 1:
reward = _deadlock_reward
else:
reward = _not_finished_reward
elif agent_obs[7] == 1: # deadlock
reward = _deadlock_reward
else:
potential_deadlock = [agent_obs[19], agent_obs[20], agent_obs[21]]
available_dirs = sum(1 for d in potential_deadlock if d != -1)
deadlock_dirs = sum(1 for d in potential_deadlock if d == 1)
if agent_action == RailEnvActions.STOP_MOVING:
if agent_obs[30] == 1:
reward = _stop_on_switch_reward
elif agent_obs[36] == 1:
#TODO think about this
reward = _deadlock_unusable_switch_avoidance_reward * 1 / agent_obs[35] if agent_obs[35] >= 1 else _stop_on_switch_reward
# elif (deadlock_dirs / available_dirs) == 1. and agent_action == RailEnvActions.STOP_MOVING:
# reward = _stop_potential_deadlock_reward * 1/agent_obs[35] if agent_obs[35] >= 1 else _stop_on_switch_reward
elif agent_obs[39] == 0:
reward = _priority_reward
else:
reward = _dont_move_reward
elif agent_action in [RailEnvActions.MOVE_LEFT, RailEnvActions.MOVE_RIGHT, RailEnvActions.MOVE_FORWARD]:
deadlock_actions = [idx + 1 for idx, action in enumerate(_prev_potential_deadlock) if action == 1]
unavailable_actions = [idx + 1 for idx, action in enumerate(_prev_potential_deadlock) if action == -1]
if _prev_on_switch == 1 and (agent_action not in deadlock_actions and len(deadlock_actions) > 0) and (
agent_action not in unavailable_actions):
reward = _deadlock_avoidance_reward
elif agent_obs[39] == 1:
if agent_obs[9] < _prev_dist[0] and agent_obs[9] < 5000:
reward = _priority_reward_shortest_path
elif agent_obs[9] < _prev_dist[1] < 5000:
reward = _priority_reward_alternative_path
else:
reward = _priority_no_path_penalty
elif agent_obs[39] == 0:
reward = _priority_penalty
else:
reward = _step_reward
else:
reward = -1
return reward
def step(self, action_dict: Dict[int, RailEnvActions]) -> StepOutput:
rail_env: RailEnv = self.unwrapped.rail_env
for handle in action_dict:
action_dict[handle] += 1
obs, reward, done, info = self.env.step(action_dict)
o, r, d, i = {}, {}, {}, {}
for agent_id, agent_obs in obs.items():
o[agent_id] = obs[agent_id]
d[agent_id] = done[agent_id]
i[agent_id] = info[agent_id]
r[agent_id] = self.reward_function(handle=agent_id,
agent_obs=agent_obs,
agent_action=action_dict[agent_id],
agent_done=done[agent_id],
agent_status=rail_env.agents[agent_id].status,
agent_virtual_pos=Graph.get_virtual_position(agent_id),
_prev_potential_deadlock=self._prev_potential_deadlock[agent_id],
_prev_dist=self._prev_dist[agent_id],
_prev_pos=self._prev_pos[agent_id],
_prev_action_mask=self._prev_action_mask[agent_id],
_prev_on_switch=self._prev_on_switch[agent_id],
_finished_reward=self._finished_reward,
_invalid_action_reward=self._invalid_action_reward,
_not_finished_reward=self._not_finished_reward,
_step_reward=self._step_reward,
_step_shortest_path=self._step_shortest_path,
_step_second_shortest_path=self._step_second_shortest_path,
_deadlock_reward=self._deadlock_reward,
_dont_move_reward=self._dont_move_reward,
_deadlock_avoidance_reward=self._deadlock_avoidance_reward,
_stop_on_switch_reward=self._stop_on_switch_reward,
_stop_potential_deadlock_reward=self._stop_potential_deadlock_reward,
_deadlock_unusable_switch_avoidance_reward=self._deadlock_unusable_switch_avoidance_reward,
_priority_penalty=self._priority_penalty,
_priority_reward=self._priority_reward,
_priority_reward_alternative_path=self._priority_reward_alternative_path,
_priority_reward_shortest_path=self._priority_reward_shortest_path,
_priority_no_path_penalty=self._priority_no_path_penalty
)
# set prev_states to the length of shortest path if you go L, then F, then R (L,F,R). That corresponds to
# features 9, 10, 11 in the feature vector
# print(f"obs: {o}, reward: {r}, prev_dist: {self._prev_dist}")
self._prev_dist[agent_id] = (agent_obs[9], agent_obs[15])
self._prev_action_mask[agent_id] = available_actions(rail_env, rail_env.agents[agent_id], False)
# update potential_deadlock attribute
self._prev_potential_deadlock[agent_id] = (agent_obs[19], agent_obs[20], agent_obs[21])
# update prev_pos
self._prev_pos[agent_id] = Graph.get_virtual_position(agent_id)
self._prev_on_switch[agent_id] = agent_obs[30]
d['__all__'] = done['__all__'] or all(d.values())
return StepOutput(o, r, d, info={agent: {
'max_episode_steps': int(4 * 2 * (
self.rail_env.width + self.rail_env.height + self.rail_env.get_num_agents() / self.num_cities)),
'num_agents': self.rail_env.get_num_agents(),
'agent_done': d[agent] and agent not in self.rail_env.active_agents,
} for agent in o.keys()})
def reset(self, random_seed: Optional[int] = None) -> Dict[int, Any]:
obs = self.env.reset(random_seed=random_seed)
self._prev_dist = {k: (o[9], o[15]) for k, o in obs.items()}
return obs
class RewardWrapper(gym.Wrapper):
def __init__(self, env, rewards) -> None:
super().__init__(env)
# self._finished_reward = rewards['finished_reward']
# self._invalid_action_reward = rewards['invalid_action_reward']
# self._not_finished_reward = rewards['not_finished_reward']
# self._step_reward = rewards['step_reward']
# self._step_shortest_path = rewards['step_shortest_path']
# self._step_second_shortest_path = rewards['step_second_shortest_path']
# self._deadlock_reward = rewards['deadlock_reward']
# self._dont_move_reward = rewards['dont_move_reward']
# self._deadlock_avoidance_reward = rewards['deadlock_avoidance_reward']
# self._stop_on_switch_reward = rewards['stop_on_switch_reward']
# self._stop_potential_deadlock_reward = rewards['stop_potential_deadlock_reward']
# self._deadlock_unusable_switch_avoidance_reward = rewards['deadlock_unusable_switch_avoidance']
# self._priority_reward = rewards['priority_reward']
# self._priority_reward_shortest_path = rewards['priority_reward_shortest_path']
# self._priority_reward_alternative_path = rewards['priority_reward_alternative_path']
# self._priority_penalty = rewards['priority_penalty']
# self._priority_no_path_penalty = rewards['priority_no_path_penalty']
self._finished_reward = rewards['finished_reward']
self._deadlock_reward = rewards['deadlock_reward']
self._step_reward = rewards['step_reward']
self._deadlock_unusable_switch_avoidance_reward = rewards['deadlock_unusable_switch_avoidance']
self._stop_priority_depart = rewards['stop_priority_depart']
self._stop_no_deadlocks_reward = rewards['stop_no_deadlocks_reward']
rail_env: RailEnv = self.unwrapped.rail_env
# self._prev_dist = {}
# self._prev_action_mask = {agent.handle: available_actions(rail_env, agent, False) for agent in rail_env.agents}
# self._prev_pos = {agent.handle: Graph.get_virtual_position(agent.handle) for agent in rail_env.agents}
#
# self._prev_potential_deadlock = {}
# self._prev_on_switch = {}
# self._prev_deadlock_unusable = {}
self._prev_shortest_action = {}
self._prev_priority = {}
def reward_function(self, handle, agent_obs, agent_action, agent_done, agent_status):
if agent_done:
if agent_status in [RailAgentStatus.DONE, RailAgentStatus.DONE_REMOVED]:
reward = self._finished_reward
else:
reward = self._step_reward
elif agent_obs[5] == 1 and agent_action == RailEnvActions.STOP_MOVING:
reward = 0
elif agent_obs[5] == 1 and agent_action != RailEnvActions.STOP_MOVING:
reward = -10
else:
if self._prev_priority[handle] == 0:
if agent_action == RailEnvActions.STOP_MOVING:
reward = 0
else:
reward = -10
else:
#if (agent_action - 1) == np.argmax(self._prev_shortest_action[handle]):
if agent_action != RailEnvActions.STOP_MOVING:
reward = 0
else:
reward = -10
# reward = (1 / agent_obs[4] + self._step_reward) * 0.70
# if agent_action == RailEnvActions.STOP_MOVING:
# if self._prev_priority[handle] == 0 and agent_status == RailAgentStatus.READY_TO_DEPART:
# reward = self._stop_priority_depart
# elif self._prev_deadlock_unusable[handle] == 1:
# reward = self._deadlock_unusable_switch_avoidance_reward
#
# elif 1 not in self._prev_potential_deadlock[handle] and self._prev_deadlock_unusable[handle] == 0 and self._prev_priority[handle] == 1:
# reward = self._stop_no_deadlocks_reward
# if agent_done: # done
# if agent_status in [RailAgentStatus.DONE, RailAgentStatus.DONE_REMOVED]:
# reward = _finished_reward
# elif agent_obs[7] == 1:
# reward = _deadlock_reward
# else:
# reward = _not_finished_reward
#
# elif agent_obs[7] == 1: # deadlock
# reward = _deadlock_reward
#
# else:
# potential_deadlock = [agent_obs[19], agent_obs[20], agent_obs[21]]
# available_dirs = sum(1 for d in potential_deadlock if d != -1)
# deadlock_dirs = sum(1 for d in potential_deadlock if d == 1)
#
# if agent_action == RailEnvActions.STOP_MOVING:
# if agent_obs[30] == 1:
# reward = _stop_on_switch_reward
# elif agent_obs[36] == 1:
# # TODO think about this
# if agent_obs[35] == 1:
# reward = _deadlock_unusable_switch_avoidance_reward
# else:
# r = -_deadlock_unusable_switch_avoidance_reward
# reward = -(r**(1/agent_obs[35])*0.5)
# # elif (deadlock_dirs / available_dirs) == 1. and agent_action == RailEnvActions.STOP_MOVING:
# # reward = _stop_potential_deadlock_reward * 1/agent_obs[35] if agent_obs[35] >= 1 else _stop_on_switch_reward
# elif agent_obs[39] == 0:
# reward = _priority_reward
# else:
# reward = _dont_move_reward
#
# elif agent_action in [RailEnvActions.MOVE_LEFT, RailEnvActions.MOVE_RIGHT, RailEnvActions.MOVE_FORWARD]:
# deadlock_actions = [idx + 1 for idx, action in enumerate(_prev_potential_deadlock) if action == 1]
# unavaliable_actions = [idx + 1 for idx, action in enumerate(_prev_potential_deadlock) if action == -1]
# if _prev_on_switch == 1 and (agent_action not in deadlock_actions and len(deadlock_actions) > 0) and (
# agent_action not in unavaliable_actions):
# reward = _deadlock_avoidance_reward
#
# elif agent_obs[39] == 1:
# if agent_obs[9] < _prev_dist[0] and agent_obs[9] < 5000:
# reward = _priority_reward_shortest_path
# elif agent_obs[9] < _prev_dist[1] < 5000:
# reward = _priority_reward_alternative_path
# else:
# reward = _priority_no_path_penalty
# elif agent_obs[39] == 0:
# reward = _priority_penalty
#
# else:
# reward = _step_reward
#
# else:
# reward = -1
#
# return reward
#
# if agent_done:
# if agent_status in [RailAgentStatus.DONE, RailAgentStatus.DONE_REMOVED]:
# # agent is done and really done -> give finished reward
# reward = _finished_reward
# else:
# # agent is done but not really done -> give not_finished reward
# if agent_obs[7] == 1:
# reward = _deadlock_reward
# else:
# reward = _not_finished_reward
#
# elif agent_obs[7] == 1:
# reward = _deadlock_reward
#
# else:
# if agent_obs[9] < _prev_dist[0] and agent_obs[9] != -1:
# reward = _step_shortest_path
#
# elif agent_obs[15] < _prev_dist[1] and agent_obs[15] != -1:
# reward = _step_second_shortest_path
#
# else:
# reward = _step_reward
#
#
#
# # invalid action reward
# if _prev_action_mask[agent_action-1] == 0:
# reward += _invalid_action_reward
#
# # if agent not moving
# if tuple(_prev_pos) == tuple(agent_virtual_pos):
# reward += _dont_move_reward
#
# # stop on switch
# if agent_obs[30] == 1 and agent_action == RailEnvActions.STOP_MOVING:
# reward += _stop_on_switch_reward
#
# potential_deadlock = [agent_obs[19], agent_obs[20], agent_obs[21]]
# available_dirs = sum(1 for d in potential_deadlock if d != -1)
# deadlock_dirs = sum(1 for d in potential_deadlock if d == 1)
# if (deadlock_dirs / available_dirs) == 1. and agent_action == RailEnvActions.STOP_MOVING:
# reward += _stop_potential_deadlock_reward * 1/agent_obs[35] if agent_obs[35] >= 1 else 0
#
#
# if agent_obs[36] == 1 and agent_action == RailEnvActions.STOP_MOVING:
# reward += _deadlock_unusable_switch_avoidance_reward * 1 / agent_obs[35] if agent_obs[35] >= 1 else 0
#
# # reward if agent avoided deadlock
# if _prev_on_switch == 1:
# deadlock_actions = [idx + 1 for idx, action in enumerate(_prev_potential_deadlock) if action == 1]
# unavaliable_actions = [idx + 1 for idx, action in enumerate(_prev_potential_deadlock) if action == -1]
# if (agent_action not in deadlock_actions and len(deadlock_actions) > 0) and (
# agent_action not in unavaliable_actions) and (agent_action != RailEnvActions.DO_NOTHING) \
# and (agent_action != RailEnvActions.STOP_MOVING):
# reward = _deadlock_avoidance_reward
return reward
def step(self, action_dict: Dict[int, RailEnvActions]) -> StepOutput:
rail_env: RailEnv = self.unwrapped.rail_env
for handle in action_dict:
action_dict[handle] += 1
if action_dict[handle] < 4:
action_dict[handle] = possible_actions_sorted_by_distance(rail_env, handle)[0][0]
obs, reward, done, info = self.env.step(action_dict)
o, r, d, i = {}, {}, {}, {}
for agent_id, agent_obs in obs.items():
o[agent_id] = obs[agent_id]
d[agent_id] = done[agent_id]
i[agent_id] = info[agent_id]
r[agent_id] = self.reward_function(handle=agent_id,
agent_obs=agent_obs,
agent_action=action_dict[agent_id],
agent_done=done[agent_id],
agent_status=rail_env.agents[agent_id].status,
)
# print(f"Agent {agent_id}, obs: {agent_obs}, prev_priority: {self._prev_priority[agent_id]}, prev_dist_action: {self._prev_shortest_action[agent_id]}, reward: {r[agent_id]}, action: {action_dict[agent_id] - 1}")
# set prev_states to the length of shortest path if you go L, then F, then R (L,F,R). That corresponds to
# features 9, 10, 11 in the feature vector
# print(f"obs: {o}, reward: {r}, prev_dist: {self._prev_dist}")
# self._prev_dist[agent_id] = (agent_obs[9], agent_obs[15])
# self._prev_action_mask[agent_id] = available_actions(rail_env, rail_env.agents[agent_id], False)
# update potential_deadlock attribute
# self._prev_potential_deadlock[agent_id] = (agent_obs[10], agent_obs[11], agent_obs[12])
# update prev_pos
# self._prev_pos[agent_id] = Graph.get_virtual_position(agent_id)
# self._prev_on_switch[agent_id] = agent_obs[13]
self._prev_shortest_action[agent_id] = [agent_obs[0], agent_obs[1], agent_obs[2]]
self._prev_priority[agent_id] = agent_obs[3]
# self._prev_deadlock_unusable[agent_id] = agent_obs[19]
d['__all__'] = done['__all__'] or all(d.values())
return StepOutput(o, r, d, info={agent: {
'max_episode_steps': int(4 * 2 * (
self.rail_env.width + self.rail_env.height + self.rail_env.get_num_agents() / self.num_cities)),
'num_agents': self.rail_env.get_num_agents(),
'agent_done': d[agent] and agent not in self.rail_env.active_agents,
} for agent in o.keys()})
def reset(self, random_seed: Optional[int] = None) -> Dict[int, Any]:
obs = self.env.reset(random_seed=random_seed)
# self._prev_dist = {k: (o[9], o[15]) for k, o in obs.items()}
# self._prev_potential_deadlock = {k: (o[10], o[11], o[12]) for k, o in obs.items()}
# self._prev_on_switch = {k: o[13] for k, o in obs.items()}
self._prev_shortest_action = {k: [o[0], o[1], o[2]] for k, o in obs.items()}
self._prev_priority = {k: o[3] for k, o in obs.items()}
# self._prev_deadlock_unusable = {k: o[19] for k, o in obs.items()}
return obs
class SparseRewardWrapper(gym.Wrapper):
def __init__(self, env, finished_reward=1, not_finished_reward=-1) -> None:
super().__init__(env)
self._finished_reward = finished_reward
self._not_finished_reward = not_finished_reward
def step(self, action_dict: Dict[int, RailEnvActions]) -> StepOutput:
rail_env: RailEnv = self.unwrapped.rail_env
for handle in action_dict:
action_dict[handle] += 1
obs, reward, done, info = self.env.step(action_dict)
o, r, d, i = {}, {}, {}, {}
for agent_id, agent_obs in obs.items():
o[agent_id] = obs[agent_id]
d[agent_id] = done[agent_id]
i[agent_id] = info[agent_id]
if done[agent_id]:
if rail_env.agents[agent_id].status in [RailAgentStatus.DONE, RailAgentStatus.DONE_REMOVED]:
# agent is done and really done -> give finished reward
r[agent_id] = self._finished_reward
else:
# agent is done but not really done -> give not_finished reward
r[agent_id] = self._not_finished_reward
else:
r[agent_id] = 0
d['__all__'] = done['__all__'] or all(d.values())
return StepOutput(o, r, d, i)
def reset(self, random_seed: Optional[int] = None) -> Dict[int, Any]:
return self.env.reset(random_seed)
class DeadlockWrapper(gym.Wrapper):
def __init__(self, env, deadlock_reward=-1) -> None:
super().__init__(env)
self._deadlock_reward = deadlock_reward
self._deadlocked_agents = []
def check_deadlock(self): # -> Set[int]:
rail_env: RailEnv = self.unwrapped.rail_env
new_deadlocked_agents = []
for agent in rail_env.agents:
if agent.status == RailAgentStatus.ACTIVE and agent.handle not in self._deadlocked_agents:
position = agent.position
direction = agent.direction
while position is not None:
possible_transitions = rail_env.rail.get_transitions(*position, direction)
num_transitions = np.count_nonzero(possible_transitions)
if num_transitions == 1:
new_direction_me = np.argmax(possible_transitions)
new_cell_me = get_new_position(position, new_direction_me)
opp_agent = rail_env.agent_positions[new_cell_me]
if opp_agent != -1:
opp_position = rail_env.agents[opp_agent].position
opp_direction = rail_env.agents[opp_agent].direction
opp_possible_transitions = rail_env.rail.get_transitions(*opp_position, opp_direction)
opp_num_transitions = np.count_nonzero(opp_possible_transitions)
if opp_num_transitions == 1:
if opp_direction != direction:
self._deadlocked_agents.append(agent.handle)
new_deadlocked_agents.append(agent.handle)
position = None
else:
position = new_cell_me
direction = new_direction_me
else:
position = new_cell_me
direction = new_direction_me
else:
position = None
else:
position = None
return new_deadlocked_agents
def step(self, action_dict: Dict[int, RailEnvActions]) -> StepOutput:
obs, reward, done, info = self.env.step(action_dict)
if self._deadlock_reward != 0:
new_deadlocked_agents = self.check_deadlock()
else:
new_deadlocked_agents = []
o, r, d, i = {}, {}, {}, {}
for agent_id, agent_obs in obs.items():
if agent_id not in self._deadlocked_agents or agent_id in new_deadlocked_agents:
o[agent_id] = obs[agent_id]
d[agent_id] = done[agent_id]
i[agent_id] = info[agent_id]
r[agent_id] = reward[agent_id]
if agent_id in new_deadlocked_agents:
# agent is in deadlocked (and was not before) -> give deadlock reward and set to done
r[agent_id] += self._deadlock_reward
d[agent_id] = True
d['__all__'] = done['__all__'] or all(d.values())
return StepOutput(o, r, d, i)
def reset(self, random_seed: Optional[int] = None) -> Dict[int, Any]:
self._deadlocked_agents = []
return self.env.reset(random_seed)
def possible_actions_sorted_by_distance(env: RailEnv, handle: int):
agent = env.agents[handle]
if agent.status == RailAgentStatus.READY_TO_DEPART:
agent_virtual_position = agent.initial_position
elif agent.status == RailAgentStatus.ACTIVE:
agent_virtual_position = agent.position
elif agent.status == RailAgentStatus.DONE:
agent_virtual_position = agent.target
else:
return None
possible_transitions = env.rail.get_transitions(*agent_virtual_position, agent.direction)
distance_map = env.distance_map.get()[handle]
possible_steps = []
for movement in list(range(4)):
if possible_transitions[movement]:
if movement == agent.direction:
action = RailEnvActions.MOVE_FORWARD
elif movement == (agent.direction + 1) % 4:
action = RailEnvActions.MOVE_RIGHT
elif movement == (agent.direction - 1) % 4:
action = RailEnvActions.MOVE_LEFT
else:
raise ValueError("Wtf, debug this shit.")
distance = distance_map[get_new_position(agent_virtual_position, movement) + (movement,)]
possible_steps.append((action, distance))
possible_steps = sorted(possible_steps, key=lambda step: step[1])
if len(possible_steps) == 1:
return possible_steps * 2
else:
return possible_steps
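# Editor's note: illustrative sketch, not part of the original source. The list returned
# above is sorted by remaining distance, so element [0][0] is the action along the
# shortest path and [1][0] the best alternative (the single entry is duplicated when
# only one transition exists).
def _shortest_path_action(env: RailEnv, handle: int) -> RailEnvActions:
    steps = possible_actions_sorted_by_distance(env, handle)
    # steps is None only for agents that are already done and removed.
    return RailEnvActions.STOP_MOVING if steps is None else steps[0][0]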
class ShortestPathActionWrapper(gym.Wrapper):
def __init__(self, env) -> None:
super().__init__(env)
print("Apply ShortestPathActionWrapper")
self.action_space = gym.spaces.Discrete(n=3) # stop, shortest path, other direction
def step(self, action_dict: Dict[int, RailEnvActions]) -> StepOutput:
rail_env: RailEnv = self.env.unwrapped.rail_env
transformed_action_dict = {}
for agent_id, action in action_dict.items():
if action == 0:
transformed_action_dict[agent_id] = action
else:
assert action in [1, 2]
transformed_action_dict[agent_id] = possible_actions_sorted_by_distance(rail_env, agent_id)[action - 1][
0]
step_output = self.env.step(transformed_action_dict)
return step_output
def reset(self, random_seed: Optional[int] = None) -> Dict[int, Any]:
return self.env.reset(random_seed)
class DeadlockResolutionWrapper(gym.Wrapper):
def __init__(self, env, deadlock_reward=0) -> None:
super().__init__(env)
self._deadlock_reward = deadlock_reward
self._num_swaps = defaultdict(int)
def get_deadlocks(self, agent: EnvAgent, seen: List[int]) -> List[Optional[EnvAgent]]:
# abort if agent already checked
if agent.handle in seen:
# handle circular deadlock
seen.append(agent.handle)
# return
return []
# add agent to seen agents
seen.append(agent.handle)
# get rail environment
rail_env: RailEnv = self.unwrapped.rail_env
# get transitions for agent's position and direction
transitions = rail_env.rail.get_transitions(*agent.position, agent.direction)
num_possible_transitions = np.count_nonzero(transitions)
# initialize list to assign deadlocked agents to directions
deadlocked_agents = [None] * len(transitions)
# check if all possible transitions are blocked
for direction, transition in enumerate(transitions):
# only check transitions > 0 but iterate through all to get direction
if transition > 0:
# get opposite agent in direction of travel if cell is occupied
new_position = get_new_position(agent.position, direction)
i_opp_agent = rail_env.agent_positions[new_position]
if i_opp_agent != -1:
opp_agent = rail_env.agents[i_opp_agent]
# get blocking agents of opposite agent
blocking_agents = self.get_deadlocks(opp_agent, seen)
# add opposite agent to deadlocked agents if blocked by
# checking agent. also add opposite agent if it is part
# of a circular blocking structure.
if agent in blocking_agents or seen[0] == seen[-1]:
deadlocked_agents[direction] = opp_agent
# return deadlocked agents if applicable
num_deadlocked_agents = np.count_nonzero(deadlocked_agents)
if num_deadlocked_agents > 0:
# deadlock has to be resolved only if no transition is possible
if num_deadlocked_agents == num_possible_transitions:
return deadlocked_agents
# workaround for already committed agent inside cell that is blocked by at least one agent
if agent.speed_data['position_fraction'] > 1:
return deadlocked_agents
return []
def step(self, action_dict: Dict[int, RailEnvActions]) -> StepOutput:
obs, reward, done, info = self.env.step(action_dict)
# get rail environment
rail_env: RailEnv = self.unwrapped.rail_env
# check agents that have status ACTIVE for deadlocks; env.active_agents also contains other agents
active_agents = [agent for agent in rail_env.agents if agent.status == RailAgentStatus.ACTIVE]
for agent in active_agents:
deadlocked_agents = self.get_deadlocks(agent, [])
if len(deadlocked_agents) > 0:
# favor transition in front as most natural
d_agent = deadlocked_agents[agent.direction]
# get most likely transition if straight forward is no valid transition
if d_agent is None:
transitions = rail_env.rail.get_transitions(*agent.position, agent.direction)
agent.direction = np.argmax(transitions)
d_agent = deadlocked_agents[agent.direction]
# already committed agent can have only one transition blocked
if d_agent is None:
d_agent = [a for a in deadlocked_agents if a is not None][0]
# swap the deadlocked pair
agent.position, d_agent.position = d_agent.position, agent.position
rail_env.agent_positions[agent.position] = agent.handle
rail_env.agent_positions[d_agent.position] = d_agent.handle
# set direction of blocking agent because of corners
d_agent.direction = (agent.direction + 2) % 4
# position is exact after swap
agent.speed_data['position_fraction'] = 0.0
d_agent.speed_data['position_fraction'] = 0.0
# punish agents for deadlock
reward[agent.handle] += self._deadlock_reward
reward[d_agent.handle] += self._deadlock_reward
# increase swap counter in info dict
self._num_swaps[agent.handle] += 1
self._num_swaps[d_agent.handle] += 1
for i_agent in info:
info[i_agent]['num_swaps'] = self._num_swaps[i_agent]
return obs, reward, done, info
def reset(self, random_seed: Optional[int] = None) -> Dict[int, Any]:
self._num_swaps = defaultdict(int)
return self.env.reset(random_seed)
class FlatlandRenderWrapper(RailEnv, gym.Env):
# reward_range = (-float('inf'), float('inf'))
# spec = None
# # Set these in ALL subclasses
# observation_space = None
def __init__(self, use_renderer=False, *args, **kwargs):
super().__init__(*args, **kwargs)
self.use_renderer = use_renderer
self.renderer = None
self.metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 10,
'semantics.autoreset': True
}
if self.use_renderer:
self.initialize_renderer()
def reset(self, *args, **kwargs):
if self.use_renderer:
if self.renderer: # TODO: Errors with RLLib with renderer as None.
self.renderer.reset()
return super().reset(*args, **kwargs)
def render(self, mode='human'):
"""
This method provides the option to render the
environment's behavior to a window which should be
readable to the human eye if mode is set to 'human'.
"""
if not self.use_renderer:
return
if not self.renderer:
self.initialize_renderer(mode=mode)
return self.update_renderer(mode=mode)
def initialize_renderer(self, mode="human"):
# Initiate the renderer
from flatland.utils.rendertools import RenderTool, AgentRenderVariant
self.renderer = RenderTool(self, gl="PGL", # gl="TKPILSVG",
agent_render_variant=AgentRenderVariant.ONE_STEP_BEHIND,
show_debug=False,
screen_height=600, # Adjust these parameters to fit your resolution
screen_width=800) # Adjust these parameters to fit your resolution
def update_renderer(self, mode='human'):
image = self.renderer.render_env(show=True, show_observations=False, show_predictions=False,
return_image=True)
return image[:, :, :3]
def set_renderer(self, renderer):
self.use_renderer = renderer
if self.use_renderer:
self.initialize_renderer(mode=self.use_renderer)
def close(self):
super().close()
if self.renderer:
try:
self.renderer.close_window()
self.renderer = None
except Exception as e:
# This is since the last step(Due to a stopping criteria) is skipped by rllib
# Due to this done is not true and the env does not close
# Finally the env is closed when RLLib exits but at that time there is no window
# and hence the error
print("Could Not close window due to:", e)
|
[
"gym.spaces.Discrete",
"envs.flatland.observations.segment_graph.Graph.get_virtual_position",
"gym.spaces.Box",
"numpy.count_nonzero",
"numpy.argmax",
"collections.defaultdict",
"envs.flatland.utils.gym_env.StepOutput",
"flatland.utils.rendertools.RenderTool",
"flatland.core.grid.grid4_utils.get_new_position"
] |
[((5246, 5263), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5257, 5263), False, 'from collections import defaultdict\n'), ((6896, 6918), 'envs.flatland.utils.gym_env.StepOutput', 'StepOutput', (['o', 'r', 'd', 'i'], {}), '(o, r, d, i)\n', (6906, 6918), False, 'from envs.flatland.utils.gym_env import StepOutput\n'), ((31423, 31445), 'envs.flatland.utils.gym_env.StepOutput', 'StepOutput', (['o', 'r', 'd', 'i'], {}), '(o, r, d, i)\n', (31433, 31445), False, 'from envs.flatland.utils.gym_env import StepOutput\n'), ((34838, 34860), 'envs.flatland.utils.gym_env.StepOutput', 'StepOutput', (['o', 'r', 'd', 'i'], {}), '(o, r, d, i)\n', (34848, 34860), False, 'from envs.flatland.utils.gym_env import StepOutput\n'), ((36611, 36635), 'gym.spaces.Discrete', 'gym.spaces.Discrete', ([], {'n': '(3)'}), '(n=3)\n', (36630, 36635), False, 'import gym\n'), ((37601, 37617), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (37612, 37617), False, 'from collections import defaultdict\n'), ((38223, 38252), 'numpy.count_nonzero', 'np.count_nonzero', (['transitions'], {}), '(transitions)\n', (38239, 38252), True, 'import numpy as np\n'), ((39490, 39525), 'numpy.count_nonzero', 'np.count_nonzero', (['deadlocked_agents'], {}), '(deadlocked_agents)\n', (39506, 39525), True, 'import numpy as np\n'), ((42341, 42357), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (42352, 42357), False, 'from collections import defaultdict\n'), ((43835, 43978), 'flatland.utils.rendertools.RenderTool', 'RenderTool', (['self'], {'gl': '"""PGL"""', 'agent_render_variant': 'AgentRenderVariant.ONE_STEP_BEHIND', 'show_debug': '(False)', 'screen_height': '(600)', 'screen_width': '(800)'}), "(self, gl='PGL', agent_render_variant=AgentRenderVariant.\n ONE_STEP_BEHIND, show_debug=False, screen_height=600, screen_width=800)\n", (43845, 43978), False, 'from flatland.utils.rendertools import RenderTool, AgentRenderVariant\n'), ((8860, 8900), 'envs.flatland.observations.segment_graph.Graph.get_virtual_position', 'Graph.get_virtual_position', (['agent.handle'], {}), '(agent.handle)\n', (8886, 8900), False, 'from envs.flatland.observations.segment_graph import Graph\n'), ((16333, 16369), 'envs.flatland.observations.segment_graph.Graph.get_virtual_position', 'Graph.get_virtual_position', (['agent_id'], {}), '(agent_id)\n', (16359, 16369), False, 'from envs.flatland.observations.segment_graph import Graph\n'), ((2400, 2475), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(0)', 'high': '(1)', 'shape': '(self.action_space.n,)', 'dtype': 'np.int32'}), '(low=0, high=1, shape=(self.action_space.n,), dtype=np.int32)\n', (2414, 2475), False, 'import gym\n'), ((4095, 4133), 'numpy.count_nonzero', 'np.count_nonzero', (['possible_transitions'], {}), '(possible_transitions)\n', (4111, 4133), True, 'import numpy as np\n'), ((38717, 38760), 'flatland.core.grid.grid4_utils.get_new_position', 'get_new_position', (['agent.position', 'direction'], {}), '(agent.position, direction)\n', (38733, 38760), False, 'from flatland.core.grid.grid4_utils import get_new_position\n'), ((13406, 13442), 'envs.flatland.observations.segment_graph.Graph.get_virtual_position', 'Graph.get_virtual_position', (['agent_id'], {}), '(agent_id)\n', (13432, 13442), False, 'from envs.flatland.observations.segment_graph import Graph\n'), ((32313, 32351), 'numpy.count_nonzero', 'np.count_nonzero', (['possible_transitions'], {}), '(possible_transitions)\n', (32329, 32351), True, 'import numpy as np\n'), ((36120, 36170), 
'flatland.core.grid.grid4_utils.get_new_position', 'get_new_position', (['agent_virtual_position', 'movement'], {}), '(agent_virtual_position, movement)\n', (36136, 36170), False, 'from flatland.core.grid.grid4_utils import get_new_position\n'), ((40918, 40940), 'numpy.argmax', 'np.argmax', (['transitions'], {}), '(transitions)\n', (40927, 40940), True, 'import numpy as np\n'), ((32440, 32471), 'numpy.argmax', 'np.argmax', (['possible_transitions'], {}), '(possible_transitions)\n', (32449, 32471), True, 'import numpy as np\n'), ((32510, 32554), 'flatland.core.grid.grid4_utils.get_new_position', 'get_new_position', (['position', 'new_direction_me'], {}), '(position, new_direction_me)\n', (32526, 32554), False, 'from flatland.core.grid.grid4_utils import get_new_position\n'), ((32998, 33040), 'numpy.count_nonzero', 'np.count_nonzero', (['opp_possible_transitions'], {}), '(opp_possible_transitions)\n', (33014, 33040), True, 'import numpy as np\n'), ((4669, 4700), 'flatland.core.grid.grid4_utils.get_new_position', 'get_new_position', (['pos', 'movement'], {}), '(pos, movement)\n', (4685, 4700), False, 'from flatland.core.grid.grid4_utils import get_new_position\n')]
|
import numpy as np
import cv2 as cv
img = cv.imread('1.jpeg', cv.IMREAD_COLOR)
# for a polygon we need a set of points, so we create a NumPy array of vertices (pts).
pts = np.array([[20,33],[300,120], [67,79], [123,111], [144,134]], np.int32)
# the method polylines actually draws the polygon; its parameters are: 1. where to draw (img),
# 2. the set(s) of points, 3. whether the first and last point should be connected (bool), 4. the color, 5. the width of the line.
cv.polylines(img, [pts], True, (0, 231, 123), 1)
cv.imshow('image', img)
cv.waitKey(0)
cv.destroyAllWindows()
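# Editor's note: optional follow-up, not part of the original snippet. cv.fillPoly draws
# the same polygon filled instead of outlined; it assumes '1.jpeg' was loaded successfully above.
filled = img.copy()
cv.fillPoly(filled, [pts], (0, 231, 123))
cv.imshow('filled polygon', filled)
cv.waitKey(0)
cv.destroyAllWindows()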
|
[
"cv2.polylines",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.waitKey",
"cv2.imread"
] |
[((43, 79), 'cv2.imread', 'cv.imread', (['"""1.jpeg"""', 'cv.IMREAD_COLOR'], {}), "('1.jpeg', cv.IMREAD_COLOR)\n", (52, 79), True, 'import cv2 as cv\n'), ((180, 256), 'numpy.array', 'np.array', (['[[20, 33], [300, 120], [67, 79], [123, 111], [144, 134]]', 'np.int32'], {}), '([[20, 33], [300, 120], [67, 79], [123, 111], [144, 134]], np.int32)\n', (188, 256), True, 'import numpy as np\n'), ((475, 523), 'cv2.polylines', 'cv.polylines', (['img', '[pts]', '(True)', '(0, 231, 123)', '(1)'], {}), '(img, [pts], True, (0, 231, 123), 1)\n', (487, 523), True, 'import cv2 as cv\n'), ((524, 547), 'cv2.imshow', 'cv.imshow', (['"""image"""', 'img'], {}), "('image', img)\n", (533, 547), True, 'import cv2 as cv\n'), ((547, 560), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (557, 560), True, 'import cv2 as cv\n'), ((561, 583), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (581, 583), True, 'import cv2 as cv\n')]
|
import scipy, numpy, typing, numbers
from tequila.objective import Objective
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from .optimizer_base import Optimizer
from ._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from collections import namedtuple
from tequila.utils.exceptions import TequilaException
from tequila.circuit.noise import NoiseModel
from tequila.tools.qng import get_qng_combos
class TequilaScipyException(TequilaException):
""" """
pass
SciPyReturnType = namedtuple('SciPyReturnType', 'energy angles history scipy_output')
class OptimizerSciPy(Optimizer):
""" """
gradient_free_methods = ['NELDER-MEAD', 'COBYLA', 'POWELL', 'SLSQP']
gradient_based_methods = ['L-BFGS-B', 'BFGS', 'CG', 'TNC']
hessian_based_methods = ["TRUST-KRYLOV", "NEWTON-CG", "DOGLEG", "TRUST-NCG", "TRUST-EXACT", "TRUST-CONSTR"]
@classmethod
def available_methods(cls):
""":return: All tested available methods"""
return cls.gradient_free_methods + cls.gradient_based_methods + cls.hessian_based_methods
def __init__(self, method: str = "L-BFGS-B",
tol: numbers.Real = None,
method_options=None,
method_bounds=None,
method_constraints=None,
silent: bool = True,
**kwargs):
"""
Optimize a circuit to minimize a given objective using scipy
See the Optimizer class for all other parameters to initialize
:param method: The scipy method passed as string
:param use_gradient: select if gradients shall be used (can be done automatically for most methods)
:param tol: See scipy documentation for the method you picked
:param method_options: See scipy documentation for the method you picked
:param method_bounds: See scipy documentation for the method you picked
:param method_constraints: See scipy documentation for the method you picked
:param silent: if False the optimizer print out all evaluated energies
"""
super().__init__(**kwargs)
if hasattr(method, "upper"):
self.method = method.upper()
else:
self.method = method
self.tol = tol
self.method_options = method_options
if method_bounds is not None:
method_bounds = {assign_variable(k): v for k, v in method_bounds.items()}
self.method_bounds = method_bounds
self.silent = silent
if method_options is None:
self.method_options = {'maxiter': self.maxiter}
else:
self.method_options = method_options
if 'maxiter' not in method_options:
self.method_options['maxiter'] = self.maxiter
self.method_options['disp'] = not silent
if method_constraints is None:
self.method_constraints = ()
else:
self.method_constraints = method_constraints
def __call__(self, objective: Objective,
variables: typing.List[Variable] = None,
initial_values: typing.Dict[Variable, numbers.Real] = None,
gradient: typing.Dict[Variable, Objective] = None,
hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
reset_history: bool = True,
*args,
**kwargs) -> SciPyReturnType:
"""
Optimizes with scipy and gives back the optimized angles
Get the optimized energies over the history
:param objective: The tequila Objective to minimize
:param initial_values: initial values for the objective
:param return_scipy_output: choose if the full scipy output shall be returned
:param reset_history: reset the history before optimization starts (has no effect if self.save_history is False)
:return: tuple of optimized energy, optimized angles and scipy output
"""
infostring = "{:15} : {}\n".format("Method", self.method)
infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
if gradient is not None:
infostring += "{:15} : {}\n".format("grad instr", gradient)
if hessian is not None:
infostring += "{:15} : {}\n".format("hess_instr", hessian)
if self.save_history and reset_history:
self.reset_history()
active_angles, passive_angles, variables = self.initialize_variables(objective, initial_values, variables)
# Transform the initial value directory into (ordered) arrays
param_keys, param_values = zip(*active_angles.items())
param_values = numpy.array(param_values)
# process and initialize scipy bounds
bounds = None
if self.method_bounds is not None:
bounds = {k: None for k in active_angles}
for k, v in self.method_bounds.items():
if k in bounds:
bounds[k] = v
infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
names, bounds = zip(*bounds.items())
assert (names == param_keys) # make sure the bounds are not shuffled
# do the compilation here to avoid costly recompilation during the optimization
compiled_objective = self.compile_objective(objective=objective)
E = _EvalContainer(objective=compiled_objective,
param_keys=param_keys,
samples=self.samples,
passive_angles=passive_angles,
save_history=self.save_history,
backend_options=self.backend_options,
print_level=self.print_level)
compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
compile_hessian = self.method in self.hessian_based_methods
dE = None
ddE = None
# detect if numerical gradients shall be used
# switch off compiling if so
if isinstance(gradient, str):
if gradient.lower() == 'qng':
compile_gradient = False
if compile_hessian:
raise TequilaException('Sorry, QNG and hessian not yet tested together.')
combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
samples=self.samples, noise=self.noise,
backend_options=self.backend_options)
dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
infostring += "{:15} : QNG {}\n".format("gradient", dE)
else:
dE = gradient
compile_gradient = False
if compile_hessian:
compile_hessian = False
if hessian is None:
hessian = gradient
infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
if isinstance(hessian, str):
ddE = hessian
compile_hessian = False
if compile_gradient:
grad_obj, comp_grad_obj = self.compile_gradient(objective=objective, variables=variables, gradient=gradient)
expvals = sum([o.count_expectationvalues() for o in comp_grad_obj.values()])
infostring += "{:15} : {} expectationvalues\n".format("gradient", expvals)
dE = _GradContainer(objective=comp_grad_obj,
param_keys=param_keys,
samples=self.samples,
passive_angles=passive_angles,
save_history=self.save_history,
print_level=self.print_level,
backend_options=self.backend_options)
if compile_hessian:
hess_obj, comp_hess_obj = self.compile_hessian(variables=variables,
hessian=hessian,
grad_obj=grad_obj,
comp_grad_obj=comp_grad_obj)
expvals = sum([o.count_expectationvalues() for o in comp_hess_obj.values()])
infostring += "{:15} : {} expectationvalues\n".format("hessian", expvals)
ddE = _HessContainer(objective=comp_hess_obj,
param_keys=param_keys,
samples=self.samples,
passive_angles=passive_angles,
save_history=self.save_history,
print_level=self.print_level,
backend_options=self.backend_options)
if self.print_level > 0:
print(self)
print(infostring)
print("{:15} : {}\n".format("active variables", len(active_angles)))
Es = []
class SciPyCallback:
energies = []
gradients = []
hessians = []
angles = []
real_iterations = 0
def __call__(self, *args, **kwargs):
self.energies.append(E.history[-1])
self.angles.append(E.history_angles[-1])
if dE is not None and not isinstance(dE, str):
self.gradients.append(dE.history[-1])
if ddE is not None and not isinstance(ddE, str):
self.hessians.append(ddE.history[-1])
self.real_iterations += 1
callback = SciPyCallback()
res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
args=(Es,),
method=self.method, tol=self.tol,
bounds=bounds,
constraints=self.method_constraints,
options=self.method_options,
callback=callback)
# failsafe since callback is not implemented everywhere
if callback.real_iterations == 0:
real_iterations = range(len(E.history))
if self.save_history:
self.history.energies = callback.energies
self.history.energy_evaluations = E.history
self.history.angles = callback.angles
self.history.angles_evaluations = E.history_angles
self.history.gradients = callback.gradients
self.history.hessians = callback.hessians
if dE is not None and not isinstance(dE, str):
self.history.gradients_evaluations = dE.history
if ddE is not None and not isinstance(ddE, str):
self.history.hessians_evaluations = ddE.history
# some methods like "cobyla" do not support callback functions
if len(self.history.energies) == 0:
self.history.energies = E.history
self.history.angles = E.history_angles
E_final = res.fun
angles_final = dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
angles_final = {**angles_final, **passive_angles}
return SciPyReturnType(energy=E_final, angles=format_variable_dictionary(angles_final), history=self.history,
scipy_output=res)
def available_methods(energy=True, gradient=True, hessian=True) -> typing.List[str]:
"""Convenience
:return: Available methods of the scipy optimizer
Parameters
----------
energy :
(Default value = True)
gradient :
(Default value = True)
hessian :
(Default value = True)
Returns
-------
"""
methods = []
if energy:
methods += OptimizerSciPy.gradient_free_methods
if gradient:
methods += OptimizerSciPy.gradient_based_methods
if hessian:
methods += OptimizerSciPy.hessian_based_methods
return methods
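# Editor's note: small illustrative check, not part of the original module: with only
# energy-based methods requested, the helper simply returns the gradient-free list
# defined on OptimizerSciPy.
assert available_methods(gradient=False, hessian=False) == OptimizerSciPy.gradient_free_methods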
def minimize(objective: Objective,
gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
variables: typing.List[typing.Hashable] = None,
samples: int = None,
maxiter: int = 100,
backend: str = None,
backend_options: dict = None,
noise: NoiseModel = None,
method: str = "BFGS",
tol: float = 1.e-3,
method_options: dict = None,
method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
method_constraints=None,
silent: bool = False,
save_history: bool = True,
*args,
**kwargs) -> SciPyReturnType:
"""
Parameters
----------
objective: Objective :
The tequila objective to optimize
gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : (Default value = None) :
'2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
dictionary of variables and tequila objective to define own gradient,
None for automatic construction (default)
Other options include 'qng' to use the quantum natural gradient.
hessian: typing.Union[str, typing.Dict[Variable, Objective], None] : (Default value = None) :
'2-point', 'cs' or '3-point' for numerical hessian evaluation (does not work in combination with all optimizers),
dictionary (keys: tuple of variables, values: tequila objective) to define own hessian,
None for automatic construction (default)
initial_values: typing.Dict[typing.Hashable, numbers.Real]: (Default value = None):
Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
variables: typing.List[typing.Hashable] :
(Default value = None)
List of Variables to optimize
samples: int :
(Default value = None)
samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
maxiter: int :
(Default value = 100)
backend: str :
(Default value = None)
Simulator backend, will be automatically chosen if set to None
backend_options: dict:
(Default value = None)
Additional options for the backend
Will be unpacked and passed to the compiled objective in every call
noise: NoiseModel:
(Default value =None)
a NoiseModel to apply to all expectation values in the objective.
method: str :
(Default value = "BFGS")
Optimization method (see scipy documentation, or 'available methods')
tol: float :
(Default value = 1.e-3)
Convergence tolerance for optimization (see scipy documentation)
method_options: dict :
(Default value = None)
Dictionary of options
(see scipy documentation)
method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]]:
(Default value = None)
bounds for the variables (see scipy documentation)
method_constraints :
(Default value = None)
(see scipy documentation)
silent: bool :
(Default value = False)
No printout if True
save_history: bool:
(Default value = True)
Save the history throughout the optimization
Returns
-------
"""
if isinstance(gradient, dict) or hasattr(gradient, "items"):
if all([isinstance(x, Objective) for x in gradient.values()]):
gradient = format_variable_dictionary(gradient)
if isinstance(hessian, dict) or hasattr(hessian, "items"):
if all([isinstance(x, Objective) for x in hessian.values()]):
hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
method_bounds = format_variable_dictionary(method_bounds)
# set defaults
optimizer = OptimizerSciPy(save_history=save_history,
maxiter=maxiter,
method=method,
method_options=method_options,
method_bounds=method_bounds,
method_constraints=method_constraints,
silent=silent,
backend=backend,
backend_options=backend_options,
samples=samples,
noise_model=noise,
tol=tol,
*args,
**kwargs)
if initial_values is not None:
initial_values = {assign_variable(k): v for k, v in initial_values.items()}
return optimizer(objective=objective,
gradient=gradient,
hessian=hessian,
initial_values=initial_values,
variables=variables, *args, **kwargs)
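# Editor's note: hypothetical usage sketch, not part of the original module. It assumes
# the usual tequila workflow (tq.gates.Ry, tq.paulis.Z and tq.ExpectationValue are taken
# from the public tequila API):
#
#     import tequila as tq
#     U = tq.gates.Ry(angle="a", target=0)
#     H = tq.paulis.Z(0)
#     objective = tq.ExpectationValue(U=U, H=H)
#     result = minimize(objective, method="BFGS", initial_values={"a": 0.1})
#     print(result.energy, result.angles)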
|
[
"collections.namedtuple",
"tequila.objective.objective.format_variable_dictionary",
"tequila.tools.qng.get_qng_combos",
"scipy.optimize.minimize",
"numpy.array",
"tequila.utils.exceptions.TequilaException",
"tequila.objective.objective.assign_variable"
] |
[((587, 654), 'collections.namedtuple', 'namedtuple', (['"""SciPyReturnType"""', '"""energy angles history scipy_output"""'], {}), "('SciPyReturnType', 'energy angles history scipy_output')\n", (597, 654), False, 'from collections import namedtuple\n'), ((16511, 16552), 'tequila.objective.objective.format_variable_dictionary', 'format_variable_dictionary', (['method_bounds'], {}), '(method_bounds)\n', (16537, 16552), False, 'from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list\n'), ((4853, 4878), 'numpy.array', 'numpy.array', (['param_values'], {}), '(param_values)\n', (4864, 4878), False, 'import scipy, numpy, typing, numbers\n'), ((9994, 10210), 'scipy.optimize.minimize', 'scipy.optimize.minimize', (['E'], {'x0': 'param_values', 'jac': 'dE', 'hess': 'ddE', 'args': '(Es,)', 'method': 'self.method', 'tol': 'self.tol', 'bounds': 'bounds', 'constraints': 'self.method_constraints', 'options': 'self.method_options', 'callback': 'callback'}), '(E, x0=param_values, jac=dE, hess=ddE, args=(Es,),\n method=self.method, tol=self.tol, bounds=bounds, constraints=self.\n method_constraints, options=self.method_options, callback=callback)\n', (10017, 10210), False, 'import scipy, numpy, typing, numbers\n'), ((16217, 16253), 'tequila.objective.objective.format_variable_dictionary', 'format_variable_dictionary', (['gradient'], {}), '(gradient)\n', (16243, 16253), False, 'from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list\n'), ((17354, 17372), 'tequila.objective.objective.assign_variable', 'assign_variable', (['k'], {}), '(k)\n', (17369, 17372), False, 'from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list\n'), ((2505, 2523), 'tequila.objective.objective.assign_variable', 'assign_variable', (['k'], {}), '(k)\n', (2520, 2523), False, 'from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list\n'), ((6505, 6671), 'tequila.tools.qng.get_qng_combos', 'get_qng_combos', (['objective'], {'initial_values': 'initial_values', 'backend': 'self.backend', 'samples': 'self.samples', 'noise': 'self.noise', 'backend_options': 'self.backend_options'}), '(objective, initial_values=initial_values, backend=self.\n backend, samples=self.samples, noise=self.noise, backend_options=self.\n backend_options)\n', (6519, 6671), False, 'from tequila.tools.qng import get_qng_combos\n'), ((11659, 11699), 'tequila.objective.objective.format_variable_dictionary', 'format_variable_dictionary', (['angles_final'], {}), '(angles_final)\n', (11685, 11699), False, 'from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list\n'), ((6411, 6478), 'tequila.utils.exceptions.TequilaException', 'TequilaException', (['"""Sorry, QNG and hessian not yet tested together."""'], {}), "('Sorry, QNG and hessian not yet tested together.')\n", (6427, 6478), False, 'from tequila.utils.exceptions import TequilaException\n'), ((16411, 16432), 'tequila.objective.objective.assign_variable', 'assign_variable', (['k[0]'], {}), '(k[0])\n', (16426, 16432), False, 'from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list\n'), ((16434, 16457), 'tequila.objective.objective.assign_variable', 'assign_variable', (['[k[1]]'], {}), '([k[1]])\n', (16449, 16457), False, 'from tequila.objective.objective import 
assign_variable, Variable, format_variable_dictionary, format_variable_list\n')]
|
#
# This file is part of the chi repository
# (https://github.com/DavAug/chi/) which is released under the
# BSD 3-clause license. See accompanying LICENSE.md for copyright notice and
# full license details.
#
import copy
import myokit
import myokit.formats.sbml as sbml
import numpy as np
class MechanisticModel(object):
"""
    A base class for models that are specified by SBML files.
Parameters
----------
sbml_file
A path to the SBML model file that specifies the model.
"""
def __init__(self, sbml_file):
super(MechanisticModel, self).__init__()
# Import model
self._model = sbml.SBMLImporter().model(sbml_file)
# Set default number and names of states, parameters and outputs.
self._set_number_and_names()
# Get time unit
self._time_unit = self._get_time_unit()
# Create simulator without sensitivities
# (intentionally public property)
self.simulator = myokit.Simulation(self._model)
self._has_sensitivities = False
def _get_time_unit(self):
"""
Gets the model's time unit.
"""
# Get bound variables
bound_variables = [var for var in self._model.variables(bound=True)]
# Get the variable that is bound to time
# (only one can exist in myokit.Model)
for var in bound_variables:
if var._binding == 'time':
return var.unit()
def _set_const(self, parameters):
"""
Sets values of constant model parameters.
"""
for id_var, var in enumerate(self._const_names):
self.simulator.set_constant(var, float(parameters[id_var]))
def _set_state(self, parameters):
"""
Sets initial values of states.
"""
parameters = np.array(parameters)
parameters = parameters[self._original_order]
self.simulator.set_state(parameters)
def _set_number_and_names(self):
"""
        Sets the number of states, parameters and outputs, as well as their
        names, based on ``self._model``.
"""
# Get the number of states and parameters
self._n_states = self._model.count_states()
n_const = self._model.count_variables(const=True)
self._n_parameters = self._n_states + n_const
# Get constant variable names and state names
names = [var.qname() for var in self._model.states()]
self._state_names = sorted(names)
self._const_names = sorted(
[var.qname() for var in self._model.variables(const=True)])
# Remember original order of state names for simulation
order_after_sort = np.argsort(names)
self._original_order = np.argsort(order_after_sort)
# Set default parameter names
self._parameter_names = self._state_names + self._const_names
# Set default outputs
self._output_names = self._state_names
self._n_outputs = self._n_states
# Create references of displayed parameter and output names to
        # original myokit names (defaults to identity map)
# (Key: myokit name, value: displayed name)
self._parameter_name_map = dict(
zip(self._parameter_names, self._parameter_names))
self._output_name_map = dict(
zip(self._output_names, self._output_names))
def copy(self):
"""
Returns a deep copy of the mechanistic model.
.. note::
Copying the model resets the sensitivity settings.
"""
# Copy model manually and get protocol
myokit_model = self._model.clone()
protocol = self.simulator._protocol
# Copy the mechanistic model
model = copy.deepcopy(self)
# Replace myokit model by safe copy and create simulator
model._model = myokit_model
model.simulator = myokit.Simulation(myokit_model, protocol)
return model
def enable_sensitivities(self, enabled, parameter_names=None):
"""
Enables the computation of the model output sensitivities to the model
parameters if set to ``True``.
The sensitivities are computed using the forward sensitivities method,
where an ODE for each sensitivity is derived. The sensitivities are
        returned together with the solution to the original system of ODEs when
simulating the mechanistic model :meth:`simulate`.
The optional parameter names argument can be used to set which
sensitivities are computed. By default the sensitivities to all
parameters are computed.
:param enabled: A boolean flag which enables (``True``) / disables
(``False``) the computation of sensitivities.
:type enabled: bool
:param parameter_names: A list of parameter names of the model. If
``None`` sensitivities for all parameters are computed.
:type parameter_names: list[str], optional
"""
enabled = bool(enabled)
# Get dosing regimen from existing simulator
protocol = self.simulator._protocol
if not enabled:
if self._has_sensitivities:
# Disable sensitivities
sim = myokit.Simulation(self._model, protocol)
self.simulator = sim
self._has_sensitivities = False
return None
# Sensitivities are already disabled
return None
# Get parameters whose output sensitivities are computed
parameters = []
for param_id, param in enumerate(self._parameter_names):
if param_id < self._n_states:
# Convert initial value parameters to the correct syntax
parameters.append('init(' + param + ')')
continue
# Other parameters can be appended without modification
parameters.append(param)
if parameter_names is not None:
# Get myokit names for input parameter names
container = []
for index, public_name in enumerate(
self._parameter_name_map.values()):
if public_name in parameter_names:
container.append(parameters[index])
parameters = container
if not parameters:
raise ValueError(
'None of the parameters could be identified. The valid '
'parameter names are <' + str(self._parameter_names) + '>.')
# Create simulator
sensitivities = (self._output_names, parameters)
sim = myokit.Simulation(self._model, protocol, sensitivities)
# Update simulator and sensitivity state
self.simulator = sim
self._has_sensitivities = True
def has_sensitivities(self):
"""
Returns a boolean indicating whether sensitivities have been enabled.
"""
return self._has_sensitivities
def n_outputs(self):
"""
Returns the number of output dimensions.
By default this is the number of states.
"""
return self._n_outputs
def n_parameters(self):
"""
Returns the number of parameters in the model.
Parameters of the model are initial state values and structural
parameter values.
"""
return self._n_parameters
def outputs(self):
"""
Returns the output names of the model.
"""
# Get user specified output names
output_names = [
self._output_name_map[name] for name in self._output_names]
return output_names
def parameters(self):
"""
Returns the parameter names of the model.
"""
# Get user specified parameter names
parameter_names = [
self._parameter_name_map[name] for name in self._parameter_names]
return parameter_names
def set_outputs(self, outputs):
"""
Sets outputs of the model.
The outputs can be set to any quantifiable variable name of the
:class:`myokit.Model`, e.g. `compartment.variable`.
.. note::
Setting outputs resets the sensitivity settings (by default
            sensitivities are disabled).
:param outputs:
A list of output names.
:type outputs: list[str]
"""
outputs = list(outputs)
# Translate public names to myokit names, if set previously
for myokit_name, public_name in self._output_name_map.items():
if public_name in outputs:
# Replace public name by myokit name
index = outputs.index(public_name)
outputs[index] = myokit_name
# Check that outputs are valid
for output in outputs:
try:
var = self.simulator._model.get(output)
if not (var.is_state() or var.is_intermediary()):
raise ValueError(
'Outputs have to be state or intermediary variables.')
except KeyError:
raise KeyError(
'The variable <' + str(output) + '> does not exist in the '
'model.')
# Remember outputs
self._output_names = outputs
self._n_outputs = len(outputs)
# Create an updated output name map
output_name_map = {}
for myokit_name in self._output_names:
try:
output_name_map[myokit_name] = self._output_name_map[
myokit_name]
except KeyError:
# The output did not exist before, so create an identity map
output_name_map[myokit_name] = myokit_name
self._output_name_map = output_name_map
# Disable sensitivities
self.enable_sensitivities(False)
def set_output_names(self, names):
"""
Assigns names to the model outputs. By default the
:class:`myokit.Model` names are assigned to the outputs.
:param names: A dictionary that maps the current output names to new
names.
:type names: dict[str, str]
"""
if not isinstance(names, dict):
raise TypeError(
                'Names has to be a dictionary with the current output names '
'as keys and the new output names as values.')
# Check that new output names are unique
new_names = list(names.values())
n_unique_new_names = len(set(names.values()))
if len(new_names) != n_unique_new_names:
raise ValueError(
'The new output names have to be unique.')
# Check that new output names do not exist already
for new_name in new_names:
if new_name in list(self._output_name_map.values()):
raise ValueError(
'The output names cannot coincide with existing '
'output names. One output is already called '
'<' + str(new_name) + '>.')
# Replace currently displayed names by new names
for myokit_name in self._output_names:
old_name = self._output_name_map[myokit_name]
try:
new_name = names[old_name]
self._output_name_map[myokit_name] = str(new_name)
except KeyError:
# KeyError indicates that the current output is not being
# renamed.
pass
def set_parameter_names(self, names):
"""
Assigns names to the parameters. By default the :class:`myokit.Model`
names are assigned to the parameters.
:param names: A dictionary that maps the current parameter names to new
names.
:type names: dict[str, str]
"""
if not isinstance(names, dict):
raise TypeError(
                'Names has to be a dictionary with the current parameter names '
'as keys and the new parameter names as values.')
# Check that new parameter names are unique
new_names = list(names.values())
n_unique_new_names = len(set(names.values()))
if len(new_names) != n_unique_new_names:
raise ValueError(
'The new parameter names have to be unique.')
# Check that new parameter names do not exist already
for new_name in new_names:
if new_name in list(self._parameter_name_map.values()):
raise ValueError(
'The parameter names cannot coincide with existing '
'parameter names. One parameter is already called '
'<' + str(new_name) + '>.')
# Replace currently displayed names by new names
for myokit_name in self._parameter_names:
old_name = self._parameter_name_map[myokit_name]
try:
new_name = names[old_name]
self._parameter_name_map[myokit_name] = str(new_name)
except KeyError:
# KeyError indicates that the current parameter is not being
# renamed.
pass
def simulate(self, parameters, times):
"""
Returns the numerical solution of the model outputs (and optionally
        the sensitivities) for the specified parameters and times.
The model outputs are returned as a 2 dimensional NumPy array of shape
(n_outputs, n_times). If sensitivities are enabled, a tuple is returned
with the NumPy array of the model outputs and a NumPy array of the
sensitivities of shape (n_times, n_outputs, n_parameters).
:param parameters: An array-like object with values for the model
parameters.
:type parameters: list, numpy.ndarray
:param times: An array-like object with time points at which the output
values are returned.
:type times: list, numpy.ndarray
"""
# Reset simulation
self.simulator.reset()
# Set initial conditions
self._set_state(parameters[:self._n_states])
# Set constant model parameters
self._set_const(parameters[self._n_states:])
# Simulate
if not self._has_sensitivities:
output = self.simulator.run(
times[-1] + 1, log=self._output_names, log_times=times)
output = np.array([output[name] for name in self._output_names])
return output
output, sensitivities = self.simulator.run(
times[-1] + 1, log=self._output_names, log_times=times)
output = np.array([output[name] for name in self._output_names])
sensitivities = np.array(sensitivities)
return output, sensitivities
def time_unit(self):
"""
Returns the model's unit of time.
"""
return self._time_unit
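# Illustrative usage sketch (not part of chi): how a MechanisticModel is
# typically built from an SBML file and simulated. The file path and the flat
# parameter values below are hypothetical placeholders.
def _example_mechanistic_simulation(sbml_file):
    model = MechanisticModel(sbml_file)
    model.enable_sensitivities(True)
    times = np.linspace(0, 10, 100)
    parameters = [1.0] * model.n_parameters()
    # With sensitivities enabled, simulate() returns a tuple: the outputs of
    # shape (n_outputs, n_times) and the sensitivities of shape
    # (n_times, n_outputs, n_parameters).
    output, sensitivities = model.simulate(parameters, times)
    return output, sensitivities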
class PharmacodynamicModel(MechanisticModel):
"""
Converts a pharmacodynamic model specified by an SBML file into a forward
model that can be solved numerically.
Extends :class:`MechanisticModel`.
Parameters
----------
sbml_file
A path to the SBML model file that specifies the pharmacodynamic model.
"""
def __init__(self, sbml_file):
super(PharmacodynamicModel, self).__init__(sbml_file)
# Set default pharmacokinetic input variable
# (Typically drug concentration)
self._pk_input = None
if self._model.has_variable('myokit.drug_concentration'):
self._pk_input = 'myokit.drug_concentration'
def pk_input(self):
"""
Returns the pharmacokinetic input variable. In most models this will be
the concentration of the drug.
Defaults to ``None`` or ``myokit.drug_concentration`` if the latter is
among the model parameters.
"""
return self._pk_input
def set_pk_input(self, name):
"""
Sets the pharmacokinetic input variable. In most models this will be
the concentration of the drug.
The name has to match a parameter of the model.
"""
if name not in self._parameter_names:
raise ValueError(
'The name does not match a model parameter.')
self._pk_input = name
class PharmacokineticModel(MechanisticModel):
"""
Converts a pharmacokinetic model specified by an SBML file into a forward
model that can be solved numerically.
Extends :class:`MechanisticModel`.
Parameters
----------
sbml_file
A path to the SBML model file that specifies the pharmacokinetic model.
"""
def __init__(self, sbml_file):
super(PharmacokineticModel, self).__init__(sbml_file)
# Set default dose administration
self._administration = None
# Safe vanilla model
self._vanilla_model = self._model.clone()
# Set default output variable that interacts with the pharmacodynamic
# model
# (Typically drug concentration in central compartment)
self._pd_output = None
if self._model.has_variable('central.drug_concentration'):
self._pd_output = 'central.drug_concentration'
# Set default output to pd output if not None
if self._pd_output is not None:
self.set_outputs([self._pd_output])
def _add_dose_compartment(self, model, drug_amount):
"""
Adds a dose compartment to the model with a linear absorption rate to
the connected compartment.
"""
# Add a dose compartment to the model
dose_comp = model.add_component_allow_renaming('dose')
# Create a state variable for the drug amount in the dose compartment
dose_drug_amount = dose_comp.add_variable('drug_amount')
dose_drug_amount.set_rhs(0)
dose_drug_amount.set_unit(drug_amount.unit())
dose_drug_amount.promote()
# Create an absorption rate variable
absorption_rate = dose_comp.add_variable('absorption_rate')
absorption_rate.set_rhs(1)
absorption_rate.set_unit(1 / self.time_unit())
# Add outflow expression to dose compartment
dose_drug_amount.set_rhs(
myokit.Multiply(
myokit.PrefixMinus(myokit.Name(absorption_rate)),
myokit.Name(dose_drug_amount)
)
)
# Add inflow expression to connected compartment
rhs = drug_amount.rhs()
drug_amount.set_rhs(
myokit.Plus(
rhs,
myokit.Multiply(
myokit.Name(absorption_rate),
myokit.Name(dose_drug_amount)
)
)
)
# Update number of parameters and states, as well as their names
self._model = model
self._set_number_and_names()
# Set default output to pd_output if it is not None
if self._pd_output is not None:
self.set_outputs([self._pd_output])
return model, dose_drug_amount
def _add_dose_rate(self, compartment, drug_amount):
"""
Adds a dose rate variable to the state variable, which is bound to the
dosing regimen.
"""
# Register a dose rate variable to the compartment and bind it to
# pace, i.e. tell myokit that its value is set by the dosing regimen/
# myokit.Protocol
dose_rate = compartment.add_variable_allow_renaming(
str('dose_rate'))
dose_rate.set_binding('pace')
# Set initial value to 0 and unit to unit of drug amount over unit of
# time
dose_rate.set_rhs(0)
dose_rate.set_unit(drug_amount.unit() / self.time_unit())
# Add the dose rate to the rhs of the drug amount variable
rhs = drug_amount.rhs()
drug_amount.set_rhs(
myokit.Plus(
rhs,
myokit.Name(dose_rate)
)
)
def administration(self):
"""
Returns the mode of administration in form of a dictionary.
The dictionary has the keys 'compartment' and 'direct'. The former
provides information about which compartment is dosed, and the latter
        whether the dose is administered directly or indirectly to the
compartment.
"""
return self._administration
def dosing_regimen(self):
"""
Returns the dosing regimen of the compound in form of a
:class:`myokit.Protocol`. If the protocol has not been set, ``None`` is
returned.
"""
return self.simulator._protocol
def set_administration(
self, compartment, amount_var='drug_amount', direct=True):
r"""
Sets the route of administration of the compound.
The compound is administered to the selected compartment either
directly or indirectly. If it is administered directly, a dose rate
variable is added to the drug amount's rate of change expression
.. math ::
\frac{\text{d}A}{\text{d}t} = \text{RHS} + r_d,
where :math:`A` is the drug amount in the selected compartment, RHS is
the rate of change of :math:`A` prior to adding the dose rate, and
:math:`r_d` is the dose rate.
The dose rate can be set by :meth:`set_dosing_regimen`.
If the route of administration is indirect, a dosing compartment
is added to the model, which is connected to the selected compartment.
The dose rate variable is then added to the rate of change expression
of the dose amount variable in the dosing compartment. The drug amount
in the dosing compartment flows at a linear absorption rate into the
selected compartment
.. math ::
\frac{\text{d}A_d}{\text{d}t} = -k_aA_d + r_d \\
\frac{\text{d}A}{\text{d}t} = \text{RHS} + k_aA_d,
where :math:`A_d` is the amount of drug in the dose compartment and
:math:`k_a` is the absorption rate.
Setting an indirect administration route changes the number of
parameters of the model, and resets the parameter names to their
defaults.
        .. note::
Setting the route of administration will reset the sensitivity
settings.
:param compartment: Compartment to which doses are either directly or
indirectly administered.
:type compartment: str
:param amount_var: Drug amount variable in the compartment. By default
the drug amount variable is assumed to be 'drug_amount'.
:type amount_var: str, optional
:param direct: A boolean flag that indicates whether the dose is
administered directly or indirectly to the compartment.
:type direct: bool, optional
"""
# Check inputs
model = self._vanilla_model.clone()
if not model.has_component(compartment):
raise ValueError(
'The model does not have a compartment named <'
+ str(compartment) + '>.')
comp = model.get(compartment, class_filter=myokit.Component)
if not comp.has_variable(amount_var):
raise ValueError(
'The drug amount variable <' + str(amount_var) + '> could not '
'be found in the compartment.')
drug_amount = comp.get(amount_var)
if not drug_amount.is_state():
raise ValueError(
'The variable <' + str(drug_amount) + '> is not a state '
'variable, and can therefore not be dosed.')
# If administration is indirect, add a dosing compartment and update
# the drug amount variable to the one in the dosing compartment
if not direct:
model, drug_amount = self._add_dose_compartment(model, drug_amount)
comp = model.get(compartment, class_filter=myokit.Component)
# Add dose rate variable to the right hand side of the drug amount
self._add_dose_rate(comp, drug_amount)
# Update model and simulator
# (otherwise simulator won't know about pace bound variable)
self._model = model
self.simulator = myokit.Simulation(model)
self._has_sensitivities = False
# Remember type of administration
self._administration = dict(
{'compartment': compartment, 'direct': direct})
def set_dosing_regimen(
self, dose, start, duration=0.01, period=None, num=None):
"""
Sets the dosing regimen with which the compound is administered.
The route of administration can be set with :meth:`set_administration`.
However, the type of administration, e.g. bolus injection or infusion,
may be controlled with the duration input.
By default the dose is administered as a bolus injection (duration on
a time scale that is 100 fold smaller than the basic time unit). To
model an infusion of the dose over a longer time period, the
``duration`` can be adjusted to the appropriate time scale.
By default the dose is administered once. To apply multiple doses
provide a dose administration period.
Parameters
----------
dose
The amount of the compound that is injected at each administration.
start
Start time of the treatment.
duration
Duration of dose administration. For a bolus injection, a dose
duration of 1% of the time unit should suffice. By default the
duration is set to 0.01 (bolus).
period
Periodicity at which doses are administered. If ``None`` the dose
is administered only once.
num
Number of administered doses. If ``None`` and the periodicity of
the administration is not ``None``, doses are administered
indefinitely.
"""
if self._administration is None:
raise ValueError(
'The route of administration of the dose has not been set.')
if num is None:
            # Myokit's default is zero, i.e. infinitely many doses
num = 0
if period is None:
# If period is not provided, we administer a single dose
            # Myokit's defaults are zero for both.
period = 0
num = 0
# Translate dose to dose rate
dose_rate = dose / duration
# Set dosing regimen
dosing_regimen = myokit.pacing.blocktrain(
period=period, duration=duration, offset=start, level=dose_rate,
limit=num)
self.simulator.set_protocol(dosing_regimen)
def pd_output(self):
"""
Returns the variable which interacts with the pharmacodynamic model.
In most models this will be the concentration of the drug in the
central compartment.
This variable is mapped to the
:meth:`chi.PharmacodynamicModel.pk_input` variable when a
:class:`PKPDModel` is instantiated.
Defaults to ``None`` or ``central.drug_concentration`` if the latter is
among the model parameters.
"""
return self._pd_output
def set_pd_output(self, name):
"""
Sets the variable which interacts with the pharmacodynamic model.
In most models this will be the concentration of the drug in the
central compartment.
The name has to match a parameter of the model.
This variable is mapped to the
:meth:`chi.PharmacodynamicModel.pk_input` variable when a
:class:`PKPDModel` is instantiated.
"""
# Get intermediate variable names
inter_names = [
var.qname() for var in self._model.variables(inter=True)]
names = inter_names + self._parameter_names
if name not in names:
raise ValueError(
'The name does not match a model variable.')
self._pd_output = name
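# Illustrative usage sketch (not part of chi): set an indirect route of
# administration and a repeated bolus dosing regimen on a PharmacokineticModel,
# then simulate. The SBML file path, the 'central' compartment name and the
# numerical values are hypothetical placeholders.
def _example_dosing_simulation(sbml_file):
    model = PharmacokineticModel(sbml_file)
    # Indirect administration adds a dose compartment with a linear absorption
    # rate into the selected compartment.
    model.set_administration('central', amount_var='drug_amount', direct=False)
    # A dose of 2 every 24 time units, administered as a bolus (duration 0.01).
    model.set_dosing_regimen(dose=2, start=1, duration=0.01, period=24)
    times = np.linspace(0, 48, 200)
    parameters = [1.0] * model.n_parameters()
    return model.simulate(parameters, times)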
class ReducedMechanisticModel(object):
"""
A class that can be used to permanently fix model parameters of a
:class:`MechanisticModel` instance.
This may be useful to explore simplified versions of a model before
defining a new SBML file.
Parameters
----------
mechanistic_model
An instance of a :class:`MechanisticModel`.
"""
def __init__(self, mechanistic_model):
super(ReducedMechanisticModel, self).__init__()
# Check input
if not isinstance(mechanistic_model, MechanisticModel):
raise ValueError(
'The mechanistic model has to be an instance of a '
'chi.MechanisticModel')
self._mechanistic_model = mechanistic_model
self.simulator = mechanistic_model.simulator
# Set defaults
self._fixed_params_mask = None
self._fixed_params_values = None
self._n_parameters = mechanistic_model.n_parameters()
self._parameter_names = mechanistic_model.parameters()
def copy(self):
"""
Returns a deep copy of the reduced model.
.. note::
Copying the model resets the sensitivity settings.
"""
# Get a safe copy of the mechanistic model
mechanistic_model = self._mechanistic_model.copy()
# Copy the reduced model
# (this possibly corrupts the mechanistic model and the
# simulator)
model = copy.deepcopy(self)
# Replace mechanistic model and simulator
model._mechanistic_model = mechanistic_model
model.simulator = mechanistic_model.simulator
return model
def dosing_regimen(self):
"""
Returns the dosing regimen of the compound in form of a
:class:`myokit.Protocol`. If the protocol has not been set, ``None`` is
returned.
If the model does not support dose administration, ``None`` is
returned.
"""
try:
return self._mechanistic_model.dosing_regimen()
except AttributeError:
return None
def enable_sensitivities(self, enabled):
"""
Enables the computation of the output sensitivities with respect to
the free model parameters.
"""
if not enabled:
self._mechanistic_model.enable_sensitivities(enabled)
return None
# Get free parameters
free_parameters = np.array(self._parameter_names)
if self._fixed_params_mask is not None:
free_parameters = free_parameters[~self._fixed_params_mask]
# Set sensitivities
self._mechanistic_model.enable_sensitivities(
enabled, free_parameters)
def fix_parameters(self, name_value_dict):
"""
Fixes the value of model parameters, and effectively removes them as a
        parameter from the model. Fixing the value of a parameter at ``None``
        sets the parameter free again.
Parameters
----------
name_value_dict
A dictionary with model parameter names as keys, and parameter
values as values.
"""
# Check type
try:
name_value_dict = dict(name_value_dict)
except (TypeError, ValueError):
raise ValueError(
                'The name-value dictionary has to be convertible to a Python '
'dictionary.')
# If no model parameters have been fixed before, instantiate a mask
# and values
if self._fixed_params_mask is None:
self._fixed_params_mask = np.zeros(
shape=self._n_parameters, dtype=bool)
if self._fixed_params_values is None:
self._fixed_params_values = np.empty(shape=self._n_parameters)
# Update the mask and values
for index, name in enumerate(self._parameter_names):
try:
value = name_value_dict[name]
except KeyError:
# KeyError indicates that parameter name is not being fixed
continue
# Fix parameter if value is not None, else unfix it
self._fixed_params_mask[index] = value is not None
self._fixed_params_values[index] = value
# If all parameters are free, set mask and values to None again
if np.alltrue(~self._fixed_params_mask):
self._fixed_params_mask = None
self._fixed_params_values = None
# Remove sensitivities for fixed parameters
if self.has_sensitivities() is True:
self.enable_sensitivities(True)
def has_sensitivities(self):
"""
Returns a boolean indicating whether sensitivities have been enabled.
"""
return self._mechanistic_model.has_sensitivities()
def mechanistic_model(self):
"""
Returns the original mechanistic model.
"""
return self._mechanistic_model
def n_fixed_parameters(self):
"""
Returns the number of fixed model parameters.
"""
if self._fixed_params_mask is None:
return 0
n_fixed = int(np.sum(self._fixed_params_mask))
return n_fixed
def n_outputs(self):
"""
Returns the number of output dimensions.
By default this is the number of states.
"""
return self._mechanistic_model.n_outputs()
def n_parameters(self):
"""
Returns the number of parameters in the model.
Parameters of the model are initial state values and structural
parameter values.
"""
# Get number of fixed parameters
n_fixed = 0
if self._fixed_params_mask is not None:
n_fixed = int(np.sum(self._fixed_params_mask))
# Subtract fixed parameters from total number
n_parameters = self._n_parameters - n_fixed
return n_parameters
def outputs(self):
"""
Returns the output names of the model.
"""
return self._mechanistic_model.outputs()
def parameters(self):
"""
Returns the parameter names of the model.
"""
# Remove fixed model parameters
names = self._parameter_names
if self._fixed_params_mask is not None:
names = np.array(names)
names = names[~self._fixed_params_mask]
names = list(names)
return copy.copy(names)
def pd_output(self):
"""
Returns the variable which interacts with the pharmacodynamic model.
In most models this will be the concentration of the drug in the
central compartment.
This variable is mapped to the
:meth:`chi.PharmacodynamicModel.pk_input` variable when a
:class:`PKPDModel` is instantiated.
Defaults to ``None`` or ``central.drug_concentration`` if the latter is
among the model parameters.
If the model does not support a pd output, ``None`` is returned.
"""
try:
return self._mechanistic_model.pd_output()
except AttributeError:
return None
def pk_input(self):
"""
Returns the pharmacokinetic input variable. In most models this will be
the concentration of the drug.
Defaults to ``None`` or ``myokit.drug_concentration`` if the latter is
among the model parameters.
If the model does not support a pk input, ``None`` is returned.
"""
try:
return self._mechanistic_model.pk_input()
except AttributeError:
return None
def set_dosing_regimen(
self, dose, start, duration=0.01, period=None, num=None):
"""
Sets the dosing regimen with which the compound is administered.
The route of administration can be set with :meth:`set_administration`.
However, the type of administration, e.g. bolus injection or infusion,
may be controlled with the duration input.
By default the dose is administered as a bolus injection (duration on
a time scale that is 100 fold smaller than the basic time unit). To
model an infusion of the dose over a longer time period, the
``duration`` can be adjusted to the appropriate time scale.
By default the dose is administered once. To apply multiple doses
provide a dose administration period.
Parameters
----------
dose
The amount of the compound that is injected at each administration.
start
Start time of the treatment.
duration
Duration of dose administration. For a bolus injection, a dose
duration of 1% of the time unit should suffice. By default the
duration is set to 0.01 (bolus).
period
Periodicity at which doses are administered. If ``None`` the dose
is administered only once.
num
Number of administered doses. If ``None`` and the periodicity of
the administration is not ``None``, doses are administered
indefinitely.
"""
try:
self._mechanistic_model.set_dosing_regimen(
dose, start, duration, period, num)
except AttributeError:
raise AttributeError(
'The mechanistic model does not support dosing regimens.')
def set_outputs(self, outputs):
"""
Sets outputs of the model.
Parameters
----------
outputs
A list of quantifiable variable names of the :class:`myokit.Model`,
e.g. `compartment.variable`.
"""
self._mechanistic_model.set_outputs(outputs)
def set_output_names(self, names):
"""
Assigns names to the outputs. By default the :class:`myokit.Model`
names are assigned to the outputs.
Parameters
----------
names
A dictionary that maps the current output names to new names.
"""
self._mechanistic_model.set_output_names(names)
def set_parameter_names(self, names):
"""
Assigns names to the parameters. By default the :class:`myokit.Model`
names are assigned to the parameters.
Parameters
----------
names
A dictionary that maps the current parameter names to new names.
"""
# Set parameter names
self._mechanistic_model.set_parameter_names(names)
self._parameter_names = self._mechanistic_model.parameters()
def simulate(self, parameters, times):
"""
Returns the numerical solution of the model outputs (and optionally
        the sensitivities) for the specified parameters and times.
The model outputs are returned as a 2 dimensional NumPy array of shape
(n_outputs, n_times). If sensitivities are enabled, a tuple is returned
with the NumPy array of the model outputs and a NumPy array of the
sensitivities of shape (n_times, n_outputs, n_parameters).
:param parameters: An array-like object with values for the model
parameters.
:type parameters: list, numpy.ndarray
:param times: An array-like object with time points at which the output
values are returned.
:type times: list, numpy.ndarray
"""
# Insert fixed parameter values
if self._fixed_params_mask is not None:
self._fixed_params_values[
~self._fixed_params_mask] = parameters
parameters = self._fixed_params_values
return self._mechanistic_model.simulate(parameters, times)
def time_unit(self):
"""
Returns the model's unit of time.
"""
return self._mechanistic_model.time_unit()
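# Illustrative usage sketch (not part of chi): wrap a mechanistic model in a
# ReducedMechanisticModel and permanently fix one parameter. No parameter
# names are assumed; the first reported name is used.
def _example_fix_parameter(mechanistic_model):
    reduced = ReducedMechanisticModel(mechanistic_model)
    first_name = reduced.parameters()[0]
    reduced.fix_parameters({first_name: 1.0})
    # The fixed parameter no longer counts towards the free parameters.
    return reduced.n_parameters()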
|
[
"numpy.alltrue",
"myokit.formats.sbml.SBMLImporter",
"myokit.pacing.blocktrain",
"myokit.Simulation",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.empty",
"copy.deepcopy",
"copy.copy",
"myokit.Name"
] |
[((984, 1014), 'myokit.Simulation', 'myokit.Simulation', (['self._model'], {}), '(self._model)\n', (1001, 1014), False, 'import myokit\n'), ((1824, 1844), 'numpy.array', 'np.array', (['parameters'], {}), '(parameters)\n', (1832, 1844), True, 'import numpy as np\n'), ((2721, 2738), 'numpy.argsort', 'np.argsort', (['names'], {}), '(names)\n', (2731, 2738), True, 'import numpy as np\n'), ((2770, 2798), 'numpy.argsort', 'np.argsort', (['order_after_sort'], {}), '(order_after_sort)\n', (2780, 2798), True, 'import numpy as np\n'), ((3777, 3796), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (3790, 3796), False, 'import copy\n'), ((3925, 3966), 'myokit.Simulation', 'myokit.Simulation', (['myokit_model', 'protocol'], {}), '(myokit_model, protocol)\n', (3942, 3966), False, 'import myokit\n'), ((6650, 6705), 'myokit.Simulation', 'myokit.Simulation', (['self._model', 'protocol', 'sensitivities'], {}), '(self._model, protocol, sensitivities)\n', (6667, 6705), False, 'import myokit\n'), ((14685, 14740), 'numpy.array', 'np.array', (['[output[name] for name in self._output_names]'], {}), '([output[name] for name in self._output_names])\n', (14693, 14740), True, 'import numpy as np\n'), ((14765, 14788), 'numpy.array', 'np.array', (['sensitivities'], {}), '(sensitivities)\n', (14773, 14788), True, 'import numpy as np\n'), ((24314, 24338), 'myokit.Simulation', 'myokit.Simulation', (['model'], {}), '(model)\n', (24331, 24338), False, 'import myokit\n'), ((26633, 26737), 'myokit.pacing.blocktrain', 'myokit.pacing.blocktrain', ([], {'period': 'period', 'duration': 'duration', 'offset': 'start', 'level': 'dose_rate', 'limit': 'num'}), '(period=period, duration=duration, offset=start,\n level=dose_rate, limit=num)\n', (26657, 26737), False, 'import myokit\n'), ((29580, 29599), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (29593, 29599), False, 'import copy\n'), ((30567, 30598), 'numpy.array', 'np.array', (['self._parameter_names'], {}), '(self._parameter_names)\n', (30575, 30598), True, 'import numpy as np\n'), ((32466, 32502), 'numpy.alltrue', 'np.alltrue', (['(~self._fixed_params_mask)'], {}), '(~self._fixed_params_mask)\n', (32476, 32502), True, 'import numpy as np\n'), ((34550, 34566), 'copy.copy', 'copy.copy', (['names'], {}), '(names)\n', (34559, 34566), False, 'import copy\n'), ((14464, 14519), 'numpy.array', 'np.array', (['[output[name] for name in self._output_names]'], {}), '([output[name] for name in self._output_names])\n', (14472, 14519), True, 'import numpy as np\n'), ((31723, 31769), 'numpy.zeros', 'np.zeros', ([], {'shape': 'self._n_parameters', 'dtype': 'bool'}), '(shape=self._n_parameters, dtype=bool)\n', (31731, 31769), True, 'import numpy as np\n'), ((31874, 31908), 'numpy.empty', 'np.empty', ([], {'shape': 'self._n_parameters'}), '(shape=self._n_parameters)\n', (31882, 31908), True, 'import numpy as np\n'), ((33275, 33306), 'numpy.sum', 'np.sum', (['self._fixed_params_mask'], {}), '(self._fixed_params_mask)\n', (33281, 33306), True, 'import numpy as np\n'), ((34434, 34449), 'numpy.array', 'np.array', (['names'], {}), '(names)\n', (34442, 34449), True, 'import numpy as np\n'), ((645, 664), 'myokit.formats.sbml.SBMLImporter', 'sbml.SBMLImporter', ([], {}), '()\n', (662, 664), True, 'import myokit.formats.sbml as sbml\n'), ((5283, 5323), 'myokit.Simulation', 'myokit.Simulation', (['self._model', 'protocol'], {}), '(self._model, protocol)\n', (5300, 5323), False, 'import myokit\n'), ((18403, 18432), 'myokit.Name', 'myokit.Name', (['dose_drug_amount'], {}), 
'(dose_drug_amount)\n', (18414, 18432), False, 'import myokit\n'), ((20004, 20026), 'myokit.Name', 'myokit.Name', (['dose_rate'], {}), '(dose_rate)\n', (20015, 20026), False, 'import myokit\n'), ((33874, 33905), 'numpy.sum', 'np.sum', (['self._fixed_params_mask'], {}), '(self._fixed_params_mask)\n', (33880, 33905), True, 'import numpy as np\n'), ((18356, 18384), 'myokit.Name', 'myokit.Name', (['absorption_rate'], {}), '(absorption_rate)\n', (18367, 18384), False, 'import myokit\n'), ((18683, 18711), 'myokit.Name', 'myokit.Name', (['absorption_rate'], {}), '(absorption_rate)\n', (18694, 18711), False, 'import myokit\n'), ((18733, 18762), 'myokit.Name', 'myokit.Name', (['dose_drug_amount'], {}), '(dose_drug_amount)\n', (18744, 18762), False, 'import myokit\n')]
|
import bpy
import bmesh
import numpy
from random import randint
import time
# pointsToVoxels() has been modified from the function generate_blocks() in https://github.com/cagcoach/BlenderPlot/blob/master/blendplot.py
# Some changes to accommodate Blender 2.8's API changes were made,
# and the function has been made much more efficient through creative usage of numpy.
def pointsToVoxels(points, name="VoxelMesh"):
# For now, we'll combine the voxels from each of the six views into one array and then just take the unique values.
# Later on, this could be re-structured to, for example, render the voxels from each face in a separate colour
points = numpy.concatenate(tuple(points.values()))
points = numpy.unique(points, axis=0)
print("Number of points:", len(points))
mesh = bpy.data.meshes.new("mesh") # add a new mesh
obj = bpy.data.objects.new(name, mesh)
bpy.context.collection.objects.link(obj) # put the object into the scene (link)
bpy.context.view_layer.objects.active = obj
obj.select_set(state=True) # select object
mesh = obj.data
bm = bmesh.new()
# 0 1 2 3 4 5 6 7
block=numpy.array([ [-1,-1,-1],[-1,-1,1],[-1,1,-1],[-1,1,1],[1,-1,-1],[1,-1,1],[1,1,-1],[1,1,1] ]).astype(float)
block*=0.5
print("Creating vertices...")
# Function to apply each point to each element of "block" as efficiently as possible
# First, produce 8 copies of each point. numpy.tile() is apparently the most efficient way to do so.
pointsTiled = numpy.tile(points, (1,8))
# This will make each tuple 24 items long. To fix this, we need to reshape pointsTiled, and split each 24-long tuple into 8 3-longs.
pointsDuplicated = numpy.reshape(pointsTiled, (pointsTiled.shape[0], 8, 3))
# Then, a lambda to piecewise add the elements of "block" to a respective set of 8 duplicate points in pointsDuplicated
blockerize = lambda x : x + block
# Apply it
pointsBlockerized = blockerize(pointsDuplicated)
# pointsBlockerized is now a 2D array of thruples. Convert back to a 1D array.
verts = numpy.reshape(pointsBlockerized, (pointsBlockerized.shape[0]*pointsBlockerized.shape[1], 3) )
#print("points shape:", points.shape)
#print("verts shape:", verts.shape)
#print("verts:", verts)
'''for pt in points:
print((block+pt))
verts=numpy.append(verts, (block+pt),axis=0)'''
printAfterCount = 100000
nextThreshold = 0
pointsDone = 0
#print(verts)
for v in verts:
bm.verts.new(v)
pointsDone += 1
if pointsDone > nextThreshold:
print(pointsDone, "vertices have been added so far.")
nextThreshold += printAfterCount
print("Calling to_mesh().")
bm.to_mesh(mesh)
print("Ensuring lookup table.")
bm.verts.ensure_lookup_table()
nextThreshold = 0
cubesDone = 0
for i in range(0,len(bm.verts),8):
bm.faces.new( [bm.verts[i+0], bm.verts[i+1],bm.verts[i+3], bm.verts[i+2]])
bm.faces.new( [bm.verts[i+4], bm.verts[i+5],bm.verts[i+1], bm.verts[i+0]])
bm.faces.new( [bm.verts[i+6], bm.verts[i+7],bm.verts[i+5], bm.verts[i+4]])
bm.faces.new( [bm.verts[i+2], bm.verts[i+3],bm.verts[i+7], bm.verts[i+6]])
bm.faces.new( [bm.verts[i+5], bm.verts[i+7],bm.verts[i+3], bm.verts[i+1]]) #top
bm.faces.new( [bm.verts[i+0], bm.verts[i+2],bm.verts[i+6], bm.verts[i+4]]) #bottom
cubesDone += 1
if cubesDone > nextThreshold:
print(cubesDone, "cubes have been made so far.")
nextThreshold += printAfterCount
if bpy.context.mode == 'EDIT_MESH':
bmesh.update_edit_mesh(obj.data)
else:
bm.to_mesh(obj.data)
obj.data.update()
    bm.free()  # free the BMesh and its memory
return obj
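# Illustrative sketch (assumed numbers, not called by the script): the
# tile/reshape/broadcast trick used in pointsToVoxels() on a tiny input.
# Two points expand into 2 * 8 = 16 cube-corner vertices.
def _demo_blockerize():
    pts = numpy.array([[0.0, 0.0, 0.0], [2.0, 0.0, 1.0]])
    block = 0.5 * numpy.array([[-1,-1,-1],[-1,-1,1],[-1,1,-1],[-1,1,1],
                               [1,-1,-1],[1,-1,1],[1,1,-1],[1,1,1]]).astype(float)
    # Repeat each point 8 times, regroup into (n_points, 8, 3), then add the
    # per-corner offsets in a single broadcasted operation.
    duplicated = numpy.reshape(numpy.tile(pts, (1, 8)), (pts.shape[0], 8, 3))
    corners = numpy.reshape(duplicated + block, (-1, 3))
    return corners  # shape (16, 3)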
# Given a 3D array of 0's and 1's, place a voxel in every cell that contains a 0 (see the check below)
def imagesToVoxelsInefficient(image3D):
for xValue in range(len(image3D)):
for yValue in range(len(image3D[xValue])):
for zValue in range(len(image3D[xValue][yValue])):
if(image3D[xValue][yValue][zValue]==0):
createVoxel((xValue,yValue,zValue))
# place a voxel at a given position, using mesh.primitive_cube_add is really slow so it might be worth making this faster
def createVoxel(position):
bpy.ops.mesh.primitive_cube_add(location=position,size=1)
# print(position)
if __name__ == "__main__":
# calculate the runtime of this script
startTime = time.time()
# createVoxel((1,2,3))
# Generate a 10*10*10 3D texture
testImageArray = []
for x in range(10):
yArray = []
for y in range(10):
zArray = []
for z in range(10):
zArray.append(0)
# zArray.append(randint(0,1))
yArray.append(zArray)
testImageArray.append(yArray)
# print(testImageArray)
# place voxels based on that 10*10*10 array
imagesToVoxelsInefficient(testImageArray)
# testImage = [[[0,0],[1,1]],[[1,1],[1,0]]]
stopTime = time.time()
print("Script took:",stopTime-startTime)
|
[
"numpy.tile",
"numpy.reshape",
"numpy.unique",
"bmesh.update_edit_mesh",
"bpy.data.objects.new",
"bpy.data.meshes.new",
"bpy.context.collection.objects.link",
"bmesh.new",
"numpy.array",
"time.time",
"bpy.ops.mesh.primitive_cube_add"
] |
[((721, 749), 'numpy.unique', 'numpy.unique', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (733, 749), False, 'import numpy\n'), ((806, 833), 'bpy.data.meshes.new', 'bpy.data.meshes.new', (['"""mesh"""'], {}), "('mesh')\n", (825, 833), False, 'import bpy\n'), ((862, 894), 'bpy.data.objects.new', 'bpy.data.objects.new', (['name', 'mesh'], {}), '(name, mesh)\n', (882, 894), False, 'import bpy\n'), ((899, 939), 'bpy.context.collection.objects.link', 'bpy.context.collection.objects.link', (['obj'], {}), '(obj)\n', (934, 939), False, 'import bpy\n'), ((1105, 1116), 'bmesh.new', 'bmesh.new', ([], {}), '()\n', (1114, 1116), False, 'import bmesh\n'), ((1590, 1616), 'numpy.tile', 'numpy.tile', (['points', '(1, 8)'], {}), '(points, (1, 8))\n', (1600, 1616), False, 'import numpy\n'), ((1776, 1832), 'numpy.reshape', 'numpy.reshape', (['pointsTiled', '(pointsTiled.shape[0], 8, 3)'], {}), '(pointsTiled, (pointsTiled.shape[0], 8, 3))\n', (1789, 1832), False, 'import numpy\n'), ((2158, 2256), 'numpy.reshape', 'numpy.reshape', (['pointsBlockerized', '(pointsBlockerized.shape[0] * pointsBlockerized.shape[1], 3)'], {}), '(pointsBlockerized, (pointsBlockerized.shape[0] *\n pointsBlockerized.shape[1], 3))\n', (2171, 2256), False, 'import numpy\n'), ((4383, 4441), 'bpy.ops.mesh.primitive_cube_add', 'bpy.ops.mesh.primitive_cube_add', ([], {'location': 'position', 'size': '(1)'}), '(location=position, size=1)\n', (4414, 4441), False, 'import bpy\n'), ((4559, 4570), 'time.time', 'time.time', ([], {}), '()\n', (4568, 4570), False, 'import time\n'), ((5130, 5141), 'time.time', 'time.time', ([], {}), '()\n', (5139, 5141), False, 'import time\n'), ((3716, 3748), 'bmesh.update_edit_mesh', 'bmesh.update_edit_mesh', (['obj.data'], {}), '(obj.data)\n', (3738, 3748), False, 'import bmesh\n'), ((1221, 1339), 'numpy.array', 'numpy.array', (['[[-1, -1, -1], [-1, -1, 1], [-1, 1, -1], [-1, 1, 1], [1, -1, -1], [1, -1, 1\n ], [1, 1, -1], [1, 1, 1]]'], {}), '([[-1, -1, -1], [-1, -1, 1], [-1, 1, -1], [-1, 1, 1], [1, -1, -1\n ], [1, -1, 1], [1, 1, -1], [1, 1, 1]])\n', (1232, 1339), False, 'import numpy\n')]
|
import unittest
from hashlib import sha1
import pickle
import numpy as np
from datasketch.lsh import MinHashLSH
from datasketch.minhash import MinHash
from datasketch.weighted_minhash import WeightedMinHashGenerator
class TestMinHashLSH(unittest.TestCase):
def test_init(self):
lsh = MinHashLSH(threshold=0.8)
self.assertTrue(lsh.is_empty())
b1, r1 = lsh.b, lsh.r
lsh = MinHashLSH(threshold=0.8, weights=(0.2,0.8))
b2, r2 = lsh.b, lsh.r
self.assertTrue(b1 < b2)
self.assertTrue(r1 > r2)
def test_insert(self):
lsh = MinHashLSH(threshold=0.5, num_perm=16)
m1 = MinHash(16)
m1.update("a".encode("utf8"))
m2 = MinHash(16)
m2.update("b".encode("utf8"))
lsh.insert("a", m1)
lsh.insert("b", m2)
for t in lsh.hashtables:
self.assertTrue(len(t) >= 1)
items = []
for H in t:
items.extend(t[H])
self.assertTrue("a" in items)
self.assertTrue("b" in items)
self.assertTrue("a" in lsh)
self.assertTrue("b" in lsh)
for i, H in enumerate(lsh.keys["a"]):
self.assertTrue("a" in lsh.hashtables[i][H])
m3 = MinHash(18)
self.assertRaises(ValueError, lsh.insert, "c", m3)
def test_query(self):
lsh = MinHashLSH(threshold=0.5, num_perm=16)
m1 = MinHash(16)
m1.update("a".encode("utf8"))
m2 = MinHash(16)
m2.update("b".encode("utf8"))
lsh.insert("a", m1)
lsh.insert("b", m2)
result = lsh.query(m1)
self.assertTrue("a" in result)
result = lsh.query(m2)
self.assertTrue("b" in result)
m3 = MinHash(18)
self.assertRaises(ValueError, lsh.query, m3)
def test_remove(self):
lsh = MinHashLSH(threshold=0.5, num_perm=16)
m1 = MinHash(16)
m1.update("a".encode("utf8"))
m2 = MinHash(16)
m2.update("b".encode("utf8"))
lsh.insert("a", m1)
lsh.insert("b", m2)
lsh.remove("a")
self.assertTrue("a" not in lsh.keys)
for table in lsh.hashtables:
for H in table:
self.assertGreater(len(table[H]), 0)
self.assertTrue("a" not in table[H])
self.assertRaises(ValueError, lsh.remove, "c")
def test_pickle(self):
lsh = MinHashLSH(threshold=0.5, num_perm=16)
m1 = MinHash(16)
m1.update("a".encode("utf8"))
m2 = MinHash(16)
m2.update("b".encode("utf8"))
lsh.insert("a", m1)
lsh.insert("b", m2)
lsh2 = pickle.loads(pickle.dumps(lsh))
result = lsh.query(m1)
self.assertTrue("a" in result)
result = lsh.query(m2)
self.assertTrue("b" in result)
class TestWeightedMinHashLSH(unittest.TestCase):
def test_init(self):
lsh = MinHashLSH(threshold=0.8)
self.assertTrue(lsh.is_empty())
b1, r1 = lsh.b, lsh.r
lsh = MinHashLSH(threshold=0.8, weights=(0.2,0.8))
b2, r2 = lsh.b, lsh.r
self.assertTrue(b1 < b2)
self.assertTrue(r1 > r2)
def test_insert(self):
lsh = MinHashLSH(threshold=0.5, num_perm=4)
mg = WeightedMinHashGenerator(10, 4)
m1 = mg.minhash(np.random.uniform(1, 10, 10))
m2 = mg.minhash(np.random.uniform(1, 10, 10))
lsh.insert("a", m1)
lsh.insert("b", m2)
for t in lsh.hashtables:
self.assertTrue(len(t) >= 1)
items = []
for H in t:
items.extend(t[H])
self.assertTrue("a" in items)
self.assertTrue("b" in items)
self.assertTrue("a" in lsh)
self.assertTrue("b" in lsh)
for i, H in enumerate(lsh.keys["a"]):
self.assertTrue("a" in lsh.hashtables[i][H])
mg = WeightedMinHashGenerator(10, 5)
m3 = mg.minhash(np.random.uniform(1, 10, 10))
self.assertRaises(ValueError, lsh.insert, "c", m3)
def test_query(self):
lsh = MinHashLSH(threshold=0.5, num_perm=4)
mg = WeightedMinHashGenerator(10, 4)
m1 = mg.minhash(np.random.uniform(1, 10, 10))
m2 = mg.minhash(np.random.uniform(1, 10, 10))
lsh.insert("a", m1)
lsh.insert("b", m2)
result = lsh.query(m1)
self.assertTrue("a" in result)
result = lsh.query(m2)
self.assertTrue("b" in result)
mg = WeightedMinHashGenerator(10, 5)
m3 = mg.minhash(np.random.uniform(1, 10, 10))
self.assertRaises(ValueError, lsh.query, m3)
def test_remove(self):
lsh = MinHashLSH(threshold=0.5, num_perm=4)
mg = WeightedMinHashGenerator(10, 4)
m1 = mg.minhash(np.random.uniform(1, 10, 10))
m2 = mg.minhash(np.random.uniform(1, 10, 10))
lsh.insert("a", m1)
lsh.insert("b", m2)
lsh.remove("a")
self.assertTrue("a" not in lsh.keys)
for table in lsh.hashtables:
for H in table:
self.assertGreater(len(table[H]), 0)
self.assertTrue("a" not in table[H])
self.assertRaises(ValueError, lsh.remove, "c")
def test_pickle(self):
lsh = MinHashLSH(threshold=0.5, num_perm=4)
mg = WeightedMinHashGenerator(10, 4)
m1 = mg.minhash(np.random.uniform(1, 10, 10))
m2 = mg.minhash(np.random.uniform(1, 10, 10))
lsh.insert("a", m1)
lsh.insert("b", m2)
lsh2 = pickle.loads(pickle.dumps(lsh))
result = lsh.query(m1)
self.assertTrue("a" in result)
result = lsh.query(m2)
self.assertTrue("b" in result)
if __name__ == "__main__":
unittest.main()
|
[
"datasketch.lsh.MinHashLSH",
"datasketch.weighted_minhash.WeightedMinHashGenerator",
"pickle.dumps",
"datasketch.minhash.MinHash",
"numpy.random.uniform",
"unittest.main"
] |
[((5700, 5715), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5713, 5715), False, 'import unittest\n'), ((299, 324), 'datasketch.lsh.MinHashLSH', 'MinHashLSH', ([], {'threshold': '(0.8)'}), '(threshold=0.8)\n', (309, 324), False, 'from datasketch.lsh import MinHashLSH\n'), ((409, 454), 'datasketch.lsh.MinHashLSH', 'MinHashLSH', ([], {'threshold': '(0.8)', 'weights': '(0.2, 0.8)'}), '(threshold=0.8, weights=(0.2, 0.8))\n', (419, 454), False, 'from datasketch.lsh import MinHashLSH\n'), ((592, 630), 'datasketch.lsh.MinHashLSH', 'MinHashLSH', ([], {'threshold': '(0.5)', 'num_perm': '(16)'}), '(threshold=0.5, num_perm=16)\n', (602, 630), False, 'from datasketch.lsh import MinHashLSH\n'), ((644, 655), 'datasketch.minhash.MinHash', 'MinHash', (['(16)'], {}), '(16)\n', (651, 655), False, 'from datasketch.minhash import MinHash\n'), ((707, 718), 'datasketch.minhash.MinHash', 'MinHash', (['(16)'], {}), '(16)\n', (714, 718), False, 'from datasketch.minhash import MinHash\n'), ((1242, 1253), 'datasketch.minhash.MinHash', 'MinHash', (['(18)'], {}), '(18)\n', (1249, 1253), False, 'from datasketch.minhash import MinHash\n'), ((1354, 1392), 'datasketch.lsh.MinHashLSH', 'MinHashLSH', ([], {'threshold': '(0.5)', 'num_perm': '(16)'}), '(threshold=0.5, num_perm=16)\n', (1364, 1392), False, 'from datasketch.lsh import MinHashLSH\n'), ((1406, 1417), 'datasketch.minhash.MinHash', 'MinHash', (['(16)'], {}), '(16)\n', (1413, 1417), False, 'from datasketch.minhash import MinHash\n'), ((1469, 1480), 'datasketch.minhash.MinHash', 'MinHash', (['(16)'], {}), '(16)\n', (1476, 1480), False, 'from datasketch.minhash import MinHash\n'), ((1729, 1740), 'datasketch.minhash.MinHash', 'MinHash', (['(18)'], {}), '(18)\n', (1736, 1740), False, 'from datasketch.minhash import MinHash\n'), ((1836, 1874), 'datasketch.lsh.MinHashLSH', 'MinHashLSH', ([], {'threshold': '(0.5)', 'num_perm': '(16)'}), '(threshold=0.5, num_perm=16)\n', (1846, 1874), False, 'from datasketch.lsh import MinHashLSH\n'), ((1888, 1899), 'datasketch.minhash.MinHash', 'MinHash', (['(16)'], {}), '(16)\n', (1895, 1899), False, 'from datasketch.minhash import MinHash\n'), ((1951, 1962), 'datasketch.minhash.MinHash', 'MinHash', (['(16)'], {}), '(16)\n', (1958, 1962), False, 'from datasketch.minhash import MinHash\n'), ((2404, 2442), 'datasketch.lsh.MinHashLSH', 'MinHashLSH', ([], {'threshold': '(0.5)', 'num_perm': '(16)'}), '(threshold=0.5, num_perm=16)\n', (2414, 2442), False, 'from datasketch.lsh import MinHashLSH\n'), ((2456, 2467), 'datasketch.minhash.MinHash', 'MinHash', (['(16)'], {}), '(16)\n', (2463, 2467), False, 'from datasketch.minhash import MinHash\n'), ((2519, 2530), 'datasketch.minhash.MinHash', 'MinHash', (['(16)'], {}), '(16)\n', (2526, 2530), False, 'from datasketch.minhash import MinHash\n'), ((2903, 2928), 'datasketch.lsh.MinHashLSH', 'MinHashLSH', ([], {'threshold': '(0.8)'}), '(threshold=0.8)\n', (2913, 2928), False, 'from datasketch.lsh import MinHashLSH\n'), ((3013, 3058), 'datasketch.lsh.MinHashLSH', 'MinHashLSH', ([], {'threshold': '(0.8)', 'weights': '(0.2, 0.8)'}), '(threshold=0.8, weights=(0.2, 0.8))\n', (3023, 3058), False, 'from datasketch.lsh import MinHashLSH\n'), ((3196, 3233), 'datasketch.lsh.MinHashLSH', 'MinHashLSH', ([], {'threshold': '(0.5)', 'num_perm': '(4)'}), '(threshold=0.5, num_perm=4)\n', (3206, 3233), False, 'from datasketch.lsh import MinHashLSH\n'), ((3247, 3278), 'datasketch.weighted_minhash.WeightedMinHashGenerator', 'WeightedMinHashGenerator', (['(10)', '(4)'], {}), '(10, 4)\n', (3271, 3278), False, 'from 
datasketch.weighted_minhash import WeightedMinHashGenerator\n'), ((3872, 3903), 'datasketch.weighted_minhash.WeightedMinHashGenerator', 'WeightedMinHashGenerator', (['(10)', '(5)'], {}), '(10, 5)\n', (3896, 3903), False, 'from datasketch.weighted_minhash import WeightedMinHashGenerator\n'), ((4058, 4095), 'datasketch.lsh.MinHashLSH', 'MinHashLSH', ([], {'threshold': '(0.5)', 'num_perm': '(4)'}), '(threshold=0.5, num_perm=4)\n', (4068, 4095), False, 'from datasketch.lsh import MinHashLSH\n'), ((4109, 4140), 'datasketch.weighted_minhash.WeightedMinHashGenerator', 'WeightedMinHashGenerator', (['(10)', '(4)'], {}), '(10, 4)\n', (4133, 4140), False, 'from datasketch.weighted_minhash import WeightedMinHashGenerator\n'), ((4459, 4490), 'datasketch.weighted_minhash.WeightedMinHashGenerator', 'WeightedMinHashGenerator', (['(10)', '(5)'], {}), '(10, 5)\n', (4483, 4490), False, 'from datasketch.weighted_minhash import WeightedMinHashGenerator\n'), ((4640, 4677), 'datasketch.lsh.MinHashLSH', 'MinHashLSH', ([], {'threshold': '(0.5)', 'num_perm': '(4)'}), '(threshold=0.5, num_perm=4)\n', (4650, 4677), False, 'from datasketch.lsh import MinHashLSH\n'), ((4691, 4722), 'datasketch.weighted_minhash.WeightedMinHashGenerator', 'WeightedMinHashGenerator', (['(10)', '(4)'], {}), '(10, 4)\n', (4715, 4722), False, 'from datasketch.weighted_minhash import WeightedMinHashGenerator\n'), ((5234, 5271), 'datasketch.lsh.MinHashLSH', 'MinHashLSH', ([], {'threshold': '(0.5)', 'num_perm': '(4)'}), '(threshold=0.5, num_perm=4)\n', (5244, 5271), False, 'from datasketch.lsh import MinHashLSH\n'), ((5285, 5316), 'datasketch.weighted_minhash.WeightedMinHashGenerator', 'WeightedMinHashGenerator', (['(10)', '(4)'], {}), '(10, 4)\n', (5309, 5316), False, 'from datasketch.weighted_minhash import WeightedMinHashGenerator\n'), ((2653, 2670), 'pickle.dumps', 'pickle.dumps', (['lsh'], {}), '(lsh)\n', (2665, 2670), False, 'import pickle\n'), ((3303, 3331), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)', '(10)'], {}), '(1, 10, 10)\n', (3320, 3331), True, 'import numpy as np\n'), ((3357, 3385), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)', '(10)'], {}), '(1, 10, 10)\n', (3374, 3385), True, 'import numpy as np\n'), ((3928, 3956), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)', '(10)'], {}), '(1, 10, 10)\n', (3945, 3956), True, 'import numpy as np\n'), ((4165, 4193), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)', '(10)'], {}), '(1, 10, 10)\n', (4182, 4193), True, 'import numpy as np\n'), ((4219, 4247), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)', '(10)'], {}), '(1, 10, 10)\n', (4236, 4247), True, 'import numpy as np\n'), ((4515, 4543), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)', '(10)'], {}), '(1, 10, 10)\n', (4532, 4543), True, 'import numpy as np\n'), ((4747, 4775), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)', '(10)'], {}), '(1, 10, 10)\n', (4764, 4775), True, 'import numpy as np\n'), ((4801, 4829), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)', '(10)'], {}), '(1, 10, 10)\n', (4818, 4829), True, 'import numpy as np\n'), ((5341, 5369), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)', '(10)'], {}), '(1, 10, 10)\n', (5358, 5369), True, 'import numpy as np\n'), ((5395, 5423), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)', '(10)'], {}), '(1, 10, 10)\n', (5412, 5423), True, 'import numpy as np\n'), ((5509, 5526), 'pickle.dumps', 'pickle.dumps', (['lsh'], {}), '(lsh)\n', (5521, 5526), False, 
'import pickle\n')]
|
import logging
from typing import Callable
from typing import List
import numpy as np
import torch.utils.data
from .video_dataset import VideoDataset
from .video_dataset import VideoRecord
LOG = logging.getLogger(__name__)
# line_profiler injects a "profile" into __builtins__. When not running under
# line_profiler we need to inject our own passthrough
if type(__builtins__) is not dict or "profile" not in __builtins__:
profile = lambda f: f
class TsnDataset(torch.utils.data.Dataset):
"""
Wraps a :class:`VideoDataset` to implement TSN sampling
"""
def __init__(
self,
dataset: VideoDataset,
num_segments: int = 3,
segment_length: int = 1,
transform: Callable = None,
random_shift: bool = True,
test_mode: bool = False,
):
"""
Args:
dataset: Video dataset to load TSN-sampled segments from.
num_segments: Number of segments per clip.
segment_length: Length of segment in number of frames.
            transform: A transform applied to the list of frames sampled from the clip.
            random_shift: Whether to randomly offset the segment start indices
                (used during training); otherwise evenly spaced indices are used.
test_mode: Whether to return center sampled frames from each segment.
"""
self.dataset = dataset
self.num_segments = num_segments
self.segment_length = segment_length
self.transform = transform
self.random_shift = random_shift
self.test_mode = test_mode
def __getitem__(self, index):
record = self.dataset.video_records[index]
if self.test_mode:
segment_start_idxs = self._get_test_indices(record)
else:
segment_start_idxs = (
self._sample_indices(record)
if self.random_shift
else self._get_val_indices(record)
)
return self._get(record, segment_start_idxs)
def __len__(self):
return len(self.dataset)
@profile
def _get(self, record: VideoRecord, segment_start_idxs: List[int]):
images = self.dataset.load_frames(
record, self._get_frame_idxs(segment_start_idxs, record)
)
if self.transform is not None:
images = self.transform(images)
metadata = record.metadata
return images, metadata
def _sample_indices(self, record: VideoRecord):
average_duration = (
record.num_frames - self.segment_length + 1
) // self.num_segments
if average_duration > 0:
offsets = np.multiply(
list(range(self.num_segments)), average_duration
) + np.random.randint(average_duration, size=self.num_segments)
elif record.num_frames > self.num_segments:
offsets = np.sort(
np.random.randint(
record.num_frames - self.segment_length + 1, size=self.num_segments
)
)
else:
offsets = np.zeros((self.num_segments,))
return offsets
def _get_val_indices(self, record: VideoRecord):
if record.num_frames > self.num_segments + self.segment_length - 1:
tick = (record.num_frames - self.segment_length + 1) / float(
self.num_segments
)
offsets = np.array(
[int(tick / 2.0 + tick * x) for x in range(self.num_segments)]
)
else:
offsets = np.zeros((self.num_segments,))
return offsets
def _get_test_indices(self, record: VideoRecord):
tick = (record.num_frames - self.segment_length + 1) / float(self.num_segments)
offsets = np.array(
[int(tick / 2.0 + tick * x) for x in range(self.num_segments)]
)
return offsets
def _get_frame_idxs(
self, segment_start_idxs: List[int], record: VideoRecord
) -> List[int]:
seg_idxs = []
for seg_ind in segment_start_idxs:
p = int(seg_ind)
for i in range(self.segment_length):
seg_idxs.append(p)
if p < record.num_frames:
p += 1
return seg_idxs
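# --- Illustrative sketch (added; not part of the original module) -------------
# A hedged example of the test-mode TSN arithmetic implemented by
# _get_test_indices and _get_frame_idxs above, using assumed values: a clip of
# 30 frames split into 3 segments of length 1 yields the centre-of-segment
# start indices [5, 15, 25].
def _example_tsn_test_indices(num_frames: int = 30, num_segments: int = 3,
                              segment_length: int = 1) -> List[int]:
    tick = (num_frames - segment_length + 1) / float(num_segments)
    starts = [int(tick / 2.0 + tick * x) for x in range(num_segments)]
    frame_idxs: List[int] = []
    for start in starts:
        p = start
        for _ in range(segment_length):
            frame_idxs.append(p)
            # Mirror _get_frame_idxs: advance but never run past the clip end.
            if p < num_frames:
                p += 1
    return frame_idxs  # [5, 15, 25] for the defaults above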
|
[
"logging.getLogger",
"numpy.zeros",
"numpy.random.randint"
] |
[((199, 226), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (216, 226), False, 'import logging\n'), ((3419, 3449), 'numpy.zeros', 'np.zeros', (['(self.num_segments,)'], {}), '((self.num_segments,))\n', (3427, 3449), True, 'import numpy as np\n'), ((2618, 2677), 'numpy.random.randint', 'np.random.randint', (['average_duration'], {'size': 'self.num_segments'}), '(average_duration, size=self.num_segments)\n', (2635, 2677), True, 'import numpy as np\n'), ((2952, 2982), 'numpy.zeros', 'np.zeros', (['(self.num_segments,)'], {}), '((self.num_segments,))\n', (2960, 2982), True, 'import numpy as np\n'), ((2777, 2868), 'numpy.random.randint', 'np.random.randint', (['(record.num_frames - self.segment_length + 1)'], {'size': 'self.num_segments'}), '(record.num_frames - self.segment_length + 1, size=self.\n num_segments)\n', (2794, 2868), True, 'import numpy as np\n')]
|
# The MIT License (MIT)
#
# Copyright © 2021 <NAME>, <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import numpy as np
from random import random, seed
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from sklearn.model_selection import train_test_split
from sklearn import linear_model
from sklearn.preprocessing import StandardScaler
from sklearn.utils import resample
# FrankeFunction: a two-variable function used to create the dataset for our vanilla problem
def FrankeFunction(x,y):
term1 = 0.75*np.exp(-(0.25*(9*x-2)**2) - 0.25*((9*y-2)**2))
term2 = 0.75*np.exp(-((9*x+1)**2)/49.0 - 0.1*(9*y+1))
term3 = 0.5*np.exp(-(9*x-7)**2/4.0 - 0.25*((9*y-3)**2))
term4 = -0.2*np.exp(-(9*x-4)**2 - (9*y-7)**2)
return term1 + term2 + term3 + term4
# 3D plot of FrankeFunction
def Plot_FrankeFunction(x,y,z, title="Dataset"):
fig = plt.figure(figsize=(8, 7))
ax = fig.gca(projection="3d")
# Plot the surface.
surf = ax.plot_surface(x, y, z, cmap=cm.coolwarm, linewidth=0, antialiased=False)
# Customize the z axis.
ax.set_zlim(-0.10, 1.40)
ax.set_xlabel(r"$x$")
ax.set_ylabel(r"$y$")
ax.set_zlabel(r"$z$")
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.title(title)
plt.show()
# Create xyz dataset from the FrankeFunction with added normally distributed noise
def create_xyz_dataset(n,mu_N, sigma_N):
x = np.linspace(0,1,n)
y = np.linspace(0,1,n)
x,y = np.meshgrid(x,y)
z = FrankeFunction(x,y) +mu_N +sigma_N*np.random.randn(n,n)
return x,y,z
# Error analysis: MSE and R2 score
def R2(z_data, z_model):
return 1 - np.sum((z_data - z_model) ** 2) / np.sum((z_data - np.mean(z_data)) ** 2)
def MSE(z_data,z_model):
n = np.size(z_model)
return np.sum((z_data-z_model)**2)/n
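# Illustrative sketch (added; not part of the original script): the two error
# metrics on a tiny hand-made example. With z_data = [1, 2, 3] and
# z_model = [1, 2, 4], MSE = (0 + 0 + 1)/3 = 1/3 and R2 = 1 - 1/2 = 0.5.
def _example_error_metrics():
    z_data = np.array([1.0, 2.0, 3.0])
    z_model = np.array([1.0, 2.0, 4.0])
    return MSE(z_data, z_model), R2(z_data, z_model)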
# SVD theorem
def SVD(A):
U, S, VT = np.linalg.svd(A,full_matrices=True)
D = np.zeros((len(U),len(VT)))
print("shape D= ", np.shape(D))
print("Shape S= ",np.shape(S))
print("lenVT =",len(VT))
print("lenU =",len(U))
D = np.eye(len(U),len(VT))*S
"""
for i in range(0,VT.shape[0]): #was len(VT)
D[i,i]=S[i]
print("i=",i)"""
return U @ D @ VT
# SVD inversion
def SVDinv(A):
U, s, VT = np.linalg.svd(A)
# reciprocals of singular values of s
d = 1.0 / s
# create m x n D matrix
D = np.zeros(A.shape)
# populate D with n x n diagonal matrix
D[:A.shape[1], :A.shape[1]] = np.diag(d)
UT = np.transpose(U)
V = np.transpose(VT)
return np.matmul(V,np.matmul(D.T,UT))
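# Illustrative sketch (added): a quick sanity check that SVDinv agrees with
# NumPy's pseudoinverse on a small random tall matrix (m >= n). The matrix size
# is an assumed value, only meant to show how the helper above can be validated.
def _check_svdinv(m=5, n=3):
    A = np.random.randn(m, n)
    return np.allclose(SVDinv(A), np.linalg.pinv(A))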
# Design matrix for two independent variables x,y
def create_X(x, y, n):
if len(x.shape) > 1:
x = np.ravel(x)
y = np.ravel(y)
N = len(x)
    l = int((n+1)*(n+2)/2)  # Number of elements in beta (number of features for a polynomial of degree n)
X = np.ones((N,l))
for i in range(1,n+1):
q = int((i)*(i+1)/2)
for k in range(i+1):
X[:,q+k] = (x**(i-k))*(y**k)
return X
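# Illustrative sketch (added): for degree n=2 the design matrix built by
# create_X has l = (2+1)*(2+2)/2 = 6 columns, ordered [1, x, y, x^2, x*y, y^2],
# with one row per (x, y) sample. The sample points below are assumed values.
def _example_design_matrix():
    x = np.array([0.0, 1.0, 2.0])
    y = np.array([1.0, 2.0, 3.0])
    X = create_X(x, y, 2)
    return X.shape  # (3, 6)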
def scale_Xz(X_train, X_test, z_train, z_test, with_std=False):
scaler_X = StandardScaler(with_std=with_std) #with_std=False
scaler_X.fit(X_train)
X_train = scaler_X.transform(X_train)
X_test = scaler_X.transform(X_test)
scaler_z = StandardScaler(with_std=with_std) #with_std=False
z_train = np.squeeze(scaler_z.fit_transform(z_train.reshape(-1, 1))) #scaler_z.fit_transform(z_train) #
z_test = np.squeeze(scaler_z.transform(z_test.reshape(-1, 1))) #scaler_z.transform(z_test) #
return X_train, X_test, z_train, z_test
# Splitting and rescaling data (rescaling is optional)
# Default values: 20% of test data and the scaler is StandardScaler without std.dev.
def Split_and_Scale(X,z,test_size=0.2, scale=True, with_std=False):
#Splitting training and test data
X_train, X_test, z_train, z_test = train_test_split(X, z, test_size=test_size)
# Rescaling X and z (optional)
if scale:
X_train, X_test, z_train, z_test = scale_Xz(X_train, X_test, z_train, z_test, with_std=with_std)
return X_train, X_test, z_train, z_test
# OLS equation
def OLS_solver(X_train, X_test, z_train, z_test):
    # Calculating beta via the Ordinary Least Squares normal equation, using the matrix pseudoinverse
    # Alternatively to the NumPy pseudoinverse, the SVD theorem can be used to evaluate the inverse of a matrix (even when it is singular): just replace 'np.linalg.pinv' with 'SVDinv'.
ols_beta = np.linalg.pinv(X_train.T @ X_train) @ X_train.T @ z_train
z_tilde = X_train @ ols_beta # z_prediction of the train data
z_predict = X_test @ ols_beta # z_prediction of the test data
return ols_beta, z_tilde, z_predict
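# Illustrative sketch (added): a minimal end-to-end run of the helpers above --
# build the noisy Franke dataset, assemble the design matrix, split and scale,
# fit OLS and report the test-set MSE and R2. Grid size, noise level and
# polynomial degree are assumed values chosen only for the example.
def _example_ols_pipeline(n=20, degree=5, sigma_N=0.1):
    x, y, z = create_xyz_dataset(n, 0, sigma_N)
    X = create_X(x, y, degree)
    X_train, X_test, z_train, z_test = Split_and_Scale(X, np.ravel(z))
    ols_beta, z_tilde, z_predict = OLS_solver(X_train, X_test, z_train, z_test)
    return MSE(z_test, z_predict), R2(z_test, z_predict)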
# Return the rolling mean of a vector and two values at one sigma from the rolling average
def Rolling_Mean(vector, windows=3):
vector_df = pd.DataFrame({'vector': vector})
# computing the rolling average
rolling_mean = vector_df.vector.rolling(windows).mean().to_numpy()
    # computing the rolling standard deviation, used for the one-sigma band around the mean
rolling_std = vector_df.vector.rolling(windows).std().to_numpy()
value_up = rolling_mean + rolling_std
value_down = rolling_mean - rolling_std
return rolling_mean, value_down, value_up
# Plot MSE as a function of model complexity (rolling mean)
def plot_ols_complexity(x, y, z, maxdegree = 20, title="MSE as a function of model complexity"):
complexity = np.arange(0,maxdegree+1)
MSE_train_set = []
MSE_test_set = []
for degree in complexity:
X = create_X(x, y, degree)
X_train, X_test, z_train, z_test = Split_and_Scale(X,np.ravel(z)) #StardardScaler, test_size=0.2, scale=true
ols_beta, z_tilde,z_predict = OLS_solver(X_train, X_test, z_train, z_test)
MSE_train_set.append(MSE(z_train,z_tilde))
MSE_test_set.append(MSE(z_test,z_predict))
plt.figure( figsize = ( 10, 7))
MSE_train_mean, MSE_train_down, MSE_train_up = Rolling_Mean(MSE_train_set)
plt.plot(complexity, MSE_train_mean, label ="Train (rolling ave.)", color="purple")
plt.fill_between(complexity, MSE_train_down, MSE_train_up, alpha=0.2, color="purple")
MSE_test_mean, MSE_test_down, MSE_test_up = Rolling_Mean(MSE_test_set)
plt.plot(complexity, MSE_test_mean, label ="Test (rolling ave.)", color="orange")
plt.fill_between(complexity, MSE_test_down, MSE_test_up, alpha=0.2, color="orange")
plt.plot(complexity, MSE_train_set, '--', alpha=0.3, color="purple", label ="Train (actual values)")
plt.plot(complexity, MSE_test_set, '--', alpha=0.3, color="orange", label ="Test (actual values)")
plt.xlabel("Complexity")
plt.ylabel("MSE")
plt.xlim(complexity[~np.isnan(MSE_train_mean)][0]-1,complexity[-1]+1)
plt.title("Plot of the MSE as a function of complexity of the model\n– Rolling mean and one-sigma region –")
plt.legend()
plt.grid()
plt.show()
def ridge_reg(X_train, X_test, z_train, z_test, lmd = 10**(-12)):
    ridge_beta = np.linalg.pinv(X_train.T @ X_train + lmd*np.eye(len(X_train.T))) @ X_train.T @ z_train  # pseudoinverse
z_model = X_train @ ridge_beta #calculates model
z_predict = X_test @ ridge_beta
#finds the lambda that gave the best MSE
#best_lamda = lambdas[np.where(MSE_values == np.min(MSE_values))[0]]
return ridge_beta, z_model, z_predict
def lasso_reg(X_train, X_test, z_train, z_test, lmd = 10**(-12)):
RegLasso = linear_model.Lasso(lmd)
_ = RegLasso.fit(X_train,z_train)
z_model = RegLasso.predict(X_train)
z_predict = RegLasso.predict(X_test)
return z_model, z_predict
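# Illustrative sketch (added): comparing the two shrinkage methods above on an
# already split (and scaled) dataset for a single, assumed penalty value.
# ridge_reg uses the closed-form pseudoinverse solution while lasso_reg
# delegates to scikit-learn's coordinate-descent Lasso.
def _example_shrinkage(X_train, X_test, z_train, z_test, lmd=1e-3):
    _, _, z_pred_ridge = ridge_reg(X_train, X_test, z_train, z_test, lmd=lmd)
    _, z_pred_lasso = lasso_reg(X_train, X_test, z_train, z_test, lmd=lmd)
    return MSE(z_test, z_pred_ridge), MSE(z_test, z_pred_lasso)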
|
[
"matplotlib.pyplot.grid",
"numpy.linalg.pinv",
"sklearn.linear_model.Lasso",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.fill_between",
"numpy.arange",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.exp",
"numpy.linspace",
"numpy.matmul",
"pandas.DataFrame",
"numpy.meshgrid",
"numpy.ones",
"matplotlib.ticker.LinearLocator",
"sklearn.model_selection.train_test_split",
"numpy.size",
"numpy.isnan",
"numpy.linalg.svd",
"matplotlib.pyplot.title",
"numpy.shape",
"numpy.transpose",
"numpy.random.randn",
"matplotlib.ticker.FormatStrFormatter",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"numpy.diag",
"sklearn.preprocessing.StandardScaler",
"numpy.sum",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.ravel"
] |
[((1979, 2005), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 7)'}), '(figsize=(8, 7))\n', (1989, 2005), True, 'import matplotlib.pyplot as plt\n'), ((2500, 2516), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2509, 2516), True, 'import matplotlib.pyplot as plt\n'), ((2521, 2531), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2529, 2531), True, 'import matplotlib.pyplot as plt\n'), ((2669, 2689), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (2680, 2689), True, 'import numpy as np\n'), ((2696, 2716), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (2707, 2716), True, 'import numpy as np\n'), ((2726, 2743), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (2737, 2743), True, 'import numpy as np\n'), ((3012, 3028), 'numpy.size', 'np.size', (['z_model'], {}), '(z_model)\n', (3019, 3028), True, 'import numpy as np\n'), ((3112, 3148), 'numpy.linalg.svd', 'np.linalg.svd', (['A'], {'full_matrices': '(True)'}), '(A, full_matrices=True)\n', (3125, 3148), True, 'import numpy as np\n'), ((3517, 3533), 'numpy.linalg.svd', 'np.linalg.svd', (['A'], {}), '(A)\n', (3530, 3533), True, 'import numpy as np\n'), ((3628, 3645), 'numpy.zeros', 'np.zeros', (['A.shape'], {}), '(A.shape)\n', (3636, 3645), True, 'import numpy as np\n'), ((3724, 3734), 'numpy.diag', 'np.diag', (['d'], {}), '(d)\n', (3731, 3734), True, 'import numpy as np\n'), ((3744, 3759), 'numpy.transpose', 'np.transpose', (['U'], {}), '(U)\n', (3756, 3759), True, 'import numpy as np\n'), ((3768, 3784), 'numpy.transpose', 'np.transpose', (['VT'], {}), '(VT)\n', (3780, 3784), True, 'import numpy as np\n'), ((4075, 4090), 'numpy.ones', 'np.ones', (['(N, l)'], {}), '((N, l))\n', (4082, 4090), True, 'import numpy as np\n'), ((4284, 4317), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'with_std': 'with_std'}), '(with_std=with_std)\n', (4298, 4317), False, 'from sklearn.preprocessing import StandardScaler\n'), ((4458, 4491), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'with_std': 'with_std'}), '(with_std=with_std)\n', (4472, 4491), False, 'from sklearn.preprocessing import StandardScaler\n'), ((5046, 5089), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'z'], {'test_size': 'test_size'}), '(X, z, test_size=test_size)\n', (5062, 5089), False, 'from sklearn.model_selection import train_test_split\n'), ((6015, 6047), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': vector}"], {}), "({'vector': vector})\n", (6027, 6047), True, 'import pandas as pd\n'), ((6608, 6635), 'numpy.arange', 'np.arange', (['(0)', '(maxdegree + 1)'], {}), '(0, maxdegree + 1)\n', (6617, 6635), True, 'import numpy as np\n'), ((7057, 7084), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (7067, 7084), True, 'import matplotlib.pyplot as plt\n'), ((7181, 7268), 'matplotlib.pyplot.plot', 'plt.plot', (['complexity', 'MSE_train_mean'], {'label': '"""Train (rolling ave.)"""', 'color': '"""purple"""'}), "(complexity, MSE_train_mean, label='Train (rolling ave.)', color=\n 'purple')\n", (7189, 7268), True, 'import matplotlib.pyplot as plt\n'), ((7269, 7359), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['complexity', 'MSE_train_down', 'MSE_train_up'], {'alpha': '(0.2)', 'color': '"""purple"""'}), "(complexity, MSE_train_down, MSE_train_up, alpha=0.2, color\n ='purple')\n", (7285, 7359), True, 'import matplotlib.pyplot as plt\n'), ((7434, 7519), 
'matplotlib.pyplot.plot', 'plt.plot', (['complexity', 'MSE_test_mean'], {'label': '"""Test (rolling ave.)"""', 'color': '"""orange"""'}), "(complexity, MSE_test_mean, label='Test (rolling ave.)', color='orange'\n )\n", (7442, 7519), True, 'import matplotlib.pyplot as plt\n'), ((7520, 7608), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['complexity', 'MSE_test_down', 'MSE_test_up'], {'alpha': '(0.2)', 'color': '"""orange"""'}), "(complexity, MSE_test_down, MSE_test_up, alpha=0.2, color=\n 'orange')\n", (7536, 7608), True, 'import matplotlib.pyplot as plt\n'), ((7613, 7717), 'matplotlib.pyplot.plot', 'plt.plot', (['complexity', 'MSE_train_set', '"""--"""'], {'alpha': '(0.3)', 'color': '"""purple"""', 'label': '"""Train (actual values)"""'}), "(complexity, MSE_train_set, '--', alpha=0.3, color='purple', label=\n 'Train (actual values)')\n", (7621, 7717), True, 'import matplotlib.pyplot as plt\n'), ((7718, 7820), 'matplotlib.pyplot.plot', 'plt.plot', (['complexity', 'MSE_test_set', '"""--"""'], {'alpha': '(0.3)', 'color': '"""orange"""', 'label': '"""Test (actual values)"""'}), "(complexity, MSE_test_set, '--', alpha=0.3, color='orange', label=\n 'Test (actual values)')\n", (7726, 7820), True, 'import matplotlib.pyplot as plt\n'), ((7827, 7851), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Complexity"""'], {}), "('Complexity')\n", (7837, 7851), True, 'import matplotlib.pyplot as plt\n'), ((7856, 7873), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""MSE"""'], {}), "('MSE')\n", (7866, 7873), True, 'import matplotlib.pyplot as plt\n'), ((7952, 8073), 'matplotlib.pyplot.title', 'plt.title', (['"""Plot of the MSE as a function of complexity of the model\n– Rolling mean and one-sigma region –"""'], {}), '(\n """Plot of the MSE as a function of complexity of the model\n– Rolling mean and one-sigma region –"""\n )\n', (7961, 8073), True, 'import matplotlib.pyplot as plt\n'), ((8065, 8077), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8075, 8077), True, 'import matplotlib.pyplot as plt\n'), ((8082, 8092), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (8090, 8092), True, 'import matplotlib.pyplot as plt\n'), ((8097, 8107), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8105, 8107), True, 'import matplotlib.pyplot as plt\n'), ((8637, 8660), 'sklearn.linear_model.Lasso', 'linear_model.Lasso', (['lmd'], {}), '(lmd)\n', (8655, 8660), False, 'from sklearn import linear_model\n'), ((1646, 1706), 'numpy.exp', 'np.exp', (['(-(0.25 * (9 * x - 2) ** 2) - 0.25 * (9 * y - 2) ** 2)'], {}), '(-(0.25 * (9 * x - 2) ** 2) - 0.25 * (9 * y - 2) ** 2)\n', (1652, 1706), True, 'import numpy as np\n'), ((1707, 1759), 'numpy.exp', 'np.exp', (['(-(9 * x + 1) ** 2 / 49.0 - 0.1 * (9 * y + 1))'], {}), '(-(9 * x + 1) ** 2 / 49.0 - 0.1 * (9 * y + 1))\n', (1713, 1759), True, 'import numpy as np\n'), ((1761, 1818), 'numpy.exp', 'np.exp', (['(-(9 * x - 7) ** 2 / 4.0 - 0.25 * (9 * y - 3) ** 2)'], {}), '(-(9 * x - 7) ** 2 / 4.0 - 0.25 * (9 * y - 3) ** 2)\n', (1767, 1818), True, 'import numpy as np\n'), ((1819, 1863), 'numpy.exp', 'np.exp', (['(-(9 * x - 4) ** 2 - (9 * y - 7) ** 2)'], {}), '(-(9 * x - 4) ** 2 - (9 * y - 7) ** 2)\n', (1825, 1863), True, 'import numpy as np\n'), ((2318, 2335), 'matplotlib.ticker.LinearLocator', 'LinearLocator', (['(10)'], {}), '(10)\n', (2331, 2335), False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((2370, 2397), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.02f"""'], {}), "('%.02f')\n", (2388, 2397), 
False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((3040, 3071), 'numpy.sum', 'np.sum', (['((z_data - z_model) ** 2)'], {}), '((z_data - z_model) ** 2)\n', (3046, 3071), True, 'import numpy as np\n'), ((3206, 3217), 'numpy.shape', 'np.shape', (['D'], {}), '(D)\n', (3214, 3217), True, 'import numpy as np\n'), ((3241, 3252), 'numpy.shape', 'np.shape', (['S'], {}), '(S)\n', (3249, 3252), True, 'import numpy as np\n'), ((3808, 3826), 'numpy.matmul', 'np.matmul', (['D.T', 'UT'], {}), '(D.T, UT)\n', (3817, 3826), True, 'import numpy as np\n'), ((3930, 3941), 'numpy.ravel', 'np.ravel', (['x'], {}), '(x)\n', (3938, 3941), True, 'import numpy as np\n'), ((3948, 3959), 'numpy.ravel', 'np.ravel', (['y'], {}), '(y)\n', (3956, 3959), True, 'import numpy as np\n'), ((2786, 2807), 'numpy.random.randn', 'np.random.randn', (['n', 'n'], {}), '(n, n)\n', (2801, 2807), True, 'import numpy as np\n'), ((2905, 2936), 'numpy.sum', 'np.sum', (['((z_data - z_model) ** 2)'], {}), '((z_data - z_model) ** 2)\n', (2911, 2936), True, 'import numpy as np\n'), ((5644, 5679), 'numpy.linalg.pinv', 'np.linalg.pinv', (['(X_train.T @ X_train)'], {}), '(X_train.T @ X_train)\n', (5658, 5679), True, 'import numpy as np\n'), ((6806, 6817), 'numpy.ravel', 'np.ravel', (['z'], {}), '(z)\n', (6814, 6817), True, 'import numpy as np\n'), ((2956, 2971), 'numpy.mean', 'np.mean', (['z_data'], {}), '(z_data)\n', (2963, 2971), True, 'import numpy as np\n'), ((7899, 7923), 'numpy.isnan', 'np.isnan', (['MSE_train_mean'], {}), '(MSE_train_mean)\n', (7907, 7923), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
import os.path as path
import abydos.distance as abd
import abydos.phonetic as abp
import pytest
from scipy.sparse import csc_matrix
from sklearn.feature_extraction.text import TfidfVectorizer
import name_matching.name_matcher as nm
@pytest.fixture
def name_match():
package_dir = path.dirname(path.dirname(path.dirname(path.abspath(__file__))))
data = pd.read_csv(path.join(package_dir, 'test','test_names.csv'))
name_matcher = nm.NameMatcher()
name_matcher.load_and_process_master_data(
'company_name', data, start_processing=False, transform=False)
return name_matcher
@pytest.fixture
def adjusted_name():
package_dir = path.dirname(path.dirname(path.dirname(path.abspath(__file__))))
return pd.read_csv(path.join(package_dir, 'test','adjusted_test_names.csv'))
@pytest.fixture
def words():
return ['fun', 'small', 'pool', 'fun', 'small', 'pool', 'sign',
'small', 'pool', 'sign', 'sign', 'small', 'pool', 'sign', 'paper',
'oppose', 'paper', 'oppose', 'brown', 'pig', 'fat', 'oppose', 'paper',
'oppose', 'brown', 'pig', 'fat', 'snail']
@pytest.mark.parametrize("method",
["",
None,
'no_method']
)
def test_make_distance_metrics_error(name_match, method):
with pytest.raises(TypeError):
name_match.set_distance_metrics([method])
@pytest.mark.parametrize("method, result",
[['indel', abd.Indel()],
['discounted_levenshtein', abd.DiscountedLevenshtein()],
['tichy', abd.Tichy()],
['cormodeL_z', abd.CormodeLZ()],
['iterative_sub_string', abd.IterativeSubString()],
['baulieu_xiii', abd.BaulieuXIII()],
['clement', abd.Clement()],
['dice_asymmetricI', abd.DiceAsymmetricI()],
['kuhns_iii', abd.KuhnsIII()],
['overlap', abd.Overlap()],
['pearson_ii', abd.PearsonII()],
['weighted_jaccard', abd.WeightedJaccard()],
['warrens_iv', abd.WarrensIV()],
['bag', abd.Bag()],
['rouge_l', abd.RougeL()],
['ratcliff_obershelp', abd.RatcliffObershelp()],
['ncd_bz2', abd.NCDbz2()],
['fuzzy_wuzzy_partial_string',
abd.FuzzyWuzzyPartialString()],
['fuzzy_wuzzy_token_sort', abd.FuzzyWuzzyTokenSort()],
['fuzzy_wuzzy_token_set', abd.FuzzyWuzzyTokenSet()],
['editex', abd.Editex()],
['typo', abd.Typo()],
['lig_3', abd.LIG3()],
['ssk', abd.SSK()],
['refined_soundex', abd.PhoneticDistance(transforms=abp.RefinedSoundex(
max_length=30), metric=abd.Levenshtein(), encode_alpha=True)],
['double_metaphone', abd.PhoneticDistance(transforms=abp.DoubleMetaphone(max_length=30), metric=abd.Levenshtein(), encode_alpha=True)]]
)
def test_make_distance_metrics(name_match, method, result):
name_match.set_distance_metrics([method])
assert type(name_match._distance_metrics.popitem()[1][0]) == type(result)
@pytest.mark.parametrize("kwargs_str, result_1, result_2, result_3, result_4",
[[{"ngrams": (4, 5)}, 0, False, (4, 5), 5000],
[{"low_memory": True}, 0, True, (2, 3), 5000],
[{"legal_suffixes": True}, 244, False, (2, 3), 5000],
[{"legal_suffixes": True, "number_of_rows": 8,
"ngrams": (1, 2, 3)}, 244, False, (1, 2, 3), 8],
])
def test_initialisation(kwargs_str, result_1, result_2, result_3, result_4):
name_match = nm.NameMatcher(**kwargs_str)
assert len(name_match._word_set) == result_1
assert name_match._low_memory == result_2
assert name_match._vec.ngram_range == result_3
assert name_match._number_of_rows == result_4
@pytest.mark.parametrize("occ, result_1, result_2, result_3, result_4, result_5",
[[1, '', '', '', '', ''],
[2, 'a-nd', 'Hndkiewicz,2Nicolas',
'Tashirian', '<NAME>', 'Marquardt,'],
[3, '<NAME>-nd', 'Hndkiewicz,2Nicolas',
'Runolfsson, <NAME>', '<NAME>', '<NAME>,'],
])
def test_preprocess_reduce(name_match, adjusted_name, occ, result_1, result_2, result_3, result_4, result_5):
name_match._column_matching = 'company_name'
new_names = name_match._preprocess_reduce(
adjusted_name, occurence_count=occ)
assert new_names.loc[1866, 'company_name'] == result_1
assert new_names.loc[1423, 'company_name'] == result_2
assert new_names.loc[268, 'company_name'] == result_3
assert new_names.loc[859, 'company_name'] == result_4
assert new_names.loc[1918, 'company_name'] == result_5
@pytest.mark.parametrize("col, start_pro, transform",
[['company_name', False, False],
['no_name', False, False],
['company_name', True, False],
['company_name', True, True],
['company_name', True, True],
])
def test_load_and_process_master_data(adjusted_name, col, start_pro, transform):
name_matcher = nm.NameMatcher()
name_matcher.load_and_process_master_data(
column=col,
df_matching_data=adjusted_name,
start_processing=start_pro,
transform=transform)
assert name_matcher._column == col
pd.testing.assert_frame_equal(
name_matcher._df_matching_data, adjusted_name)
assert name_matcher._preprocessed == start_pro
if transform & start_pro:
assert type(name_matcher._n_grams_matching) == csc_matrix
@pytest.mark.parametrize("trans, common",
[[False, False],
[True, False],
[False, True],
[True, True],
])
def test_process_matching_data(name_match, trans, common):
name_match._postprocess_common_words = common
name_match._process_matching_data(transform=trans)
assert name_match._preprocessed
if trans:
assert type(name_match._n_grams_matching) == csc_matrix
else:
assert name_match._n_grams_matching is None
if common:
assert len(name_match._word_set) > 0
else:
assert len(name_match._word_set) == 0
@pytest.mark.parametrize("lower_case, punctuations, ascii, result_1, result_2, result_3",
[[False, False, False, 'Schumm PLC', 'Towne, Johnston and Murray', 'Ösinski-Schinner'],
[True, False, False, 'schumm plc',
'towne, johnston and murray', 'ösinski-schinner'],
[False, True, False, 'Schumm PLC',
'Towne Johnston and Murray', 'ÖsinskiSchinner'],
[False, False, True, 'Schumm PLC',
'Towne, Johnston and Murray', 'Osinski-Schinner'],
[False, True, True, 'Schumm PLC',
'Towne Johnston and Murray', 'OsinskiSchinner'],
[True, False, True, 'schumm plc',
'towne, johnston and murray', 'osinski-schinner'],
[True, True, False, 'schumm plc',
'towne johnston and murray', 'ösinskischinner'],
[True, True, True, 'schumm plc',
'towne johnston and murray', 'osinskischinner'],
])
def test_preprocess(name_match, lower_case, punctuations, ascii, result_1, result_2, result_3):
name_match._preprocess_lowercase = lower_case
name_match._preprocess_punctuations = punctuations
name_match._preprocess_ascii = ascii
new_df = name_match.preprocess(
name_match._df_matching_data, 'company_name')
assert new_df.loc[0, 'company_name'] == result_1
assert new_df.loc[2, 'company_name'] == result_2
assert new_df.loc[784, 'company_name'] == result_3
@pytest.mark.parametrize("low_memory, ngrams, result_1, result_2, result_3",
[[1, (5, 6), 0.02579, 0.00781, 0.01738],
[6, (2, 3), 0.009695, 0.01022, 0.01120],
[8, (1, 2), 0.027087, 0.02765, 0.02910],
[0, (5, 6), 0.02579, 0.00781, 0.01738],
[0, (2, 3), 0.009695, 0.01022, 0.01120],
[0, (1, 2), 0.027087, 0.02765, 0.02910],
])
def test_transform_data(name_match, low_memory, ngrams, result_1, result_2, result_3):
name_match._low_memory = low_memory
name_match._vec = TfidfVectorizer(
lowercase=False, analyzer="char", ngram_range=ngrams)
name_match._process_matching_data(transform=False)
name_match.transform_data()
assert name_match._n_grams_matching.data[10] == pytest.approx(
result_1, 0.001)
assert name_match._n_grams_matching.data[181] == pytest.approx(
result_2, 0.001)
assert name_match._n_grams_matching.data[1000] == pytest.approx(
result_3, 0.001)
@pytest.mark.parametrize("to_be_matched, possible_matches, metrics, result",
[('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandse Bank', 'Bank de Nederlandsche'], ['weighted_jaccard'], 2),
('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandse Bank', 'Bank de Nederlandsche'], [
'weighted_jaccard', 'discounted_levenshtein'], 5),
('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandse Bank', 'Bank de Nederlandsche'], [
'weighted_jaccard', 'discounted_levenshtein', 'iterative_sub_string'], 7),
('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandse Bank', 'Bank de Nederlandsche'], [
'weighted_jaccard', 'overlap', 'iterative_sub_string'], 6),
('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandse Bank', 'Bank de Nederlandsche'], [
'weighted_jaccard', 'overlap', 'bag'], 11),
('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank',
'De Nederlandsche Bank', 'Bank de Nederlandsche'], ['weighted_jaccard'], 2),
('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandsche Bank', 'Bank de Nederlandsche'], [
'weighted_jaccard', 'discounted_levenshtein'], 4),
('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandsche Bank', 'Bank de Nederlandsche'], [
'weighted_jaccard', 'discounted_levenshtein', 'iterative_sub_string'], 6),
('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandsche Bank', 'Bank de Nederlandsche'], [
'weighted_jaccard', 'overlap', 'iterative_sub_string'], 6),
('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandsche Bank', 'Bank de Nederlandsche'], [
'weighted_jaccard', 'overlap', 'bag'], 6),
('Schumm PLC', ['Torphy-Corkery', 'Hansen, Hoppe and Tillman',
'Gerlach and Sons', 'Bank de Nederlandsche'], ['weighted_jaccard'], 2),
('Schumm PLC', ['Torphy-Corkery', 'Hansen, Hoppe and Tillman', 'Gerlach and Sons',
'Bank de Nederlandsche'], ['weighted_jaccard', 'discounted_levenshtein'], 4),
('Schumm PLC', ['Torphy-Corkery', '<NAME>', 'Gerlach and Sons', 'Bank de Nederlandsche'], [
'weighted_jaccard', 'discounted_levenshtein', 'iterative_sub_string'], 6),
('Schumm PLC', ['Torphy-Corkery', '<NAME> and Tillman', 'Gerlach and Sons',
'Bank de Nederlandsche'], ['weighted_jaccard', 'overlap', 'iterative_sub_string'], 8),
('Schumm PLC', ['Torphy-Corkery', '<NAME>', 'Gerlach and Sons',
'Bank de Nederlandsche'], ['weighted_jaccard', 'overlap', 'bag'], 8)
])
def test_score_matches(to_be_matched, possible_matches, metrics, result):
name_match = nm.NameMatcher()
name_match.set_distance_metrics(metrics)
assert np.argmax(name_match._score_matches(
to_be_matched, possible_matches)) == result
@pytest.mark.parametrize("number_of_matches, match_score, metrics, result",
[(1, np.array([[0.9, 0.3, 0.5, 0.2, 0.1]]), ['weighted_jaccard'], [0]),
(2, np.array([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5]]), [
'weighted_jaccard', 'discounted_levenshtein'], [0, 1]),
(3, np.array([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [1, 0.2, 0.3, 0.2, 0.1]]), [
'weighted_jaccard', 'discounted_levenshtein', 'iterative_sub_string'], [2, 1, 1]),
(2, np.array([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [
1, 0.2, 0.3, 0.2, 0.1]]), ['tichy', 'overlap', 'bag'], [2, 1]),
(2, np.array([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5]]), [
'overlap', 'bag'], [0, 2]),
(1, np.array([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [
1, 0.2, 0.3, 0.2, 0.1]]), ['weighted_jaccard', 'overlap', 'iterative_sub_string'], [1]),
(2, np.array([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [
1, 0.2, 0.3, 0.2, 0.1]]), ['weighted_jaccard', 'overlap', 'bag'], [1, 0]),
(1, np.array([[0.3, 0.3, 0.8, 0.2, 0.2]]), [
'weighted_jaccard'], [0]),
(3, np.array([[0.3, 0.3, 0.8, 0.2, 0.2], [0.3, 0.3, 0.8, 0.1, 0.1]]), [
'weighted_jaccard', 'discounted_levenshtein'], [0, 1]),
(2, np.array([[0.3, 0.3, 0.2, 0.1, 0.02], [0.1, 0.1, 0.2, 0.3, 0.02]]), [
'weighted_jaccard', 'iterative_sub_string'], [0, 0]),
(1, np.array([[0.3, 0.3, 0.2, 0.1, 0.02], [0.3, 0.3, 0.2, 0.3, 0.02]]), [
'overlap', 'iterative_sub_string'], [1]),
(1, np.array(
[[-0.5, -0.8, -0.3, -0.7, 0, 2]]), ['bag'], [0]),
(3, np.array([[10, 8, 7, 6, 12, 15, 14, 88]]), [
'weighted_jaccard'], [0]),
(2, np.array([[1, 0.3], [0.1, 0.4]]), [
'weighted_jaccard', 'discounted_levenshtein'], [0, 1])
])
def test_rate_matches(number_of_matches, match_score, metrics, result):
name_match = nm.NameMatcher()
name_match._number_of_matches = number_of_matches
name_match.set_distance_metrics(metrics)
ind = name_match._rate_matches(match_score)
print(ind)
assert len(ind) == np.min([number_of_matches, match_score.shape[0]])
assert list(ind) == result
def test_vectorise_data(name_match):
name_match._vectorise_data(transform=False)
assert len(name_match._vec.vocabulary_) > 0
@pytest.mark.parametrize("match, number_of_matches, word_set, score, result",
[(pd.Series(['Nederandsche', 0, 2, 'De Nederlandsche Bank'], index=['match_name_0', 'score_0', 'match_index_0', 'original_name']), 1, set(['De', 'Bank', 'nl']), 0, 94.553),
(pd.Series(['Nederandsche', 0, 2, 'De Nederlandsche Bank'], index=[
'match_name_0', 'score_0', 'match_index_0', 'original_name']), 1, set(['komt', 'niet', 'voor']), 0, 69.713),
(pd.Series(['nederandsche', 0, 2, 'de nederand bank', 0.4, 3, 'De Nederlandsche Bank'], index=[
'match_name_0', 'score_0', 'match_index_0', 'match_name_1', 'score_1', 'match_index_1', 'original_name']), 1, set(['De', 'Bank', 'nl']), 1, 0.4),
(pd.Series(['nederandsche', 0, 2, 'de nederand bank', 0.4, 3, 'De Nederlandsche Bank'], index=[
'match_name_0', 'score_0', 'match_index_0', 'match_name_1', 'score_1', 'match_index_1', 'original_name']), 1, set(['De', 'Bank', 'nl']), 0, 86.031),
])
def test_postprocess(name_match, match, number_of_matches, word_set, score, result):
name_match._number_of_matches = number_of_matches
name_match._word_set = word_set
new_match = name_match.postprocess(match)
assert new_match.loc[f'score_{score}'] == pytest.approx(result, 0.0001)
@pytest.mark.parametrize("indicator, punctuations, word_set, cut_off, result_1, result_2",
[('legal', False, set(), 0.01, 'plc.', 'bedrijf'),
('legal', True, set(), 0.01, 'plc', 'bedrijf'),
('legal', True, set(['bedrijf']),
0.01, 'bedrijf', 'Group'),
('common', True, set(), 0.01, 'Group', 'West'),
('common', True, set(), 0.3, 'and', 'Group'),
('common', True, set(['West']),
0.3, 'West', 'bedrijf'),
('someting', True, set(['key']), 0.01, 'key', 'val')
])
def test_make_no_scoring_words(name_match, indicator, punctuations, word_set, cut_off, result_1, result_2):
name_match._preprocess_punctuations = punctuations
new_word_set = name_match._make_no_scoring_words(
indicator, word_set, cut_off)
print(new_word_set)
assert new_word_set.issuperset(set([result_1]))
assert not new_word_set.issuperset(set([result_2]))
def test_search_for_possible_matches_error(adjusted_name):
name_matcher = nm.NameMatcher()
with pytest.raises(RuntimeError):
name_matcher._search_for_possible_matches(adjusted_name)
@pytest.mark.parametrize("top_n, low_memory, result_1, result_2",
[(10, 0, 1518, 144),
(50, 0, 1992, 9),
(100, 0, 1999, 6),
(1, 0, 44, 144),
(10, 8, 1518, 144),
(50, 8, 1992, 9),
(100, 8, 1999, 6),
(1, 8, 44, 144)
])
def test_search_for_possible_matches(name_match, adjusted_name, top_n, low_memory, result_1, result_2):
name_match._column_matching = 'company_name'
name_match._low_memory = low_memory
name_match._top_n = top_n
name_match._process_matching_data(True)
possible_match = name_match._search_for_possible_matches(adjusted_name)
assert possible_match.shape[1] == top_n
assert np.max(possible_match) < len(adjusted_name)
assert np.all(possible_match.astype(int) == possible_match)
assert np.max(possible_match[44, :]) == result_1
assert np.min(possible_match[144, :]) == result_2
@pytest.mark.parametrize("common_words, num_matches, possible_matches, matching_series, result_0, result_1",
[(True, 3, np.array([29, 343, 727, 855, 1702]), pd.Series(
['Company and Sons'], index=['company_name']), 36.03, 31.33),
(False, 2, np.array([29, 343, 727, ]), pd.Series(
['Company and Sons'], index=['company_name']), 71.28, 68.6),
(False, 2, np.array([29, 343]), pd.Series(
['Company and Sons'], index=['company_name']), 71.28, 68.6),
(False, 2, np.array([[29, 343], [0, 0]]), pd.Series(
['Company and Sons'], index=['company_name']), 71.28, 68.6),
(False, 2, np.array([29, 343, 727, 855, 1702]), pd.Series(
['Company and Sons'], index=['company_name']), 72.28, 71.28)
])
def test_fuzzy_matches(name_match, common_words, num_matches, possible_matches, matching_series, result_0, result_1):
name_match._column_matching = 'company_name'
name_match._number_of_matches = num_matches
name_match._postprocess_common_words = common_words
name_match._word_set = set(['Sons', 'and'])
match = name_match.fuzzy_matches(possible_matches, matching_series)
assert match['score_0'] == pytest.approx(result_0, 0.0001)
assert match['score_1'] == pytest.approx(result_1, 0.0001)
assert match['match_index_0'] in possible_matches
assert match['match_index_1'] in possible_matches
def test_do_name_matching_full(name_match, adjusted_name):
result = name_match.match_names(adjusted_name, 'company_name')
assert np.sum(result['match_index'] == result.index) == 1922
def test_do_name_matching_split(name_match, adjusted_name):
name_match._preprocess_split = True
result = name_match.match_names(adjusted_name.iloc[44, :], 'company_name')
assert np.any(result['match_index'] == 44)
def test_do_name_matching_series(name_match, adjusted_name):
result = name_match.match_names(adjusted_name.iloc[44, :], 'company_name')
assert np.any(result['match_index'] == 44)
def test_do_name_matching_error(adjusted_name):
name_match = nm.NameMatcher()
with pytest.raises(ValueError):
name_match.match_names(adjusted_name, 'company_name')
@pytest.mark.parametrize("verbose", [True, False])
def test_do_name_matching_print(capfd, name_match, adjusted_name, verbose):
name_match._verbose = verbose
name_match.match_names(adjusted_name.iloc[:5].copy(), 'company_name')
out, err = capfd.readouterr()
if verbose:
assert out.find('preprocessing') > -1
assert out.find('searching') > -1
assert out.find('possible') > -1
assert out.find('fuzzy') > -1
assert out.find('done') > -1
else:
assert out == ''
@pytest.mark.parametrize("word, occurence_count, result",
[['fun snail pool', 2, 'snail'],
['fun snail pool', 3, 'fun snail'],
['fun snail pool', 1, ''],
['fun small pool', 3, 'fun small pool'],
['fun snail', 3, 'fun snail'],
['fun small pool', 5, 'fun small pool']])
def test_select_top_words(word, words, occurence_count, result):
word_counts = pd.Series(words).value_counts()
name_match = nm.NameMatcher()
new_word = name_match._select_top_words(
word.split(), word_counts, occurence_count)
assert new_word == result
@pytest.mark.parametrize("match, num_of_matches, result",
[[{'match_name_1': 'fun', 'match_name_2': 'dog',
'match_name_0': 'cat'}, 3, ['cat', 'fun', 'dog']],
[{'match_name_1': 'fun', 'match_name_2': 'dog',
'match_name_0': 'cat'}, 2, ['cat', 'fun']],
[{'match_name_1': 'fun', 'match_name_0': 'cat'},
2, ['cat', 'fun']],
[{'match_name_1': 'fun', 'match_name_2': 'dog', 'match_name_0': 'cat'}, 0, []]])
def test_get_alternative_names(match, num_of_matches, result):
name_match = nm.NameMatcher(number_of_matches=num_of_matches)
res = name_match._get_alternative_names(pd.Series(match))
assert res == result
@pytest.mark.parametrize("preprocess_punctuations, output, input, x",
[[True, '_blame_', {'test': ['fun...', 'done'], 'num':['_.blame._']}, 2],
[True, 'done', {'test': ['fun. . . ',
'done'], 'num':['_.blame._']}, 1],
[True, 'fun', {
'test': ['fun. . . ', 'done'], 'num':['_.blame._']}, 0],
[False, 'fun. . .', {
'test': ['fun. . . ', 'done'], 'num':['_.blame._']}, 0],
[False, 'fun. . .', {
'num': ['_.blame._'], 'test': ['fun. . . ', 'done']}, 1]
])
def test_preprocess_word_list(preprocess_punctuations, output, input, x):
name_match = nm.NameMatcher(punctuations=preprocess_punctuations)
res = name_match._preprocess_word_list(input)
print(res)
assert res[x] == output
@pytest.mark.parametrize("num_matches, match_score, match, result, y",
[[3, np.array([[1, 1, 1], [1, 1, 1], [0, 0, 0]]), pd.Series(dtype=float), 100, 0],
[2, np.array([[1, 1], [0.4, 0.4], [0, 0]]),
pd.Series(dtype=float), 40, 1],
[1, np.array([[1, 1], [1, 1], [0, 0]]),
pd.Series(dtype=float), 100, 0]
])
def test_adjust_scores(num_matches, match_score, match, result, y):
name_match = nm.NameMatcher(number_of_matches=num_matches)
match = name_match._adjust_scores(match_score, match)
assert match[y] == result
@pytest.mark.parametrize("string, stringlist, result_1, result_2, y",
[['know sign first', ['know', 'know sign', 'know sign first'], 'know first', 'know first', 2],
['know sign first', ['know', 'know sign',
'know sign first'], 'know first', 'know', 1],
['know sign first', ['know', 'know sign',
'know sign first'], 'know first', 'know', 0],
['know first', ['know', 'know', 'know'],
'know first', 'know', 1],
['pool sign small', ['sign small',
'small pool sign', 'small'], '', '', 0],
['pool sign small know', ['sign small',
'small pool sign', 'small'], 'know', '', 0],
['know pool sign small', ['sign small',
'small pool sign', 'small'], 'know', '', 0],
['pool sign small', ['sign small',
'small pool know sign', 'small'], '', 'know', 1],
])
def test_process_words(words, string, stringlist, result_1, result_2, y):
name_match = nm.NameMatcher()
name_match._word_set = set(words)
string, stringlist = name_match._process_words(string, stringlist)
assert string == result_1
assert stringlist[y] == result_2
@pytest.mark.parametrize("word_set, cut_off, result_1, result_2",
[[set(), 0, 1518, 'Group'],
[set(), 0, 1518, 'and'],
[set(), 0.1, 7, 'Group'],
[set(), 0.1, 7, 'LLC'],
[set(), 0.12, 6, 'LLC'],
[set(), 0.2, 1, 'and'],
[set(['apple']), 1, 1, 'apple'],
[set(['apple']), 0, 1519, 'apple'],
[set(['apple']), 0, 1519, 'Group']
])
def test_process_common_words(name_match, word_set, cut_off, result_1, result_2):
words = name_match._process_common_words(word_set, cut_off)
assert result_2 in words
assert len(words) == result_1
@pytest.mark.parametrize("word_set, preprocess, result_1, result_2",
[[set(), True, 244, 'company'],
[set(), True, 244, '3ao'],
[set(), True, 244, 'gmbh'],
[set(), False, 312, '& company'],
[set(), False, 312, '3ao'],
[set(), False, 312, 'g.m.b.h.'],
[set(['apple']), True, 245, 'apple'],
[set(['apple']), False, 313, 'apple'],
[set(['apple..']), True, 245, 'apple..'],
[set(['apple..']), False, 313, 'apple..']
])
def test_process_legal_words(word_set, preprocess, result_1, result_2):
name_match = nm.NameMatcher()
name_match._preprocess_punctuations = preprocess
words = name_match._process_legal_words(word_set)
assert result_2 in words
assert len(words) == result_1
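# Illustrative sketch (added; not collected by pytest): the minimal NameMatcher
# workflow exercised by the tests above -- load a master list of company names,
# then fuzzy-match a second (hypothetical) list against it.
def _example_name_matching():
    master = pd.DataFrame({'company_name': ['De Nederlandsche Bank', 'Schumm PLC', 'Gerlach and Sons']})
    to_match = pd.DataFrame({'company_name': ['Nederlandsche Bank', 'Schum PLC']})
    matcher = nm.NameMatcher(number_of_matches=1)
    matcher.load_and_process_master_data('company_name', master, start_processing=False, transform=False)
    return matcher.match_names(to_match, 'company_name')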
|
[
"abydos.distance.Overlap",
"abydos.phonetic.RefinedSoundex",
"numpy.array",
"abydos.distance.Levenshtein",
"abydos.distance.KuhnsIII",
"abydos.distance.BaulieuXIII",
"name_matching.name_matcher.NameMatcher",
"pandas.testing.assert_frame_equal",
"abydos.distance.WeightedJaccard",
"abydos.phonetic.DoubleMetaphone",
"abydos.distance.Clement",
"numpy.max",
"abydos.distance.Tichy",
"abydos.distance.FuzzyWuzzyTokenSet",
"abydos.distance.WarrensIV",
"numpy.min",
"abydos.distance.RougeL",
"abydos.distance.SSK",
"abydos.distance.LIG3",
"abydos.distance.Bag",
"abydos.distance.PearsonII",
"abydos.distance.FuzzyWuzzyPartialString",
"numpy.any",
"abydos.distance.IterativeSubString",
"pytest.raises",
"pytest.approx",
"pandas.Series",
"abydos.distance.Indel",
"abydos.distance.DiceAsymmetricI",
"abydos.distance.Editex",
"abydos.distance.CormodeLZ",
"abydos.distance.RatcliffObershelp",
"abydos.distance.DiscountedLevenshtein",
"abydos.distance.NCDbz2",
"os.path.join",
"abydos.distance.FuzzyWuzzyTokenSort",
"pytest.mark.parametrize",
"sklearn.feature_extraction.text.TfidfVectorizer",
"numpy.sum",
"os.path.abspath",
"abydos.distance.Typo"
] |
[((1163, 1221), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""method"""', "['', None, 'no_method']"], {}), "('method', ['', None, 'no_method'])\n", (1186, 1221), False, 'import pytest\n'), ((3589, 3929), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""kwargs_str, result_1, result_2, result_3, result_4"""', "[[{'ngrams': (4, 5)}, 0, False, (4, 5), 5000], [{'low_memory': True}, 0, \n True, (2, 3), 5000], [{'legal_suffixes': True}, 244, False, (2, 3), \n 5000], [{'legal_suffixes': True, 'number_of_rows': 8, 'ngrams': (1, 2, \n 3)}, 244, False, (1, 2, 3), 8]]"], {}), "('kwargs_str, result_1, result_2, result_3, result_4',\n [[{'ngrams': (4, 5)}, 0, False, (4, 5), 5000], [{'low_memory': True}, 0,\n True, (2, 3), 5000], [{'legal_suffixes': True}, 244, False, (2, 3), \n 5000], [{'legal_suffixes': True, 'number_of_rows': 8, 'ngrams': (1, 2, \n 3)}, 244, False, (1, 2, 3), 8]])\n", (3612, 3929), False, 'import pytest\n'), ((4395, 4672), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""occ, result_1, result_2, result_3, result_4, result_5"""', "[[1, '', '', '', '', ''], [2, 'a-nd', 'Hndkiewicz,2Nicolas', 'Tashirian',\n '<NAME>', 'Marquardt,'], [3, '<NAME>-nd', 'Hndkiewicz,2Nicolas',\n 'Runolfsson, <NAME>', '<NAME>', '<NAME>,']]"], {}), "('occ, result_1, result_2, result_3, result_4, result_5'\n , [[1, '', '', '', '', ''], [2, 'a-nd', 'Hndkiewicz,2Nicolas',\n 'Tashirian', '<NAME>', 'Marquardt,'], [3, '<NAME>-nd',\n 'Hndkiewicz,2Nicolas', 'Runolfsson, <NAME>', '<NAME>', '<NAME>,']])\n", (4418, 4672), False, 'import pytest\n'), ((5372, 5585), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""col, start_pro, transform"""', "[['company_name', False, False], ['no_name', False, False], ['company_name',\n True, False], ['company_name', True, True], ['company_name', True, True]]"], {}), "('col, start_pro, transform', [['company_name', \n False, False], ['no_name', False, False], ['company_name', True, False],\n ['company_name', True, True], ['company_name', True, True]])\n", (5395, 5585), False, 'import pytest\n'), ((6303, 6410), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""trans, common"""', '[[False, False], [True, False], [False, True], [True, True]]'], {}), "('trans, common', [[False, False], [True, False], [\n False, True], [True, True]])\n", (6326, 6410), False, 'import pytest\n'), ((6997, 7814), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""lower_case, punctuations, ascii, result_1, result_2, result_3"""', "[[False, False, False, 'Schumm PLC', 'Towne, Johnston and Murray',\n 'Ösinski-Schinner'], [True, False, False, 'schumm plc',\n 'towne, johnston and murray', 'ösinski-schinner'], [False, True, False,\n 'Schumm PLC', 'Towne Johnston and Murray', 'ÖsinskiSchinner'], [False, \n False, True, 'Schumm PLC', 'Towne, Johnston and Murray',\n 'Osinski-Schinner'], [False, True, True, 'Schumm PLC',\n 'Towne Johnston and Murray', 'OsinskiSchinner'], [True, False, True,\n 'schumm plc', 'towne, johnston and murray', 'osinski-schinner'], [True,\n True, False, 'schumm plc', 'towne johnston and murray',\n 'ösinskischinner'], [True, True, True, 'schumm plc',\n 'towne johnston and murray', 'osinskischinner']]"], {}), "(\n 'lower_case, punctuations, ascii, result_1, result_2, result_3', [[\n False, False, False, 'Schumm PLC', 'Towne, Johnston and Murray',\n 'Ösinski-Schinner'], [True, False, False, 'schumm plc',\n 'towne, johnston and murray', 'ösinski-schinner'], [False, True, False,\n 'Schumm PLC', 'Towne Johnston and Murray', 
'ÖsinskiSchinner'], [False, \n False, True, 'Schumm PLC', 'Towne, Johnston and Murray',\n 'Osinski-Schinner'], [False, True, True, 'Schumm PLC',\n 'Towne Johnston and Murray', 'OsinskiSchinner'], [True, False, True,\n 'schumm plc', 'towne, johnston and murray', 'osinski-schinner'], [True,\n True, False, 'schumm plc', 'towne johnston and murray',\n 'ösinskischinner'], [True, True, True, 'schumm plc',\n 'towne johnston and murray', 'osinskischinner']])\n", (7020, 7814), False, 'import pytest\n'), ((8705, 9040), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""low_memory, ngrams, result_1, result_2, result_3"""', '[[1, (5, 6), 0.02579, 0.00781, 0.01738], [6, (2, 3), 0.009695, 0.01022, \n 0.0112], [8, (1, 2), 0.027087, 0.02765, 0.0291], [0, (5, 6), 0.02579, \n 0.00781, 0.01738], [0, (2, 3), 0.009695, 0.01022, 0.0112], [0, (1, 2), \n 0.027087, 0.02765, 0.0291]]'], {}), "('low_memory, ngrams, result_1, result_2, result_3',\n [[1, (5, 6), 0.02579, 0.00781, 0.01738], [6, (2, 3), 0.009695, 0.01022,\n 0.0112], [8, (1, 2), 0.027087, 0.02765, 0.0291], [0, (5, 6), 0.02579, \n 0.00781, 0.01738], [0, (2, 3), 0.009695, 0.01022, 0.0112], [0, (1, 2), \n 0.027087, 0.02765, 0.0291]])\n", (8728, 9040), False, 'import pytest\n'), ((9808, 12548), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""to_be_matched, possible_matches, metrics, result"""', "[('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank',\n 'De Nederlandse Bank', 'Bank de Nederlandsche'], ['weighted_jaccard'], \n 2), ('De Nederlandsche Bank', ['Nederlandsche Bank',\n 'De Nederlancsh Bank', 'De Nederlandse Bank', 'Bank de Nederlandsche'],\n ['weighted_jaccard', 'discounted_levenshtein'], 5), (\n 'De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank',\n 'De Nederlandse Bank', 'Bank de Nederlandsche'], ['weighted_jaccard',\n 'discounted_levenshtein', 'iterative_sub_string'], 7), (\n 'De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank',\n 'De Nederlandse Bank', 'Bank de Nederlandsche'], ['weighted_jaccard',\n 'overlap', 'iterative_sub_string'], 6), ('De Nederlandsche Bank', [\n 'Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandse Bank',\n 'Bank de Nederlandsche'], ['weighted_jaccard', 'overlap', 'bag'], 11),\n ('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank',\n 'De Nederlandsche Bank', 'Bank de Nederlandsche'], ['weighted_jaccard'],\n 2), ('De Nederlandsche Bank', ['Nederlandsche Bank',\n 'De Nederlancsh Bank', 'De Nederlandsche Bank', 'Bank de Nederlandsche'\n ], ['weighted_jaccard', 'discounted_levenshtein'], 4), (\n 'De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank',\n 'De Nederlandsche Bank', 'Bank de Nederlandsche'], ['weighted_jaccard',\n 'discounted_levenshtein', 'iterative_sub_string'], 6), (\n 'De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank',\n 'De Nederlandsche Bank', 'Bank de Nederlandsche'], ['weighted_jaccard',\n 'overlap', 'iterative_sub_string'], 6), ('De Nederlandsche Bank', [\n 'Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandsche Bank',\n 'Bank de Nederlandsche'], ['weighted_jaccard', 'overlap', 'bag'], 6), (\n 'Schumm PLC', ['Torphy-Corkery', 'Hansen, Hoppe and Tillman',\n 'Gerlach and Sons', 'Bank de Nederlandsche'], ['weighted_jaccard'], 2),\n ('Schumm PLC', ['Torphy-Corkery', 'Hansen, Hoppe and Tillman',\n 'Gerlach and Sons', 'Bank de Nederlandsche'], ['weighted_jaccard',\n 'discounted_levenshtein'], 4), ('Schumm PLC', ['Torphy-Corkery',\n '<NAME>', 'Gerlach and Sons', 'Bank de 
Nederlandsche'], [\n 'weighted_jaccard', 'discounted_levenshtein', 'iterative_sub_string'], \n 6), ('Schumm PLC', ['Torphy-Corkery', '<NAME> and Tillman',\n 'Gerlach and Sons', 'Bank de Nederlandsche'], ['weighted_jaccard',\n 'overlap', 'iterative_sub_string'], 8), ('Schumm PLC', [\n 'Torphy-Corkery', '<NAME>', 'Gerlach and Sons', 'Bank de Nederlandsche'\n ], ['weighted_jaccard', 'overlap', 'bag'], 8)]"], {}), "('to_be_matched, possible_matches, metrics, result',\n [('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank',\n 'De Nederlandse Bank', 'Bank de Nederlandsche'], ['weighted_jaccard'], \n 2), ('De Nederlandsche Bank', ['Nederlandsche Bank',\n 'De Nederlancsh Bank', 'De Nederlandse Bank', 'Bank de Nederlandsche'],\n ['weighted_jaccard', 'discounted_levenshtein'], 5), (\n 'De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank',\n 'De Nederlandse Bank', 'Bank de Nederlandsche'], ['weighted_jaccard',\n 'discounted_levenshtein', 'iterative_sub_string'], 7), (\n 'De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank',\n 'De Nederlandse Bank', 'Bank de Nederlandsche'], ['weighted_jaccard',\n 'overlap', 'iterative_sub_string'], 6), ('De Nederlandsche Bank', [\n 'Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandse Bank',\n 'Bank de Nederlandsche'], ['weighted_jaccard', 'overlap', 'bag'], 11),\n ('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank',\n 'De Nederlandsche Bank', 'Bank de Nederlandsche'], ['weighted_jaccard'],\n 2), ('De Nederlandsche Bank', ['Nederlandsche Bank',\n 'De Nederlancsh Bank', 'De Nederlandsche Bank', 'Bank de Nederlandsche'\n ], ['weighted_jaccard', 'discounted_levenshtein'], 4), (\n 'De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank',\n 'De Nederlandsche Bank', 'Bank de Nederlandsche'], ['weighted_jaccard',\n 'discounted_levenshtein', 'iterative_sub_string'], 6), (\n 'De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank',\n 'De Nederlandsche Bank', 'Bank de Nederlandsche'], ['weighted_jaccard',\n 'overlap', 'iterative_sub_string'], 6), ('De Nederlandsche Bank', [\n 'Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandsche Bank',\n 'Bank de Nederlandsche'], ['weighted_jaccard', 'overlap', 'bag'], 6), (\n 'Schumm PLC', ['Torphy-Corkery', 'Hansen, Hoppe and Tillman',\n 'Gerlach and Sons', 'Bank de Nederlandsche'], ['weighted_jaccard'], 2),\n ('Schumm PLC', ['Torphy-Corkery', 'Hansen, Hoppe and Tillman',\n 'Gerlach and Sons', 'Bank de Nederlandsche'], ['weighted_jaccard',\n 'discounted_levenshtein'], 4), ('Schumm PLC', ['Torphy-Corkery',\n '<NAME>', 'Gerlach and Sons', 'Bank de Nederlandsche'], [\n 'weighted_jaccard', 'discounted_levenshtein', 'iterative_sub_string'], \n 6), ('Schumm PLC', ['Torphy-Corkery', '<NAME> and Tillman',\n 'Gerlach and Sons', 'Bank de Nederlandsche'], ['weighted_jaccard',\n 'overlap', 'iterative_sub_string'], 8), ('Schumm PLC', [\n 'Torphy-Corkery', '<NAME>', 'Gerlach and Sons', 'Bank de Nederlandsche'\n ], ['weighted_jaccard', 'overlap', 'bag'], 8)])\n", (9831, 12548), False, 'import pytest\n'), ((19203, 19427), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""top_n, low_memory, result_1, result_2"""', '[(10, 0, 1518, 144), (50, 0, 1992, 9), (100, 0, 1999, 6), (1, 0, 44, 144),\n (10, 8, 1518, 144), (50, 8, 1992, 9), (100, 8, 1999, 6), (1, 8, 44, 144)]'], {}), "('top_n, low_memory, result_1, result_2', [(10, 0, \n 1518, 144), (50, 0, 1992, 9), (100, 0, 1999, 6), (1, 0, 44, 144), (10, \n 8, 1518, 144), (50, 8, 1992, 9), (100, 8, 
1999, 6), (1, 8, 44, 144)])\n", (19226, 19427), False, 'import pytest\n'), ((22674, 22723), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""verbose"""', '[True, False]'], {}), "('verbose', [True, False])\n", (22697, 22723), False, 'import pytest\n'), ((23200, 23478), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""word, occurence_count, result"""', "[['fun snail pool', 2, 'snail'], ['fun snail pool', 3, 'fun snail'], [\n 'fun snail pool', 1, ''], ['fun small pool', 3, 'fun small pool'], [\n 'fun snail', 3, 'fun snail'], ['fun small pool', 5, 'fun small pool']]"], {}), "('word, occurence_count, result', [['fun snail pool',\n 2, 'snail'], ['fun snail pool', 3, 'fun snail'], ['fun snail pool', 1,\n ''], ['fun small pool', 3, 'fun small pool'], ['fun snail', 3,\n 'fun snail'], ['fun small pool', 5, 'fun small pool']])\n", (23223, 23478), False, 'import pytest\n'), ((23901, 24319), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""match, num_of_matches, result"""', "[[{'match_name_1': 'fun', 'match_name_2': 'dog', 'match_name_0': 'cat'}, 3,\n ['cat', 'fun', 'dog']], [{'match_name_1': 'fun', 'match_name_2': 'dog',\n 'match_name_0': 'cat'}, 2, ['cat', 'fun']], [{'match_name_1': 'fun',\n 'match_name_0': 'cat'}, 2, ['cat', 'fun']], [{'match_name_1': 'fun',\n 'match_name_2': 'dog', 'match_name_0': 'cat'}, 0, []]]"], {}), "('match, num_of_matches, result', [[{'match_name_1':\n 'fun', 'match_name_2': 'dog', 'match_name_0': 'cat'}, 3, ['cat', 'fun',\n 'dog']], [{'match_name_1': 'fun', 'match_name_2': 'dog', 'match_name_0':\n 'cat'}, 2, ['cat', 'fun']], [{'match_name_1': 'fun', 'match_name_0':\n 'cat'}, 2, ['cat', 'fun']], [{'match_name_1': 'fun', 'match_name_2':\n 'dog', 'match_name_0': 'cat'}, 0, []]])\n", (23924, 24319), False, 'import pytest\n'), ((24708, 25183), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""preprocess_punctuations, output, input, x"""', "[[True, '_blame_', {'test': ['fun...', 'done'], 'num': ['_.blame._']}, 2],\n [True, 'done', {'test': ['fun. . . ', 'done'], 'num': ['_.blame._']}, 1\n ], [True, 'fun', {'test': ['fun. . . ', 'done'], 'num': ['_.blame._']},\n 0], [False, 'fun. . .', {'test': ['fun. . . ', 'done'], 'num': [\n '_.blame._']}, 0], [False, 'fun. . .', {'num': ['_.blame._'], 'test': [\n 'fun. . . ', 'done']}, 1]]"], {}), "('preprocess_punctuations, output, input, x', [[True,\n '_blame_', {'test': ['fun...', 'done'], 'num': ['_.blame._']}, 2], [\n True, 'done', {'test': ['fun. . . ', 'done'], 'num': ['_.blame._']}, 1],\n [True, 'fun', {'test': ['fun. . . ', 'done'], 'num': ['_.blame._']}, 0],\n [False, 'fun. . .', {'test': ['fun. . . ', 'done'], 'num': ['_.blame._'\n ]}, 0], [False, 'fun. . .', {'num': ['_.blame._'], 'test': ['fun. . . 
',\n 'done']}, 1]])\n", (24731, 25183), False, 'import pytest\n'), ((26401, 27187), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""string, stringlist, result_1, result_2, y"""', "[['know sign first', ['know', 'know sign', 'know sign first'], 'know first',\n 'know first', 2], ['know sign first', ['know', 'know sign',\n 'know sign first'], 'know first', 'know', 1], ['know sign first', [\n 'know', 'know sign', 'know sign first'], 'know first', 'know', 0], [\n 'know first', ['know', 'know', 'know'], 'know first', 'know', 1], [\n 'pool sign small', ['sign small', 'small pool sign', 'small'], '', '', \n 0], ['pool sign small know', ['sign small', 'small pool sign', 'small'],\n 'know', '', 0], ['know pool sign small', ['sign small',\n 'small pool sign', 'small'], 'know', '', 0], ['pool sign small', [\n 'sign small', 'small pool know sign', 'small'], '', 'know', 1]]"], {}), "('string, stringlist, result_1, result_2, y', [[\n 'know sign first', ['know', 'know sign', 'know sign first'],\n 'know first', 'know first', 2], ['know sign first', ['know',\n 'know sign', 'know sign first'], 'know first', 'know', 1], [\n 'know sign first', ['know', 'know sign', 'know sign first'],\n 'know first', 'know', 0], ['know first', ['know', 'know', 'know'],\n 'know first', 'know', 1], ['pool sign small', ['sign small',\n 'small pool sign', 'small'], '', '', 0], ['pool sign small know', [\n 'sign small', 'small pool sign', 'small'], 'know', '', 0], [\n 'know pool sign small', ['sign small', 'small pool sign', 'small'],\n 'know', '', 0], ['pool sign small', ['sign small',\n 'small pool know sign', 'small'], '', 'know', 1]])\n", (26424, 27187), False, 'import pytest\n'), ((483, 499), 'name_matching.name_matcher.NameMatcher', 'nm.NameMatcher', ([], {}), '()\n', (497, 499), True, 'import name_matching.name_matcher as nm\n'), ((4167, 4195), 'name_matching.name_matcher.NameMatcher', 'nm.NameMatcher', ([], {}), '(**kwargs_str)\n', (4181, 4195), True, 'import name_matching.name_matcher as nm\n'), ((5834, 5850), 'name_matching.name_matcher.NameMatcher', 'nm.NameMatcher', ([], {}), '()\n', (5848, 5850), True, 'import name_matching.name_matcher as nm\n'), ((6067, 6143), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['name_matcher._df_matching_data', 'adjusted_name'], {}), '(name_matcher._df_matching_data, adjusted_name)\n', (6096, 6143), True, 'import pandas as pd\n'), ((9359, 9428), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'lowercase': '(False)', 'analyzer': '"""char"""', 'ngram_range': 'ngrams'}), "(lowercase=False, analyzer='char', ngram_range=ngrams)\n", (9374, 9428), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((13365, 13381), 'name_matching.name_matcher.NameMatcher', 'nm.NameMatcher', ([], {}), '()\n', (13379, 13381), True, 'import name_matching.name_matcher as nm\n'), ((16040, 16056), 'name_matching.name_matcher.NameMatcher', 'nm.NameMatcher', ([], {}), '()\n', (16054, 16056), True, 'import name_matching.name_matcher as nm\n'), ((19080, 19096), 'name_matching.name_matcher.NameMatcher', 'nm.NameMatcher', ([], {}), '()\n', (19094, 19096), True, 'import name_matching.name_matcher as nm\n'), ((22264, 22299), 'numpy.any', 'np.any', (["(result['match_index'] == 44)"], {}), "(result['match_index'] == 44)\n", (22270, 22299), True, 'import numpy as np\n'), ((22453, 22488), 'numpy.any', 'np.any', (["(result['match_index'] == 44)"], {}), "(result['match_index'] == 44)\n", (22459, 22488), True, 'import numpy as np\n'), ((22556, 
22572), 'name_matching.name_matcher.NameMatcher', 'nm.NameMatcher', ([], {}), '()\n', (22570, 22572), True, 'import name_matching.name_matcher as nm\n'), ((23754, 23770), 'name_matching.name_matcher.NameMatcher', 'nm.NameMatcher', ([], {}), '()\n', (23768, 23770), True, 'import name_matching.name_matcher as nm\n'), ((24569, 24617), 'name_matching.name_matcher.NameMatcher', 'nm.NameMatcher', ([], {'number_of_matches': 'num_of_matches'}), '(number_of_matches=num_of_matches)\n', (24583, 24617), True, 'import name_matching.name_matcher as nm\n'), ((25563, 25615), 'name_matching.name_matcher.NameMatcher', 'nm.NameMatcher', ([], {'punctuations': 'preprocess_punctuations'}), '(punctuations=preprocess_punctuations)\n', (25577, 25615), True, 'import name_matching.name_matcher as nm\n'), ((26264, 26309), 'name_matching.name_matcher.NameMatcher', 'nm.NameMatcher', ([], {'number_of_matches': 'num_matches'}), '(number_of_matches=num_matches)\n', (26278, 26309), True, 'import name_matching.name_matcher as nm\n'), ((27824, 27840), 'name_matching.name_matcher.NameMatcher', 'nm.NameMatcher', ([], {}), '()\n', (27838, 27840), True, 'import name_matching.name_matcher as nm\n'), ((29648, 29664), 'name_matching.name_matcher.NameMatcher', 'nm.NameMatcher', ([], {}), '()\n', (29662, 29664), True, 'import name_matching.name_matcher as nm\n'), ((415, 463), 'os.path.join', 'path.join', (['package_dir', '"""test"""', '"""test_names.csv"""'], {}), "(package_dir, 'test', 'test_names.csv')\n", (424, 463), True, 'import os.path as path\n'), ((787, 844), 'os.path.join', 'path.join', (['package_dir', '"""test"""', '"""adjusted_test_names.csv"""'], {}), "(package_dir, 'test', 'adjusted_test_names.csv')\n", (796, 844), True, 'import os.path as path\n'), ((1392, 1416), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1405, 1416), False, 'import pytest\n'), ((9578, 9608), 'pytest.approx', 'pytest.approx', (['result_1', '(0.001)'], {}), '(result_1, 0.001)\n', (9591, 9608), False, 'import pytest\n'), ((9671, 9701), 'pytest.approx', 'pytest.approx', (['result_2', '(0.001)'], {}), '(result_2, 0.001)\n', (9684, 9701), False, 'import pytest\n'), ((9765, 9795), 'pytest.approx', 'pytest.approx', (['result_3', '(0.001)'], {}), '(result_3, 0.001)\n', (9778, 9795), False, 'import pytest\n'), ((16242, 16291), 'numpy.min', 'np.min', (['[number_of_matches, match_score.shape[0]]'], {}), '([number_of_matches, match_score.shape[0]])\n', (16248, 16291), True, 'import numpy as np\n'), ((17864, 17893), 'pytest.approx', 'pytest.approx', (['result', '(0.0001)'], {}), '(result, 0.0001)\n', (17877, 17893), False, 'import pytest\n'), ((19106, 19133), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (19119, 19133), False, 'import pytest\n'), ((20050, 20072), 'numpy.max', 'np.max', (['possible_match'], {}), '(possible_match)\n', (20056, 20072), True, 'import numpy as np\n'), ((20169, 20198), 'numpy.max', 'np.max', (['possible_match[44, :]'], {}), '(possible_match[44, :])\n', (20175, 20198), True, 'import numpy as np\n'), ((20222, 20252), 'numpy.min', 'np.min', (['possible_match[144, :]'], {}), '(possible_match[144, :])\n', (20228, 20252), True, 'import numpy as np\n'), ((21676, 21707), 'pytest.approx', 'pytest.approx', (['result_0', '(0.0001)'], {}), '(result_0, 0.0001)\n', (21689, 21707), False, 'import pytest\n'), ((21739, 21770), 'pytest.approx', 'pytest.approx', (['result_1', '(0.0001)'], {}), '(result_1, 0.0001)\n', (21752, 21770), False, 'import pytest\n'), ((22018, 22063), 'numpy.sum', 
'np.sum', (["(result['match_index'] == result.index)"], {}), "(result['match_index'] == result.index)\n", (22024, 22063), True, 'import numpy as np\n'), ((22582, 22607), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (22595, 22607), False, 'import pytest\n'), ((24662, 24678), 'pandas.Series', 'pd.Series', (['match'], {}), '(match)\n', (24671, 24678), True, 'import pandas as pd\n'), ((1549, 1560), 'abydos.distance.Indel', 'abd.Indel', ([], {}), '()\n', (1558, 1560), True, 'import abydos.distance as abd\n'), ((1616, 1643), 'abydos.distance.DiscountedLevenshtein', 'abd.DiscountedLevenshtein', ([], {}), '()\n', (1641, 1643), True, 'import abydos.distance as abd\n'), ((1682, 1693), 'abydos.distance.Tichy', 'abd.Tichy', ([], {}), '()\n', (1691, 1693), True, 'import abydos.distance as abd\n'), ((1737, 1752), 'abydos.distance.CormodeLZ', 'abd.CormodeLZ', ([], {}), '()\n', (1750, 1752), True, 'import abydos.distance as abd\n'), ((1806, 1830), 'abydos.distance.IterativeSubString', 'abd.IterativeSubString', ([], {}), '()\n', (1828, 1830), True, 'import abydos.distance as abd\n'), ((1876, 1893), 'abydos.distance.BaulieuXIII', 'abd.BaulieuXIII', ([], {}), '()\n', (1891, 1893), True, 'import abydos.distance as abd\n'), ((1934, 1947), 'abydos.distance.Clement', 'abd.Clement', ([], {}), '()\n', (1945, 1947), True, 'import abydos.distance as abd\n'), ((1997, 2018), 'abydos.distance.DiceAsymmetricI', 'abd.DiceAsymmetricI', ([], {}), '()\n', (2016, 2018), True, 'import abydos.distance as abd\n'), ((2061, 2075), 'abydos.distance.KuhnsIII', 'abd.KuhnsIII', ([], {}), '()\n', (2073, 2075), True, 'import abydos.distance as abd\n'), ((2116, 2129), 'abydos.distance.Overlap', 'abd.Overlap', ([], {}), '()\n', (2127, 2129), True, 'import abydos.distance as abd\n'), ((2173, 2188), 'abydos.distance.PearsonII', 'abd.PearsonII', ([], {}), '()\n', (2186, 2188), True, 'import abydos.distance as abd\n'), ((2238, 2259), 'abydos.distance.WeightedJaccard', 'abd.WeightedJaccard', ([], {}), '()\n', (2257, 2259), True, 'import abydos.distance as abd\n'), ((2303, 2318), 'abydos.distance.WarrensIV', 'abd.WarrensIV', ([], {}), '()\n', (2316, 2318), True, 'import abydos.distance as abd\n'), ((2355, 2364), 'abydos.distance.Bag', 'abd.Bag', ([], {}), '()\n', (2362, 2364), True, 'import abydos.distance as abd\n'), ((2405, 2417), 'abydos.distance.RougeL', 'abd.RougeL', ([], {}), '()\n', (2415, 2417), True, 'import abydos.distance as abd\n'), ((2469, 2492), 'abydos.distance.RatcliffObershelp', 'abd.RatcliffObershelp', ([], {}), '()\n', (2490, 2492), True, 'import abydos.distance as abd\n'), ((2533, 2545), 'abydos.distance.NCDbz2', 'abd.NCDbz2', ([], {}), '()\n', (2543, 2545), True, 'import abydos.distance as abd\n'), ((2635, 2664), 'abydos.distance.FuzzyWuzzyPartialString', 'abd.FuzzyWuzzyPartialString', ([], {}), '()\n', (2662, 2664), True, 'import abydos.distance as abd\n'), ((2720, 2745), 'abydos.distance.FuzzyWuzzyTokenSort', 'abd.FuzzyWuzzyTokenSort', ([], {}), '()\n', (2743, 2745), True, 'import abydos.distance as abd\n'), ((2800, 2824), 'abydos.distance.FuzzyWuzzyTokenSet', 'abd.FuzzyWuzzyTokenSet', ([], {}), '()\n', (2822, 2824), True, 'import abydos.distance as abd\n'), ((2864, 2876), 'abydos.distance.Editex', 'abd.Editex', ([], {}), '()\n', (2874, 2876), True, 'import abydos.distance as abd\n'), ((2914, 2924), 'abydos.distance.Typo', 'abd.Typo', ([], {}), '()\n', (2922, 2924), True, 'import abydos.distance as abd\n'), ((2963, 2973), 'abydos.distance.LIG3', 'abd.LIG3', ([], {}), '()\n', (2971, 2973), 
True, 'import abydos.distance as abd\n'), ((3010, 3019), 'abydos.distance.SSK', 'abd.SSK', ([], {}), '()\n', (3017, 3019), True, 'import abydos.distance as abd\n'), ((13635, 13672), 'numpy.array', 'np.array', (['[[0.9, 0.3, 0.5, 0.2, 0.1]]'], {}), '([[0.9, 0.3, 0.5, 0.2, 0.1]])\n', (13643, 13672), True, 'import numpy as np\n'), ((13732, 13796), 'numpy.array', 'np.array', (['[[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5]]'], {}), '([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5]])\n', (13740, 13796), True, 'import numpy as np\n'), ((13913, 14007), 'numpy.array', 'np.array', (['[[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [1, 0.2, 0.3, 0.2, 0.1]]'], {}), '([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [1, 0.2, \n 0.3, 0.2, 0.1]])\n', (13921, 14007), True, 'import numpy as np\n'), ((14146, 14240), 'numpy.array', 'np.array', (['[[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [1, 0.2, 0.3, 0.2, 0.1]]'], {}), '([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [1, 0.2, \n 0.3, 0.2, 0.1]])\n', (14154, 14240), True, 'import numpy as np\n'), ((14333, 14397), 'numpy.array', 'np.array', (['[[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5]]'], {}), '([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5]])\n', (14341, 14397), True, 'import numpy as np\n'), ((14486, 14580), 'numpy.array', 'np.array', (['[[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [1, 0.2, 0.3, 0.2, 0.1]]'], {}), '([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [1, 0.2, \n 0.3, 0.2, 0.1]])\n', (14494, 14580), True, 'import numpy as np\n'), ((14698, 14792), 'numpy.array', 'np.array', (['[[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [1, 0.2, 0.3, 0.2, 0.1]]'], {}), '([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [1, 0.2, \n 0.3, 0.2, 0.1]])\n', (14706, 14792), True, 'import numpy as np\n'), ((14896, 14933), 'numpy.array', 'np.array', (['[[0.3, 0.3, 0.8, 0.2, 0.2]]'], {}), '([[0.3, 0.3, 0.8, 0.2, 0.2]])\n', (14904, 14933), True, 'import numpy as np\n'), ((15021, 15085), 'numpy.array', 'np.array', (['[[0.3, 0.3, 0.8, 0.2, 0.2], [0.3, 0.3, 0.8, 0.1, 0.1]]'], {}), '([[0.3, 0.3, 0.8, 0.2, 0.2], [0.3, 0.3, 0.8, 0.1, 0.1]])\n', (15029, 15085), True, 'import numpy as np\n'), ((15202, 15268), 'numpy.array', 'np.array', (['[[0.3, 0.3, 0.2, 0.1, 0.02], [0.1, 0.1, 0.2, 0.3, 0.02]]'], {}), '([[0.3, 0.3, 0.2, 0.1, 0.02], [0.1, 0.1, 0.2, 0.3, 0.02]])\n', (15210, 15268), True, 'import numpy as np\n'), ((15383, 15449), 'numpy.array', 'np.array', (['[[0.3, 0.3, 0.2, 0.1, 0.02], [0.3, 0.3, 0.2, 0.3, 0.02]]'], {}), '([[0.3, 0.3, 0.2, 0.1, 0.02], [0.3, 0.3, 0.2, 0.3, 0.02]])\n', (15391, 15449), True, 'import numpy as np\n'), ((15552, 15594), 'numpy.array', 'np.array', (['[[-0.5, -0.8, -0.3, -0.7, 0, 2]]'], {}), '([[-0.5, -0.8, -0.3, -0.7, 0, 2]])\n', (15560, 15594), True, 'import numpy as np\n'), ((15672, 15713), 'numpy.array', 'np.array', (['[[10, 8, 7, 6, 12, 15, 14, 88]]'], {}), '([[10, 8, 7, 6, 12, 15, 14, 88]])\n', (15680, 15713), True, 'import numpy as np\n'), ((15801, 15833), 'numpy.array', 'np.array', (['[[1, 0.3], [0.1, 0.4]]'], {}), '([[1, 0.3], [0.1, 0.4]])\n', (15809, 15833), True, 'import numpy as np\n'), ((16565, 16697), 'pandas.Series', 'pd.Series', (["['Nederandsche', 0, 2, 'De Nederlandsche Bank']"], {'index': "['match_name_0', 'score_0', 'match_index_0', 'original_name']"}), "(['Nederandsche', 0, 2, 'De Nederlandsche Bank'], index=[\n 'match_name_0', 'score_0', 'match_index_0', 'original_name'])\n", (16574, 16697), True, 'import pandas as pd\n'), ((16763, 
16895), 'pandas.Series', 'pd.Series', (["['Nederandsche', 0, 2, 'De Nederlandsche Bank']"], {'index': "['match_name_0', 'score_0', 'match_index_0', 'original_name']"}), "(['Nederandsche', 0, 2, 'De Nederlandsche Bank'], index=[\n 'match_name_0', 'score_0', 'match_index_0', 'original_name'])\n", (16772, 16895), True, 'import pandas as pd\n'), ((16996, 17207), 'pandas.Series', 'pd.Series', (["['nederandsche', 0, 2, 'de nederand bank', 0.4, 3, 'De Nederlandsche Bank']"], {'index': "['match_name_0', 'score_0', 'match_index_0', 'match_name_1', 'score_1',\n 'match_index_1', 'original_name']"}), "(['nederandsche', 0, 2, 'de nederand bank', 0.4, 3,\n 'De Nederlandsche Bank'], index=['match_name_0', 'score_0',\n 'match_index_0', 'match_name_1', 'score_1', 'match_index_1',\n 'original_name'])\n", (17005, 17207), True, 'import pandas as pd\n'), ((17294, 17505), 'pandas.Series', 'pd.Series', (["['nederandsche', 0, 2, 'de nederand bank', 0.4, 3, 'De Nederlandsche Bank']"], {'index': "['match_name_0', 'score_0', 'match_index_0', 'match_name_1', 'score_1',\n 'match_index_1', 'original_name']"}), "(['nederandsche', 0, 2, 'de nederand bank', 0.4, 3,\n 'De Nederlandsche Bank'], index=['match_name_0', 'score_0',\n 'match_index_0', 'match_name_1', 'score_1', 'match_index_1',\n 'original_name'])\n", (17303, 17505), True, 'import pandas as pd\n'), ((20412, 20447), 'numpy.array', 'np.array', (['[29, 343, 727, 855, 1702]'], {}), '([29, 343, 727, 855, 1702])\n', (20420, 20447), True, 'import numpy as np\n'), ((20449, 20504), 'pandas.Series', 'pd.Series', (["['Company and Sons']"], {'index': "['company_name']"}), "(['Company and Sons'], index=['company_name'])\n", (20458, 20504), True, 'import pandas as pd\n'), ((20589, 20613), 'numpy.array', 'np.array', (['[29, 343, 727]'], {}), '([29, 343, 727])\n', (20597, 20613), True, 'import numpy as np\n'), ((20617, 20672), 'pandas.Series', 'pd.Series', (["['Company and Sons']"], {'index': "['company_name']"}), "(['Company and Sons'], index=['company_name'])\n", (20626, 20672), True, 'import pandas as pd\n'), ((20756, 20775), 'numpy.array', 'np.array', (['[29, 343]'], {}), '([29, 343])\n', (20764, 20775), True, 'import numpy as np\n'), ((20777, 20832), 'pandas.Series', 'pd.Series', (["['Company and Sons']"], {'index': "['company_name']"}), "(['Company and Sons'], index=['company_name'])\n", (20786, 20832), True, 'import pandas as pd\n'), ((20916, 20945), 'numpy.array', 'np.array', (['[[29, 343], [0, 0]]'], {}), '([[29, 343], [0, 0]])\n', (20924, 20945), True, 'import numpy as np\n'), ((20947, 21002), 'pandas.Series', 'pd.Series', (["['Company and Sons']"], {'index': "['company_name']"}), "(['Company and Sons'], index=['company_name'])\n", (20956, 21002), True, 'import pandas as pd\n'), ((21086, 21121), 'numpy.array', 'np.array', (['[29, 343, 727, 855, 1702]'], {}), '([29, 343, 727, 855, 1702])\n', (21094, 21121), True, 'import numpy as np\n'), ((21123, 21178), 'pandas.Series', 'pd.Series', (["['Company and Sons']"], {'index': "['company_name']"}), "(['Company and Sons'], index=['company_name'])\n", (21132, 21178), True, 'import pandas as pd\n'), ((23705, 23721), 'pandas.Series', 'pd.Series', (['words'], {}), '(words)\n', (23714, 23721), True, 'import pandas as pd\n'), ((25812, 25855), 'numpy.array', 'np.array', (['[[1, 1, 1], [1, 1, 1], [0, 0, 0]]'], {}), '([[1, 1, 1], [1, 1, 1], [0, 0, 0]])\n', (25820, 25855), True, 'import numpy as np\n'), ((25857, 25879), 'pandas.Series', 'pd.Series', ([], {'dtype': 'float'}), '(dtype=float)\n', (25866, 25879), True, 'import pandas as 
pd\n'), ((25920, 25958), 'numpy.array', 'np.array', (['[[1, 1], [0.4, 0.4], [0, 0]]'], {}), '([[1, 1], [0.4, 0.4], [0, 0]])\n', (25928, 25958), True, 'import numpy as np\n'), ((25987, 26009), 'pandas.Series', 'pd.Series', ([], {'dtype': 'float'}), '(dtype=float)\n', (25996, 26009), True, 'import pandas as pd\n'), ((26052, 26086), 'numpy.array', 'np.array', (['[[1, 1], [1, 1], [0, 0]]'], {}), '([[1, 1], [1, 1], [0, 0]])\n', (26060, 26086), True, 'import numpy as np\n'), ((26118, 26140), 'pandas.Series', 'pd.Series', ([], {'dtype': 'float'}), '(dtype=float)\n', (26127, 26140), True, 'import pandas as pd\n'), ((366, 388), 'os.path.abspath', 'path.abspath', (['__file__'], {}), '(__file__)\n', (378, 388), True, 'import os.path as path\n'), ((738, 760), 'os.path.abspath', 'path.abspath', (['__file__'], {}), '(__file__)\n', (750, 760), True, 'import os.path as path\n'), ((3100, 3133), 'abydos.phonetic.RefinedSoundex', 'abp.RefinedSoundex', ([], {'max_length': '(30)'}), '(max_length=30)\n', (3118, 3133), True, 'import abydos.phonetic as abp\n'), ((3173, 3190), 'abydos.distance.Levenshtein', 'abd.Levenshtein', ([], {}), '()\n', (3188, 3190), True, 'import abydos.distance as abd\n'), ((3292, 3326), 'abydos.phonetic.DoubleMetaphone', 'abp.DoubleMetaphone', ([], {'max_length': '(30)'}), '(max_length=30)\n', (3311, 3326), True, 'import abydos.phonetic as abp\n'), ((3335, 3352), 'abydos.distance.Levenshtein', 'abd.Levenshtein', ([], {}), '()\n', (3350, 3352), True, 'import abydos.distance as abd\n')]
|
import numpy as np
import math
import ROOT
import sys
class DistrReader:
def __init__(self, dataset):
self.stat_error = 0
self.sys_error = 0
self.plambda = 0
self.dataset = str(dataset)
self.hist = ROOT.TH1D('','', 100, -0.2, 0.2)
self.distr = ROOT.TH1D('','', 64, 0, 64)
self.CalcLambda()
def GetStatError(self):
return self.stat_error
def GetSysError(self):
return self.sys_error
def GetLambda(self):
return self.plambda
def Reset(self):
self.stat_error = 0
self.sys_error = 0
self.plambda = 0
self.dataset = ''
def CalcLambda(self):
for asic in range(4):
for channel in range(16):
hfile = ROOT.TFile("%s/hist_as%d_ch%d.root" %(self.dataset, asic, channel))
self.hNoise = hfile.Get('noise')
self.hSignal = hfile.Get('signal')
self.hNoise.SetDirectory(0)
self.hSignal.SetDirectory(0)
hfile.Close()
hist_s = self.hSignal.Clone()
hist_n = self.hNoise.Clone()
hist_s.GetXaxis().SetRangeUser(-40, 100) # 0pe position
p0 = hist_s.GetMaximumBin()
hist_s.GetXaxis().SetRangeUser(120, 250) # 1pe position
p1 = hist_s.GetMaximumBin()
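                # threshold between the 0 p.e. and 1 p.e. peaks: (slightly above) the midpoint of the two peak bins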
thrsh = int((p0+p1)/1.9)
del hist_s
del hist_n
hist_s = self.hSignal
hist_n = self.hNoise
N0_s = hist_s.Integral(1, thrsh)
N0_su = hist_s.Integral(1, hist_s.FindBin(hist_s.GetXaxis().GetBinCenter(thrsh) + 30))
N0_sl = hist_s.Integral(1, hist_s.FindBin(hist_s.GetXaxis().GetBinCenter(thrsh) - 30))
N0_n = hist_n.Integral(1, thrsh)
N0_nu = hist_n.Integral(1, hist_n.FindBin(hist_n.GetXaxis().GetBinCenter(thrsh) + 30))
N0_nl = hist_n.Integral(1, hist_n.FindBin(hist_n.GetXaxis().GetBinCenter(thrsh) - 30))
N_s = hist_s.Integral() + hist_s.GetBinContent(hist_s.GetNbinsX() + 1)
N_n = hist_n.Integral() + hist_n.GetBinContent(hist_n.GetNbinsX() + 1)
P0_s = N0_s / N_s
P0_su = N0_su / N_s
P0_sl = N0_sl / N_s
P0_n = N0_n / N_n
P0_nu = N0_nu / N_n
P0_nl = N0_nl / N_n
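                # statistical error: binomial uncertainty on the zero-p.e. fraction;
                # systematic error: shift of the threshold by +/-30 (x-axis units) around its nominal position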
err_s_stat = np.sqrt(N_s * (1 - P0_s) * P0_s) / N0_s
err_n_stat = np.sqrt(N_n * (1 - P0_n) * P0_n) / N0_n
err_s_sys = ROOT.TMath.Log(P0_sl) - ROOT.TMath.Log(P0_su)
err_n_sys = ROOT.TMath.Log(P0_nl) - ROOT.TMath.Log(P0_nu)
err_tot_sys = np.sqrt(np.power(err_s_sys, 2) + np.power(err_n_sys, 2))
err_tot_stat = np.sqrt(np.power(err_s_stat, 2) + np.power(err_n_stat, 2))
self.sys_error += np.power(err_tot_sys, 2)
self.stat_error += np.power(err_tot_stat, 2)
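                # Poisson zero-count estimate of the mean number of photoelectrons:
                # P(0 p.e.) = exp(-lambda), corrected by the noise-only zero fraction,
                # so lambda = ln(P0_n) - ln(P0_s)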
Plambda = - (ROOT.TMath.Log(P0_s) - ROOT.TMath.Log(P0_n))
self.plambda += Plambda
self.hist.Fill(Plambda)
self.distr.Fill(asic * 16 + channel, Plambda)
hist_s.Delete()
hist_n.Delete()
self.stat_error = np.sqrt(self.GetStatError())
self.sys_error = np.sqrt(self.GetSysError())
def GetLambdaHist(self):
return self.hist
def GetLambdaDistr(self):
return self.distr
# #
# PEd = DistrReader('/Volumes/Untitled/zenin/linearity_465/linearity_465_sipm/hists/3500_4_465')
#
# total = PEd.GetLambda()
# stat_err = PEd.GetStatError()
# sys_err = PEd.GetSysError()
#
# print('total lambda = %f \u00B1 %f stat \u00B1 %f sys'%(total, stat_err, sys_err))
# print('relative uncertainty = %f%% stat + %f%% sys'%(stat_err/total*100, sys_err/total*100))
#
# h = PEd.GetLambdaDistr().Clone()
# print(h.GetBinContent(9))
# h.Draw()
|
[
"numpy.sqrt",
"ROOT.TH1D",
"numpy.power",
"ROOT.TMath.Log",
"ROOT.TFile"
] |
[((243, 276), 'ROOT.TH1D', 'ROOT.TH1D', (['""""""', '""""""', '(100)', '(-0.2)', '(0.2)'], {}), "('', '', 100, -0.2, 0.2)\n", (252, 276), False, 'import ROOT\n'), ((297, 325), 'ROOT.TH1D', 'ROOT.TH1D', (['""""""', '""""""', '(64)', '(0)', '(64)'], {}), "('', '', 64, 0, 64)\n", (306, 325), False, 'import ROOT\n'), ((814, 882), 'ROOT.TFile', 'ROOT.TFile', (["('%s/hist_as%d_ch%d.root' % (self.dataset, asic, channel))"], {}), "('%s/hist_as%d_ch%d.root' % (self.dataset, asic, channel))\n", (824, 882), False, 'import ROOT\n'), ((3065, 3089), 'numpy.power', 'np.power', (['err_tot_sys', '(2)'], {}), '(err_tot_sys, 2)\n', (3073, 3089), True, 'import numpy as np\n'), ((3126, 3151), 'numpy.power', 'np.power', (['err_tot_stat', '(2)'], {}), '(err_tot_stat, 2)\n', (3134, 3151), True, 'import numpy as np\n'), ((2594, 2626), 'numpy.sqrt', 'np.sqrt', (['(N_s * (1 - P0_s) * P0_s)'], {}), '(N_s * (1 - P0_s) * P0_s)\n', (2601, 2626), True, 'import numpy as np\n'), ((2663, 2695), 'numpy.sqrt', 'np.sqrt', (['(N_n * (1 - P0_n) * P0_n)'], {}), '(N_n * (1 - P0_n) * P0_n)\n', (2670, 2695), True, 'import numpy as np\n'), ((2732, 2753), 'ROOT.TMath.Log', 'ROOT.TMath.Log', (['P0_sl'], {}), '(P0_sl)\n', (2746, 2753), False, 'import ROOT\n'), ((2756, 2777), 'ROOT.TMath.Log', 'ROOT.TMath.Log', (['P0_su'], {}), '(P0_su)\n', (2770, 2777), False, 'import ROOT\n'), ((2806, 2827), 'ROOT.TMath.Log', 'ROOT.TMath.Log', (['P0_nl'], {}), '(P0_nl)\n', (2820, 2827), False, 'import ROOT\n'), ((2830, 2851), 'ROOT.TMath.Log', 'ROOT.TMath.Log', (['P0_nu'], {}), '(P0_nu)\n', (2844, 2851), False, 'import ROOT\n'), ((2891, 2913), 'numpy.power', 'np.power', (['err_s_sys', '(2)'], {}), '(err_s_sys, 2)\n', (2899, 2913), True, 'import numpy as np\n'), ((2916, 2938), 'numpy.power', 'np.power', (['err_n_sys', '(2)'], {}), '(err_n_sys, 2)\n', (2924, 2938), True, 'import numpy as np\n'), ((2979, 3002), 'numpy.power', 'np.power', (['err_s_stat', '(2)'], {}), '(err_s_stat, 2)\n', (2987, 3002), True, 'import numpy as np\n'), ((3005, 3028), 'numpy.power', 'np.power', (['err_n_stat', '(2)'], {}), '(err_n_stat, 2)\n', (3013, 3028), True, 'import numpy as np\n'), ((3186, 3206), 'ROOT.TMath.Log', 'ROOT.TMath.Log', (['P0_s'], {}), '(P0_s)\n', (3200, 3206), False, 'import ROOT\n'), ((3209, 3229), 'ROOT.TMath.Log', 'ROOT.TMath.Log', (['P0_n'], {}), '(P0_n)\n', (3223, 3229), False, 'import ROOT\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from lottery.branch import base
import models.registry
from pruning.mask import Mask
from pruning.pruned_model import PrunedModel
from training import train
from utils.tensor_utils import shuffle_state_dict, weight_erank, feature_erank, activation, generate_mask_active, features_spectral, features_frobenius, features_spectral_fro_ratio, erank
from platforms.platform import get_platform
from foundations import paths
import json
import os
import datasets.registry
import copy
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import tqdm
import seaborn as sns
import pandas as pd
import numpy as np
from utils.tensor_utils import generate_mask_active, erank, shuffle_tensor, mutual_coherence
sns.set_style("whitegrid")
class Branch(base.Branch):
def branch_function(self, seed: int, erank_path: str = '', coherence_path: str = '',
frobenius_path: str = '', min_singular_path: str = '', nuclear_path: str = '',
normalized: bool = False, batch_average: int = 1):
# Randomize the mask.
orig_mask = Mask.load(self.level_root)
best_mask = Mask()
start_step = self.lottery_desc.str_to_step('0ep')
# Use level 0 model for dense pre-pruned model
if not get_platform().is_primary_process: return
base_model = models.registry.load(self.level_root.replace(f'level_{self.level}', 'level_0'), start_step,
self.lottery_desc.model_hparams)
orig_model = PrunedModel(base_model, Mask.ones_like(base_model))
model_graduate = copy.deepcopy(orig_model)
model = copy.deepcopy(orig_model)
lth_model = PrunedModel(copy.deepcopy(base_model), orig_mask)
# Randomize while keeping the same layerwise proportions as the original mask.
prunable_tensors = set(orig_model.prunable_layer_names) - set(orig_model.prunable_conv_names)
tensors = {k[6:]: v.clone() for k, v in orig_model.state_dict().items() if k[6:] in prunable_tensors}
train_loader = datasets.registry.get(self.lottery_desc.dataset_hparams, train=True)
input = []
offset = 1 if batch_average > 1 else 0
for b in range(batch_average):
input.append(list(train_loader)[b+offset][0])
singular_values = []
eranks_values = []
# lth_features = lth_model.intermediate(input)
# _, s, _ = torch.svd(lth_features[-1], compute_uv=False)
# if normalized:
# s = s / s[0]
# singular_values.append(s)
eranks = np.load(os.path.join(self.level_root, '../', erank_path), allow_pickle=True)
coherence = np.load(os.path.join(self.level_root, '../', coherence_path), allow_pickle=True)
frobenius = np.load(os.path.join(self.level_root, '../', frobenius_path), allow_pickle=True)
min_singular = np.load(os.path.join(self.level_root, '../', min_singular_path), allow_pickle=True)
nuclear = np.load(os.path.join(self.level_root, '../', nuclear_path), allow_pickle=True)
erank_seeds = []
coherence_seeds = []
frobenius_seeds = []
min_singular_seeds = []
nuclear_seeds = []
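        # For every prunable layer, keep the shuffle seed that maximises each saved criterion
        # (rows: layers, columns: candidate seeds).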
for layer in range(eranks.shape[0]):
erank_seeds.append(np.argmax(eranks[layer, :]))
coherence_seeds.append(np.argmax(coherence[layer, :]))
frobenius_seeds.append(np.argmax(frobenius[layer, :]))
min_singular_seeds.append(np.argmax(min_singular[layer, :]))
nuclear_seeds.append(np.argmax(nuclear[layer, :]))
# Assign all masks to model
for b in range(batch_average):
lth_features = lth_model.intermediate(input[b])
_, s, _ = torch.svd(lth_features[-1], compute_uv=False)
if normalized:
s = s / s[0]
eranks_values.append(erank(lth_features[-1]))
singular_values.append(s)
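            # For each selection strategy (plus a purely random baseline using the given seed),
            # build a layer-wise shuffled mask and record the singular values of the last intermediate features.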
for seeds in [erank_seeds, coherence_seeds, frobenius_seeds, min_singular_seeds, nuclear_seeds, [seed] * len(erank_seeds)]:
curr_mask = Mask()
for i, (name, param) in enumerate(tensors.items()):
curr_mask[name] = shuffle_tensor(orig_mask[name], int(seed + seeds[i])).int()
model_graduate.register_buffer(PrunedModel.to_mask_name(name), curr_mask[name].float())
features = model_graduate.intermediate(input[b])
_, s, _ = torch.svd(features[-1], compute_uv=False)
if normalized:
s = s / s[0]
eranks_values.append(erank(features[-1]))
singular_values.append(s)
model_graduate = copy.deepcopy(orig_model)
# features = lth_model(in)
types = ['lth', 'erank', 'mutual coherence', 'frobenius', 'min singular', 'nuclear', 'random']
data = pd.concat([pd.DataFrame(
{'svd_value': list(singular_values[i].detach().numpy()), 'type': [types[i % len(types)]] * len(singular_values[i]),
'svd_index': list(range(len(singular_values[i])))}) for i in range(len(types) * batch_average)], ignore_index=True)
#
f = sns.lineplot(data=data.loc[data['type'] != 'nuclear'], x='svd_index', y='svd_value', hue='type', markers=True, dashes=False, style="type")
f.set(yscale='log')
f.get_figure().savefig(os.path.join(self.branch_root, 'svd_plot.pdf'))
@staticmethod
def description():
return "Plot singular values."
@staticmethod
def name():
return 'singular_values'
|
[
"utils.tensor_utils.erank",
"pruning.pruned_model.PrunedModel.to_mask_name",
"matplotlib.use",
"pruning.mask.Mask.load",
"pruning.mask.Mask.ones_like",
"os.path.join",
"numpy.argmax",
"seaborn.set_style",
"pruning.mask.Mask",
"seaborn.lineplot",
"torch.svd",
"copy.deepcopy",
"platforms.platform.get_platform"
] |
[((689, 710), 'matplotlib.use', 'matplotlib.use', (['"""pdf"""'], {}), "('pdf')\n", (703, 710), False, 'import matplotlib\n'), ((909, 935), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (922, 935), True, 'import seaborn as sns\n'), ((1281, 1307), 'pruning.mask.Mask.load', 'Mask.load', (['self.level_root'], {}), '(self.level_root)\n', (1290, 1307), False, 'from pruning.mask import Mask\n'), ((1328, 1334), 'pruning.mask.Mask', 'Mask', ([], {}), '()\n', (1332, 1334), False, 'from pruning.mask import Mask\n'), ((1791, 1816), 'copy.deepcopy', 'copy.deepcopy', (['orig_model'], {}), '(orig_model)\n', (1804, 1816), False, 'import copy\n'), ((1833, 1858), 'copy.deepcopy', 'copy.deepcopy', (['orig_model'], {}), '(orig_model)\n', (1846, 1858), False, 'import copy\n'), ((5386, 5529), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': "data.loc[data['type'] != 'nuclear']", 'x': '"""svd_index"""', 'y': '"""svd_value"""', 'hue': '"""type"""', 'markers': '(True)', 'dashes': '(False)', 'style': '"""type"""'}), "(data=data.loc[data['type'] != 'nuclear'], x='svd_index', y=\n 'svd_value', hue='type', markers=True, dashes=False, style='type')\n", (5398, 5529), True, 'import seaborn as sns\n'), ((1738, 1764), 'pruning.mask.Mask.ones_like', 'Mask.ones_like', (['base_model'], {}), '(base_model)\n', (1752, 1764), False, 'from pruning.mask import Mask\n'), ((1891, 1916), 'copy.deepcopy', 'copy.deepcopy', (['base_model'], {}), '(base_model)\n', (1904, 1916), False, 'import copy\n'), ((2778, 2826), 'os.path.join', 'os.path.join', (['self.level_root', '"""../"""', 'erank_path'], {}), "(self.level_root, '../', erank_path)\n", (2790, 2826), False, 'import os\n'), ((2875, 2927), 'os.path.join', 'os.path.join', (['self.level_root', '"""../"""', 'coherence_path'], {}), "(self.level_root, '../', coherence_path)\n", (2887, 2927), False, 'import os\n'), ((2976, 3028), 'os.path.join', 'os.path.join', (['self.level_root', '"""../"""', 'frobenius_path'], {}), "(self.level_root, '../', frobenius_path)\n", (2988, 3028), False, 'import os\n'), ((3080, 3135), 'os.path.join', 'os.path.join', (['self.level_root', '"""../"""', 'min_singular_path'], {}), "(self.level_root, '../', min_singular_path)\n", (3092, 3135), False, 'import os\n'), ((3182, 3232), 'os.path.join', 'os.path.join', (['self.level_root', '"""../"""', 'nuclear_path'], {}), "(self.level_root, '../', nuclear_path)\n", (3194, 3232), False, 'import os\n'), ((3929, 3974), 'torch.svd', 'torch.svd', (['lth_features[-1]'], {'compute_uv': '(False)'}), '(lth_features[-1], compute_uv=False)\n', (3938, 3974), False, 'import torch\n'), ((5584, 5630), 'os.path.join', 'os.path.join', (['self.branch_root', '"""svd_plot.pdf"""'], {}), "(self.branch_root, 'svd_plot.pdf')\n", (5596, 5630), False, 'import os\n'), ((1463, 1477), 'platforms.platform.get_platform', 'get_platform', ([], {}), '()\n', (1475, 1477), False, 'from platforms.platform import get_platform\n'), ((3472, 3499), 'numpy.argmax', 'np.argmax', (['eranks[layer, :]'], {}), '(eranks[layer, :])\n', (3481, 3499), True, 'import numpy as np\n'), ((3536, 3566), 'numpy.argmax', 'np.argmax', (['coherence[layer, :]'], {}), '(coherence[layer, :])\n', (3545, 3566), True, 'import numpy as np\n'), ((3603, 3633), 'numpy.argmax', 'np.argmax', (['frobenius[layer, :]'], {}), '(frobenius[layer, :])\n', (3612, 3633), True, 'import numpy as np\n'), ((3673, 3706), 'numpy.argmax', 'np.argmax', (['min_singular[layer, :]'], {}), '(min_singular[layer, :])\n', (3682, 3706), True, 'import numpy as np\n'), ((3741, 
3769), 'numpy.argmax', 'np.argmax', (['nuclear[layer, :]'], {}), '(nuclear[layer, :])\n', (3750, 3769), True, 'import numpy as np\n'), ((4064, 4087), 'utils.tensor_utils.erank', 'erank', (['lth_features[-1]'], {}), '(lth_features[-1])\n', (4069, 4087), False, 'from utils.tensor_utils import generate_mask_active, erank, shuffle_tensor, mutual_coherence\n'), ((4291, 4297), 'pruning.mask.Mask', 'Mask', ([], {}), '()\n', (4295, 4297), False, 'from pruning.mask import Mask\n'), ((4663, 4704), 'torch.svd', 'torch.svd', (['features[-1]'], {'compute_uv': '(False)'}), '(features[-1], compute_uv=False)\n', (4672, 4704), False, 'import torch\n'), ((4902, 4927), 'copy.deepcopy', 'copy.deepcopy', (['orig_model'], {}), '(orig_model)\n', (4915, 4927), False, 'import copy\n'), ((4806, 4825), 'utils.tensor_utils.erank', 'erank', (['features[-1]'], {}), '(features[-1])\n', (4811, 4825), False, 'from utils.tensor_utils import generate_mask_active, erank, shuffle_tensor, mutual_coherence\n'), ((4515, 4545), 'pruning.pruned_model.PrunedModel.to_mask_name', 'PrunedModel.to_mask_name', (['name'], {}), '(name)\n', (4539, 4545), False, 'from pruning.pruned_model import PrunedModel\n')]
|
"""
==============
GLVQ Benchmark
==============
This example shows the differences between the four GLVQ implementations and LMNN.
The Image Segmentation dataset is used for training and testing. Each plot shows the
projection and classification produced by one implementation. Because GLVQ cannot
project the data on its own, a PCA is used for its plot.
"""
from __future__ import with_statement
import numpy as np
import matplotlib.pyplot as plt
from metric_learn import LMNN
from sklearn.decomposition import PCA
from sklearn_lvq import GlvqModel, GrlvqModel, LgmlvqModel, GmlvqModel
from sklearn_lvq.utils import _to_tango_colors, _tango_color
print(__doc__)
def plot(data, target, target_p, prototype, prototype_label, p):
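    # Scatter the data twice (translucent dots for the true labels, small dots for the
    # predicted labels) and overlay the prototypes.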
p.scatter(data[:, 0], data[:, 1], c=_to_tango_colors(target, 0), alpha=0.5)
p.scatter(data[:, 0], data[:, 1], c=_to_tango_colors(target_p, 0),
marker='.')
p.scatter(prototype[:, 0], prototype[:, 1],
c=_tango_color('aluminium', 5), marker='D')
try:
p.scatter(prototype[:, 0], prototype[:, 1], s=60,
c=_to_tango_colors(prototype_label, 0), marker='.')
    except Exception:  # prototype_label may be a single label index rather than an array
p.scatter(prototype[:, 0], prototype[:, 1], s=60,
c=_tango_color(prototype_label), marker='.')
p.axis('equal')
y = []
x = []
with open('segmentation.data') as f:
for line in f:
v = line.split(',')
y.append(v[0])
x.append(v[1:])
x = np.asarray(x, dtype='float64')
y = np.asarray(y)
lmnn = LMNN(k=5, learn_rate=1e-6)
lmnn.fit(x, y)
x_t = lmnn.transform(x)
p1 = plt.subplot(231)
p1.scatter(x_t[:, 0], x_t[:, 1], c=_to_tango_colors(y, 0))
p1.axis('equal')
p1.set_title('LMNN')
# GLVQ
glvq = GlvqModel()
glvq.fit(x, y)
p2 = plt.subplot(232)
p2.set_title('GLVQ')
plot(PCA().fit_transform(x), y, glvq.predict(x), glvq.w_, glvq.c_w_, p2)
# GRLVQ
grlvq = GrlvqModel()
grlvq.fit(x, y)
p3 = plt.subplot(233)
p3.set_title('GRLVQ')
plot(grlvq.project(x, 2),
y, grlvq.predict(x), grlvq.project(grlvq.w_, 2),
grlvq.c_w_, p3)
# GMLVQ
gmlvq = GmlvqModel()
gmlvq.fit(x, y)
p4 = plt.subplot(234)
p4.set_title('GMLVQ')
plot(gmlvq.project(x, 2),
y, gmlvq.predict(x), gmlvq.project(gmlvq.w_, 2),
gmlvq.c_w_, p4)
# LGMLVQ
lgmlvq = LgmlvqModel()
lgmlvq.fit(x, y)
p5 = plt.subplot(235)
elem_set = list(set(lgmlvq.c_w_))
p5.set_title('LGMLVQ 1')
plot(lgmlvq.project(x, 1, 2, True),
y, lgmlvq.predict(x), lgmlvq.project(np.array([lgmlvq.w_[1]]), 1, 2),
elem_set.index(lgmlvq.c_w_[1]), p5)
p6 = plt.subplot(236)
p6.set_title('LGMLVQ 2')
plot(lgmlvq.project(x, 6, 2, True),
y, lgmlvq.predict(x), lgmlvq.project(np.array([lgmlvq.w_[6]]), 6, 2),
elem_set.index(lgmlvq.c_w_[6]), p6)
plt.show()
|
[
"sklearn_lvq.GmlvqModel",
"sklearn_lvq.LgmlvqModel",
"sklearn.decomposition.PCA",
"numpy.asarray",
"sklearn_lvq.utils._to_tango_colors",
"numpy.array",
"metric_learn.LMNN",
"sklearn_lvq.utils._tango_color",
"sklearn_lvq.GlvqModel",
"matplotlib.pyplot.subplot",
"sklearn_lvq.GrlvqModel",
"matplotlib.pyplot.show"
] |
[((1441, 1471), 'numpy.asarray', 'np.asarray', (['x'], {'dtype': '"""float64"""'}), "(x, dtype='float64')\n", (1451, 1471), True, 'import numpy as np\n'), ((1476, 1489), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (1486, 1489), True, 'import numpy as np\n'), ((1498, 1525), 'metric_learn.LMNN', 'LMNN', ([], {'k': '(5)', 'learn_rate': '(1e-06)'}), '(k=5, learn_rate=1e-06)\n', (1502, 1525), False, 'from metric_learn import LMNN\n'), ((1570, 1586), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(231)'], {}), '(231)\n', (1581, 1586), True, 'import matplotlib.pyplot as plt\n'), ((1699, 1710), 'sklearn_lvq.GlvqModel', 'GlvqModel', ([], {}), '()\n', (1708, 1710), False, 'from sklearn_lvq import GlvqModel, GrlvqModel, LgmlvqModel, GmlvqModel\n'), ((1731, 1747), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(232)'], {}), '(232)\n', (1742, 1747), True, 'import matplotlib.pyplot as plt\n'), ((1859, 1871), 'sklearn_lvq.GrlvqModel', 'GrlvqModel', ([], {}), '()\n', (1869, 1871), False, 'from sklearn_lvq import GlvqModel, GrlvqModel, LgmlvqModel, GmlvqModel\n'), ((1893, 1909), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(233)'], {}), '(233)\n', (1904, 1909), True, 'import matplotlib.pyplot as plt\n'), ((2050, 2062), 'sklearn_lvq.GmlvqModel', 'GmlvqModel', ([], {}), '()\n', (2060, 2062), False, 'from sklearn_lvq import GlvqModel, GrlvqModel, LgmlvqModel, GmlvqModel\n'), ((2084, 2100), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(234)'], {}), '(234)\n', (2095, 2100), True, 'import matplotlib.pyplot as plt\n'), ((2243, 2256), 'sklearn_lvq.LgmlvqModel', 'LgmlvqModel', ([], {}), '()\n', (2254, 2256), False, 'from sklearn_lvq import GlvqModel, GrlvqModel, LgmlvqModel, GmlvqModel\n'), ((2279, 2295), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(235)'], {}), '(235)\n', (2290, 2295), True, 'import matplotlib.pyplot as plt\n'), ((2512, 2528), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(236)'], {}), '(236)\n', (2523, 2528), True, 'import matplotlib.pyplot as plt\n'), ((2707, 2717), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2715, 2717), True, 'import matplotlib.pyplot as plt\n'), ((1622, 1644), 'sklearn_lvq.utils._to_tango_colors', '_to_tango_colors', (['y', '(0)'], {}), '(y, 0)\n', (1638, 1644), False, 'from sklearn_lvq.utils import _to_tango_colors, _tango_color\n'), ((2433, 2457), 'numpy.array', 'np.array', (['[lgmlvq.w_[1]]'], {}), '([lgmlvq.w_[1]])\n', (2441, 2457), True, 'import numpy as np\n'), ((2632, 2656), 'numpy.array', 'np.array', (['[lgmlvq.w_[6]]'], {}), '([lgmlvq.w_[6]])\n', (2640, 2656), True, 'import numpy as np\n'), ((757, 784), 'sklearn_lvq.utils._to_tango_colors', '_to_tango_colors', (['target', '(0)'], {}), '(target, 0)\n', (773, 784), False, 'from sklearn_lvq.utils import _to_tango_colors, _tango_color\n'), ((837, 866), 'sklearn_lvq.utils._to_tango_colors', '_to_tango_colors', (['target_p', '(0)'], {}), '(target_p, 0)\n', (853, 866), False, 'from sklearn_lvq.utils import _to_tango_colors, _tango_color\n'), ((958, 986), 'sklearn_lvq.utils._tango_color', '_tango_color', (['"""aluminium"""', '(5)'], {}), "('aluminium', 5)\n", (970, 986), False, 'from sklearn_lvq.utils import _to_tango_colors, _tango_color\n'), ((1774, 1779), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (1777, 1779), False, 'from sklearn.decomposition import PCA\n'), ((1087, 1123), 'sklearn_lvq.utils._to_tango_colors', '_to_tango_colors', (['prototype_label', '(0)'], {}), '(prototype_label, 0)\n', (1103, 1123), False, 'from sklearn_lvq.utils import _to_tango_colors, _tango_color\n'), 
((1227, 1256), 'sklearn_lvq.utils._tango_color', '_tango_color', (['prototype_label'], {}), '(prototype_label)\n', (1239, 1256), False, 'from sklearn_lvq.utils import _to_tango_colors, _tango_color\n')]
|
import pytest
import re
import unittest
import metric_learn
import numpy as np
from sklearn import clone
from test.test_utils import ids_metric_learners, metric_learners, remove_y
from metric_learn.sklearn_shims import set_random_state, SKLEARN_AT_LEAST_0_22
def remove_spaces(s):
return re.sub(r'\s+', '', s)
def sk_repr_kwargs(def_kwargs, nndef_kwargs):
"""Given the non-default arguments, and the default
keywords arguments, build the string that will appear
in the __repr__ of the estimator, depending on the
version of scikit-learn.
"""
if SKLEARN_AT_LEAST_0_22:
def_kwargs = {}
def_kwargs.update(nndef_kwargs)
args_str = ",".join(f"{key}={repr(value)}"
for key, value in def_kwargs.items())
return args_str
class TestStringRepr(unittest.TestCase):
def test_covariance(self):
def_kwargs = {'preprocessor': None}
nndef_kwargs = {}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.Covariance())),
remove_spaces(f"Covariance({merged_kwargs})"))
def test_lmnn(self):
def_kwargs = {'convergence_tol': 0.001, 'init': 'auto', 'k': 3,
'learn_rate': 1e-07, 'max_iter': 1000, 'min_iter': 50,
'n_components': None, 'preprocessor': None,
'random_state': None, 'regularization': 0.5,
'verbose': False}
nndef_kwargs = {'convergence_tol': 0.01, 'k': 6}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(
remove_spaces(str(metric_learn.LMNN(convergence_tol=0.01, k=6))),
remove_spaces(f"LMNN({merged_kwargs})"))
def test_nca(self):
def_kwargs = {'init': 'auto', 'max_iter': 100, 'n_components': None,
'preprocessor': None, 'random_state': None, 'tol': None,
'verbose': False}
nndef_kwargs = {'max_iter': 42}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.NCA(max_iter=42))),
remove_spaces(f"NCA({merged_kwargs})"))
def test_lfda(self):
def_kwargs = {'embedding_type': 'weighted', 'k': None,
'n_components': None, 'preprocessor': None}
nndef_kwargs = {'k': 2}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.LFDA(k=2))),
remove_spaces(f"LFDA({merged_kwargs})"))
def test_itml(self):
def_kwargs = {'convergence_threshold': 0.001, 'gamma': 1.0,
'max_iter': 1000, 'preprocessor': None,
'prior': 'identity', 'random_state': None, 'verbose': False}
nndef_kwargs = {'gamma': 0.5}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.ITML(gamma=0.5))),
remove_spaces(f"ITML({merged_kwargs})"))
def_kwargs = {'convergence_threshold': 0.001, 'gamma': 1.0,
'max_iter': 1000, 'num_constraints': None,
'preprocessor': None, 'prior': 'identity',
'random_state': None, 'verbose': False}
nndef_kwargs = {'num_constraints': 7}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(
remove_spaces(str(metric_learn.ITML_Supervised(num_constraints=7))),
remove_spaces(f"ITML_Supervised({merged_kwargs})"))
def test_lsml(self):
def_kwargs = {'max_iter': 1000, 'preprocessor': None, 'prior': 'identity',
'random_state': None, 'tol': 0.001, 'verbose': False}
nndef_kwargs = {'tol': 0.1}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.LSML(tol=0.1))),
remove_spaces(f"LSML({merged_kwargs})"))
def_kwargs = {'max_iter': 1000, 'num_constraints': None,
'preprocessor': None, 'prior': 'identity',
'random_state': None, 'tol': 0.001, 'verbose': False,
'weights': None}
nndef_kwargs = {'verbose': True}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(
remove_spaces(str(metric_learn.LSML_Supervised(verbose=True))),
remove_spaces(f"LSML_Supervised({merged_kwargs})"))
def test_sdml(self):
def_kwargs = {'balance_param': 0.5, 'preprocessor': None,
'prior': 'identity', 'random_state': None,
'sparsity_param': 0.01, 'verbose': False}
nndef_kwargs = {'verbose': True}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.SDML(verbose=True))),
remove_spaces(f"SDML({merged_kwargs})"))
def_kwargs = {'balance_param': 0.5, 'num_constraints': None,
'preprocessor': None, 'prior': 'identity',
'random_state': None, 'sparsity_param': 0.01,
'verbose': False}
nndef_kwargs = {'sparsity_param': 0.5}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(
remove_spaces(str(metric_learn.SDML_Supervised(sparsity_param=0.5))),
remove_spaces(f"SDML_Supervised({merged_kwargs})"))
def test_rca(self):
def_kwargs = {'n_components': None, 'preprocessor': None}
nndef_kwargs = {'n_components': 3}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.RCA(n_components=3))),
remove_spaces(f"RCA({merged_kwargs})"))
def_kwargs = {'chunk_size': 2, 'n_components': None, 'num_chunks': 100,
'preprocessor': None, 'random_state': None}
nndef_kwargs = {'num_chunks': 5}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(
remove_spaces(str(metric_learn.RCA_Supervised(num_chunks=5))),
remove_spaces(f"RCA_Supervised({merged_kwargs})"))
def test_mlkr(self):
def_kwargs = {'init': 'auto', 'max_iter': 1000,
'n_components': None, 'preprocessor': None,
'random_state': None, 'tol': None, 'verbose': False}
nndef_kwargs = {'max_iter': 777}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.MLKR(max_iter=777))),
remove_spaces(f"MLKR({merged_kwargs})"))
def test_mmc(self):
def_kwargs = {'convergence_threshold': 0.001, 'diagonal': False,
'diagonal_c': 1.0, 'init': 'identity', 'max_iter': 100,
'max_proj': 10000, 'preprocessor': None,
'random_state': None, 'verbose': False}
nndef_kwargs = {'diagonal': True}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.MMC(diagonal=True))),
remove_spaces(f"MMC({merged_kwargs})"))
def_kwargs = {'convergence_threshold': 1e-06, 'diagonal': False,
'diagonal_c': 1.0, 'init': 'identity', 'max_iter': 100,
'max_proj': 10000, 'num_constraints': None,
'preprocessor': None, 'random_state': None,
'verbose': False}
nndef_kwargs = {'max_iter': 1}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(
remove_spaces(str(metric_learn.MMC_Supervised(max_iter=1))),
remove_spaces(f"MMC_Supervised({merged_kwargs})"))
@pytest.mark.parametrize('estimator, build_dataset', metric_learners,
ids=ids_metric_learners)
def test_get_metric_is_independent_from_metric_learner(estimator,
build_dataset):
"""Tests that the get_metric method returns a function that is independent
from the original metric learner"""
input_data, labels, _, X = build_dataset()
model = clone(estimator)
set_random_state(model)
# we fit the metric learner on it and then we compute the metric on some
# points
model.fit(*remove_y(model, input_data, labels))
metric = model.get_metric()
score = metric(X[0], X[1])
# then we refit the estimator on another dataset
model.fit(*remove_y(model, np.sin(input_data), labels))
# we recompute the distance between the two points: it should be the same
score_bis = metric(X[0], X[1])
assert score_bis == score
@pytest.mark.parametrize('estimator, build_dataset', metric_learners,
ids=ids_metric_learners)
def test_get_metric_raises_error(estimator, build_dataset):
"""Tests that the metric returned by get_metric raises errors similar to
the distance functions in scipy.spatial.distance"""
input_data, labels, _, X = build_dataset()
model = clone(estimator)
set_random_state(model)
model.fit(*remove_y(model, input_data, labels))
metric = model.get_metric()
list_test_get_metric_raises = [(X[0].tolist() + [5.2], X[1]), # vectors with
# different dimensions
(X[0:4], X[1:5]), # 2D vectors
(X[0].tolist() + [5.2], X[1] + [7.2])]
# vectors of same dimension but incompatible with what the metric learner
# was trained on
for u, v in list_test_get_metric_raises:
with pytest.raises(ValueError):
metric(u, v)
@pytest.mark.parametrize('estimator, build_dataset', metric_learners,
ids=ids_metric_learners)
def test_get_metric_works_does_not_raise(estimator, build_dataset):
"""Tests that the metric returned by get_metric does not raise errors (or
warnings) similarly to the distance functions in scipy.spatial.distance"""
input_data, labels, _, X = build_dataset()
model = clone(estimator)
set_random_state(model)
model.fit(*remove_y(model, input_data, labels))
metric = model.get_metric()
list_test_get_metric_doesnt_raise = [(X[0], X[1]),
(X[0].tolist(), X[1].tolist()),
(X[0][None], X[1][None])]
for u, v in list_test_get_metric_doesnt_raise:
with pytest.warns(None) as record:
metric(u, v)
assert len(record) == 0
# Test that the scalar case works
model.components_ = np.array([3.1])
metric = model.get_metric()
for u, v in [(5, 6.7), ([5], [6.7]), ([[5]], [[6.7]])]:
with pytest.warns(None) as record:
metric(u, v)
assert len(record) == 0
@pytest.mark.parametrize('estimator, build_dataset', metric_learners,
ids=ids_metric_learners)
def test_n_components(estimator, build_dataset):
"""Check that estimators that have a n_components parameters can use it
and that it actually works as expected"""
input_data, labels, _, X = build_dataset()
model = clone(estimator)
if hasattr(model, 'n_components'):
set_random_state(model)
model.set_params(n_components=None)
model.fit(*remove_y(model, input_data, labels))
assert model.components_.shape == (X.shape[1], X.shape[1])
model = clone(estimator)
set_random_state(model)
model.set_params(n_components=X.shape[1] - 1)
model.fit(*remove_y(model, input_data, labels))
assert model.components_.shape == (X.shape[1] - 1, X.shape[1])
model = clone(estimator)
set_random_state(model)
model.set_params(n_components=X.shape[1] + 1)
with pytest.raises(ValueError) as expected_err:
model.fit(*remove_y(model, input_data, labels))
assert (str(expected_err.value) ==
'Invalid n_components, must be in [1, {}]'.format(X.shape[1]))
model = clone(estimator)
set_random_state(model)
model.set_params(n_components=0)
with pytest.raises(ValueError) as expected_err:
model.fit(*remove_y(model, input_data, labels))
assert (str(expected_err.value) ==
'Invalid n_components, must be in [1, {}]'.format(X.shape[1]))
if __name__ == '__main__':
unittest.main()
|
[
"metric_learn.SDML",
"metric_learn.sklearn_shims.set_random_state",
"numpy.array",
"sklearn.clone",
"numpy.sin",
"unittest.main",
"metric_learn.MMC",
"metric_learn.MMC_Supervised",
"metric_learn.MLKR",
"metric_learn.NCA",
"test.test_utils.remove_y",
"metric_learn.SDML_Supervised",
"metric_learn.LFDA",
"metric_learn.LSML",
"pytest.warns",
"pytest.raises",
"re.sub",
"metric_learn.ITML_Supervised",
"metric_learn.RCA_Supervised",
"pytest.mark.parametrize",
"metric_learn.LMNN",
"metric_learn.LSML_Supervised",
"metric_learn.Covariance",
"metric_learn.RCA",
"metric_learn.ITML"
] |
[((7493, 7591), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""estimator, build_dataset"""', 'metric_learners'], {'ids': 'ids_metric_learners'}), "('estimator, build_dataset', metric_learners, ids=\n ids_metric_learners)\n", (7516, 7591), False, 'import pytest\n'), ((8409, 8507), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""estimator, build_dataset"""', 'metric_learners'], {'ids': 'ids_metric_learners'}), "('estimator, build_dataset', metric_learners, ids=\n ids_metric_learners)\n", (8432, 8507), False, 'import pytest\n'), ((9366, 9464), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""estimator, build_dataset"""', 'metric_learners'], {'ids': 'ids_metric_learners'}), "('estimator, build_dataset', metric_learners, ids=\n ids_metric_learners)\n", (9389, 9464), False, 'import pytest\n'), ((10462, 10560), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""estimator, build_dataset"""', 'metric_learners'], {'ids': 'ids_metric_learners'}), "('estimator, build_dataset', metric_learners, ids=\n ids_metric_learners)\n", (10485, 10560), False, 'import pytest\n'), ((292, 313), 're.sub', 're.sub', (['"""\\\\s+"""', '""""""', 's'], {}), "('\\\\s+', '', s)\n", (298, 313), False, 'import re\n'), ((7919, 7935), 'sklearn.clone', 'clone', (['estimator'], {}), '(estimator)\n', (7924, 7935), False, 'from sklearn import clone\n'), ((7938, 7961), 'metric_learn.sklearn_shims.set_random_state', 'set_random_state', (['model'], {}), '(model)\n', (7954, 7961), False, 'from metric_learn.sklearn_shims import set_random_state, SKLEARN_AT_LEAST_0_22\n'), ((8772, 8788), 'sklearn.clone', 'clone', (['estimator'], {}), '(estimator)\n', (8777, 8788), False, 'from sklearn import clone\n'), ((8791, 8814), 'metric_learn.sklearn_shims.set_random_state', 'set_random_state', (['model'], {}), '(model)\n', (8807, 8814), False, 'from metric_learn.sklearn_shims import set_random_state, SKLEARN_AT_LEAST_0_22\n'), ((9761, 9777), 'sklearn.clone', 'clone', (['estimator'], {}), '(estimator)\n', (9766, 9777), False, 'from sklearn import clone\n'), ((9780, 9803), 'metric_learn.sklearn_shims.set_random_state', 'set_random_state', (['model'], {}), '(model)\n', (9796, 9803), False, 'from metric_learn.sklearn_shims import set_random_state, SKLEARN_AT_LEAST_0_22\n'), ((10269, 10284), 'numpy.array', 'np.array', (['[3.1]'], {}), '([3.1])\n', (10277, 10284), True, 'import numpy as np\n'), ((10803, 10819), 'sklearn.clone', 'clone', (['estimator'], {}), '(estimator)\n', (10808, 10819), False, 'from sklearn import clone\n'), ((11942, 11957), 'unittest.main', 'unittest.main', ([], {}), '()\n', (11955, 11957), False, 'import unittest\n'), ((10862, 10885), 'metric_learn.sklearn_shims.set_random_state', 'set_random_state', (['model'], {}), '(model)\n', (10878, 10885), False, 'from metric_learn.sklearn_shims import set_random_state, SKLEARN_AT_LEAST_0_22\n'), ((11054, 11070), 'sklearn.clone', 'clone', (['estimator'], {}), '(estimator)\n', (11059, 11070), False, 'from sklearn import clone\n'), ((11075, 11098), 'metric_learn.sklearn_shims.set_random_state', 'set_random_state', (['model'], {}), '(model)\n', (11091, 11098), False, 'from metric_learn.sklearn_shims import set_random_state, SKLEARN_AT_LEAST_0_22\n'), ((11281, 11297), 'sklearn.clone', 'clone', (['estimator'], {}), '(estimator)\n', (11286, 11297), False, 'from sklearn import clone\n'), ((11302, 11325), 'metric_learn.sklearn_shims.set_random_state', 'set_random_state', (['model'], {}), '(model)\n', (11318, 11325), False, 'from metric_learn.sklearn_shims 
import set_random_state, SKLEARN_AT_LEAST_0_22\n'), ((11609, 11625), 'sklearn.clone', 'clone', (['estimator'], {}), '(estimator)\n', (11614, 11625), False, 'from sklearn import clone\n'), ((11630, 11653), 'metric_learn.sklearn_shims.set_random_state', 'set_random_state', (['model'], {}), '(model)\n', (11646, 11653), False, 'from metric_learn.sklearn_shims import set_random_state, SKLEARN_AT_LEAST_0_22\n'), ((8062, 8097), 'test.test_utils.remove_y', 'remove_y', (['model', 'input_data', 'labels'], {}), '(model, input_data, labels)\n', (8070, 8097), False, 'from test.test_utils import ids_metric_learners, metric_learners, remove_y\n'), ((8828, 8863), 'test.test_utils.remove_y', 'remove_y', (['model', 'input_data', 'labels'], {}), '(model, input_data, labels)\n', (8836, 8863), False, 'from test.test_utils import ids_metric_learners, metric_learners, remove_y\n'), ((9317, 9342), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (9330, 9342), False, 'import pytest\n'), ((9817, 9852), 'test.test_utils.remove_y', 'remove_y', (['model', 'input_data', 'labels'], {}), '(model, input_data, labels)\n', (9825, 9852), False, 'from test.test_utils import ids_metric_learners, metric_learners, remove_y\n'), ((10133, 10151), 'pytest.warns', 'pytest.warns', (['None'], {}), '(None)\n', (10145, 10151), False, 'import pytest\n'), ((10382, 10400), 'pytest.warns', 'pytest.warns', (['None'], {}), '(None)\n', (10394, 10400), False, 'import pytest\n'), ((11385, 11410), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (11398, 11410), False, 'import pytest\n'), ((11700, 11725), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (11713, 11725), False, 'import pytest\n'), ((8239, 8257), 'numpy.sin', 'np.sin', (['input_data'], {}), '(input_data)\n', (8245, 8257), True, 'import numpy as np\n'), ((10941, 10976), 'test.test_utils.remove_y', 'remove_y', (['model', 'input_data', 'labels'], {}), '(model, input_data, labels)\n', (10949, 10976), False, 'from test.test_utils import ids_metric_learners, metric_learners, remove_y\n'), ((11164, 11199), 'test.test_utils.remove_y', 'remove_y', (['model', 'input_data', 'labels'], {}), '(model, input_data, labels)\n', (11172, 11199), False, 'from test.test_utils import ids_metric_learners, metric_learners, remove_y\n'), ((1022, 1047), 'metric_learn.Covariance', 'metric_learn.Covariance', ([], {}), '()\n', (1045, 1047), False, 'import metric_learn\n'), ((1607, 1651), 'metric_learn.LMNN', 'metric_learn.LMNN', ([], {'convergence_tol': '(0.01)', 'k': '(6)'}), '(convergence_tol=0.01, k=6)\n', (1624, 1651), False, 'import metric_learn\n'), ((2047, 2076), 'metric_learn.NCA', 'metric_learn.NCA', ([], {'max_iter': '(42)'}), '(max_iter=42)\n', (2063, 2076), False, 'import metric_learn\n'), ((2414, 2436), 'metric_learn.LFDA', 'metric_learn.LFDA', ([], {'k': '(2)'}), '(k=2)\n', (2431, 2436), False, 'import metric_learn\n'), ((2861, 2889), 'metric_learn.ITML', 'metric_learn.ITML', ([], {'gamma': '(0.5)'}), '(gamma=0.5)\n', (2878, 2889), False, 'import metric_learn\n'), ((3350, 3397), 'metric_learn.ITML_Supervised', 'metric_learn.ITML_Supervised', ([], {'num_constraints': '(7)'}), '(num_constraints=7)\n', (3378, 3397), False, 'import metric_learn\n'), ((3768, 3794), 'metric_learn.LSML', 'metric_learn.LSML', ([], {'tol': '(0.1)'}), '(tol=0.1)\n', (3785, 3794), False, 'import metric_learn\n'), ((4235, 4277), 'metric_learn.LSML_Supervised', 'metric_learn.LSML_Supervised', ([], {'verbose': '(True)'}), '(verbose=True)\n', (4263, 4277), 
False, 'import metric_learn\n'), ((4685, 4716), 'metric_learn.SDML', 'metric_learn.SDML', ([], {'verbose': '(True)'}), '(verbose=True)\n', (4702, 4716), False, 'import metric_learn\n'), ((5160, 5208), 'metric_learn.SDML_Supervised', 'metric_learn.SDML_Supervised', ([], {'sparsity_param': '(0.5)'}), '(sparsity_param=0.5)\n', (5188, 5208), False, 'import metric_learn\n'), ((5496, 5528), 'metric_learn.RCA', 'metric_learn.RCA', ([], {'n_components': '(3)'}), '(n_components=3)\n', (5512, 5528), False, 'import metric_learn\n'), ((5877, 5918), 'metric_learn.RCA_Supervised', 'metric_learn.RCA_Supervised', ([], {'num_chunks': '(5)'}), '(num_chunks=5)\n', (5904, 5918), False, 'import metric_learn\n'), ((6327, 6358), 'metric_learn.MLKR', 'metric_learn.MLKR', ([], {'max_iter': '(777)'}), '(max_iter=777)\n', (6344, 6358), False, 'import metric_learn\n'), ((6845, 6876), 'metric_learn.MMC', 'metric_learn.MMC', ([], {'diagonal': '(True)'}), '(diagonal=True)\n', (6861, 6876), False, 'import metric_learn\n'), ((7388, 7427), 'metric_learn.MMC_Supervised', 'metric_learn.MMC_Supervised', ([], {'max_iter': '(1)'}), '(max_iter=1)\n', (7415, 7427), False, 'import metric_learn\n'), ((11445, 11480), 'test.test_utils.remove_y', 'remove_y', (['model', 'input_data', 'labels'], {}), '(model, input_data, labels)\n', (11453, 11480), False, 'from test.test_utils import ids_metric_learners, metric_learners, remove_y\n'), ((11760, 11795), 'test.test_utils.remove_y', 'remove_y', (['model', 'input_data', 'labels'], {}), '(model, input_data, labels)\n', (11768, 11795), False, 'from test.test_utils import ids_metric_learners, metric_learners, remove_y\n')]
|
import numpy as np
import pandas as pd
import scipy as sc
from scipy.stats import randint, norm, multivariate_normal, ortho_group
from scipy import linalg
from scipy.linalg import subspace_angles, orth
from scipy.optimize import fmin
import math
from statistics import mean
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import itertools as it
import matplotlib.pyplot as plt
from cluster.selfrepresentation import ElasticNetSubspaceClustering
import time
# functions to simulate the data
def first_simulation(p, dim, k):
b = [orth(np.random.rand(p, dim)) for i in range(k + 1)]
return b
def find_theta_max(b, t, k):
theta_max = []
for i in range(1, k + 1):
for j in range(1, i):
theta_max.append(subspace_angles(b[i], b[j]).max())
max_avg_theta = mean(theta_max)
theta = max_avg_theta * t
return theta
def second_simulation(p, k, dim, theta, b):
    # find the mixing weight a giving a mean pairwise principal angle equal to theta
    def find_a_for_theta(a, b=b, k=k, theta=theta):
temp_theta = []
for i in range(1, k + 1):
for j in range(1, i):
temp_theta.append(subspace_angles(b[0] * (1 - a) + b[i] * a, b[0] * (1 - a) + b[j] * a).max())
return mean(temp_theta) - theta
a = sc.optimize.bisect(find_a_for_theta, 0, 1)
B = [b[0] * (1 - a) + b[i] * a for i in range(1, k + 1)]
return B
def third_simulation(n, p, dim, B, k, theta):
z = np.random.randint(0, k, n)
w = np.random.multivariate_normal(mean=np.zeros(dim), cov=np.diag(np.ones(dim)), size=n)
X = np.zeros((n, p))
for i in range(n):
X[i,] = np.random.multivariate_normal(mean=np.array(np.dot(np.array(w[i, :]), B[z[i]].T)).flatten(),
cov=np.diag(np.ones(p))) # sigma value is missing
return n, p, dim, theta, X, z, B
# data simulation
def final_data_simulation(k):
nn = [2 ** j for j in range(3, 11)]
pp = [2 ** j for j in range(4, 8)]
dd = [2 ** -j for j in range(1, 5)]
tt = [10 ** -j for j in range(0, 3)]
df = pd.DataFrame(columns=['n', 'p', 'dim', 'theta', 'X', 'z', 'B'])
for p in pp:
for d in dd:
dim = int(d * p)
b = first_simulation(p=p, dim=dim, k=k)
for t in tt:
theta = find_theta_max(b=b, t=t, k=k)
for n in nn:
B = second_simulation(p=p, k=k, dim=dim, theta=theta, b=b)
row = pd.Series(list(third_simulation(n=n, p=p, dim=dim, B=B, k=k, theta=theta)[0:7]),
["n", "p", "dim", "theta", "X", "z", "B"])
df = df.append([row], ignore_index=True)
return df
# exploratory check on a single simulated data set (row 31 of df)
df = final_data_simulation(4)
X = df['X'][31]
z = df['z'][31]
z
dim = 4
p = 16
k = 4
kmeans = KMeans(n_clusters=k)
kmeans
temp_df = pd.DataFrame(X)
temp_df['cluster'] = kmeans.fit_predict(X)
# for i in range(k) :
i = 1
df_new = temp_df[temp_df['cluster'] == i].drop(['cluster'], axis=1)
cluster_kmean = KMeans(n_clusters=k).fit_predict(X)
data = {'cluster1': z, 'cluster2': cluster_kmean}
clusters = pd.DataFrame(data, index=range(len(z)))
all_per = list(it.permutations(range(k)))
accuracy_rate_all_per = np.zeros(len(all_per))
c = [i for i in range(k)]
for l, p in enumerate(all_per):
dic = dict(zip(c, p))
clusters['premut_cluster'] = clusters['cluster2'].transform(lambda x: dic[x] if x in dic else None)
m = clusters.groupby(['cluster1', 'premut_cluster']).size().unstack(fill_value=0)
accuracy_rate_all_per[l] = np.trace(m)
accuracy_rate_all_per.max(), len(cluster_kmean)
per = all_per[2]
dic = dict(zip(c, per))
clusters['premut_cluster'] = clusters['cluster2'].transform(lambda x: dic[x] if x in dic else None)
clusters.groupby(['cluster2', 'premut_cluster']).size()
# find kmeans clusters and subspaces
def pca_subspace(df, i, dim):
df_new = df[df['cluster'] == i].drop(['cluster'], axis=1)
    pca_components_number = len(df_new) - 1 if len(df_new) < dim else dim  # handle low n (fewer samples than dim)
pca = PCA(n_components=pca_components_number)
pca.fit_transform(df_new)
B_kmeans = pca.components_
return B_kmeans.T
def find_kmeans_subspace(X, k, dim):
kmeans = KMeans(n_clusters=k)
temp_df = pd.DataFrame(X)
temp_df['cluster'] = kmeans.fit_predict(X)
B_kmean = [pca_subspace(temp_df, i, dim) for i in range(k)]
return B_kmean
def find_ensc_subspace(X, k, dim):
temp_df = pd.DataFrame(X)
    temp_df['cluster'] = ElasticNetSubspaceClustering(n_clusters=k, algorithm='lasso_lars', gamma=50).fit(X.T).labels_  # use the fitted labels_, not the estimator object
B_ensc = [pca_subspace(temp_df, i, dim) for i in range(k)]
return B_ensc
# Recovery Performance
def performance_measure1(k, B1, B2):
all_per = list(it.permutations(range(k)))
sum_cos_angles_all_per = np.zeros(len(all_per))
for l, val in enumerate(all_per):
for i in range(k):
if B2[val[i]].shape[1] > 0: # handling with empty clusters
                sum_cos_angles_all_per[l] += (math.cos(
                    subspace_angles(B1[i], B2[val[i]]).max())) ** 2  # TODO: decide between the largest (.max()) and smallest (.min()) principal angle
cost_subspace = sum_cos_angles_all_per.max()
return cost_subspace
# TODO: decide how empty clusters should be handled (measure 1 currently skips them)
def performance_measure2(k, cluster1, cluster2):
data = {'cluster1': cluster1, 'cluster2': cluster2}
clusters = pd.DataFrame(data, index=range(len(cluster1)))
all_per = list(it.permutations(range(k)))
accuracy_rate_all_per = np.zeros(len(all_per))
for l, per in enumerate(all_per):
c = [i for i in range(k)]
dic = dict(zip(c, per))
clusters['premut_cluster'] = clusters['cluster2'].transform(lambda x: dic[x] if x in dic else None)
m = clusters.groupby(['cluster1', 'premut_cluster']).size().unstack(fill_value=0)
accuracy_rate_all_per[l] = np.trace(m)
cost_cluster = (accuracy_rate_all_per.max()) / len(cluster1)
return cost_cluster
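# A small self-contained sketch (toy inputs, not part of the simulation
# pipeline) of what the two recovery measures return: measure 1 scores how well
# two sets of subspace bases match under the best permutation, measure 2 gives
# the best-permutation clustering accuracy.
def _demo_performance_measures():
    e1 = np.array([[1.0], [0.0], [0.0]])
    e2 = np.array([[0.0], [1.0], [0.0]])
    subspace_score = performance_measure1(2, [e1, e2], [e2, e1])  # -> 2.0
    cluster_score = performance_measure2(2, [0, 0, 1, 1], [1, 1, 0, 0])  # -> 1.0
    return subspace_score, cluster_score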
def all_process(k):
df = final_data_simulation(k)
df['B_kmean'] = df.apply(lambda x: find_kmeans_subspace(x['X'], k, x['dim']), axis=1)
df['cluster_kmean'] = df.apply(lambda x: KMeans(n_clusters=k).fit_predict(x['X']),
axis=1) # try to return the clusters in "find_kmeans_subspace"
# df['B_ensc'] = df.apply(lambda x: find_ensc_subspace(x['X'], k, x['dim']), axis=1)
# df['cluster_ensc']=df.apply(lambda x: ElasticNetSubspaceClustering(n_clusters=k,algorithm='lasso_lars',gamma=50).fit(x['X'].T), axis=1)
return df
measure1_kmean = pd.DataFrame()
measure2_kmean = pd.DataFrame()
k = 4
for iter in range(2):
df = all_process(k)
measure1_kmean.insert(iter, "", df.apply(lambda x: performance_measure1(k, x['B'], x['B_kmean']), axis=1), True)
measure2_kmean.insert(iter, "", df.apply(lambda x: performance_measure2(k, x['z'], x['cluster_kmean']), axis=1),
True)
# measure1_ensc.insert(iter, "", df.apply(lambda x: performance_measure1(k, x['B'], x['B_ensc']), axis=1), True)
# measure2_ensc.insert(iter, "", df.apply(lambda x: performance_measure2(k, x['z'], x['cluster_ensc']), axis=1), True)
df['measure1_kmean'] = measure1_kmean.apply(lambda x: mean(x), axis=1)
df['measure2_kmean'] = measure2_kmean.apply(lambda x: mean(x), axis=1)
# df['measure1_ensc'] = measure1_ensc.apply(lambda x: mean(x), axis=1)
# df['measure2_ensc'] = measure2_ensc.apply(lambda x: mean(x), axis=1)
df['theta_degree'] = df.apply(lambda x: math.degrees(x['theta']), axis=1)
# plotting
def plotting_performance_measure(df, measure):
pp = [2 ** j for j in range(4, 8)]
dd = [2 ** -j for j in range(1, 5)]
plt.title("PERFORMANCE MEASURE1 - KMEANS")
i = 1
for p in pp:
for d in dd:
dim = int(d * p)
sns_df = df[(df['p'] == p) & (df['dim'] == dim)]
sns_df = sns_df.pivot("theta_degree", "n", measure)
plt.subplot(4, 4, i)
ax = sns.heatmap(sns_df)
plt.title('p= {p} ,dim= {dim} '.format(p=p, dim=dim))
i += 1
plotting_performance_measure(df, "measure1_kmean")
plotting_performance_measure(df, "measure2_kmean")
plotting_performance_measure(df, "measure1_ensc")
plotting_performance_measure(df, "measure2_ensc")
|
[
"sklearn.cluster.KMeans",
"statistics.mean",
"numpy.trace",
"scipy.optimize.bisect",
"numpy.random.rand",
"cluster.selfrepresentation.ElasticNetSubspaceClustering",
"numpy.ones",
"sklearn.decomposition.PCA",
"math.degrees",
"seaborn.heatmap",
"numpy.array",
"numpy.random.randint",
"numpy.zeros",
"scipy.linalg.subspace_angles",
"pandas.DataFrame",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot"
] |
[((2804, 2824), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'k'}), '(n_clusters=k)\n', (2810, 2824), False, 'from sklearn.cluster import KMeans\n'), ((2842, 2857), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {}), '(X)\n', (2854, 2857), True, 'import pandas as pd\n'), ((6547, 6561), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (6559, 6561), True, 'import pandas as pd\n'), ((6579, 6593), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (6591, 6593), True, 'import pandas as pd\n'), ((859, 874), 'statistics.mean', 'mean', (['theta_max'], {}), '(theta_max)\n', (863, 874), False, 'from statistics import mean\n'), ((1272, 1314), 'scipy.optimize.bisect', 'sc.optimize.bisect', (['find_a_for_theta', '(0)', '(1)'], {}), '(find_a_for_theta, 0, 1)\n', (1290, 1314), True, 'import scipy as sc\n'), ((1445, 1471), 'numpy.random.randint', 'np.random.randint', (['(0)', 'k', 'n'], {}), '(0, k, n)\n', (1462, 1471), True, 'import numpy as np\n'), ((1573, 1589), 'numpy.zeros', 'np.zeros', (['(n, p)'], {}), '((n, p))\n', (1581, 1589), True, 'import numpy as np\n'), ((2075, 2138), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['n', 'p', 'dim', 'theta', 'X', 'z', 'B']"}), "(columns=['n', 'p', 'dim', 'theta', 'X', 'z', 'B'])\n", (2087, 2138), True, 'import pandas as pd\n'), ((3544, 3555), 'numpy.trace', 'np.trace', (['m'], {}), '(m)\n', (3552, 3555), True, 'import numpy as np\n'), ((4058, 4097), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'pca_components_number'}), '(n_components=pca_components_number)\n', (4061, 4097), False, 'from sklearn.decomposition import PCA\n'), ((4233, 4253), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'k'}), '(n_clusters=k)\n', (4239, 4253), False, 'from sklearn.cluster import KMeans\n'), ((4268, 4283), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {}), '(X)\n', (4280, 4283), True, 'import pandas as pd\n'), ((4465, 4480), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {}), '(X)\n', (4477, 4480), True, 'import pandas as pd\n'), ((7647, 7689), 'matplotlib.pyplot.title', 'plt.title', (['"""PERFORMANCE MEASURE1 - KMEANS"""'], {}), "('PERFORMANCE MEASURE1 - KMEANS')\n", (7656, 7689), True, 'import matplotlib.pyplot as plt\n'), ((3013, 3033), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'k'}), '(n_clusters=k)\n', (3019, 3033), False, 'from sklearn.cluster import KMeans\n'), ((5850, 5861), 'numpy.trace', 'np.trace', (['m'], {}), '(m)\n', (5858, 5861), True, 'import numpy as np\n'), ((7201, 7208), 'statistics.mean', 'mean', (['x'], {}), '(x)\n', (7205, 7208), False, 'from statistics import mean\n'), ((7272, 7279), 'statistics.mean', 'mean', (['x'], {}), '(x)\n', (7276, 7279), False, 'from statistics import mean\n'), ((7471, 7495), 'math.degrees', 'math.degrees', (["x['theta']"], {}), "(x['theta'])\n", (7483, 7495), False, 'import math\n'), ((605, 627), 'numpy.random.rand', 'np.random.rand', (['p', 'dim'], {}), '(p, dim)\n', (619, 627), True, 'import numpy as np\n'), ((1238, 1254), 'statistics.mean', 'mean', (['temp_theta'], {}), '(temp_theta)\n', (1242, 1254), False, 'from statistics import mean\n'), ((1515, 1528), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (1523, 1528), True, 'import numpy as np\n'), ((4506, 4582), 'cluster.selfrepresentation.ElasticNetSubspaceClustering', 'ElasticNetSubspaceClustering', ([], {'n_clusters': 'k', 'algorithm': '"""lasso_lars"""', 'gamma': '(50)'}), "(n_clusters=k, algorithm='lasso_lars', gamma=50)\n", (4534, 4582), False, 'from cluster.selfrepresentation import 
ElasticNetSubspaceClustering\n'), ((7904, 7924), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(4)', 'i'], {}), '(4, 4, i)\n', (7915, 7924), True, 'import matplotlib.pyplot as plt\n'), ((7942, 7961), 'seaborn.heatmap', 'sns.heatmap', (['sns_df'], {}), '(sns_df)\n', (7953, 7961), True, 'import seaborn as sns\n'), ((1542, 1554), 'numpy.ones', 'np.ones', (['dim'], {}), '(dim)\n', (1549, 1554), True, 'import numpy as np\n'), ((1780, 1790), 'numpy.ones', 'np.ones', (['p'], {}), '(p)\n', (1787, 1790), True, 'import numpy as np\n'), ((6142, 6162), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'k'}), '(n_clusters=k)\n', (6148, 6162), False, 'from sklearn.cluster import KMeans\n'), ((804, 831), 'scipy.linalg.subspace_angles', 'subspace_angles', (['b[i]', 'b[j]'], {}), '(b[i], b[j])\n', (819, 831), False, 'from scipy.linalg import subspace_angles, orth\n'), ((1146, 1215), 'scipy.linalg.subspace_angles', 'subspace_angles', (['(b[0] * (1 - a) + b[i] * a)', '(b[0] * (1 - a) + b[j] * a)'], {}), '(b[0] * (1 - a) + b[i] * a, b[0] * (1 - a) + b[j] * a)\n', (1161, 1215), False, 'from scipy.linalg import subspace_angles, orth\n'), ((1680, 1697), 'numpy.array', 'np.array', (['w[i, :]'], {}), '(w[i, :])\n', (1688, 1697), True, 'import numpy as np\n'), ((5046, 5080), 'scipy.linalg.subspace_angles', 'subspace_angles', (['B1[i]', 'B2[val[i]]'], {}), '(B1[i], B2[val[i]])\n', (5061, 5080), False, 'from scipy.linalg import subspace_angles, orth\n')]
|
# LinearRegression.py
# March 2018
#
# This script builds a Linear regression class to analyse data.
# It supports a continuous response and several continuous features.
# The class has a constructor building and fitting the model, and
# a plotting method for residuals.
#
# Dependencies:
#   matplotlib, numpy, pandas
#
# Usage:
# from pythia.LinearRegression import LinearRegression
# lm = LinearRegression(X,y)
# print(lm.weights)
#  lm.plot_residuals()
## Imports
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
import os
sys.path.insert(0, os.path.abspath("."))
sys.path.insert(0, os.path.abspath("../"))
import pandas as pd
import numpy as np
import numpy.random as random
## The LinearRegression class
class LinearRegression:
"""
LinearRegression is a class performing a linear regression on a data frame
containing continuous features.
Its attributes are the coefficients estimates, the fitted values
and the residuals from fitting a linear regression of y on X.
Args:
X: a pandas.dataframe containing continuous variables (including the response)
y: a pandas.Series of same length containing the response
Attributes:
weights: a pandas.Series, the estimated coefficients
fitted: a pandas.Series, the fitted values
residuals: a pandas.Series, the residuals
"""
def __init__(self, X, y):
# Check the type of the features and select the numeric ones
X_mat = X.select_dtypes(include=[np.number], exclude=None)
if X_mat.shape[1] == 0:
raise NameError("You need at least one continuous features")
try:
for var in X_mat.columns:
assert np.all(X_mat[[var]].notnull())
except AssertionError:
raise NameError("Some of your numeric features contain missing values. Please deal with them (remove, impute...) before using this function.")
else:
# Add an intercept column and convert the data frame in a matrix
n = X_mat.shape[0]
X_mat['intercept'] = pd.Series(np.ones(n), index=X_mat.index)
names = X_mat.columns
            X_mat = X_mat.values
d = X_mat.shape[1]
            y = np.array(y).reshape((n, 1))
# Set hyperparameters
alpha = 0.001
n_iter = 1000000
# The gradient of the squared error
def ols_grad(w):
return np.dot(np.transpose(X_mat), np.dot(X_mat, w) - y)
            # An elementwise L1 norm used as the convergence criterion
def norm(x):
return np.sum(np.abs(x))
# Update the weights using gradient method
weights = np.zeros(d).reshape((d,1))
i = 0
grad = ols_grad(weights)
while i < n_iter and norm(grad) > 1e-7:
grad = ols_grad(weights)
weights = weights - alpha*grad
i += 1
temp = {}
for i in range(len(weights)):
temp[names[i]] = weights[i,0]
self.weights = temp
# Calculate the fitted values
self.fitted = np.dot(X_mat, weights)
# Calculate the residuals
self.residuals = y - self.fitted
def plot_residuals(self):
"""
This script makes various diagnostic plots for linear regression analysis.
It supports a continuous response and several continuous features.
Args:
A LinearRegression object containing
weights: the estimates of the parameters of the linear regression
fitted: the fitted values
residuals: the residuals.
Returns:
Residuals vs Fitted Plot
Normal Q-Q Plot
Fitted vs True Value Plot(s)
"""
assert len(self.residuals) > 0, "There are no residuals"
assert len(self.fitted) > 0, "There are no fitted values"
assert len(self.residuals) == len(self.fitted), "The number of residuals and fitted values do not match"
# Get fitted values and residuals
residuals = self.residuals
fitted = self.fitted
residuals = residuals.flatten()
fitted = fitted.flatten()
# Fitted vs Residuals
plt.figure(figsize=(10,6))
plt.scatter(fitted, residuals, color='grey')
plt.axhline(y = 0, linewidth = 1, color = 'red')
plt.xlabel('Fitted Values')
plt.ylabel('Residuals')
plt.title('Residuals vs. Fitted Values')
resfit = plt.show()
# Normal QQ Plot
res = np.asarray(residuals)
res.sort()
# Generate normal distribution
ndist = random.normal(loc = 0, scale = 1, size = len(res))
ndist.sort()
# Fit Normal Trendline.
fit = np.polyfit(ndist, res, 1)
fit = fit.tolist()
func = np.poly1d(fit)
trendline_y = func(ndist)
plt.figure(figsize=(10,6))
plt.scatter(ndist, res, color = 'grey')
plt.plot(ndist, trendline_y, color = 'red')
plt.title("Normal QQ Plot")
plt.xlabel("Theoretical quantiles")
plt.ylabel("Expreimental quantiles")
qqplot = plt.show()
return (resfit,qqplot)
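# A minimal usage sketch (synthetic data; the demo_* names are illustrative
# only). It mirrors the usage shown in the header: fit the model, inspect the
# estimated weights, then draw the residual diagnostics.
if __name__ == "__main__":
    n_obs = 50
    demo_X = pd.DataFrame({'x1': np.random.normal(size=n_obs),
                           'x2': np.random.normal(size=n_obs)})
    demo_y = pd.Series(3.0 * demo_X['x1'] - 2.0 * demo_X['x2'] + 1.0
                       + np.random.normal(scale=0.1, size=n_obs))
    lm = LinearRegression(demo_X, demo_y)
    print(lm.weights)
    lm.plot_residuals()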
|
[
"matplotlib.pyplot.ylabel",
"numpy.polyfit",
"numpy.array",
"numpy.poly1d",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.asarray",
"matplotlib.pyplot.axhline",
"numpy.dot",
"matplotlib.pyplot.scatter",
"numpy.abs",
"numpy.ones",
"matplotlib.use",
"matplotlib.pyplot.title",
"numpy.transpose",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure",
"numpy.zeros",
"os.path.abspath"
] |
[((468, 489), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (482, 489), False, 'import matplotlib\n'), ((563, 583), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (578, 583), False, 'import os\n'), ((604, 626), 'os.path.abspath', 'os.path.abspath', (['"""../"""'], {}), "('../')\n", (619, 626), False, 'import os\n'), ((4297, 4324), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (4307, 4324), True, 'import matplotlib.pyplot as plt\n'), ((4332, 4376), 'matplotlib.pyplot.scatter', 'plt.scatter', (['fitted', 'residuals'], {'color': '"""grey"""'}), "(fitted, residuals, color='grey')\n", (4343, 4376), True, 'import matplotlib.pyplot as plt\n'), ((4386, 4428), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(0)', 'linewidth': '(1)', 'color': '"""red"""'}), "(y=0, linewidth=1, color='red')\n", (4397, 4428), True, 'import matplotlib.pyplot as plt\n'), ((4443, 4470), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Fitted Values"""'], {}), "('Fitted Values')\n", (4453, 4470), True, 'import matplotlib.pyplot as plt\n'), ((4479, 4502), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Residuals"""'], {}), "('Residuals')\n", (4489, 4502), True, 'import matplotlib.pyplot as plt\n'), ((4511, 4551), 'matplotlib.pyplot.title', 'plt.title', (['"""Residuals vs. Fitted Values"""'], {}), "('Residuals vs. Fitted Values')\n", (4520, 4551), True, 'import matplotlib.pyplot as plt\n'), ((4569, 4579), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4577, 4579), True, 'import matplotlib.pyplot as plt\n'), ((4620, 4641), 'numpy.asarray', 'np.asarray', (['residuals'], {}), '(residuals)\n', (4630, 4641), True, 'import numpy as np\n'), ((4836, 4861), 'numpy.polyfit', 'np.polyfit', (['ndist', 'res', '(1)'], {}), '(ndist, res, 1)\n', (4846, 4861), True, 'import numpy as np\n'), ((4904, 4918), 'numpy.poly1d', 'np.poly1d', (['fit'], {}), '(fit)\n', (4913, 4918), True, 'import numpy as np\n'), ((4962, 4989), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (4972, 4989), True, 'import matplotlib.pyplot as plt\n'), ((4997, 5034), 'matplotlib.pyplot.scatter', 'plt.scatter', (['ndist', 'res'], {'color': '"""grey"""'}), "(ndist, res, color='grey')\n", (5008, 5034), True, 'import matplotlib.pyplot as plt\n'), ((5045, 5086), 'matplotlib.pyplot.plot', 'plt.plot', (['ndist', 'trendline_y'], {'color': '"""red"""'}), "(ndist, trendline_y, color='red')\n", (5053, 5086), True, 'import matplotlib.pyplot as plt\n'), ((5097, 5124), 'matplotlib.pyplot.title', 'plt.title', (['"""Normal QQ Plot"""'], {}), "('Normal QQ Plot')\n", (5106, 5124), True, 'import matplotlib.pyplot as plt\n'), ((5133, 5168), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Theoretical quantiles"""'], {}), "('Theoretical quantiles')\n", (5143, 5168), True, 'import matplotlib.pyplot as plt\n'), ((5177, 5213), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Expreimental quantiles"""'], {}), "('Expreimental quantiles')\n", (5187, 5213), True, 'import matplotlib.pyplot as plt\n'), ((5231, 5241), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5239, 5241), True, 'import matplotlib.pyplot as plt\n'), ((3157, 3179), 'numpy.dot', 'np.dot', (['X_mat', 'weights'], {}), '(X_mat, weights)\n', (3163, 3179), True, 'import numpy as np\n'), ((2092, 2102), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (2099, 2102), True, 'import numpy as np\n'), ((2242, 2253), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2250, 2253), True, 
'import numpy as np\n'), ((2468, 2487), 'numpy.transpose', 'np.transpose', (['X_mat'], {}), '(X_mat)\n', (2480, 2487), True, 'import numpy as np\n'), ((2611, 2620), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (2617, 2620), True, 'import numpy as np\n'), ((2700, 2711), 'numpy.zeros', 'np.zeros', (['d'], {}), '(d)\n', (2708, 2711), True, 'import numpy as np\n'), ((2489, 2505), 'numpy.dot', 'np.dot', (['X_mat', 'w'], {}), '(X_mat, w)\n', (2495, 2505), True, 'import numpy as np\n')]
|
#!python3
#
# Copyright (C) 2014-2015 <NAME>. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""
PYPOWER-Dynamics
Functions for standard blocks (solves a step)
"""
import numpy as np
# Gain block
# yo = p * yi
# p is a scalar gain coefficient
def gain_block(yi, p):
yo = p * yi
return yo
# Divide block
# yo = yi / p
# p is a scalar divisor
def divide_block(yi, p):
if p != 0:
yo = yi / p
else:
        print('Error: division by zero, ignoring division operation')
yo = yi
return yo
# Integrator block
# K / sT
# p = [K, T]
def int_block(h, x0, yi, p):
f = yi * p[0] / p[1]
x1 = x0 + h * f
yo = x1
return yo, x1, f
# Lag block
# K / (1 + sT)
# p = [K, T]
def lag_block(h, x0, yi, p):
f = (yi - x0) / p[1]
x1 = x0 + h * f
yo = p[0] * x1
return yo, x1, f
# Lead-Lag block
# (1 + sTa) / (1 + sTb)
# p = [Ta, Tb]
def leadlag_block(h, x0, yi, p):
f = (yi - x0) / p[1]
x1 = x0 + h * f
yo = x1 + p[0] * (yi - x0) / p[1]
return yo, x1, f
# Limiter block
# yo = min_lim, if yi < min_lim
# yo = max_lim, if yi > max_lim
# yo = yi, min_lim <= yi <= max_lim
# p = [min_lim, max_lim]
def lim_block(yi, p):
min_lim = p[0]
max_lim = p[1]
if yi < min_lim:
yo = min_lim
elif yi > max_lim:
yo = max_lim
else:
yo = yi
return yo
# Multiplication block
# yo = yi1 * yi2 * ... * yin
# yi = [yi1, yi2, ... yin]
def mult_block(yi):
yo = np.prod(yi)
return yo
# Summation block
# yo = yi1 + yi2 + ... + yin
# yi = [yi1, yi2, ... yin]
def sum_block(yi):
yo = sum(yi)
return yo
# Washout block
# s / (1 + sT)
# p is the time constant T
def wout_block(h, x0, yi, p):
f = (yi - x0) / p
x1 = x0 + h * f
yo = (yi - x1) / p
return yo, x1, f
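# A minimal sketch (illustrative values only) of how these step functions are
# driven by a solver: h is the integration step, x0 the block state carried
# between steps, and each call returns the output, the updated state and the
# state derivative. Here a unit step input is passed through a lag block
# K / (1 + sT); after several time constants the output settles near K.
def _demo_lag_step_response(h=0.01, n_steps=500, K=1.0, T=0.5):
    x = 0.0
    yo = 0.0
    for _ in range(n_steps):
        yo, x, _ = lag_block(h, x, 1.0, [K, T])
    return yo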
|
[
"numpy.prod"
] |
[((1560, 1571), 'numpy.prod', 'np.prod', (['yi'], {}), '(yi)\n', (1567, 1571), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""Supports F10.7 index values. Downloads data from LASP and the SWPC.
Properties
----------
platform
'sw'
name
'f107'
tag
- 'historic' LASP F10.7 data (downloads by month, loads by day)
- 'prelim' Preliminary SWPC daily solar indices
- 'daily' Daily SWPC solar indices (contains last 30 days)
- 'forecast' Grab forecast data from SWPC (next 3 days)
- '45day' 45-Day Forecast data from the Air Force
Example
-------
Download and load all of the historic F10.7 data. Note that it will not
stop on the current date, but a point in the past when post-processing has
been successfully completed.
::
f107 = pysat.Instrument('sw', 'f107', tag='historic')
f107.download(start=f107.lasp_stime, stop=f107.today(), freq='MS')
f107.load(date=f107.lasp_stime, end_date=f107.today())
Note
----
The forecast data is stored by generation date, where each file contains the
forecast for the next three days. Forecast data downloads are only supported
for the current day. When loading forecast data, the date specified with the
load command is the date the forecast was generated. The data loaded will span
three days. To always ensure you are loading the most recent data, load
the data with tomorrow's date.
::
f107 = pysat.Instrument('sw', 'f107', tag='forecast')
f107.download()
f107.load(date=f107.tomorrow())
Warnings
--------
The 'forecast' F10.7 data loads three days at a time. Loading multiple files,
loading multiple days, the data padding feature, and multi_file_day feature
available from the pysat.Instrument object is not appropriate for 'forecast'
data.
Like 'forecast', the '45day' forecast loads a specific period of time (45 days)
and subsequent files contain overlapping data. Thus, loading multiple files,
loading multiple days, the data padding feature, and multi_file_day feature
available from the pysat.Instrument object is not appropriate for '45day' data.
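Loading the most recent 45-day forecast follows the same pattern (shown here
as a sketch mirroring the 'forecast' example above):
::
    f107 = pysat.Instrument('sw', 'f107', tag='45day')
    f107.download()
    f107.load(date=f107.tomorrow())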
"""
import datetime as dt
import ftplib
import json
import numpy as np
import os
import requests
import sys
import warnings
import pandas as pds
import pysat
from pysatSpaceWeather.instruments.methods import f107 as mm_f107
from pysatSpaceWeather.instruments.methods.ace import load_csv_data
from pysatSpaceWeather.instruments.methods import general
logger = pysat.logger
# ----------------------------------------------------------------------------
# Instrument attributes
platform = 'sw'
name = 'f107'
tags = {'historic': 'Daily LASP value of F10.7',
'prelim': 'Preliminary SWPC daily solar indices',
'daily': 'Daily SWPC solar indices (contains last 30 days)',
'forecast': 'SWPC Forecast F107 data next (3 days)',
'45day': 'Air Force 45-day Forecast'}
# Dict keyed by inst_id that lists supported tags for each inst_id
inst_ids = {'': [tag for tag in tags.keys()]}
# Dict keyed by inst_id that lists supported tags and a good day of test data
# Generate today's date to support loading forecast data
now = dt.datetime.utcnow()
today = dt.datetime(now.year, now.month, now.day)
tomorrow = today + pds.DateOffset(days=1)
# The LASP archive start day is also important
lasp_stime = dt.datetime(1947, 2, 14)
# ----------------------------------------------------------------------------
# Instrument test attributes
_test_dates = {'': {'historic': dt.datetime(2009, 1, 1),
'prelim': dt.datetime(2009, 1, 1),
'daily': tomorrow,
'forecast': tomorrow,
'45day': tomorrow}}
# Other tags assumed to be True
_test_download_travis = {'': {'prelim': False}}
# ----------------------------------------------------------------------------
# Instrument methods
preprocess = general.preprocess
def init(self):
"""Initializes the Instrument object with instrument specific values.
Runs once upon instantiation.
"""
self.acknowledgements = mm_f107.acknowledgements(self.name, self.tag)
self.references = mm_f107.references(self.name, self.tag)
logger.info(self.acknowledgements)
# Define the historic F10.7 starting time
if self.tag == 'historic':
self.lasp_stime = lasp_stime
return
def clean(self):
""" Cleaning function for Space Weather indices
Note
----
F10.7 doesn't require cleaning
"""
return
# ----------------------------------------------------------------------------
# Instrument functions
def load(fnames, tag=None, inst_id=None):
"""Load F10.7 index files
Parameters
----------
fnames : pandas.Series
Series of filenames
tag : str or NoneType
tag or None (default=None)
inst_id : str or NoneType
satellite id or None (default=None)
Returns
-------
data : pandas.DataFrame
Object containing satellite data
meta : pysat.Meta
Object containing metadata such as column names and units
Note
----
Called by pysat. Not intended for direct use by user.
"""
# Get the desired file dates and file names from the daily indexed list
file_dates = list()
if tag in ['historic', 'prelim']:
unique_files = list()
for fname in fnames:
file_dates.append(dt.datetime.strptime(fname[-10:], '%Y-%m-%d'))
if fname[0:-11] not in unique_files:
unique_files.append(fname[0:-11])
fnames = unique_files
# Load the CSV data files
data = load_csv_data(fnames, read_csv_kwargs={"index_col": 0,
"parse_dates": True})
# If there is a date range, downselect here
if len(file_dates) > 0:
idx, = np.where((data.index >= min(file_dates))
& (data.index < max(file_dates) + dt.timedelta(days=1)))
data = data.iloc[idx, :]
# Initialize the metadata
meta = pysat.Meta()
meta['f107'] = {meta.labels.units: 'SFU',
meta.labels.name: 'F10.7 cm solar index',
meta.labels.notes: '',
meta.labels.desc:
'F10.7 cm radio flux in Solar Flux Units (SFU)',
meta.labels.fill_val: np.nan,
meta.labels.min_val: 0,
meta.labels.max_val: np.inf}
if tag == '45day':
meta['ap'] = {meta.labels.units: '',
meta.labels.name: 'Daily Ap index',
meta.labels.notes: '',
meta.labels.desc: 'Daily average of 3-h ap indices',
meta.labels.fill_val: np.nan,
meta.labels.min_val: 0,
meta.labels.max_val: 400}
elif tag == 'daily' or tag == 'prelim':
meta['ssn'] = {meta.labels.units: '',
meta.labels.name: 'Sunspot Number',
meta.labels.notes: '',
meta.labels.desc: 'SESC Sunspot Number',
meta.labels.fill_val: -999,
meta.labels.min_val: 0,
meta.labels.max_val: np.inf}
meta['ss_area'] = {meta.labels.units: '10$^-6$ Solar Hemisphere',
meta.labels.name: 'Sunspot Area',
meta.labels.notes: '',
meta.labels.desc:
''.join(['Sunspot Area in Millionths of the ',
'Visible Hemisphere']),
meta.labels.fill_val: -999,
meta.labels.min_val: 0,
meta.labels.max_val: 1.0e6}
meta['new_reg'] = {meta.labels.units: '',
meta.labels.name: 'New Regions',
meta.labels.notes: '',
meta.labels.desc: 'New active solar regions',
meta.labels.fill_val: -999,
meta.labels.min_val: 0,
meta.labels.max_val: np.inf}
meta['smf'] = {meta.labels.units: 'G',
meta.labels.name: 'Solar Mean Field',
meta.labels.notes: '',
                       meta.labels.desc: 'Stanford Solar Mean Field',
meta.labels.fill_val: -999,
meta.labels.min_val: 0,
meta.labels.max_val: np.inf}
meta['goes_bgd_flux'] = {meta.labels.units: 'W/m^2',
meta.labels.name: 'X-ray Background Flux',
meta.labels.notes: '',
meta.labels.desc:
'GOES15 X-ray Background Flux',
meta.labels.fill_val: '*',
meta.labels.min_val: -np.inf,
meta.labels.max_val: np.inf}
meta['c_flare'] = {meta.labels.units: '',
meta.labels.name: 'C X-Ray Flares',
meta.labels.notes: '',
meta.labels.desc: 'C-class X-Ray Flares',
meta.labels.fill_val: -1,
meta.labels.min_val: 0,
meta.labels.max_val: 9}
meta['m_flare'] = {meta.labels.units: '',
meta.labels.name: 'M X-Ray Flares',
meta.labels.notes: '',
meta.labels.desc: 'M-class X-Ray Flares',
meta.labels.fill_val: -1,
meta.labels.min_val: 0,
meta.labels.max_val: 9}
meta['x_flare'] = {meta.labels.units: '',
meta.labels.name: 'X X-Ray Flares',
meta.labels.notes: '',
meta.labels.desc: 'X-class X-Ray Flares',
meta.labels.fill_val: -1,
meta.labels.min_val: 0,
meta.labels.max_val: 9}
meta['o1_flare'] = {meta.labels.units: '',
meta.labels.name: '1 Optical Flares',
meta.labels.notes: '',
meta.labels.desc: '1-class Optical Flares',
meta.labels.fill_val: -1,
meta.labels.min_val: 0,
meta.labels.max_val: 9}
meta['o2_flare'] = {meta.labels.units: '',
meta.labels.name: '2 Optical Flares',
meta.labels.notes: '',
meta.labels.desc: '2-class Optical Flares',
meta.labels.fill_val: -1,
meta.labels.min_val: 0,
meta.labels.max_val: 9}
meta['o3_flare'] = {meta.labels.units: '',
meta.labels.name: '3 Optical Flares',
meta.labels.notes: '',
meta.labels.desc: '3-class Optical Flares',
meta.labels.fill_val: -1,
meta.labels.min_val: 0,
meta.labels.max_val: 9}
return data, meta
def list_files(tag=None, inst_id=None, data_path=None, format_str=None):
"""Return a Pandas Series of every file for F10.7 data
Parameters
----------
tag : string or NoneType
Denotes type of file to load.
(default=None)
inst_id : string or NoneType
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : string or NoneType
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : string or NoneType
User specified file format. If None is specified, the default
formats associated with the supplied tags are used. (default=None)
Returns
-------
out_files : pysat._files.Files
A class containing the verified available files
Note
----
Called by pysat. Not intended for direct use by user.
"""
if data_path is not None:
if tag == 'historic':
# Files are by month, going to add date to monthly filename for
# each day of the month. The load routine will load a month of
# data and use the appended date to select out appropriate data.
if format_str is None:
format_str = 'f107_monthly_{year:04d}-{month:02d}.txt'
out_files = pysat.Files.from_os(data_path=data_path,
format_str=format_str)
if not out_files.empty:
out_files.loc[out_files.index[-1] + pds.DateOffset(months=1)
- pds.DateOffset(days=1)] = out_files.iloc[-1]
out_files = out_files.asfreq('D', 'pad')
out_files = out_files + '_' + out_files.index.strftime(
'%Y-%m-%d')
elif tag == 'prelim':
# Files are by year (and quarter)
if format_str is None:
format_str = ''.join(['f107_prelim_{year:04d}_{month:02d}',
'_v{version:01d}.txt'])
out_files = pysat.Files.from_os(data_path=data_path,
format_str=format_str)
if not out_files.empty:
# Set each file's valid length at a 1-day resolution
orig_files = out_files.sort_index().copy()
new_files = list()
for orig in orig_files.iteritems():
# Version determines each file's valid length
version = int(orig[1].split("_v")[1][0])
doff = pds.DateOffset(years=1) if version == 2 \
else pds.DateOffset(months=3)
istart = orig[0]
iend = istart + doff - pds.DateOffset(days=1)
# Ensure the end time does not extend past the number of
# possible days included based on the file's download time
fname = os.path.join(data_path, orig[1])
dend = dt.datetime.utcfromtimestamp(os.path.getctime(fname))
dend = dend - pds.DateOffset(days=1)
if dend < iend:
iend = dend
# Pad the original file index
out_files.loc[iend] = orig[1]
out_files = out_files.sort_index()
# Save the files at a daily cadence over the desired period
new_files.append(out_files.loc[istart:
iend].asfreq('D', 'pad'))
# Add the newly indexed files to the file output
out_files = pds.concat(new_files, sort=True)
out_files = out_files.dropna()
out_files = out_files.sort_index()
out_files = out_files + '_' + out_files.index.strftime(
'%Y-%m-%d')
elif tag in ['daily', 'forecast', '45day']:
format_str = ''.join(['f107_', tag,
'_{year:04d}-{month:02d}-{day:02d}.txt'])
out_files = pysat.Files.from_os(data_path=data_path,
format_str=format_str)
# Pad list of files data to include most recent file under tomorrow
if not out_files.empty:
pds_off = pds.DateOffset(days=1)
out_files.loc[out_files.index[-1]
+ pds_off] = out_files.values[-1]
out_files.loc[out_files.index[-1]
+ pds_off] = out_files.values[-1]
else:
raise ValueError(' '.join(('Unrecognized tag name for Space',
'Weather Index F107:', tag)))
else:
raise ValueError(' '.join(('A data_path must be passed to the loading',
'routine for F107')))
return out_files
def download(date_array, tag, inst_id, data_path, update_files=False):
"""Routine to download F107 index data
Parameters
-----------
date_array : list-like
        Sequence of dates to download data for.
tag : string or NoneType
Denotes type of file to load.
inst_id : string or NoneType
Specifies the satellite ID for a constellation.
data_path : string or NoneType
Path to data directory.
update_files : bool
Re-download data for files that already exist if True (default=False)
Note
----
Called by pysat. Not intended for direct use by user.
Warnings
--------
Only able to download current forecast data, not archived forecasts.
"""
# download standard F107 data
if tag == 'historic':
# Test the date array, updating it if necessary
if date_array.freq != 'MS':
warnings.warn(''.join(['Historic F10.7 downloads should be invoked',
" with the `freq='MS'` option."]))
date_array = pysat.utils.time.create_date_range(
dt.datetime(date_array[0].year, date_array[0].month, 1),
date_array[-1], freq='MS')
# Download from LASP, by month
for dl_date in date_array:
# Create the name to which the local file will be saved
str_date = dl_date.strftime('%Y-%m')
data_file = os.path.join(data_path,
'f107_monthly_{:s}.txt'.format(str_date))
if update_files or not os.path.isfile(data_file):
# Set the download webpage
dstr = ''.join(['http://lasp.colorado.edu/lisird/latis/dap/',
'noaa_radio_flux.json?time%3E=',
dl_date.strftime('%Y-%m-%d'),
'T00:00:00.000Z&time%3C=',
(dl_date + pds.DateOffset(months=1)
- pds.DateOffset(days=1)).strftime('%Y-%m-%d'),
'T00:00:00.000Z'])
# The data is returned as a JSON file
req = requests.get(dstr)
# Process the JSON file
raw_dict = json.loads(req.text)['noaa_radio_flux']
data = pds.DataFrame.from_dict(raw_dict['samples'])
if data.empty:
warnings.warn("no data for {:}".format(dl_date),
UserWarning)
else:
# The file format changed over time
try:
# This is the new data format
times = [dt.datetime.strptime(time, '%Y%m%d')
for time in data.pop('time')]
except ValueError:
# Accepts old file formats
times = [dt.datetime.strptime(time, '%Y %m %d')
for time in data.pop('time')]
data.index = times
# Replace fill value with NaNs
idx, = np.where(data['f107'] == -99999.0)
data.iloc[idx, :] = np.nan
# Create a local CSV file
data.to_csv(data_file, header=True)
elif tag == 'prelim':
ftp = ftplib.FTP('ftp.swpc.noaa.gov') # connect to host, default port
ftp.login() # user anonymous, passwd <PASSWORD>@
ftp.cwd('/pub/indices/old_indices')
bad_fname = list()
# Get the local files, to ensure that the version 1 files are
# downloaded again if more data has been added
local_files = list_files(tag, inst_id, data_path)
# To avoid downloading multiple files, cycle dates based on file length
dl_date = date_array[0]
while dl_date <= date_array[-1]:
# The file name changes, depending on how recent the requested
# data is
qnum = (dl_date.month - 1) // 3 + 1 # Integer floor division
qmonth = (qnum - 1) * 3 + 1
quar = 'Q{:d}_'.format(qnum)
fnames = ['{:04d}{:s}DSD.txt'.format(dl_date.year, ss)
for ss in ['_', quar]]
versions = ["01_v2", "{:02d}_v1".format(qmonth)]
vend = [dt.datetime(dl_date.year, 12, 31),
dt.datetime(dl_date.year, qmonth, 1)
+ pds.DateOffset(months=3) - pds.DateOffset(days=1)]
downloaded = False
rewritten = False
# Attempt the download(s)
for iname, fname in enumerate(fnames):
# Test to see if we already tried this filename
if fname in bad_fname:
continue
local_fname = fname
saved_fname = os.path.join(data_path, local_fname)
ofile = '_'.join(['f107', 'prelim',
'{:04d}'.format(dl_date.year),
'{:s}.txt'.format(versions[iname])])
outfile = os.path.join(data_path, ofile)
if os.path.isfile(outfile):
downloaded = True
# Check the date to see if this should be rewritten
checkfile = os.path.split(outfile)[-1]
has_file = local_files == checkfile
if np.any(has_file):
if has_file[has_file].index[-1] < vend[iname]:
# This file will be updated again, but only attempt
# to do so if enough time has passed from the
# last time it was downloaded
yesterday = today - pds.DateOffset(days=1)
if has_file[has_file].index[-1] < yesterday:
rewritten = True
else:
# The file does not exist, if it can be downloaded, it
# should be 'rewritten'
rewritten = True
# Attempt to download if the file does not exist or if the
# file has been updated
if rewritten or not downloaded:
try:
sys.stdout.flush()
ftp.retrbinary('RETR ' + fname,
open(saved_fname, 'wb').write)
downloaded = True
logger.info(' '.join(('Downloaded file for ',
dl_date.strftime('%x'))))
except ftplib.error_perm as exception:
# Could not fetch, so cannot rewrite
rewritten = False
# Test for an error
if str(exception.args[0]).split(" ", 1)[0] != '550':
raise RuntimeError(exception)
else:
# file isn't actually there, try the next name
os.remove(saved_fname)
# Save this so we don't try again
# Because there are two possible filenames for
# each time, it's ok if one isn't there. We just
# don't want to keep looking for it.
bad_fname.append(fname)
# If the first file worked, don't try again
if downloaded:
break
if not downloaded:
logger.info(' '.join(('File not available for',
dl_date.strftime('%x'))))
elif rewritten:
with open(saved_fname, 'r') as fprelim:
lines = fprelim.read()
mm_f107.rewrite_daily_file(dl_date.year, outfile, lines)
os.remove(saved_fname)
# Cycle to the next date
dl_date = vend[iname] + pds.DateOffset(days=1)
# Close connection after downloading all dates
ftp.close()
elif tag == 'daily':
logger.info('This routine can only download the latest 30 day file')
# Set the download webpage
furl = 'https://services.swpc.noaa.gov/text/daily-solar-indices.txt'
req = requests.get(furl)
# Save the output
data_file = 'f107_daily_{:s}.txt'.format(today.strftime('%Y-%m-%d'))
outfile = os.path.join(data_path, data_file)
mm_f107.rewrite_daily_file(today.year, outfile, req.text)
elif tag == 'forecast':
logger.info(' '.join(('This routine can only download the current',
'forecast, not archived forecasts')))
# Set the download webpage
furl = ''.join(('https://services.swpc.noaa.gov/text/',
'3-day-solar-geomag-predictions.txt'))
req = requests.get(furl)
# Parse text to get the date the prediction was generated
date_str = req.text.split(':Issued: ')[-1].split(' UTC')[0]
dl_date = dt.datetime.strptime(date_str, '%Y %b %d %H%M')
# Get starting date of the forecasts
raw_data = req.text.split(':Prediction_dates:')[-1]
forecast_date = dt.datetime.strptime(raw_data[3:14], '%Y %b %d')
# Set the times for output data
times = pds.date_range(forecast_date, periods=3, freq='1D')
# String data is the forecast value for the next three days
raw_data = req.text.split('10cm_flux:')[-1]
raw_data = raw_data.split('\n')[1]
val1 = int(raw_data[24:27])
val2 = int(raw_data[38:41])
val3 = int(raw_data[52:])
# Put data into nicer DataFrame
data = pds.DataFrame([val1, val2, val3], index=times, columns=['f107'])
# Write out as a file
data_file = 'f107_forecast_{:s}.txt'.format(
dl_date.strftime('%Y-%m-%d'))
data.to_csv(os.path.join(data_path, data_file), header=True)
elif tag == '45day':
logger.info(' '.join(('This routine can only download the current',
'forecast, not archived forecasts')))
# Set the download webpage
furl = 'https://services.swpc.noaa.gov/text/45-day-ap-forecast.txt'
req = requests.get(furl)
# Parse text to get the date the prediction was generated
date_str = req.text.split(':Issued: ')[-1].split(' UTC')[0]
dl_date = dt.datetime.strptime(date_str, '%Y %b %d %H%M')
# Get to the forecast data
raw_data = req.text.split('45-DAY AP FORECAST')[-1]
# Grab AP part
raw_ap = raw_data.split('45-DAY F10.7 CM FLUX FORECAST')[0]
raw_ap = raw_ap.split('\n')[1:-1]
# Get the F107
raw_f107 = raw_data.split('45-DAY F10.7 CM FLUX FORECAST')[-1]
raw_f107 = raw_f107.split('\n')[1:-4]
# Parse the AP data
ap_times, ap = mm_f107.parse_45day_block(raw_ap)
# Parse the F10.7 data
f107_times, f107 = mm_f107.parse_45day_block(raw_f107)
# Collect into DataFrame
data = pds.DataFrame(f107, index=f107_times, columns=['f107'])
data['ap'] = ap
# Write out as a file
data_file = 'f107_45day_{:s}.txt'.format(dl_date.strftime('%Y-%m-%d'))
data.to_csv(os.path.join(data_path, data_file), header=True)
return
|
[
"pysat.Files.from_os",
"datetime.timedelta",
"pysatSpaceWeather.instruments.methods.f107.rewrite_daily_file",
"pandas.date_range",
"os.remove",
"datetime.datetime",
"ftplib.FTP",
"numpy.where",
"pandas.DataFrame.from_dict",
"os.path.split",
"pandas.DataFrame",
"sys.stdout.flush",
"json.loads",
"pysat.Meta",
"os.path.getctime",
"requests.get",
"numpy.any",
"os.path.isfile",
"pysatSpaceWeather.instruments.methods.f107.parse_45day_block",
"pysatSpaceWeather.instruments.methods.f107.acknowledgements",
"pandas.DateOffset",
"pysatSpaceWeather.instruments.methods.f107.references",
"datetime.datetime.utcnow",
"datetime.datetime.strptime",
"os.path.join",
"pysatSpaceWeather.instruments.methods.ace.load_csv_data",
"pandas.concat"
] |
[((3004, 3024), 'datetime.datetime.utcnow', 'dt.datetime.utcnow', ([], {}), '()\n', (3022, 3024), True, 'import datetime as dt\n'), ((3033, 3074), 'datetime.datetime', 'dt.datetime', (['now.year', 'now.month', 'now.day'], {}), '(now.year, now.month, now.day)\n', (3044, 3074), True, 'import datetime as dt\n'), ((3178, 3202), 'datetime.datetime', 'dt.datetime', (['(1947)', '(2)', '(14)'], {}), '(1947, 2, 14)\n', (3189, 3202), True, 'import datetime as dt\n'), ((3094, 3116), 'pandas.DateOffset', 'pds.DateOffset', ([], {'days': '(1)'}), '(days=1)\n', (3108, 3116), True, 'import pandas as pds\n'), ((3926, 3971), 'pysatSpaceWeather.instruments.methods.f107.acknowledgements', 'mm_f107.acknowledgements', (['self.name', 'self.tag'], {}), '(self.name, self.tag)\n', (3950, 3971), True, 'from pysatSpaceWeather.instruments.methods import f107 as mm_f107\n'), ((3994, 4033), 'pysatSpaceWeather.instruments.methods.f107.references', 'mm_f107.references', (['self.name', 'self.tag'], {}), '(self.name, self.tag)\n', (4012, 4033), True, 'from pysatSpaceWeather.instruments.methods import f107 as mm_f107\n'), ((5456, 5532), 'pysatSpaceWeather.instruments.methods.ace.load_csv_data', 'load_csv_data', (['fnames'], {'read_csv_kwargs': "{'index_col': 0, 'parse_dates': True}"}), "(fnames, read_csv_kwargs={'index_col': 0, 'parse_dates': True})\n", (5469, 5532), False, 'from pysatSpaceWeather.instruments.methods.ace import load_csv_data\n'), ((5872, 5884), 'pysat.Meta', 'pysat.Meta', ([], {}), '()\n', (5882, 5884), False, 'import pysat\n'), ((3345, 3368), 'datetime.datetime', 'dt.datetime', (['(2009)', '(1)', '(1)'], {}), '(2009, 1, 1)\n', (3356, 3368), True, 'import datetime as dt\n'), ((3400, 3423), 'datetime.datetime', 'dt.datetime', (['(2009)', '(1)', '(1)'], {}), '(2009, 1, 1)\n', (3411, 3423), True, 'import datetime as dt\n'), ((12614, 12677), 'pysat.Files.from_os', 'pysat.Files.from_os', ([], {'data_path': 'data_path', 'format_str': 'format_str'}), '(data_path=data_path, format_str=format_str)\n', (12633, 12677), False, 'import pysat\n'), ((19601, 19632), 'ftplib.FTP', 'ftplib.FTP', (['"""ftp.swpc.noaa.gov"""'], {}), "('ftp.swpc.noaa.gov')\n", (19611, 19632), False, 'import ftplib\n'), ((5238, 5283), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['fname[-10:]', '"""%Y-%m-%d"""'], {}), "(fname[-10:], '%Y-%m-%d')\n", (5258, 5283), True, 'import datetime as dt\n'), ((13347, 13410), 'pysat.Files.from_os', 'pysat.Files.from_os', ([], {'data_path': 'data_path', 'format_str': 'format_str'}), '(data_path=data_path, format_str=format_str)\n', (13366, 13410), False, 'import pysat\n'), ((17335, 17390), 'datetime.datetime', 'dt.datetime', (['date_array[0].year', 'date_array[0].month', '(1)'], {}), '(date_array[0].year, date_array[0].month, 1)\n', (17346, 17390), True, 'import datetime as dt\n'), ((18401, 18419), 'requests.get', 'requests.get', (['dstr'], {}), '(dstr)\n', (18413, 18419), False, 'import requests\n'), ((18551, 18595), 'pandas.DataFrame.from_dict', 'pds.DataFrame.from_dict', (["raw_dict['samples']"], {}), "(raw_dict['samples'])\n", (18574, 18595), True, 'import pandas as pds\n'), ((24637, 24655), 'requests.get', 'requests.get', (['furl'], {}), '(furl)\n', (24649, 24655), False, 'import requests\n'), ((24778, 24812), 'os.path.join', 'os.path.join', (['data_path', 'data_file'], {}), '(data_path, data_file)\n', (24790, 24812), False, 'import os\n'), ((24821, 24878), 'pysatSpaceWeather.instruments.methods.f107.rewrite_daily_file', 'mm_f107.rewrite_daily_file', (['today.year', 'outfile', 'req.text'], {}), 
'(today.year, outfile, req.text)\n', (24847, 24878), True, 'from pysatSpaceWeather.instruments.methods import f107 as mm_f107\n'), ((14955, 14987), 'pandas.concat', 'pds.concat', (['new_files'], {'sort': '(True)'}), '(new_files, sort=True)\n', (14965, 14987), True, 'import pandas as pds\n'), ((15391, 15454), 'pysat.Files.from_os', 'pysat.Files.from_os', ([], {'data_path': 'data_path', 'format_str': 'format_str'}), '(data_path=data_path, format_str=format_str)\n', (15410, 15454), False, 'import pysat\n'), ((17790, 17815), 'os.path.isfile', 'os.path.isfile', (['data_file'], {}), '(data_file)\n', (17804, 17815), False, 'import os\n'), ((18488, 18508), 'json.loads', 'json.loads', (['req.text'], {}), '(req.text)\n', (18498, 18508), False, 'import json\n'), ((19376, 19410), 'numpy.where', 'np.where', (["(data['f107'] == -99999.0)"], {}), "(data['f107'] == -99999.0)\n", (19384, 19410), True, 'import numpy as np\n'), ((20579, 20612), 'datetime.datetime', 'dt.datetime', (['dl_date.year', '(12)', '(31)'], {}), '(dl_date.year, 12, 31)\n', (20590, 20612), True, 'import datetime as dt\n'), ((21094, 21130), 'os.path.join', 'os.path.join', (['data_path', 'local_fname'], {}), '(data_path, local_fname)\n', (21106, 21130), False, 'import os\n'), ((21345, 21375), 'os.path.join', 'os.path.join', (['data_path', 'ofile'], {}), '(data_path, ofile)\n', (21357, 21375), False, 'import os\n'), ((21396, 21419), 'os.path.isfile', 'os.path.isfile', (['outfile'], {}), '(outfile)\n', (21410, 21419), False, 'import os\n'), ((24308, 24330), 'pandas.DateOffset', 'pds.DateOffset', ([], {'days': '(1)'}), '(days=1)\n', (24322, 24330), True, 'import pandas as pds\n'), ((25228, 25246), 'requests.get', 'requests.get', (['furl'], {}), '(furl)\n', (25240, 25246), False, 'import requests\n'), ((25400, 25447), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['date_str', '"""%Y %b %d %H%M"""'], {}), "(date_str, '%Y %b %d %H%M')\n", (25420, 25447), True, 'import datetime as dt\n'), ((25578, 25626), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['raw_data[3:14]', '"""%Y %b %d"""'], {}), "(raw_data[3:14], '%Y %b %d')\n", (25598, 25626), True, 'import datetime as dt\n'), ((25684, 25735), 'pandas.date_range', 'pds.date_range', (['forecast_date'], {'periods': '(3)', 'freq': '"""1D"""'}), "(forecast_date, periods=3, freq='1D')\n", (25698, 25735), True, 'import pandas as pds\n'), ((26062, 26126), 'pandas.DataFrame', 'pds.DataFrame', (['[val1, val2, val3]'], {'index': 'times', 'columns': "['f107']"}), "([val1, val2, val3], index=times, columns=['f107'])\n", (26075, 26126), True, 'import pandas as pds\n'), ((5774, 5794), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (5786, 5794), True, 'import datetime as dt\n'), ((12867, 12889), 'pandas.DateOffset', 'pds.DateOffset', ([], {'days': '(1)'}), '(days=1)\n', (12881, 12889), True, 'import pandas as pds\n'), ((14246, 14278), 'os.path.join', 'os.path.join', (['data_path', 'orig[1]'], {}), '(data_path, orig[1])\n', (14258, 14278), False, 'import os\n'), ((15642, 15664), 'pandas.DateOffset', 'pds.DateOffset', ([], {'days': '(1)'}), '(days=1)\n', (15656, 15664), True, 'import pandas as pds\n'), ((20720, 20742), 'pandas.DateOffset', 'pds.DateOffset', ([], {'days': '(1)'}), '(days=1)\n', (20734, 20742), True, 'import pandas as pds\n'), ((21670, 21686), 'numpy.any', 'np.any', (['has_file'], {}), '(has_file)\n', (21676, 21686), True, 'import numpy as np\n'), ((24138, 24194), 'pysatSpaceWeather.instruments.methods.f107.rewrite_daily_file', 
'mm_f107.rewrite_daily_file', (['dl_date.year', 'outfile', 'lines'], {}), '(dl_date.year, outfile, lines)\n', (24164, 24194), True, 'from pysatSpaceWeather.instruments.methods import f107 as mm_f107\n'), ((24211, 24233), 'os.remove', 'os.remove', (['saved_fname'], {}), '(saved_fname)\n', (24220, 24233), False, 'import os\n'), ((26273, 26307), 'os.path.join', 'os.path.join', (['data_path', 'data_file'], {}), '(data_path, data_file)\n', (26285, 26307), False, 'import os\n'), ((26618, 26636), 'requests.get', 'requests.get', (['furl'], {}), '(furl)\n', (26630, 26636), False, 'import requests\n'), ((26790, 26837), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['date_str', '"""%Y %b %d %H%M"""'], {}), "(date_str, '%Y %b %d %H%M')\n", (26810, 26837), True, 'import datetime as dt\n'), ((27261, 27294), 'pysatSpaceWeather.instruments.methods.f107.parse_45day_block', 'mm_f107.parse_45day_block', (['raw_ap'], {}), '(raw_ap)\n', (27286, 27294), True, 'from pysatSpaceWeather.instruments.methods import f107 as mm_f107\n'), ((27354, 27389), 'pysatSpaceWeather.instruments.methods.f107.parse_45day_block', 'mm_f107.parse_45day_block', (['raw_f107'], {}), '(raw_f107)\n', (27379, 27389), True, 'from pysatSpaceWeather.instruments.methods import f107 as mm_f107\n'), ((27439, 27494), 'pandas.DataFrame', 'pds.DataFrame', (['f107'], {'index': 'f107_times', 'columns': "['f107']"}), "(f107, index=f107_times, columns=['f107'])\n", (27452, 27494), True, 'import pandas as pds\n'), ((12810, 12834), 'pandas.DateOffset', 'pds.DateOffset', ([], {'months': '(1)'}), '(months=1)\n', (12824, 12834), True, 'import pandas as pds\n'), ((13862, 13885), 'pandas.DateOffset', 'pds.DateOffset', ([], {'years': '(1)'}), '(years=1)\n', (13876, 13885), True, 'import pandas as pds\n'), ((13933, 13957), 'pandas.DateOffset', 'pds.DateOffset', ([], {'months': '(3)'}), '(months=3)\n', (13947, 13957), True, 'import pandas as pds\n'), ((14038, 14060), 'pandas.DateOffset', 'pds.DateOffset', ([], {'days': '(1)'}), '(days=1)\n', (14052, 14060), True, 'import pandas as pds\n'), ((14335, 14358), 'os.path.getctime', 'os.path.getctime', (['fname'], {}), '(fname)\n', (14351, 14358), False, 'import os\n'), ((14394, 14416), 'pandas.DateOffset', 'pds.DateOffset', ([], {'days': '(1)'}), '(days=1)\n', (14408, 14416), True, 'import pandas as pds\n'), ((18933, 18969), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['time', '"""%Y%m%d"""'], {}), "(time, '%Y%m%d')\n", (18953, 18969), True, 'import datetime as dt\n'), ((20634, 20670), 'datetime.datetime', 'dt.datetime', (['dl_date.year', 'qmonth', '(1)'], {}), '(dl_date.year, qmonth, 1)\n', (20645, 20670), True, 'import datetime as dt\n'), ((20693, 20717), 'pandas.DateOffset', 'pds.DateOffset', ([], {'months': '(3)'}), '(months=3)\n', (20707, 20717), True, 'import pandas as pds\n'), ((21564, 21586), 'os.path.split', 'os.path.split', (['outfile'], {}), '(outfile)\n', (21577, 21586), False, 'import os\n'), ((22555, 22573), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (22571, 22573), False, 'import sys\n'), ((27649, 27683), 'os.path.join', 'os.path.join', (['data_path', 'data_file'], {}), '(data_path, data_file)\n', (27661, 27683), False, 'import os\n'), ((19156, 19194), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['time', '"""%Y %m %d"""'], {}), "(time, '%Y %m %d')\n", (19176, 19194), True, 'import datetime as dt\n'), ((18227, 18249), 'pandas.DateOffset', 'pds.DateOffset', ([], {'days': '(1)'}), '(days=1)\n', (18241, 18249), True, 'import pandas as pds\n'), ((22019, 22041), 
'pandas.DateOffset', 'pds.DateOffset', ([], {'days': '(1)'}), '(days=1)\n', (22033, 22041), True, 'import pandas as pds\n'), ((23360, 23382), 'os.remove', 'os.remove', (['saved_fname'], {}), '(saved_fname)\n', (23369, 23382), False, 'import os\n'), ((18167, 18191), 'pandas.DateOffset', 'pds.DateOffset', ([], {'months': '(1)'}), '(months=1)\n', (18181, 18191), True, 'import pandas as pds\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Usage: %(scriptName) <bug_report_file> <data_prefix>
"""
import json
from timeit import default_timer
import datetime
import numpy as np
import pickle
import sys
from multiprocessing import Pool
from operator import itemgetter
from scipy import sparse
from sklearn.feature_extraction.text import TfidfTransformer
from tqdm import tqdm
from unqlite import UnQLite
from date_utils import convert_commit_date
def main():
print("Start", datetime.datetime.now().isoformat())
before = default_timer()
bug_report_file_path = sys.argv[1]
print("bug report file path", bug_report_file_path)
data_prefix = sys.argv[2]
print("data prefix", data_prefix)
fixes_list = extract_fixes_list(bug_report_file_path)
vectorize_enriched_api(fixes_list, data_prefix)
after = default_timer()
total = after - before
print("End", datetime.datetime.now().isoformat())
print("total time", total)
def load_bug_reports(bug_report_file_path):
"""load bug report file (the one generated from xml)"""
with open(bug_report_file_path) as bug_report_file:
bug_reports = json.load(bug_report_file)
return bug_reports
def sort_bug_reports_by_commit_date(bug_reports):
commit_dates = []
for index, commit in enumerate(tqdm(bug_reports)):
sha = bug_reports[commit]['commit']['metadata']['sha'].replace('commit ','').strip()
commit_date = convert_commit_date(bug_reports[commit]['commit']['metadata']['date'].replace('Date:','').strip())
commit_dates.append((sha, commit_date))
sorted_commit_dates = sorted(commit_dates, key=itemgetter(1))
sorted_commits = [commit_date[0] for commit_date in sorted_commit_dates]
return sorted_commits
def extract_fixes_list(bug_report_file_path):
bug_reports = load_bug_reports(bug_report_file_path)
return sort_bug_reports_by_commit_date(bug_reports)
def find_supertype_shas(types, class_name_lookup, variable_sha):
if variable_sha not in types:
return []
# variable_type = types[variable_sha]
variable_type = pickle.loads(types[variable_sha])
shas = []
for name in variable_type['superclassNames']:
if name in class_name_lookup:
shas.append(class_name_lookup[name])
for name in variable_type['interfaceNames']:
if name in class_name_lookup:
shas.append(class_name_lookup[name])
return shas
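# Breadth-first walk of the supertype graph: collect the sha of every type reachable
# from `sha` (including `sha` itself), without duplicates in the result.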
def find_types_shas(types, class_name_lookup, sha):
result = []
to_check = [sha]
while to_check:
current_sha = to_check.pop(0)
if current_sha not in result:
result.append(current_sha)
supertypes = find_supertype_shas(types, class_name_lookup, current_sha)
to_check.extend(supertypes)
return result
def get_indexes(asts, shas):
indexes = []
for sha in shas:
# indexes.append(asts[sha]['source'])
source_index = pickle.loads(asts[sha])['source']
indexes.append(source_index)
return indexes
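# For one class (ast_sha): build a sparse row per method by summing the raw-count vectors of all
# types (and their supertypes) used by that method, then append a class-level row that sums the
# per-method rows. Returns the stacked rows plus a lookup of their start/end row indices.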
def add_types_source_to_bug_report_data(data, data_prefix, class_name_lookup, ast_sha):
asts = UnQLite(data_prefix+"_ast_index_collection_index_db", flags = 0x00000100 | 0x00000001)
types = UnQLite(data_prefix+"_ast_types_collection_index_db", flags = 0x00000100 | 0x00000001)
# current_type = types[ast_sha]
# print "searching", ast_sha
current_type = pickle.loads(types[ast_sha])
# print "found", ast_sha
# print current_type['methodVariableTypes']
# exit(0)
types_per_method = current_type['methodVariableTypes']
cl = data.shape[1]
current_index = 0
start = current_index
enriched_apis = []
for method_types in types_per_method:
method_type_shas = []
for method_type in method_types:
if method_type in class_name_lookup:
method_type_shas.append(class_name_lookup[method_type])
supertypes_shas_per_type = [set(find_types_shas(types, class_name_lookup, s)) for s in method_type_shas]
indexes = []
for supertypes in supertypes_shas_per_type:
indexes.extend(get_indexes(asts, supertypes))
if indexes == []:
method_enriched_api = sparse.coo_matrix(np.zeros(cl).reshape(1,cl))
else:
method_enriched_api = sparse.coo_matrix(np.sum((data[indexes,:]), axis = 0))
enriched_apis.append(method_enriched_api)
if enriched_apis == []:
class_enriched_api = sparse.coo_matrix(np.zeros(cl).reshape(1,cl))
else:
class_enriched_api = sparse.coo_matrix(np.sum(enriched_apis, axis = 0))
enriched_apis.append(class_enriched_api)
current_index += len(enriched_apis)
asts.close()
types.close()
lookup = {}
lookup['enrichedApiStart'] = start
lookup['enrichedApiEnd'] = current_index - 1
enriched_apis_matrix = sparse.vstack(enriched_apis)
return (enriched_apis_matrix, lookup, ast_sha)
def vectorize_enriched_api(bug_report_fixing_commits, data_prefix):
work = []
for fixing_commit in bug_report_fixing_commits:
work.append((data_prefix, fixing_commit))
pool = Pool(12, maxtasksperchild=1)
r = list(tqdm(pool.imap(_f, work), total=len(work)))
print("r", len(r))
def _f(args):
return extract_enriched_api(args[0], args[1])
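# Assemble the feature matrix for one fixing commit: enriched-API rows for every source file in the
# snapshot, followed by the bug report row itself; saves the raw and TF-IDF matrices to .npz files
# together with a JSON lookup of per-file row indices.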
def extract_enriched_api(data_prefix, bug_report_full_sha):
data = sparse.load_npz(data_prefix+'_raw_count_data.npz')
bug_report_files_collection_db = UnQLite(data_prefix+"_bug_report_files_collection_db", flags = 0x00000100 | 0x00000001)
current_files = pickle.loads(bug_report_files_collection_db[bug_report_full_sha])
bug_report_files_collection_db.close()
bug_report_id = bug_report_full_sha[0:7]
shas = current_files['shas']
class_name_lookup = current_files['class_name_to_sha']
bug_report_data = []
bug_report_lookup = {}
n_rows = 0
for ast_sha in shas:
ast_data, lookup, current_ast_sha = add_types_source_to_bug_report_data(data, data_prefix, class_name_lookup, ast_sha)
current_index = n_rows
bug_report_data.append(ast_data)
for k in lookup:
lookup[k] += current_index
bug_report_lookup[current_ast_sha] = lookup
n_rows += ast_data.shape[0]
bug_report_row = get_bug_report(data_prefix, data, bug_report_id)
bug_report_data.append(bug_report_row)
bug_report_data_matrix = sparse.vstack(bug_report_data)
sparse.save_npz(data_prefix+'_'+bug_report_id+'_partial_enriched_api', bug_report_data_matrix)
with open(data_prefix+'_'+bug_report_id+'_partial_enriched_api_index_lookup', 'w') as outfile:
json.dump(bug_report_lookup, outfile)
transformer = TfidfTransformer()
tf_idf_data = transformer.fit_transform(bug_report_data_matrix)
sparse.save_npz(data_prefix+'_'+bug_report_id+'_tfidf_enriched_api', tf_idf_data)
# print "bug_report_id", bug_report_id
return bug_report_id
def get_bug_report(data_prefix, vectorized_data, bug_report_id):
bug_report_index_collection = UnQLite(data_prefix+"_bug_report_index_collection_index_db")
bug_report = pickle.loads(bug_report_index_collection[bug_report_id])
bug_report_index_collection.close()
index = bug_report['report']
return vectorized_data[index, :]
if __name__ == '__main__':
main()
|
[
"sklearn.feature_extraction.text.TfidfTransformer",
"unqlite.UnQLite",
"timeit.default_timer",
"scipy.sparse.load_npz",
"tqdm.tqdm",
"operator.itemgetter",
"json.load",
"numpy.sum",
"datetime.datetime.now",
"numpy.zeros",
"multiprocessing.Pool",
"pickle.loads",
"scipy.sparse.save_npz",
"scipy.sparse.vstack",
"json.dump"
] |
[((543, 558), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (556, 558), False, 'from timeit import default_timer\n'), ((846, 861), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (859, 861), False, 'from timeit import default_timer\n'), ((2114, 2147), 'pickle.loads', 'pickle.loads', (['types[variable_sha]'], {}), '(types[variable_sha])\n', (2126, 2147), False, 'import pickle\n'), ((3143, 3213), 'unqlite.UnQLite', 'UnQLite', (["(data_prefix + '_ast_index_collection_index_db')"], {'flags': '(256 | 1)'}), "(data_prefix + '_ast_index_collection_index_db', flags=256 | 1)\n", (3150, 3213), False, 'from unqlite import UnQLite\n'), ((3242, 3312), 'unqlite.UnQLite', 'UnQLite', (["(data_prefix + '_ast_types_collection_index_db')"], {'flags': '(256 | 1)'}), "(data_prefix + '_ast_types_collection_index_db', flags=256 | 1)\n", (3249, 3312), False, 'from unqlite import UnQLite\n'), ((3416, 3444), 'pickle.loads', 'pickle.loads', (['types[ast_sha]'], {}), '(types[ast_sha])\n', (3428, 3444), False, 'import pickle\n'), ((4883, 4911), 'scipy.sparse.vstack', 'sparse.vstack', (['enriched_apis'], {}), '(enriched_apis)\n', (4896, 4911), False, 'from scipy import sparse\n'), ((5174, 5202), 'multiprocessing.Pool', 'Pool', (['(12)'], {'maxtasksperchild': '(1)'}), '(12, maxtasksperchild=1)\n', (5178, 5202), False, 'from multiprocessing import Pool\n'), ((5420, 5472), 'scipy.sparse.load_npz', 'sparse.load_npz', (["(data_prefix + '_raw_count_data.npz')"], {}), "(data_prefix + '_raw_count_data.npz')\n", (5435, 5472), False, 'from scipy import sparse\n'), ((5508, 5579), 'unqlite.UnQLite', 'UnQLite', (["(data_prefix + '_bug_report_files_collection_db')"], {'flags': '(256 | 1)'}), "(data_prefix + '_bug_report_files_collection_db', flags=256 | 1)\n", (5515, 5579), False, 'from unqlite import UnQLite\n'), ((5617, 5682), 'pickle.loads', 'pickle.loads', (['bug_report_files_collection_db[bug_report_full_sha]'], {}), '(bug_report_files_collection_db[bug_report_full_sha])\n', (5629, 5682), False, 'import pickle\n'), ((6456, 6486), 'scipy.sparse.vstack', 'sparse.vstack', (['bug_report_data'], {}), '(bug_report_data)\n', (6469, 6486), False, 'from scipy import sparse\n'), ((6492, 6596), 'scipy.sparse.save_npz', 'sparse.save_npz', (["(data_prefix + '_' + bug_report_id + '_partial_enriched_api')", 'bug_report_data_matrix'], {}), "(data_prefix + '_' + bug_report_id + '_partial_enriched_api',\n bug_report_data_matrix)\n", (6507, 6596), False, 'from scipy import sparse\n'), ((6751, 6769), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {}), '()\n', (6767, 6769), False, 'from sklearn.feature_extraction.text import TfidfTransformer\n'), ((6842, 6933), 'scipy.sparse.save_npz', 'sparse.save_npz', (["(data_prefix + '_' + bug_report_id + '_tfidf_enriched_api')", 'tf_idf_data'], {}), "(data_prefix + '_' + bug_report_id + '_tfidf_enriched_api',\n tf_idf_data)\n", (6857, 6933), False, 'from scipy import sparse\n'), ((7093, 7155), 'unqlite.UnQLite', 'UnQLite', (["(data_prefix + '_bug_report_index_collection_index_db')"], {}), "(data_prefix + '_bug_report_index_collection_index_db')\n", (7100, 7155), False, 'from unqlite import UnQLite\n'), ((7171, 7227), 'pickle.loads', 'pickle.loads', (['bug_report_index_collection[bug_report_id]'], {}), '(bug_report_index_collection[bug_report_id])\n', (7183, 7227), False, 'import pickle\n'), ((1159, 1185), 'json.load', 'json.load', (['bug_report_file'], {}), '(bug_report_file)\n', (1168, 1185), False, 'import json\n'), ((1321, 1338), 'tqdm.tqdm', 'tqdm', 
(['bug_reports'], {}), '(bug_reports)\n', (1325, 1338), False, 'from tqdm import tqdm\n'), ((6694, 6731), 'json.dump', 'json.dump', (['bug_report_lookup', 'outfile'], {}), '(bug_report_lookup, outfile)\n', (6703, 6731), False, 'import json\n'), ((1655, 1668), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (1665, 1668), False, 'from operator import itemgetter\n'), ((2953, 2976), 'pickle.loads', 'pickle.loads', (['asts[sha]'], {}), '(asts[sha])\n', (2965, 2976), False, 'import pickle\n'), ((4593, 4622), 'numpy.sum', 'np.sum', (['enriched_apis'], {'axis': '(0)'}), '(enriched_apis, axis=0)\n', (4599, 4622), True, 'import numpy as np\n'), ((493, 516), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (514, 516), False, 'import datetime\n'), ((906, 929), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (927, 929), False, 'import datetime\n'), ((4344, 4376), 'numpy.sum', 'np.sum', (['data[indexes, :]'], {'axis': '(0)'}), '(data[indexes, :], axis=0)\n', (4350, 4376), True, 'import numpy as np\n'), ((4508, 4520), 'numpy.zeros', 'np.zeros', (['cl'], {}), '(cl)\n', (4516, 4520), True, 'import numpy as np\n'), ((4250, 4262), 'numpy.zeros', 'np.zeros', (['cl'], {}), '(cl)\n', (4258, 4262), True, 'import numpy as np\n')]
|
import PIL
import numpy as np
def to_grayscale(img):
return np.dot(img, [0.299, 0.587, 0.144])
def zero_center(img):
return img - 127.0
def crop(img, bottom=12, left=6, right=6):
height, width = img.shape
return img[0: height - bottom, left: width - right]
def save(img, path):
pil_img = PIL.Image.fromarray(img)
pil_img.save(path)
|
[
"numpy.dot",
"PIL.Image.fromarray"
] |
[((66, 100), 'numpy.dot', 'np.dot', (['img', '[0.299, 0.587, 0.144]'], {}), '(img, [0.299, 0.587, 0.144])\n', (72, 100), True, 'import numpy as np\n'), ((316, 340), 'PIL.Image.fromarray', 'PIL.Image.fromarray', (['img'], {}), '(img)\n', (335, 340), False, 'import PIL\n')]
|
"""
S3AIO Class
Array access to a single S3 object
"""
from __future__ import absolute_import
import SharedArray as sa
import zstd
from itertools import repeat, product
import numpy as np
from pathos.multiprocessing import ProcessingPool
from six.moves import zip
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from .s3io import S3IO, generate_array_name
class S3AIO(object):
def __init__(self, enable_compression=True, enable_s3=True, file_path=None, num_workers=30):
"""Initialise the S3 array IO interface.
:param bool enable_s3: Flag to store objects in s3 or disk.
True: store in S3
False: store on disk (for testing purposes)
        :param str file_path: The root directory for the emulated s3 buckets when enable_s3 is set to False.
:param int num_workers: The number of workers for parallel IO.
"""
self.s3io = S3IO(enable_s3, file_path, num_workers)
self.pool = ProcessingPool(num_workers)
self.enable_compression = enable_compression
def to_1d(self, index, shape):
"""Converts nD index to 1D index.
:param tuple index: N-D Index to be converted.
:param tuple shape: Shape to be used for conversion.
:return: Returns the 1D index.
"""
return np.ravel_multi_index(index, shape)
def to_nd(self, index, shape):
"""Converts 1D index to nD index.
:param tuple index: 1D Index to be converted.
:param tuple shape: Shape to be used for conversion.
:return: Returns the ND index.
"""
return np.unravel_index(index, shape)
def get_point(self, index_point, shape, dtype, s3_bucket, s3_key):
"""Gets a point in the nd array stored in S3.
Only works if compression is off.
:param tuple index_point: Index of the point to be retrieved.
:param tuple shape: Shape of the stored data.
:param numpy.dtype: dtype of the stored data.
:param str s3_bucket: S3 bucket name
:param str s3_key: S3 key name
:return: Returns the point data.
"""
item_size = np.dtype(dtype).itemsize
idx = self.to_1d(index_point, shape) * item_size
if self.enable_compression:
b = self.s3io.get_bytes(s3_bucket, s3_key)
cctx = zstd.ZstdDecompressor()
b = cctx.decompress(b)[idx:idx + item_size]
else:
b = self.s3io.get_byte_range(s3_bucket, s3_key, idx, idx + item_size)
a = np.frombuffer(b, dtype=dtype, count=-1, offset=0)
return a
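    # True for each dimension whose slice covers it completely (start 0, stop == size, unit step),
    # i.e. the slice is contiguous along that axis.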
def cdims(self, slices, shape):
return [sl.start == 0 and sl.stop == sh and (sl.step is None or sl.step == 1)
for sl, sh in zip(slices, shape)]
def get_slice(self, array_slice, shape, dtype, s3_bucket, s3_key): # pylint: disable=too-many-locals
"""Gets a slice of the nd array stored in S3.
Only works if compression is off.
:param tuple array_slice: tuple of slices to retrieve.
:param tuple shape: Shape of the stored data.
:param numpy.dtype: dtype of the stored data.
:param str s3_bucket: S3 bucket name
:param str s3_key: S3 key name
:return: Returns the data slice.
"""
# convert array_slice into into sub-slices of maximum contiguous blocks
# Todo:
# - parallelise reads and writes
# - option 1. get memory rows in parallel and merge
# - option 2. smarter byte range subsets depending on:
# - data size
# - data contiguity
if self.enable_compression:
return self.get_slice_by_bbox(array_slice, shape, dtype, s3_bucket, s3_key)
# truncate array_slice to shape
# array_slice = [slice(max(0, s.start) - min(sh, s.stop)) for s, sh in zip(array_sliced, shape)]
array_slice = [slice(max(0, s.start), min(sh, s.stop)) for s, sh in zip(array_slice, shape)]
cdim = self.cdims(array_slice, shape)
try:
end = cdim[::-1].index(False) + 1
except ValueError:
end = len(shape)
start = len(shape) - end
outer = array_slice[:-end]
outer_ranges = [range(s.start, s.stop) for s in outer]
outer_cells = list(product(*outer_ranges))
blocks = list(zip(outer_cells, repeat(array_slice[start:])))
item_size = np.dtype(dtype).itemsize
results = []
for cell, sub_range in blocks:
# print(cell, sub_range)
s3_start = (np.ravel_multi_index(cell + tuple([s.start for s in sub_range]), shape)) * item_size
s3_end = (np.ravel_multi_index(cell + tuple([s.stop - 1 for s in sub_range]), shape) + 1) * item_size
# print(s3_start, s3_end)
data = self.s3io.get_byte_range(s3_bucket, s3_key, s3_start, s3_end)
results.append((cell, sub_range, data))
result = np.empty([s.stop - s.start for s in array_slice], dtype=dtype)
offset = [s.start for s in array_slice]
for cell, sub_range, data in results:
t = [slice(x.start - o, x.stop - o) if isinstance(x, slice) else x - o for x, o in
zip(cell + tuple(sub_range), offset)]
if data.dtype != dtype:
data = np.frombuffer(data, dtype=dtype, count=-1, offset=0)
result[t] = data.reshape([s.stop - s.start for s in sub_range])
return result
def get_slice_mp(self, array_slice, shape, dtype, s3_bucket, s3_key): # pylint: disable=too-many-locals
"""Gets a slice of the nd array stored in S3 in parallel.
Only works if compression is off.
:param tuple array_slice: tuple of slices to retrieve.
:param tuple shape: Shape of the stored data.
:param numpy.dtype: dtype of the stored data.
:param str s3_bucket: S3 bucket name
:param str s3_key: S3 key name
:return: Returns the data slice.
"""
# pylint: disable=too-many-locals
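        # Worker: fetch one contiguous byte range from S3 and write it into the shared result array.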
def work_get_slice(block, array_name, offset, s3_bucket, s3_key, shape, dtype):
result = sa.attach(array_name)
cell, sub_range = block
item_size = np.dtype(dtype).itemsize
s3_start = (np.ravel_multi_index(cell + tuple([s.start for s in sub_range]), shape)) * item_size
s3_end = (np.ravel_multi_index(cell + tuple([s.stop - 1 for s in sub_range]), shape) + 1) * item_size
data = self.s3io.get_byte_range(s3_bucket, s3_key, s3_start, s3_end)
t = [slice(x.start - o, x.stop - o) if isinstance(x, slice) else x - o for x, o in
zip(cell + tuple(sub_range), offset)]
if data.dtype != dtype:
data = np.frombuffer(data, dtype=dtype, count=-1, offset=0)
# data = data.reshape([s.stop - s.start for s in sub_range])
result[t] = data.reshape([s.stop - s.start for s in sub_range])
if self.enable_compression:
return self.get_slice_by_bbox(array_slice, shape, dtype, s3_bucket, s3_key)
cdim = self.cdims(array_slice, shape)
try:
end = cdim[::-1].index(False) + 1
except ValueError:
end = len(shape)
start = len(shape) - end
outer = array_slice[:-end]
outer_ranges = [range(s.start, s.stop) for s in outer]
outer_cells = list(product(*outer_ranges))
blocks = list(zip(outer_cells, repeat(array_slice[start:])))
offset = [s.start for s in array_slice]
array_name = generate_array_name('S3AIO')
sa.create(array_name, shape=[s.stop - s.start for s in array_slice], dtype=dtype)
shared_array = sa.attach(array_name)
self.pool.map(work_get_slice, blocks, repeat(array_name), repeat(offset), repeat(s3_bucket),
repeat(s3_key), repeat(shape), repeat(dtype))
sa.delete(array_name)
return shared_array
def get_slice_by_bbox(self, array_slice, shape, dtype, s3_bucket, s3_key): # pylint: disable=too-many-locals
"""Gets a slice of the nd array stored in S3 by bounding box.
:param tuple array_slice: tuple of slices to retrieve.
:param tuple shape: Shape of the stored data.
:param numpy.dtype: dtype of the stored data.
:param str s3_bucket: S3 bucket name
:param str s3_key: S3 key name
:return: Returns the data slice.
"""
# Todo:
# - parallelise reads and writes
# - option 1. use get_byte_range_mp
# - option 2. smarter byte range subsets depending on:
# - data size
# - data contiguity
item_size = np.dtype(dtype).itemsize
s3_begin = (np.ravel_multi_index(tuple([s.start for s in array_slice]), shape)) * item_size
s3_end = (np.ravel_multi_index(tuple([s.stop - 1 for s in array_slice]), shape) + 1) * item_size
# if s3_end-s3_begin <= 5*1024*1024:
# d = self.s3io.get_byte_range(s3_bucket, s3_key, s3_begin, s3_end)
# else:
# d = self.s3io.get_byte_range_mp(s3_bucket, s3_key, s3_begin, s3_end, 5*1024*1024)
d = self.s3io.get_bytes(s3_bucket, s3_key)
if self.enable_compression:
cctx = zstd.ZstdDecompressor()
d = cctx.decompress(d)
d = np.frombuffer(d, dtype=np.uint8, count=-1, offset=0)
d = d[s3_begin:s3_end]
cdim = self.cdims(array_slice, shape)
try:
end = cdim[::-1].index(False) + 1
except ValueError:
end = len(shape)
start = len(shape) - end
outer = array_slice[:-end]
outer_ranges = [range(s.start, s.stop) for s in outer]
outer_cells = list(product(*outer_ranges))
blocks = list(zip(outer_cells, repeat(array_slice[start:])))
item_size = np.dtype(dtype).itemsize
results = []
for cell, sub_range in blocks:
s3_start = (np.ravel_multi_index(cell + tuple([s.start for s in sub_range]), shape)) * item_size
s3_end = (np.ravel_multi_index(cell + tuple([s.stop - 1 for s in sub_range]), shape) + 1) * item_size
data = d[s3_start - s3_begin:s3_end - s3_begin]
results.append((cell, sub_range, data))
result = np.empty([s.stop - s.start for s in array_slice], dtype=dtype)
offset = [s.start for s in array_slice]
for cell, sub_range, data in results:
t = [slice(x.start - o, x.stop - o) if isinstance(x, slice) else x - o for x, o in
zip(cell + tuple(sub_range), offset)]
if data.dtype != dtype:
data = np.frombuffer(data, dtype=dtype, count=-1, offset=0)
result[t] = data.reshape([s.stop - s.start for s in sub_range])
return result
|
[
"SharedArray.create",
"numpy.ravel_multi_index",
"itertools.product",
"zstd.ZstdDecompressor",
"SharedArray.delete",
"numpy.empty",
"numpy.unravel_index",
"numpy.frombuffer",
"numpy.dtype",
"SharedArray.attach",
"six.moves.zip",
"pathos.multiprocessing.ProcessingPool",
"itertools.repeat"
] |
[((998, 1025), 'pathos.multiprocessing.ProcessingPool', 'ProcessingPool', (['num_workers'], {}), '(num_workers)\n', (1012, 1025), False, 'from pathos.multiprocessing import ProcessingPool\n'), ((1340, 1374), 'numpy.ravel_multi_index', 'np.ravel_multi_index', (['index', 'shape'], {}), '(index, shape)\n', (1360, 1374), True, 'import numpy as np\n'), ((1635, 1665), 'numpy.unravel_index', 'np.unravel_index', (['index', 'shape'], {}), '(index, shape)\n', (1651, 1665), True, 'import numpy as np\n'), ((2551, 2600), 'numpy.frombuffer', 'np.frombuffer', (['b'], {'dtype': 'dtype', 'count': '(-1)', 'offset': '(0)'}), '(b, dtype=dtype, count=-1, offset=0)\n', (2564, 2600), True, 'import numpy as np\n'), ((4980, 5044), 'numpy.empty', 'np.empty', (['[(s.stop - s.start) for s in array_slice]'], {'dtype': 'dtype'}), '([(s.stop - s.start) for s in array_slice], dtype=dtype)\n', (4988, 5044), True, 'import numpy as np\n'), ((7656, 7743), 'SharedArray.create', 'sa.create', (['array_name'], {'shape': '[(s.stop - s.start) for s in array_slice]', 'dtype': 'dtype'}), '(array_name, shape=[(s.stop - s.start) for s in array_slice],\n dtype=dtype)\n', (7665, 7743), True, 'import SharedArray as sa\n'), ((7761, 7782), 'SharedArray.attach', 'sa.attach', (['array_name'], {}), '(array_name)\n', (7770, 7782), True, 'import SharedArray as sa\n'), ((7962, 7983), 'SharedArray.delete', 'sa.delete', (['array_name'], {}), '(array_name)\n', (7971, 7983), True, 'import SharedArray as sa\n'), ((9411, 9463), 'numpy.frombuffer', 'np.frombuffer', (['d'], {'dtype': 'np.uint8', 'count': '(-1)', 'offset': '(0)'}), '(d, dtype=np.uint8, count=-1, offset=0)\n', (9424, 9463), True, 'import numpy as np\n'), ((10370, 10434), 'numpy.empty', 'np.empty', (['[(s.stop - s.start) for s in array_slice]'], {'dtype': 'dtype'}), '([(s.stop - s.start) for s in array_slice], dtype=dtype)\n', (10378, 10434), True, 'import numpy as np\n'), ((2171, 2186), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (2179, 2186), True, 'import numpy as np\n'), ((2363, 2386), 'zstd.ZstdDecompressor', 'zstd.ZstdDecompressor', ([], {}), '()\n', (2384, 2386), False, 'import zstd\n'), ((4332, 4354), 'itertools.product', 'product', (['*outer_ranges'], {}), '(*outer_ranges)\n', (4339, 4354), False, 'from itertools import repeat, product\n'), ((4445, 4460), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (4453, 4460), True, 'import numpy as np\n'), ((6179, 6200), 'SharedArray.attach', 'sa.attach', (['array_name'], {}), '(array_name)\n', (6188, 6200), True, 'import SharedArray as sa\n'), ((7456, 7478), 'itertools.product', 'product', (['*outer_ranges'], {}), '(*outer_ranges)\n', (7463, 7478), False, 'from itertools import repeat, product\n'), ((7830, 7848), 'itertools.repeat', 'repeat', (['array_name'], {}), '(array_name)\n', (7836, 7848), False, 'from itertools import repeat, product\n'), ((7850, 7864), 'itertools.repeat', 'repeat', (['offset'], {}), '(offset)\n', (7856, 7864), False, 'from itertools import repeat, product\n'), ((7866, 7883), 'itertools.repeat', 'repeat', (['s3_bucket'], {}), '(s3_bucket)\n', (7872, 7883), False, 'from itertools import repeat, product\n'), ((7907, 7921), 'itertools.repeat', 'repeat', (['s3_key'], {}), '(s3_key)\n', (7913, 7921), False, 'from itertools import repeat, product\n'), ((7923, 7936), 'itertools.repeat', 'repeat', (['shape'], {}), '(shape)\n', (7929, 7936), False, 'from itertools import repeat, product\n'), ((7938, 7951), 'itertools.repeat', 'repeat', (['dtype'], {}), '(dtype)\n', (7944, 7951), False, 'from itertools 
import repeat, product\n'), ((8763, 8778), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (8771, 8778), True, 'import numpy as np\n'), ((9339, 9362), 'zstd.ZstdDecompressor', 'zstd.ZstdDecompressor', ([], {}), '()\n', (9360, 9362), False, 'import zstd\n'), ((9818, 9840), 'itertools.product', 'product', (['*outer_ranges'], {}), '(*outer_ranges)\n', (9825, 9840), False, 'from itertools import repeat, product\n'), ((9931, 9946), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (9939, 9946), True, 'import numpy as np\n'), ((2771, 2789), 'six.moves.zip', 'zip', (['slices', 'shape'], {}), '(slices, shape)\n', (2774, 2789), False, 'from six.moves import zip\n'), ((3984, 4007), 'six.moves.zip', 'zip', (['array_slice', 'shape'], {}), '(array_slice, shape)\n', (3987, 4007), False, 'from six.moves import zip\n'), ((4395, 4422), 'itertools.repeat', 'repeat', (['array_slice[start:]'], {}), '(array_slice[start:])\n', (4401, 4422), False, 'from itertools import repeat, product\n'), ((5347, 5399), 'numpy.frombuffer', 'np.frombuffer', (['data'], {'dtype': 'dtype', 'count': '(-1)', 'offset': '(0)'}), '(data, dtype=dtype, count=-1, offset=0)\n', (5360, 5399), True, 'import numpy as np\n'), ((6262, 6277), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (6270, 6277), True, 'import numpy as np\n'), ((6801, 6853), 'numpy.frombuffer', 'np.frombuffer', (['data'], {'dtype': 'dtype', 'count': '(-1)', 'offset': '(0)'}), '(data, dtype=dtype, count=-1, offset=0)\n', (6814, 6853), True, 'import numpy as np\n'), ((7519, 7546), 'itertools.repeat', 'repeat', (['array_slice[start:]'], {}), '(array_slice[start:])\n', (7525, 7546), False, 'from itertools import repeat, product\n'), ((9881, 9908), 'itertools.repeat', 'repeat', (['array_slice[start:]'], {}), '(array_slice[start:])\n', (9887, 9908), False, 'from itertools import repeat, product\n'), ((10737, 10789), 'numpy.frombuffer', 'np.frombuffer', (['data'], {'dtype': 'dtype', 'count': '(-1)', 'offset': '(0)'}), '(data, dtype=dtype, count=-1, offset=0)\n', (10750, 10789), True, 'import numpy as np\n')]
|