code (string, lengths 31–1.05M) | apis (list) | extract_api (string, lengths 97–1.91M) |
---|---|---|
from __future__ import print_function
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
try:
import cPickle as pickle
except ImportError:
import pickle
import copy
import numpy as np
from src.SpectralAnalysis import utils
from src.SpectralAnalysis import powerspectrum
from src.SpectralAnalysis import mcmc
from src.SpectralAnalysis import mle
from src.SpectralAnalysis import posterior
##########################################
#
# class Bayes: Bayesian data analysis for time series
#
# This class defines a Bayes object that can:
# - pick between two models using likelihood ratio tests
# - find periodicities by picking out the largest power in
# an observation/set of fake periodograms
# - search for QPOs via a model selection approach using LRTs
#
#
# TO DO: Need to add smoothing for picking out narrow signals
#
#
#
class Bayes(object):
""" Bayesian time series analysis
This class defines a Bayes object that can:
- pick between two models using likelihood ratio tests
- find periodicities by picking out the largest power in
an observation/set of fake periodograms
- search for QPOs via a model selection approach using LRTs
Parameters
----------
ps : powerspectrum.Powerspectrum
A periodogram object that is to be searched for QPOs
namestr: string, optional, default "test"
The string that will be used to identify this periodogram when
saving output (text files and plots)
plot: boolean, optional, default True
If True, several diagnostic plots will be saved to disk
m: integer, optional, default 1
If the periodogram used is the result of averaging several
individual periodograms (or bins), this changes the statistical
distributions. Set m to the number of periodograms
averaged to be sure to use the right distribution
Attributes
----------
Examples
--------
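A minimal, illustrative sketch (not part of the original code): `my_ps`
stands for a periodogram object (the `ps` argument described above)
built from a light curve elsewhere.
>>> bb = Bayes(my_ps, namestr="myobs", plot=False, m=1)
>>> # model comparison, periodicity and QPO searches are then run via
>>> # bb.choose_noise_model(...), bb.find_periodicity(...) and bb.find_qpo(...)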
"""
def __init__(self, ps, namestr='test', plot=True, m=1):
assert isinstance(ps, powerspectrum.PowerSpectrum), "ps must be of type powerspectrum.PowerSpectrum!"
self.ps = ps
self.namestr = namestr
self.plot = plot
self.m = m
def choose_noise_model(self, func1, par1, func2, par2,
fitmethod='bfgs',
nchain=10,
niter=5000,
nsim=1000,
covfactor=1.0,
use_emcee=True,
parname=None,
noise1=-1,
noise2=-1,
writefile=True):
"""
Fit two models func1 and func2, compute the likelihood
ratio at the maximum-a-posteriori parameters.
If func1 and func2 differ in complexity, the less complex
model should be func1.
Then sample the posterior distribution for the simpler
model (func1), pick parameter sets from the posterior
to create fake periodograms.
Fit each fake periodogram with the same models as the data, and
compute the likelihood ratios such that it is possible to
build up a posterior distribution for the likelihood
ratios and compute a posterior predictive p-value
that the data can be explained sufficiently with the simpler
model.
Parameters
----------
func1 : function
Parametric model for the periodogram.
Needs to be a function that takes an array of frequencies and
k parameters, and returns an array of model powers.
The function should include a parameter setting a constant background
level, and this parameter should be last!
par1 : {list, array-like}
Input guesses for the MAP fit using func1.
The number of elements *must* equal the number of parameters k
taken by func1.
func2 : function
Parametric model for the periodogram.
Needs to be a function that takes an array of frequencies and n
parameters, and returns an array of model powers
The function should include a parameter setting a constant background
level, and this parameter should be last!
par2 : {list, array-like}
Input guesses for the MAP fit using func2.
The number of elements *must* equal the number of parameters n
taken by func2.
fitmethod : string, optional, default bfgs
Allows the choice of different minimization algorithms.
Default uses BFGS, which is pretty robust for most purposes.
nchain : int, optional, default 10
The number of chains or walkers to use in MCMC.
For Metropolis-Hastings, use ~10-20 and many samples
For emcee, use as many as you can afford (~500) and fewer samples
niter : int, optional, default 5000
Sets the length of the Markov chains.
For Metropolis-Hastings, this needs to be large (>10000)
For emcee, this can be smaller, but it's a good idea to
verify that the chains have mixed.
nsim : int, optional, default 1000
The number of simulations to use when computing the
posterior distribution of the likelihood ratio.
Note that this also sets the maximum precision of the
posterior predictive p-value (for 1000 simulations, the
p-value can be constrained only to 0.001).
covfactor : float, optional, default 1.0
A tuning parameter for the MCMC step. Used only in
Metropolis-Hastings.
use_emcee : boolean, optional, default True
If True (STRONGLY RECOMMENDED), use the emcee package
for running MCMC. If False, use Metropolis-Hastings.
parname : list, optional, default None
Include a list of strings here to set parameter names for
plotting
noise1, noise2 : int, optional, default -1
The index for the noise parameter in func1 and func2.
In the pre-defined models, this index is *always* -1.
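Examples
--------
An illustrative sketch only: `bb` is a Bayes instance, and `plc` and
`bplc` are placeholders for user-supplied model functions (e.g. a power
law plus constant and a bent power law plus constant), each with the
constant background level as its last parameter.
>>> psfit, fakeper, summary = bb.choose_noise_model(
...     plc, [2.0, 3.0, 0.5], bplc, [2.0, 3.0, 1.0, 3.0, 0.5],
...     nchain=500, niter=200, nsim=1000, use_emcee=True)
>>> p_lrt, p_lrt_err = summary["p_lrt"]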
"""
resfilename = self.namestr + "_choosenoisemodel.dat"
resfile = utils.TwoPrint(resfilename)
### make strings for function names from function definition
func1name = "model1"
func2name = "model2"
### step 1: fit both models to observation and compute LRT
psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True)
obslrt = psfit.compute_lrt(func1, par1, func2, par2, noise1=noise1, noise2=noise2, m=self.m)
### get out best fit parameters and associated quantities
fitpars1 = getattr(psfit, func1name + 'fit')
fitpars2 = getattr(psfit, func2name + 'fit')
if self.plot:
### plot the periodogram and best fit models
psfit.plotfits(fitpars1, fitpars2, namestr=self.namestr, log=True)
if self.m == 1:
lpost = posterior.PerPosterior(self.ps, func1)
else:
lpost = posterior.StackPerPosterior(self.ps, func1, self.m)
### Step 2: Set up Markov Chain Monte Carlo Simulations
### of model 1:
mcobs = mcmc.MarkovChainMonteCarlo(self.ps.freq, self.ps.ps, lpost,
topt=fitpars1['popt'],
tcov=fitpars1['cov'],
covfactor=covfactor,
niter=niter,
nchain=nchain,
parname=parname,
check_conv=True,
namestr=self.namestr,
use_emcee=use_emcee,
plot=self.plot,
printobj=resfile,
m=self.m)
### Step 3: create fake periodograms out of MCMCs
fakeper = mcobs.simulate_periodogram(nsim=nsim)
### empty lists for simulated quantities of interest:
sim_lrt, sim_deviance, sim_ksp, sim_maxpow, sim_merit, sim_fpeak, sim_y0, sim_srat = [], [], [], [], [], [], [], []
### Step 4: Fit fake periodograms and read out parameters of interest from each fit:
for i, x in enumerate(fakeper):
try:
fitfake = mle.PerMaxLike(x, fitmethod=fitmethod, obs=False)
lrt = fitfake.compute_lrt(func1, par1, func2, par2, noise1=noise1, noise2=noise2, m=self.m)
# resfile('Fitting of fake periodogram ' + str(i) + ' failed! Returning ...')
# return psfit, fakeper, mcobs
sim_pars1 = getattr(fitfake, func1name + 'fit')
sim_pars2 = getattr(fitfake, func2name + 'fit')
# if lrt > 20:
# fitfake.plotfits(sim_pars1, sim_pars2, namestr=self.namestr+'_'+str(i))
sim_lrt.append(lrt)
sim_deviance.append(sim_pars1['deviance'])
sim_ksp.append(sim_pars1['ksp'])
sim_maxpow.append(sim_pars1['maxpow'])
sim_merit.append(sim_pars1['merit'])
sim_fpeak.append(sim_pars1['maxfreq'])
sim_y0.append(sim_pars1['mfit'][sim_pars1['maxind']])
sim_srat.append(sim_pars1['sobs'])
except KeyboardInterrupt:
break
if len(sim_maxpow) == 0:
resfile("Analysis of Burst failed! Returning ...")
return False, False, False
else:
### Step 5: Compute Bayesian posterior probabilities of individual quantities
p_maxpow = float(len([x for x in sim_maxpow if x > fitpars1['maxpow']])) / float(len(sim_maxpow))
p_deviance = float(len([x for x in sim_deviance if x > fitpars1['deviance']])) / float(len(sim_deviance))
p_ksp = float(len([x for x in sim_ksp if x > fitpars1['ksp']])) / float(len(sim_ksp))
p_merit = float(len([x for x in sim_merit if x > fitpars1['merit']])) / float(len(sim_merit))
p_lrt = float(len([x for x in sim_lrt if x > obslrt])) / float(len(sim_lrt))
p_srat = float(len([x for x in sim_srat if x > fitpars1['sobs']])) / float(len(sim_srat))
resfile('simulated srat: ' + str(sim_srat))
resfile('observed srat: ' + str(fitpars1['sobs']))
resfile("p(LRT) = " + str(p_lrt))
resfile("KSP(obs) = " + str(fitpars1['ksp']))
resfile("mean(sim_ksp) = " + str(np.mean(sim_ksp)))
resfile("Merit(obs) = " + str(fitpars1['merit']))
resfile("mean(sim_merit) = " + str(np.mean(sim_merit)))
resfile("Srat(obs) = " + str(fitpars1['sobs']))
resfile("mean(sim_srat) = " + str(np.mean(sim_srat)))
### Step 6: Compute errors of Bayesian posterior probabilities
pmaxpow_err = np.sqrt(p_maxpow * (1.0 - p_maxpow) / float(len(sim_ksp)))
pdeviance_err = np.sqrt(p_deviance * (1.0 - p_deviance) / float(len(sim_ksp)))
pksp_err = np.sqrt(p_ksp * (1.0 - p_ksp) / float(len(sim_ksp)))
pmerit_err = np.sqrt(p_merit * (1.0 - p_merit) / float(len(sim_ksp)))
plrt_err = np.sqrt(p_lrt * (1.0 - p_lrt) / float(len(sim_ksp)))
psrat_err = np.sqrt(p_srat * (1.0 - p_srat) / float(len(sim_ksp)))
### Display results on screen and make funky plots
resfile("Bayesian p-value for maximum power P_max = " + str(p_maxpow) + " +/- " + str(pmaxpow_err))
resfile("Bayesian p-value for deviance D = " + str(p_deviance) + " +/- " + str(pdeviance_err))
resfile("Bayesian p-value for KS test: " + str(p_ksp) + " +/- " + str(pksp_err))
resfile("Bayesian p-value for Merit function: " + str(p_merit) + " +/- " + str(pmerit_err))
resfile("Bayesian p-value for the np.sum of residuals: " + str(p_srat) + " +/- " + str(psrat_err))
resfile("Bayesian p-value for Likelihood Ratio: " + str(p_lrt) + " +/- " + str(plrt_err))
if self.plot:
n, bins, patches = plt.hist(sim_lrt, bins=100, normed=True, color="cyan", histtype='stepfilled')
plt.vlines(obslrt, 0.0, 0.8 * max(n), lw=4, color='navy')
plt.savefig(self.namestr + '_lrt.png', format='png')
plt.close()
summary = {"p_lrt": [p_lrt, plrt_err], "p_maxpow": [p_maxpow, pmaxpow_err],
"p_deviance": [p_deviance, pdeviance_err], "p_ksp": [p_ksp, pksp_err],
"p_merit": [p_merit, pmerit_err], "p_srat": [p_srat, psrat_err], "postmean": mcobs.mean,
"posterr": mcobs.std, "postquantiles": mcobs.ci, "rhat": mcobs.rhat, "acor": mcobs.acor,
"acceptance": mcobs.acceptance}
return psfit, fakeper, summary
def find_periodicity(self, func, par,
fitmethod='bfgs',
nchain=10,
niter=5000,
nsim=1000,
covfactor=1.0,
parname=None,
noise=-1,
use_emcee=True,
searchfreq=None):
"""
Find periodicities in observed data and compute significance via MCMCs.
First, fit the periodogram with func and compute the
maximum-a-posteriori (MAP) estimate.
Divide the data by the MAP model; for a perfect data-model fit,
the resulting residuals should follow a chi-square distribution
with two degrees of freedom.
Find the highest power in the residuals and its frequency.
Sample the posterior distribution of parameters for func using MCMC,
and create fake periodograms from samples of the posterior.
For each fake periodogram, find the MAP estimate, divide out the
MAP model and find the highest power in that periodogram.
Create a posterior distribution of maximum powers and compute
a posterior predictive p-value of seeing the maximum power
in the data under the null hypothesis (no QPO).
Parameters
----------
func : function
Parametric model for the periodogram.
Needs to be a function that takes an array of frequencies and
k parameters, and returns an array of model powers.
The function should include a parameter setting a constant background
level, and this parameter should be last!
par : {list, array-like}
Input guesses for the parameters taken by func.
The number of elements in this list or array must match the
number of parameters k taken by func.
fitmethod : string, optional, default "bfgs"
Choose the optimization algorithm used when minimizing the
-log-likelihood. Choices are listed in mle.py, but the default
(bfgs) should be sufficient for most applications.
nchain : int, optional, default 10
The number of chains or walkers to use in MCMC.
For Metropolis-Hastings, use ~10-20 and many samples
For emcee, use as many as you can afford (~500) and fewer samples
niter : int, optional, default 5000
Sets the length of the Markov chains.
For Metropolis-Hastings, this needs to be large (>10000)
For emcee, this can be smaller, but it's a good idea to
verify that the chains have mixed.
nsim : int, optional, default 1000
The number of simulations to use when computing the
posterior distribution of the likelihood ratio.
Note that this also sets the maximum precision of the
posterior predictive p-value (for 1000 simulations, the
p-value can be constrained only to 0.001).
covfactor : float, optional, default 1.0
A tuning parameter for the MCMC step. Used only in
Metropolis-Hastings.
parname : list, optional, default None
Include a list of strings here to set parameter names for
plotting
noise: int, optional, default -1
The index for the noise parameter in func.
In the pre-defined models, this index is *always* -1.
use_emcee : boolean, optional, default True
If True (STRONGLY RECOMMENDED), use the emcee package
for running MCMC. If False, use Metropolis-Hastings.
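searchfreq : list of floats, optional, default None
The frequencies (in Hz) at which upper limits on power and rms
amplitude are reported; if None, [40., 70., 100., 300., 500., 1000.]
is used.
Examples
--------
An illustrative sketch only: `bb` is a Bayes instance and `plc` a
placeholder broadband-noise model (constant background as its last
parameter).
>>> results = bb.find_periodicity(plc, [2.0, 3.0, 0.5],
...                               nchain=500, niter=200, nsim=1000)
>>> sim_maxpow, p_maxpow, p_maxpow_err = results["p_maxpow"]
>>> tr_upper_limit = results["maxpow_ul"]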
"""
## the file name where the output will be stored
resfilename = self.namestr + "_findperiodicity_results.dat"
## open the output log file
resfile = utils.TwoPrint(resfilename)
### step 1: fit model to observation
psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True)
fitpars = psfit.mlest(func, par, obs=True, noise=noise, m=self.m)
bindict = fitpars['bindict']
# print('popt: ' + str(fitpars['popt']))
## which posterior do I need to use?
if self.m == 1:
lpost = posterior.PerPosterior(self.ps, func)
else:
lpost = posterior.StackPerPosterior(self.ps, func, self.m)
### Step 2: Set up Markov Chain Monte Carlo Simulations
### of model 1:
mcobs = mcmc.MarkovChainMonteCarlo(self.ps.freq, self.ps.ps, lpost,
topt=fitpars['popt'],
tcov=fitpars['cov'],
covfactor=covfactor,
niter=niter,
nchain=nchain,
parname=parname,
check_conv=True,
namestr=self.namestr,
use_emcee=True,
plot=self.plot,
printobj=resfile,
m=self.m)
### Step 3: create fake periodograms out of MCMCs
fakeper = mcobs.simulate_periodogram(nsim=nsim)
sim_pars_all, sim_deviance, sim_ksp, sim_fpeak, sim_srat, \
sim_maxpow, sim_merit, sim_y0, sim_s3max, sim_s5max, sim_s11max = [], [], [], [], [], [], [], [], [], [], []
bmax = int(self.ps.freq[-1] / (2.0 * (self.ps.freq[1] - self.ps.freq[0])))
bins = [1, 3, 5, 7, 10, 15, 20, 30, 50, 70, 100, 200, 300, 500, 700, 1000]
binlist = [r for r in fitpars["bindict"].keys()]
nbins = len(binlist) // 4
sain = copy.copy(fitpars['popt'])
# print('popt2: ' + str(fitpars['popt']))
### Step 4: Fit fake periodograms:
for i, x in enumerate(fakeper):
try:
# print('popt' + str(i) + 'a : ' + str(fitpars['popt']))
fitfake = mle.PerMaxLike(x, fitmethod=fitmethod, obs=False)
# print('popt' + str(i) + 'b : ' + str(fitpars['popt']))
sim_pars = fitfake.mlest(func, sain, obs=False, noise=noise, m=self.m)
# print('popt' + str(i) + 'c : ' + str(fitpars['popt']))
sim_pars_all.append(sim_pars)
sim_deviance.append(sim_pars['deviance'])
sim_ksp.append(sim_pars['ksp'])
sim_maxpow.append(sim_pars['maxpow'])
sim_merit.append(sim_pars['merit'])
sim_fpeak.append(sim_pars['maxfreq'])
sim_y0.append(sim_pars['mfit'][sim_pars['maxind']])
sim_srat.append(sim_pars['sobs'])
sim_s3max.append(sim_pars['s3max'])
sim_s5max.append(sim_pars['s5max'])
sim_s11max.append(sim_pars['s11max'])
except KeyboardInterrupt:
break
# except:
# print("Simulation failed! Continuing ...")
# continue
# print('popt' + str(i) + 'd : ' + str(fitpars['popt']))
# print('popt3: ' + str(fitpars['popt']))
### upper limit is the power in the sorted array where p_maxpow would be 0.05
### i.e. when only 0.05*nsim simulations are higher than this
### note: sometimes simulations fail, therefore the 5% limit should be 0.05*len(sims)
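### e.g. if 1000 simulations succeeded, fiveperlim = 50 and the upper limit
### is the entry at index 950 of the sorted simulated maxima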
fiveperlim = int(0.05 * len(sim_maxpow))
if fiveperlim == 0:
resfile('Warning! Too few simulations to compute five percent limit reliably!')
fiveperlim = 1
ninetyfiveperlim = len(sim_maxpow) - fiveperlim
# print('popt4: ' + str(fitpars['popt']))
bindicts = [x["bindict"] for x in sim_pars_all]
### get out binned powers:
maxpows_all = {}
binprob = {}
for b in bins[:nbins]:
binps = fitpars['bindict']['bin' + str(b)]
bmaxpow = np.array([x["bmax" + str(b)] for x in bindicts])
maxpows_all["bin" + str(b)] = bmaxpow
bindict['sim_bmaxpow' + str(b)] = bmaxpow
p_bmaxpow = float(len([x for x in bmaxpow if x > fitpars['bindict']["bmax" + str(b)]])) / float(
len(bmaxpow))
bindict["p_maxpow" + str(b)] = p_bmaxpow
bmaxpow_err = np.sqrt(p_bmaxpow * (1.0 - p_bmaxpow) / float(len(bmaxpow)))
bindict['p_maxpow' + str(b) + 'err'] = bmaxpow_err
sim_bmaxpow_sort = np.msort(bmaxpow)
### note: this is the limit for 2*I/S --> multiply by S to get powers for each frequency
### Like everything else, this is n-trial corrected!
# print('len(bmaxpow_sort) : ' + str(len(sim_bmaxpow_sort)))
resfile('ninetyfiveperlim: ' + str(ninetyfiveperlim))
bmaxpow_ul = sim_bmaxpow_sort[ninetyfiveperlim]
bindict['bmax' + str(b) + '_ul'] = bmaxpow_ul
resfile('The posterior p-value for the maximum residual power for a binning of ' + str(
self.ps.df * b) + 'Hz is p = ' + str(p_bmaxpow) + ' +/- ' + str(bmaxpow_err))
resfile('The corresponding value of the T_R statistic at frequency f = ' + str(
fitpars["bindict"]["bmaxfreq" + str(b)]) + ' is 2I/S = ' + str(fitpars['bindict']["bmax" + str(b)]))
resfile('The upper limit on the T_R statistic is 2I/S = ' + str(bmaxpow_ul))
### now turn upper limit into an rms amplitude:
## first compute broadband noise model for binned frequencies
bintemplate = func(fitpars['bindict']['bin' + str(b)].freq, *fitpars['popt'])
resfile("bintemplate[0]: " + str(bintemplate[0]))
## then compute upper limits for powers I_j depending on frequency
binpowers = bmaxpow_ul * bintemplate / 2.0 - bintemplate
## now compute rms amplitude at 40, 70, 100 and 300 Hz
## first, convert powers into rms normalization, if they're not already
if self.ps.norm == 'leahy':
binpowers = binpowers / (self.ps.df * b * self.ps.nphots)
elif self.ps.norm == 'variance':
binpowers = binpowers * self.ps.n ** 2.0 / (self.ps.df * b * self.ps.nphots ** 2.0)
# print('len(binps.freq): ' + str(len(binps.freq)))
# print('len(binpowers): ' + str(len(binpowers)))
if searchfreq is None:
searchfreq = [40.0, 70.0, 100.0, 300.0, 500.0, 1000.0]
## for 40 Hz:
print(searchfreq)
for bc in searchfreq:
if bc > (binps.freq[1] - binps.freq[0]):
bind = np.searchsorted(binps.freq, bc) - 1
bpow = binpowers[bind]
brms = np.sqrt(bpow * b * self.ps.df)
resfile('The upper limit on the power at ' + str(bc) +
'Hz for a binning of ' + str(b) + ' is P = ' +
str(bpow * (self.ps.df * b * self.ps.nphots)))
resfile('The upper limit on the rms amplitude at ' + str(bc) +
'Hz for a binning of ' + str(b) + ' is rms = ' + str(brms))
bindict['bin' + str(b) + '_ul_%.4fHz' % bc] = brms
else:
continue
### Step 5: Compute Bayesian posterior probabilities of individual quantities
p_maxpow = float(len([x for x in sim_maxpow if x > fitpars['maxpow']])) / float(len(sim_maxpow))
p_deviance = float(len([x for x in sim_deviance if x > fitpars['deviance']])) / float(len(sim_deviance))
p_ksp = float(len([x for x in sim_ksp if x > fitpars['ksp']])) / float(len(sim_ksp))
p_merit = float(len([x for x in sim_merit if x > fitpars['merit']])) / float(len(sim_merit))
p_srat = float(len([x for x in sim_srat if x > fitpars['sobs']])) / float(len(sim_srat))
p_s3max = float(len([x for x in sim_s3max if x > fitpars['s3max']])) / float(len(sim_s3max))
p_s5max = float(len([x for x in sim_s5max if x > fitpars['s5max']])) / float(len(sim_s5max))
p_s11max = float(len([x for x in sim_s11max if x > fitpars['s11max']])) / float(len(sim_s11max))
### sort maximum powers from lowest to highest
sim_maxpow_sort = np.msort(sim_maxpow)
sim_s3max_sort = np.msort(sim_s3max)
sim_s5max_sort = np.msort(sim_s5max)
sim_s11max_sort = np.msort(sim_s11max)
### note: this is the limit for 2*I/S --> multiply by S to get powers for each frequency
### Like everything else, this is n-trial corrected!
maxpow_ul = sim_maxpow_sort[ninetyfiveperlim]
### Step 6: Compute errors of Bayesian posterior probabilities
pmaxpow_err = np.sqrt(p_maxpow * (1.0 - p_maxpow) / float(len(sim_ksp)))
pdeviance_err = np.sqrt(p_deviance * (1.0 - p_deviance) / float(len(sim_ksp)))
pksp_err = np.sqrt(p_ksp * (1.0 - p_ksp) / float(len(sim_ksp)))
pmerit_err = np.sqrt(p_merit * (1.0 - p_merit) / float(len(sim_ksp)))
psrat_err = np.sqrt(p_srat * (1.0 - p_srat) / float(len(sim_ksp)))
ps3max_err = np.sqrt(p_s3max * (1.0 - p_s3max) / float(len(sim_ksp)))
ps5max_err = np.sqrt(p_s5max * (1.0 - p_s5max) / float(len(sim_ksp)))
ps11max_err = np.sqrt(p_s11max * (1.0 - p_s11max) / float(len(sim_ksp)))
### Display results on screen and make funky plots
resfile("Bayesian p-value for maximum power P_max = " + str(p_maxpow) + " +/- " + str(pmaxpow_err))
# resfile('Upper limit on maximum signal power P_max_ul = ' + str(maxpow_ul))
resfile("Bayesian p-value for maximum power P_max = " + str(p_s3max) + " +/- " + str(ps3max_err))
# resfile('Upper limit on maximum signal power P_max_ul = ' + str(s3max_ul))
resfile("Bayesian p-value for maximum power P_max = " + str(p_s5max) + " +/- " + str(ps5max_err))
# resfile('Upper limit on maximum signal power P_max_ul = ' + str(s5max_ul))
resfile("Bayesian p-value for maximum power P_max = " + str(p_s11max) + " +/- " + str(ps11max_err))
# resfile('Upper limit on maximum signal power P_max_ul = ' + str(s11max_ul))
resfile("Bayesian p-value for deviance D = " + str(p_deviance) + " +/- " + str(pdeviance_err))
resfile("Bayesian p-value for KS test: " + str(p_ksp) + " +/- " + str(pksp_err))
resfile("Bayesian p-value for Merit function: " + str(p_merit) + " +/- " + str(pmerit_err))
resfile("Bayesian p-value for the np.sum of residuals: " + str(p_srat) + " +/- " + str(psrat_err))
if self.plot:
plt.subplot(2, 2, 1)
n, bins, patches = plt.hist(sim_maxpow, bins=100, normed=True, color="cyan", histtype='stepfilled')
xmin, xmax = min(min(bins), fitpars['maxpow']) / 1.2, max(25, fitpars['maxpow'] * 1.2)
plt.axis([xmin, xmax, 0.0, max(n)])
plt.vlines(fitpars['maxpow'], 0.0, max(n), lw=2, color='navy')
plt.title('unsmoothed data', fontsize=12)
plt.subplot(2, 2, 2)
n, bins, patches = plt.hist(sim_s3max, bins=100, normed=True, color="cyan", histtype='stepfilled')
xmin, xmax = min(min(bins), fitpars['s3max']) / 1.2, max(25, fitpars['s3max'] * 1.2)
plt.axis([xmin, xmax, 0.0, max(n)])
plt.vlines(fitpars['s3max'], 0.0, max(n), lw=2, color='navy')
plt.title('smoothed (3) data', fontsize=12)
plt.subplot(2, 2, 3)
n, bins, patches = plt.hist(sim_s5max, bins=100, normed=True, color="cyan", histtype='stepfilled')
xmin, xmax = min(min(bins), fitpars['s5max']) / 1.2, max(25, fitpars['s5max'] * 1.2)
plt.axis([xmin, xmax, 0.0, max(n)])
plt.vlines(fitpars['s5max'], 0.0, max(n), lw=2, color='navy')
plt.title('smoothed (5) data/model outlier', fontsize=12)
plt.subplot(2, 2, 4)
n, bins, patches = plt.hist(sim_s11max, bins=100, normed=True, color="cyan", histtype='stepfilled')
xmin, xmax = min(min(bins), fitpars['s11max']) / 1.2, max(25, fitpars['s11max'] * 1.2)
plt.axis([xmin, xmax, 0.0, max(n)])
plt.vlines(fitpars['s11max'], 0.0, max(n), lw=2, color='navy')
plt.title('smoothed (11) data', fontsize=12)
plt.savefig(self.namestr + '_maxpow.png', format='png')
plt.close()
results = {"fitpars": fitpars, 'bindict': bindict, 'maxpows_all': maxpows_all, 'mcobs': mcobs,
'p_maxpow': [sim_maxpow, p_maxpow, pmaxpow_err], 'maxpow_ul': maxpow_ul,
'p_s3max': [sim_s3max, p_s3max, ps3max_err], 'p_s5max': [sim_s5max, p_s5max, ps5max_err],
'p_s11max': [sim_s11max, p_s11max, ps11max_err], 'p_merit': [p_merit, pmerit_err],
'p_srat': [p_srat, psrat_err], 'p_deviance': [p_deviance, pdeviance_err],
"postmean": mcobs.mean, "posterr": mcobs.std, "postquantiles": mcobs.ci, "rhat": mcobs.rhat,
"acor": mcobs.acor, "acceptance": mcobs.acceptance}
return results
def find_qpo(self, func, ain,
fitmethod='constbfgs',
nchain=10,
niter=5000,
nsim=1000,
covfactor=1.0,
parname=None,
plotstr=None,
use_emcee=True):
"""
Find QPOs by fitting a QPO + background model to *every*
frequency.
NOTE: I rarely ever use this because it's really computationally
expensive.
Parameters
----------
func : function
Parametric model for the periodogram.
Needs to be a function that takes an array of frequencies and
k parameters, and returns an array of model powers.
The function should include a parameter setting a constant background
level, and this parameter should be last!
ain : {list, array-like}
Input guesses for the parameters taken by func.
The number of elements in this list or array must match the
number of parameters k taken by func.
fitmethod : string, optional, default "bfgs"
Choose the optimization algorithm used when minimizing the
-log-likelihood. Choices are listed in mle.py, but the default
(bfgs) should be sufficient for most applications.
nchain : int, optional, default 10
The number of chains or walkers to use in MCMC.
For Metropolis-Hastings, use ~10-20 and many samples
For emcee, use as many as you can afford (~500) and fewer samples
niter : int, optional, default 5000
Sets the length of the Markov chains.
For Metropolis-Hastings, this needs to be large (>10000)
For emcee, this can be smaller, but it's a good idea to
verify that the chains have mixed.
nsim : int, optional, default 1000
The number of simulations to use when computing the
posterior distribution of the likelihood ratio.
Note that this also sets the maximum precision of the
posterior predictive p-value (for 1000 simulations, the
p-value can be constrained only to 0.001).
covfactor : float, optional, default 1.0
A tuning parameter for the MCMC step. Used only in
Metropolis-Hastings.
parname : list, optional, default None
Include a list of strings here to set parameter names for
plotting
plotstr : string, optional, default None
The string prepended to the plot file names produced for the
simulations; if None, self.namestr is used.
use_emcee : boolean, optional, default True
If True (STRONGLY RECOMMENDED), use the emcee package
for running MCMC. If False, use Metropolis-Hastings.
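Examples
--------
An illustrative sketch only (this search is computationally expensive,
see the note above): `bb` is a Bayes instance and `plc` a placeholder
broadband-noise model.
>>> summary = bb.find_qpo(plc, [2.0, 3.0, 0.5], nsim=100)
>>> p_lrt, p_lrt_err = summary["p_lrt"]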
"""
if plotstr is None:
plotstr = self.namestr
funcname = str(func).split()[1]
# print("<< --- len(self.ps beginning): " + str(len(self.ps.ps)))
### step 1: fit model to observation
psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True)
fitpars = psfit.mlest(func, ain, obs=True, noise=-1, m=self.m)
# print("<< --- len(self.ps beginning): " + str(len(self.ps.ps)))
if self.m == 1:
lpost = posterior.PerPosterior(self.ps, func)
else:
lpost = posterior.StackPerPosterior(self.ps, func, self.m)
### Step 2: Set up Markov Chain Monte Carlo Simulations
### of model 1:
mcobs = mcmc.MarkovChainMonteCarlo(self.ps.freq, self.ps.ps, lpost,
topt=fitpars['popt'],
tcov=fitpars['cov'],
covfactor=covfactor,
niter=niter,
nchain=nchain,
parname=parname,
check_conv=True,
namestr=self.namestr,
use_emcee=True,
plot=self.plot,
m=self.m)
### find optimum QPO values for the real data
obslrt, optpars, qpopars = psfit.find_qpo(func, ain, plot=True, obs=True, plotname=self.namestr + '_loglikes')
### simulate lots of realizations of the broadband noise model from MCMCs
funcfake = mcobs.simulate_periodogram(nsim=nsim)
### empty lists to store simulated LRTS and parameters in
sim_lrt, sim_optpars, sim_qpopars, sim_deviance, sim_ksp, sim_merit, sim_srat = [], [], [], [], [], [], []
simno = 0
### run QPO search on each and return likelihood ratios parameters for each
for x in funcfake:
try:
simno = simno + 1
sim_psfit = mle.PerMaxLike(x, fitmethod='constbfgs', obs=False)
slrt, soptpars, sqpopars = sim_psfit.find_qpo(func, ain, obs=False, plot=True,
plotname=plotstr + '_sim' + str(simno) + '_qposearch')
sim_lrt.append(slrt)
sim_optpars.append(soptpars)
sim_qpopars.append(sqpopars)
sim_deviance.append(soptpars['deviance'])
sim_ksp.append(soptpars['ksp'])
sim_merit.append(soptpars['merit'])
sim_srat.append(soptpars['sobs'])
except KeyboardInterrupt:
break
### Step 5: Compute Bayesian posterior probabilities of individual quantities
p_deviance = float(len([x for x in sim_deviance if x > optpars['deviance']])) / float(len(sim_deviance))
p_ksp = float(len([x for x in sim_ksp if x > optpars['ksp']])) / float(len(sim_ksp))
p_merit = float(len([x for x in sim_merit if x > optpars['merit']])) / float(len(sim_merit))
p_lrt = float(len([x for x in sim_lrt if x > obslrt])) / float(len(sim_lrt))
p_srat = float(len([x for x in sim_srat if x > optpars['sobs']])) / float(len(sim_srat))
print("p(LRT) = " + str(p_lrt))
# print("LRT(obs) = " + str(obslrt))
# print("mean(sim_lrt) = " + str(np.mean(sim_lrt)))
# print("Deviance(obs) = " + str(fitpars1['deviance']))
# print("mean(sim_deviance) = " + str(np.mean(sim_deviance)))
print("KSP(obs) = " + str(optpars['ksp']))
print("mean(sim_ksp) = " + str(np.mean(sim_ksp)))
print("Merit(obs) = " + str(optpars['merit']))
print("mean(sim_merit) = " + str(np.mean(sim_merit)))
print("Srat(obs) = " + str(optpars['sobs']))
print("mean(sim_srat) = " + str(np.mean(sim_srat)))
### Step 6: Compute errors of Bayesian posterior probabilities
pdeviance_err = np.sqrt(p_deviance * (1.0 - p_deviance) / float(len(sim_ksp)))
pksp_err = np.sqrt(p_ksp * (1.0 - p_ksp) / float(len(sim_ksp)))
pmerit_err = np.sqrt(p_merit * (1.0 - p_merit) / float(len(sim_ksp)))
plrt_err = np.sqrt(p_lrt * (1.0 - p_lrt) / float(len(sim_ksp)))
psrat_err = np.sqrt(p_srat * (1.0 - p_srat) / float(len(sim_ksp)))
### Display results on screen and make funky plots
print("Bayesian p-value for deviance D = " + str(p_deviance) + " +/- " + str(pdeviance_err))
print("Bayesian p-value for KS test: " + str(p_ksp) + " +/- " + str(pksp_err))
print("Bayesian p-value for Merit function: " + str(p_merit) + " +/- " + str(pmerit_err))
print("Bayesian p-value for the np.sum of residuals: " + str(p_srat) + " +/- " + str(psrat_err))
print("Bayesian p-value for Likelihood Ratio: " + str(p_lrt) + " +/- " + str(plrt_err))
if self.plot:
n, bins, patches = plt.hist(sim_lrt, bins=100, normed=True, histtype='stepfilled')
plt.vlines(obslrt, 0.0, 0.8 * max(n), lw=4, color='m')
plt.savefig(self.namestr + '_qpolrt.png', format='png')
plt.close()
summary = {"p_lrt": [p_lrt, plrt_err],
"p_deviance": [p_deviance, pdeviance_err],
"p_ksp": [p_ksp, pksp_err],
"p_merit": [p_merit, pmerit_err],
"p_srat": [p_srat, psrat_err],
"postmean": mcobs.mean,
"posterr": mcobs.std,
"postquantiles": mcobs.ci,
"rhat": mcobs.rhat,
"acor": mcobs.acor,
"acceptance": mcobs.acceptance}
return summary
def print_summary(self, summary):
"""
Print a summary of the results.
NOT USED!
"""
try:
keys = summary.keys()
except AttributeError:
raise Exception("Summary must be a dictionary!")
probs = dict()
postpars = dict()
### sort out p-values and posterior distribution of parameters
for x in keys:
if x[:2] == 'p_':
probs[x] = summary[x]
else:
postpars[x] = summary[x]
print("The ensemble acceptance rate is " + str(postpars["acceptance"]) + " .")
try:
print("The autocorrelation times are: " + str(postpars["acor"]))
except KeyError:
print("Module Acor not found. Cannot compute autocorrelation times for the parameters")
for i, x in enumerate(postpars["rhat"]):
print("The $R_hat$ value for Parameter " + str(i) + " is " + str(x))
### print posterior summary of parameters:
print("-- Posterior Summary of Parameters: \n")
print("parameter \t mean \t\t sd \t\t 5% \t\t 95% \n")
print("---------------------------------------------\n")
for i in range(len(postpars['postmean'])):
print("theta[" + str(i) + "] \t " + str(postpars['postmean'][i]) + "\t" + str(
postpars['posterr'][i]) + "\t" + str(postpars['postquantiles'][i][0]) + "\t" + str(
postpars["postquantiles"][i][1]) + "\n")
for x in probs.keys():
if x == 'p_lrt':
print("Bayesian p-value for Likelihood Ratio: " + str(probs[x][0]) + " +/- " + str(probs[x][1]))
elif x == 'p_deviance':
print("Bayesian p-value for deviance D = " + str(probs[x][0]) + " +/- " + str(probs[x][1]))
elif x == 'p_ksp':
print("Bayesian p-value for KS test: " + str(probs[x][0]) + " +/- " + str(probs[x][1]))
elif x == 'p_merit':
print("Bayesian p-value for Merit function: " + str(probs[x][0]) + " +/- " + str(probs[x][1]))
elif x == 'p_srat':
print("Bayesian p-value for the sum of residuals: " + str(probs[x][0]) + " +/- " + str(probs[x][1]))
elif x == 'p_maxpow':
if "fitpars" in probs.keys():
print("Highest [unsmoothed] data/model outlier at frequency F=" + str(
probs["fitpars"]["maxfreq"]) + "Hz with power P=" + str(probs["fitpars"]["maxpow"]))
print("Bayesian p-value for the highest [unsmoothed] data/model outlier: " + str(
probs[x][0]) + " +/- " + str(probs[x][1]))
elif x == 'p_s3max':
if "fitpars" in probs.keys():
print("Highest [3 bin smoothed] data/model outlier at frequency F=" + str(
probs["fitpars"]["s3maxfreq"]) + "Hz with power P=" + str(probs["fitpars"]["s3max"]))
print("Bayesian p-value for the highest [3 bin smoothed] data/model outlier: " + str(
probs[x][0]) + " +/- " + str(probs[x][1]))
elif x == 'p_s5max':
if "fitpars" in probs.keys():
print("Highest [5 bin smoothed] data/model outlier at frequency F=" + str(
probs["fitpars"]["s5maxfreq"]) + "Hz with power P=" + str(probs["fitpars"]["s5max"]))
print("Bayesian p-value for the highest [5 bin smoothed] data/model outlier: " + str(
probs[x][0]) + " +/- " + str(probs[x][1]))
elif x == 'p_s11max':
if "fitpars" in probs.keys():
print("Highest [11 bin smoothed] data/model outlier at frequency F=" + str(
probs["fitpars"]["s11maxfreq"]) + "Hz with power P=" + str(probs["fitpars"]["s11max"]))
print("Bayesian p-value for the highest [11 bin smoothed] data/model outlier: " + str(
probs[x][0]) + " +/- " + str(probs[x][1]))
return
def write_summary(self, summary, namestr=None):
"""
Write a summary of the analysis to file.
NOT USED!
:param summary:
:param namestr:
:return:
"""
if not namestr:
namestr = self.namestr
try:
keys = summary.keys()
except AttributeError:
raise Exception("Summary must be a dictionary!")
probs = dict()
postpars = dict()
### sort out p-values and posterior distribution of parameters
for x in keys:
if x[:2] == 'p_':
probs[x] = summary[x]
else:
postpars[x] = summary[x]
picklefile = open(namestr + "_summary_pickle.dat", "w")
pickle.dump(summary, picklefile)
picklefile.close()
file = open(namestr + "_summary.dat", "w")
file.write("The ensemble acceptance rate is " + str(postpars["acceptance"]) + " .\n")
try:
file.write("The autocorrelation times are: " + str(postpars["acor"]) + "\n")
except KeyError:
file.write("Module Acor not found. Cannot compute autocorrelation times for the parameters \n")
for i, x in enumerate(postpars["rhat"]):
file.write("The $R_hat$ value for Parameter " + str(i) + " is " + str(x) + "\n")
### print posterior summary of parameters:
file.write("-- Posterior Summary of Parameters: \n")
file.write("parameter \t mean \t\t sd \t\t 5% \t\t 95% \n")
file.write("---------------------------------------------\n")
for i in range(len(postpars['postmean'])):
file.write("theta[" + str(i) + "] \t " + str(postpars['postmean'][i]) + "\t" + str(
postpars['posterr'][i]) + "\t" + str(postpars['postquantiles'][i][0]) + "\t" + str(
postpars["postquantiles"][i][1]) + "\n")
for x in probs.keys():
if x == 'p_lrt':
file.write(
"Bayesian p-value for Likelihood Ratio: " + str(probs[x][0]) + " +/- " + str(probs[x][1]) + "\n")
elif x == 'p_deviance':
file.write("Bayesian p-value for deviance D = " + str(probs[x][0]) + " +/- " + str(probs[x][1]) + "\n")
elif x == 'p_ksp':
file.write("Bayesian p-value for KS test: " + str(probs[x][0]) + " +/- " + str(probs[x][1]) + "\n")
elif x == 'p_merit':
file.write(
"Bayesian p-value for Merit function: " + str(probs[x][0]) + " +/- " + str(probs[x][1]) + "\n")
elif x == 'p_srat':
file.write("Bayesian p-value for the sum of residuals: " + str(probs[x][0]) + " +/- " + str(
probs[x][1]) + "\n")
elif x == 'p_maxpow':
file.write("Bayesian p-value for the highest [unsmoothed] data/model outlier: " + str(
probs[x][0]) + " +/- " + str(probs[x][1]) + "\n")
file.write(
"Upper limit for highest [unsmoothed] data/model outlier: " + str(summary['maxpow_ul']) + "\n")
elif x == 'p_s3max':
file.write("Bayesian p-value for the highest [3 bin smoothed] data/model outlier: " + str(
probs[x][0]) + " +/- " + str(probs[x][1]) + "\n")
file.write(
"Upper limit for highest [unsmoothed] data/model outlier: " + str(summary['s3max_ul']) + "\n")
elif x == 'p_s5max':
file.write("Bayesian p-value for the highest [5 bin smoothed] data/model outlier: " + str(
probs[x][0]) + " +/- " + str(probs[x][1]) + "\n")
file.write(
"Upper limit for highest [unsmoothed] data/model outlier: " + str(summary['s5max_ul']) + "\n")
elif x == 'p_s11max':
file.write("Bayesian p-value for the highest [11 bin smoothed] data/model outlier: " + str(
probs[x][0]) + " +/- " + str(probs[x][1]) + "\n")
file.write(
"Upper limit for highest [unsmoothed] data/model outlier: " + str(summary['s11max_ul']) + "\n")
return
def plot_posteriors(namestr='test', **pars):
plotkeys = list(pars.keys())
N = len(plotkeys)
### number of parameters
fig = plt.figure(figsize=(2, N // 2 + 1))
plt.subplots_adjust(top=0.95, bottom=0.05, left=0.05, right=0.95, wspace=0.2, hspace=0.2)
for i in range(N):
ax = fig.add_subplot(N // 2 + 1, 2, i + 1)
n, bins, patches = ax.hist(pars[plotkeys[i]][0], 30)
ax.vlines(pars[plotkeys[i]][0], 0.0, 0.8 * max(n), lw=4)
ax.text(pars[plotkeys[i]][0] + 0.01 * pars[plotkeys[i]][0], 0.8 * max(n), "p = " + str(pars[plotkeys[i]][1]))
ax.set_title("Posterior for " + plotkeys[i])
return
|
[
"matplotlib.pyplot.title",
"src.SpectralAnalysis.posterior.StackPerPosterior",
"pickle.dump",
"src.SpectralAnalysis.utils.TwoPrint",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.close",
"src.SpectralAnalysis.mle.PerMaxLike",
"src.SpectralAnalysis.posterior.PerPosterior",
"copy.copy",
"numpy.searchsorted",
"matplotlib.pyplot.figure",
"numpy.mean",
"src.SpectralAnalysis.mcmc.MarkovChainMonteCarlo",
"matplotlib.pyplot.subplots_adjust",
"numpy.msort",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((6349, 6376), 'src.SpectralAnalysis.utils.TwoPrint', 'utils.TwoPrint', (['resfilename'], {}), '(resfilename)\n', (6363, 6376), False, 'from src.SpectralAnalysis import utils\n'), ((6589, 6643), 'src.SpectralAnalysis.mle.PerMaxLike', 'mle.PerMaxLike', (['self.ps'], {'fitmethod': 'fitmethod', 'obs': '(True)'}), '(self.ps, fitmethod=fitmethod, obs=True)\n', (6603, 6643), False, 'from src.SpectralAnalysis import mle\n'), ((7352, 7640), 'src.SpectralAnalysis.mcmc.MarkovChainMonteCarlo', 'mcmc.MarkovChainMonteCarlo', (['self.ps.freq', 'self.ps.ps', 'lpost'], {'topt': "fitpars1['popt']", 'tcov': "fitpars1['cov']", 'covfactor': 'covfactor', 'niter': 'niter', 'nchain': 'nchain', 'parname': 'parname', 'check_conv': '(True)', 'namestr': 'self.namestr', 'use_emcee': 'use_emcee', 'plot': 'self.plot', 'printobj': 'resfile', 'm': 'self.m'}), "(self.ps.freq, self.ps.ps, lpost, topt=fitpars1[\n 'popt'], tcov=fitpars1['cov'], covfactor=covfactor, niter=niter, nchain\n =nchain, parname=parname, check_conv=True, namestr=self.namestr,\n use_emcee=use_emcee, plot=self.plot, printobj=resfile, m=self.m)\n", (7378, 7640), False, 'from src.SpectralAnalysis import mcmc\n'), ((17049, 17076), 'src.SpectralAnalysis.utils.TwoPrint', 'utils.TwoPrint', (['resfilename'], {}), '(resfilename)\n', (17063, 17076), False, 'from src.SpectralAnalysis import utils\n'), ((17139, 17193), 'src.SpectralAnalysis.mle.PerMaxLike', 'mle.PerMaxLike', (['self.ps'], {'fitmethod': 'fitmethod', 'obs': '(True)'}), '(self.ps, fitmethod=fitmethod, obs=True)\n', (17153, 17193), False, 'from src.SpectralAnalysis import mle\n'), ((17672, 17953), 'src.SpectralAnalysis.mcmc.MarkovChainMonteCarlo', 'mcmc.MarkovChainMonteCarlo', (['self.ps.freq', 'self.ps.ps', 'lpost'], {'topt': "fitpars['popt']", 'tcov': "fitpars['cov']", 'covfactor': 'covfactor', 'niter': 'niter', 'nchain': 'nchain', 'parname': 'parname', 'check_conv': '(True)', 'namestr': 'self.namestr', 'use_emcee': '(True)', 'plot': 'self.plot', 'printobj': 'resfile', 'm': 'self.m'}), "(self.ps.freq, self.ps.ps, lpost, topt=fitpars[\n 'popt'], tcov=fitpars['cov'], covfactor=covfactor, niter=niter, nchain=\n nchain, parname=parname, check_conv=True, namestr=self.namestr,\n use_emcee=True, plot=self.plot, printobj=resfile, m=self.m)\n", (17698, 17953), False, 'from src.SpectralAnalysis import mcmc\n'), ((19030, 19056), 'copy.copy', 'copy.copy', (["fitpars['popt']"], {}), "(fitpars['popt'])\n", (19039, 19056), False, 'import copy\n'), ((25727, 25747), 'numpy.msort', 'np.msort', (['sim_maxpow'], {}), '(sim_maxpow)\n', (25735, 25747), True, 'import numpy as np\n'), ((25773, 25792), 'numpy.msort', 'np.msort', (['sim_s3max'], {}), '(sim_s3max)\n', (25781, 25792), True, 'import numpy as np\n'), ((25818, 25837), 'numpy.msort', 'np.msort', (['sim_s5max'], {}), '(sim_s5max)\n', (25826, 25837), True, 'import numpy as np\n'), ((25864, 25884), 'numpy.msort', 'np.msort', (['sim_s11max'], {}), '(sim_s11max)\n', (25872, 25884), True, 'import numpy as np\n'), ((33685, 33739), 'src.SpectralAnalysis.mle.PerMaxLike', 'mle.PerMaxLike', (['self.ps'], {'fitmethod': 'fitmethod', 'obs': '(True)'}), '(self.ps, fitmethod=fitmethod, obs=True)\n', (33699, 33739), False, 'from src.SpectralAnalysis import mle\n'), ((34163, 34426), 'src.SpectralAnalysis.mcmc.MarkovChainMonteCarlo', 'mcmc.MarkovChainMonteCarlo', (['self.ps.freq', 'self.ps.ps', 'lpost'], {'topt': "fitpars['popt']", 'tcov': "fitpars['cov']", 'covfactor': 'covfactor', 'niter': 'niter', 'nchain': 'nchain', 'parname': 'parname', 'check_conv': '(True)', 'namestr': 
'self.namestr', 'use_emcee': '(True)', 'plot': 'self.plot', 'm': 'self.m'}), "(self.ps.freq, self.ps.ps, lpost, topt=fitpars[\n 'popt'], tcov=fitpars['cov'], covfactor=covfactor, niter=niter, nchain=\n nchain, parname=parname, check_conv=True, namestr=self.namestr,\n use_emcee=True, plot=self.plot, m=self.m)\n", (34189, 34426), False, 'from src.SpectralAnalysis import mcmc\n'), ((44092, 44124), 'pickle.dump', 'pickle.dump', (['summary', 'picklefile'], {}), '(summary, picklefile)\n', (44103, 44124), False, 'import pickle\n'), ((47690, 47724), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2, N / 2 + 1)'}), '(figsize=(2, N / 2 + 1))\n', (47700, 47724), True, 'import matplotlib.pyplot as plt\n'), ((47733, 47827), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.95)', 'bottom': '(0.05)', 'left': '(0.05)', 'right': '(0.95)', 'wspace': '(0.2)', 'hspace': '(0.2)'}), '(top=0.95, bottom=0.05, left=0.05, right=0.95, wspace=\n 0.2, hspace=0.2)\n', (47752, 47827), True, 'import matplotlib.pyplot as plt\n'), ((7122, 7160), 'src.SpectralAnalysis.posterior.PerPosterior', 'posterior.PerPosterior', (['self.ps', 'func1'], {}), '(self.ps, func1)\n', (7144, 7160), False, 'from src.SpectralAnalysis import posterior\n'), ((7195, 7246), 'src.SpectralAnalysis.posterior.StackPerPosterior', 'posterior.StackPerPosterior', (['self.ps', 'func1', 'self.m'], {}), '(self.ps, func1, self.m)\n', (7222, 7246), False, 'from src.SpectralAnalysis import posterior\n'), ((17444, 17481), 'src.SpectralAnalysis.posterior.PerPosterior', 'posterior.PerPosterior', (['self.ps', 'func'], {}), '(self.ps, func)\n', (17466, 17481), False, 'from src.SpectralAnalysis import posterior\n'), ((17516, 17566), 'src.SpectralAnalysis.posterior.StackPerPosterior', 'posterior.StackPerPosterior', (['self.ps', 'func', 'self.m'], {}), '(self.ps, func, self.m)\n', (17543, 17566), False, 'from src.SpectralAnalysis import posterior\n'), ((21893, 21910), 'numpy.msort', 'np.msort', (['bmaxpow'], {}), '(bmaxpow)\n', (21901, 21910), True, 'import numpy as np\n'), ((28075, 28095), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (28086, 28095), True, 'import matplotlib.pyplot as plt\n'), ((28127, 28212), 'matplotlib.pyplot.hist', 'plt.hist', (['sim_maxpow'], {'bins': '(100)', 'normed': '(True)', 'color': '"""cyan"""', 'histtype': '"""stepfilled"""'}), "(sim_maxpow, bins=100, normed=True, color='cyan', histtype='stepfilled'\n )\n", (28135, 28212), True, 'import matplotlib.pyplot as plt\n'), ((28442, 28483), 'matplotlib.pyplot.title', 'plt.title', (['"""unsmoothed data"""'], {'fontsize': '(12)'}), "('unsmoothed data', fontsize=12)\n", (28451, 28483), True, 'import matplotlib.pyplot as plt\n'), ((28497, 28517), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (28508, 28517), True, 'import matplotlib.pyplot as plt\n'), ((28549, 28628), 'matplotlib.pyplot.hist', 'plt.hist', (['sim_s3max'], {'bins': '(100)', 'normed': '(True)', 'color': '"""cyan"""', 'histtype': '"""stepfilled"""'}), "(sim_s3max, bins=100, normed=True, color='cyan', histtype='stepfilled')\n", (28557, 28628), True, 'import matplotlib.pyplot as plt\n'), ((28860, 28903), 'matplotlib.pyplot.title', 'plt.title', (['"""smoothed (3) data"""'], {'fontsize': '(12)'}), "('smoothed (3) data', fontsize=12)\n", (28869, 28903), True, 'import matplotlib.pyplot as plt\n'), ((28917, 28937), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (28928, 28937), True, 
'import matplotlib.pyplot as plt\n'), ((28969, 29048), 'matplotlib.pyplot.hist', 'plt.hist', (['sim_s3max'], {'bins': '(100)', 'normed': '(True)', 'color': '"""cyan"""', 'histtype': '"""stepfilled"""'}), "(sim_s3max, bins=100, normed=True, color='cyan', histtype='stepfilled')\n", (28977, 29048), True, 'import matplotlib.pyplot as plt\n'), ((29281, 29338), 'matplotlib.pyplot.title', 'plt.title', (['"""smoothed (5) data/model outlier"""'], {'fontsize': '(12)'}), "('smoothed (5) data/model outlier', fontsize=12)\n", (29290, 29338), True, 'import matplotlib.pyplot as plt\n'), ((29352, 29372), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (29363, 29372), True, 'import matplotlib.pyplot as plt\n'), ((29404, 29483), 'matplotlib.pyplot.hist', 'plt.hist', (['sim_s3max'], {'bins': '(100)', 'normed': '(True)', 'color': '"""cyan"""', 'histtype': '"""stepfilled"""'}), "(sim_s3max, bins=100, normed=True, color='cyan', histtype='stepfilled')\n", (29412, 29483), True, 'import matplotlib.pyplot as plt\n'), ((29718, 29762), 'matplotlib.pyplot.title', 'plt.title', (['"""smoothed (11) data"""'], {'fontsize': '(12)'}), "('smoothed (11) data', fontsize=12)\n", (29727, 29762), True, 'import matplotlib.pyplot as plt\n'), ((29776, 29831), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(self.namestr + '_maxpow.png')"], {'format': '"""png"""'}), "(self.namestr + '_maxpow.png', format='png')\n", (29787, 29831), True, 'import matplotlib.pyplot as plt\n'), ((29844, 29855), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (29853, 29855), True, 'import matplotlib.pyplot as plt\n'), ((33935, 33972), 'src.SpectralAnalysis.posterior.PerPosterior', 'posterior.PerPosterior', (['self.ps', 'func'], {}), '(self.ps, func)\n', (33957, 33972), False, 'from src.SpectralAnalysis import posterior\n'), ((34007, 34057), 'src.SpectralAnalysis.posterior.StackPerPosterior', 'posterior.StackPerPosterior', (['self.ps', 'func', 'self.m'], {}), '(self.ps, func, self.m)\n', (34034, 34057), False, 'from src.SpectralAnalysis import posterior\n'), ((38509, 38572), 'matplotlib.pyplot.hist', 'plt.hist', (['sim_lrt'], {'bins': '(100)', 'normed': '(True)', 'histtype': '"""stepfilled"""'}), "(sim_lrt, bins=100, normed=True, histtype='stepfilled')\n", (38517, 38572), True, 'import matplotlib.pyplot as plt\n'), ((38652, 38707), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(self.namestr + '_qpolrt.png')"], {'format': '"""png"""'}), "(self.namestr + '_qpolrt.png', format='png')\n", (38663, 38707), True, 'import matplotlib.pyplot as plt\n'), ((38720, 38731), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (38729, 38731), True, 'import matplotlib.pyplot as plt\n'), ((8622, 8671), 'src.SpectralAnalysis.mle.PerMaxLike', 'mle.PerMaxLike', (['x'], {'fitmethod': 'fitmethod', 'obs': '(False)'}), '(x, fitmethod=fitmethod, obs=False)\n', (8636, 8671), False, 'from src.SpectralAnalysis import mle\n'), ((12413, 12490), 'matplotlib.pyplot.hist', 'plt.hist', (['sim_lrt'], {'bins': '(100)', 'normed': '(True)', 'color': '"""cyan"""', 'histtype': '"""stepfilled"""'}), "(sim_lrt, bins=100, normed=True, color='cyan', histtype='stepfilled')\n", (12421, 12490), True, 'import matplotlib.pyplot as plt\n'), ((12581, 12633), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(self.namestr + '_lrt.png')"], {'format': '"""png"""'}), "(self.namestr + '_lrt.png', format='png')\n", (12592, 12633), True, 'import matplotlib.pyplot as plt\n'), ((12650, 12661), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (12659, 
12661), True, 'import matplotlib.pyplot as plt\n'), ((19326, 19375), 'src.SpectralAnalysis.mle.PerMaxLike', 'mle.PerMaxLike', (['x'], {'fitmethod': 'fitmethod', 'obs': '(False)'}), '(x, fitmethod=fitmethod, obs=False)\n', (19340, 19375), False, 'from src.SpectralAnalysis import mle\n'), ((35592, 35643), 'src.SpectralAnalysis.mle.PerMaxLike', 'mle.PerMaxLike', (['x'], {'fitmethod': '"""constbfgs"""', 'obs': '(False)'}), "(x, fitmethod='constbfgs', obs=False)\n", (35606, 35643), False, 'from src.SpectralAnalysis import mle\n'), ((24189, 24219), 'numpy.sqrt', 'np.sqrt', (['(bpow * b * self.ps.df)'], {}), '(bpow * b * self.ps.df)\n', (24196, 24219), True, 'import numpy as np\n'), ((37200, 37216), 'numpy.mean', 'np.mean', (['sim_ksp'], {}), '(sim_ksp)\n', (37207, 37216), True, 'import numpy as np\n'), ((37316, 37334), 'numpy.mean', 'np.mean', (['sim_merit'], {}), '(sim_merit)\n', (37323, 37334), True, 'import numpy as np\n'), ((37431, 37448), 'numpy.mean', 'np.mean', (['sim_srat'], {}), '(sim_srat)\n', (37438, 37448), True, 'import numpy as np\n'), ((10814, 10830), 'numpy.mean', 'np.mean', (['sim_ksp'], {}), '(sim_ksp)\n', (10821, 10830), True, 'import numpy as np\n'), ((10943, 10961), 'numpy.mean', 'np.mean', (['sim_merit'], {}), '(sim_merit)\n', (10950, 10961), True, 'import numpy as np\n'), ((11071, 11088), 'numpy.mean', 'np.mean', (['sim_srat'], {}), '(sim_srat)\n', (11078, 11088), True, 'import numpy as np\n'), ((24083, 24114), 'numpy.searchsorted', 'np.searchsorted', (['binps.freq', 'bc'], {}), '(binps.freq, bc)\n', (24098, 24114), True, 'import numpy as np\n')]
|
from math import pi
import numpy as np
from aleph.consts import *
from reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD import svOsuMeasureLineMD, SvOsuMeasureLineEvent
from reamber.osu.OsuMap import OsuMap
# notes: 01:37:742 (97742|2,125993|2) -
SHAKES = np.array(
[100560, 100790, 101018, 101245,
104124, 104340, 104556, 104770,
107487, 107692, 107896, 108099,
110674, 110867, 111059, 111156, 111252, 111348,
113698, 113882, 114065, 114248,
116577, 116753, 116928, 117103,
119326, 119494, 119661, 119827,
121953, 122114, 122275, 122434,
122594, 122673, 122752, 122831, 123068,
123147, 123226, 123304, 123383, 123539,
123618, 123696, 123773, 123851, 124007,
124084, 124162, 124239, 124316, 124471,
124547, 124624, 124701, 124778, 124932,
125008, 125084, 125160, 125236, 125388,
125464, 125540, 125616, 125692, 125767, 125842, 125918, 125993])
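# SHAKES holds the onset times (in ms) of the screen-shake effect: each entry
# opens a SHAKE_WINDOW-long interval in which the measure lines get a sine-shaped
# offset whose amplitude grows with the shake index (see the piecewise term in f247 below).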
def f247(m: OsuMap):
notes = sorted([n for n in m.notes.hits() if 97742 < n.offset <= 125993])
BASE_SHAKE_AMP = 0.010
INC_SHAKE_AMP = 0.0010
SHAKE_WINDOW = 250
NOTE_DURATION = 2000
# noinspection PyTypeChecker
events = [
*[SvOsuMeasureLineEvent(
firstOffset=n.offset - NOTE_DURATION - t, lastOffset=n.offset - t,
startX=n.offset - NOTE_DURATION - t, endX=n.offset - t,
startY=-1 + en / 500 , endY=1 - en / 500,
funcs=[
lambda x, n=n, t=t:
# This flips the board if it's < 2
(-1 if n.column < 2 else 1) *
(
np.piecewise(x,
[(i <= x) & (x < i + SHAKE_WINDOW) for i in SHAKES],
[*[lambda x, i=i, es=es:
(BASE_SHAKE_AMP + es * INC_SHAKE_AMP)
* np.sin((x - i) * pi / (SHAKE_WINDOW - es * 3))
for es, i in enumerate(SHAKES)],
lambda x: 0])
+ (x - (n.offset - t)) / NOTE_DURATION
)
]) for en, n in enumerate(notes) for t in np.linspace(0, 24, NOTE_THICKNESS)]
]
svs, bpms = svOsuMeasureLineMD(events,
scalingFactor=SCALE,
firstOffset=97742,
lastOffset=125993,
paddingSize=PADDING,
endBpm=250)
m.svs.extend(svs)
m.bpms.extend(bpms)
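# A minimal usage sketch (illustrative; the exact reamber I/O call names may
# differ between versions and are assumed here):
# m = OsuMap.readFile("map.osu")
# f247(m)
# m.writeFile("map_f247.osu")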
|
[
"numpy.sin",
"numpy.array",
"numpy.linspace",
"reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD.svOsuMeasureLineMD"
] |
[((277, 896), 'numpy.array', 'np.array', (['[100560, 100790, 101018, 101245, 104124, 104340, 104556, 104770, 107487, \n 107692, 107896, 108099, 110674, 110867, 111059, 111156, 111252, 111348,\n 113698, 113882, 114065, 114248, 116577, 116753, 116928, 117103, 119326,\n 119494, 119661, 119827, 121953, 122114, 122275, 122434, 122594, 122673,\n 122752, 122831, 123068, 123147, 123226, 123304, 123383, 123539, 123618,\n 123696, 123773, 123851, 124007, 124084, 124162, 124239, 124316, 124471,\n 124547, 124624, 124701, 124778, 124932, 125008, 125084, 125160, 125236,\n 125388, 125464, 125540, 125616, 125692, 125767, 125842, 125918, 125993]'], {}), '([100560, 100790, 101018, 101245, 104124, 104340, 104556, 104770, \n 107487, 107692, 107896, 108099, 110674, 110867, 111059, 111156, 111252,\n 111348, 113698, 113882, 114065, 114248, 116577, 116753, 116928, 117103,\n 119326, 119494, 119661, 119827, 121953, 122114, 122275, 122434, 122594,\n 122673, 122752, 122831, 123068, 123147, 123226, 123304, 123383, 123539,\n 123618, 123696, 123773, 123851, 124007, 124084, 124162, 124239, 124316,\n 124471, 124547, 124624, 124701, 124778, 124932, 125008, 125084, 125160,\n 125236, 125388, 125464, 125540, 125616, 125692, 125767, 125842, 125918,\n 125993])\n', (285, 896), True, 'import numpy as np\n'), ((2302, 2424), 'reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD.svOsuMeasureLineMD', 'svOsuMeasureLineMD', (['events'], {'scalingFactor': 'SCALE', 'firstOffset': '(97742)', 'lastOffset': '(125993)', 'paddingSize': 'PADDING', 'endBpm': '(250)'}), '(events, scalingFactor=SCALE, firstOffset=97742,\n lastOffset=125993, paddingSize=PADDING, endBpm=250)\n', (2320, 2424), False, 'from reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD import svOsuMeasureLineMD, SvOsuMeasureLineEvent\n'), ((2243, 2277), 'numpy.linspace', 'np.linspace', (['(0)', '(24)', 'NOTE_THICKNESS'], {}), '(0, 24, NOTE_THICKNESS)\n', (2254, 2277), True, 'import numpy as np\n'), ((1948, 1994), 'numpy.sin', 'np.sin', (['((x - i) * pi / (SHAKE_WINDOW - es * 3))'], {}), '((x - i) * pi / (SHAKE_WINDOW - es * 3))\n', (1954, 1994), True, 'import numpy as np\n')]
|
from __future__ import division, print_function, unicode_literals
import streamlit as st
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
st.title('Mô hình dự đoán giá nhà đất tại hồ gươm ')
# x1 is the lot area (m2)
# x2 is the street frontage length (m)
# x3 is the number of floors
# x4 is the distance to Hoan Kiem Lake (m)
X = np.array([[40, 8, 2, 1800],
[36, 3.5, 6, 450],
[35, 4.5, 6, 450],
[39, 9, 2, 1800],
[40, 9, 1, 1800],
[36, 4.5, 5, 450],
[36, 4.5, 6, 450],
[40, 9, 2, 1800],
[36, 4.5, 7, 450],
[40, 9, 3, 1800],
[44, 4, 5, 350],
[41, 9, 2, 1800],
[37, 4.5, 6, 450],
[36, 5.5, 6, 450],
[40, 10, 2, 1800],
[45, 3, 4, 350],
[45, 4, 3, 350],
[45, 4, 4, 350],
[45, 4, 5, 350],
[45, 5, 4, 350],
[45, 3, 4, 350],
[60, 2.3, 5, 450],
[59, 3.3, 5, 450],
[60, 3.3, 4, 450],
[85, 4, 4, 950],
[85, 4, 5, 950],
[60, 3.3, 5, 450],
[61, 6, 1, 800],
[62, 5, 1, 800],
[85, 4, 6, 950],
[84, 6, 5, 950],
[86, 2.5, 3, 900],
[60, 3.3, 6, 450],
[85, 5, 5, 950],
[85, 3.5, 3, 900],
[86, 3.5, 2, 900],
[31.2, 3, 4, 450],
[61, 3.3, 5, 450],
[62, 6, 1, 800],
[85, 6, 5, 950],
[86, 3.5, 3, 900],
[62, 6, 2, 800],
[86, 3.5, 4, 900],
[87, 3.5, 3, 900],
[30.2, 4, 4, 450],
[62, 6, 3, 800],
[86, 4.5, 3, 900],
[86, 6, 5, 950],
[60, 4.3, 5, 450],
[62, 7, 1, 800],
[63, 6, 1, 800],
[31.2, 4, 4, 450],
[31.2, 4, 3, 450],
[62, 4, 5, 550],
[31.2, 4, 5, 450],
[63, 5, 3, 550],
[63, 4, 5, 550],
[32.2, 4 , 4, 450],
[31.2, 5, 4, 450],
[63, 5, 5, 550],
[64, 4, 5, 550],
[63, 5, 6 , 550],
[63, 6, 4, 550],
[80, 5.8, 7, 1100],
[80, 4.8, 8, 1100],
[80, 5.8, 8, 1100],
[79, 5.8, 8, 1100],
[80, 5.8, 9, 1100],
[81, 5.8, 8, 1100],
[80, 6.8, 8, 1100],
[80, 3.5, 6, 300],
[80, 4.5, 5, 300],
[80, 4.5, 6, 300],
[79, 4.5, 6, 300],
[81, 4.5, 6, 300],
[88, 3.5, 4, 850],
[88, 4.5, 3, 850],
[88, 4.5, 4, 850],
[87, 4.5, 4, 850],
[88, 4.5, 5, 850],
[89, 4.5, 4, 850],
[88, 5.5, 4, 850],
[80, 5.5, 7, 300],
[63, 6, 4, 250],
[62, 7, 4, 250],
[63, 7, 3, 250],
[63, 7, 4, 250],
[63, 7, 5, 250],
[64, 7, 4, 250],
[63, 8, 4, 250],
[140, 4.5, 5, 500],
[139, 5.5, 5, 500],
[140, 5.5, 4, 500],
[140, 5.5, 5, 500],
[140, 5.5, 6, 500],
[141, 5.5, 5, 500],
[140, 6.5, 5, 500]])
Y = np.array([[
19, 19.3, 19.45, 19.48, 19.5, 19.7, 20, 20, 20.3, 20.5,
20.5, 20.52, 20.55, 20.7, 21, 21, 21.3, 21.5, 21.7, 22,
22.5, 29, 30, 30.5, 30.5, 30.8, 31, 31, 31, 31, 31.3, 31.35,
31.5, 31.5, 31.63, 31.7, 32, 32, 32, 32, 32, 32.3, 32.3, 32.37,
32.4, 32.5, 32.65, 32.7, 33, 33, 33, 33.5, 33.5, 33.6, 34, 34, 34.3,
34.6, 35, 35, 35, 35.5, 35.7, 42.5, 42.9, 43, 43.463, 43.5, 43.537,
44.1, 50, 52.3, 53, 53.38, 53.62, 54, 54.5, 55, 55.46, 55.5, 55.54, 56,
56.7, 60, 62.3, 62.5, 63, 63.5, 63.7, 66, 96.5, 97.3, 97.5, 98, 98.5, 98.7, 99.5
]]).T
def duel_plot(X1, X2, Y):
fig = plt.figure(figsize=(15, 5))
ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(1, 2, 2)
ax1.plot(Y, X[:, 0])
ax1.set_title('xét diện tích với giá tiền')
ax1.set_xlabel('giá tiền')
ax1.set_ylabel('Diện tích m2')
ax2.plot(Y, X[:, 1])
ax2.set_title('xét số mét mặt tiền với giá tiền')
ax2.set_xlabel('giá tiền')
ax2.set_ylabel('số mét mặt tiền')
return fig
def duel_plot2(X4, X5, Y):
fig = plt.figure(figsize=(15, 5))
ax3 = fig.add_subplot(1, 2, 1)
ax4 = fig.add_subplot(1, 2, 2)
ax3.plot(Y, X[:, 2])
ax3.set_title('xét số tầng nhà với giá tiền')
ax3.set_xlabel('giá tiền')
ax3.set_ylabel('số tầng nhà')
ax4.plot(Y, X[:, 3])
ax4.set_title('xét khoảng cách với giá tiền')
ax4.set_xlabel('giá tiền')
ax4.set_ylabel('khoảng cách tới hồ gươm')
return fig
st.set_option('deprecation.showPyplotGlobalUse', False)
st.pyplot(duel_plot(X[:, 0], X[:, 1], Y))
st.pyplot(duel_plot2(X[:, 2], X[:, 3], Y))
st.sidebar.title('Dự đoán giá các mẫu nhà')
dt_name = st.sidebar.text_input('Nhập diện tích đất(m2) ')
cd_name = st.sidebar.text_input('Nhập chiều dài mặt tiền(m) ')
tn_name = st.sidebar.text_input('Nhập số tầng nhà(tầng) ')
kc_name = st.sidebar.text_input('Nhập khoảng cách nhà tới hồ gươm(m) ')
one = np.ones((X.shape[0], 1))
Xbar = np.concatenate((one, X), axis=1)
x_train, x_test, y_train, y_test = train_test_split(Xbar, Y, test_size=0.2)
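# Closed-form least-squares fit on the training split via the normal equations: w = (X^T X)^+ X^T y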
A = np.dot(x_train.T, x_train)
b = np.dot(x_train.T, y_train)
w = np.dot(np.linalg.pinv(A), b)
w_0 = w[0][0]
w_1 = w[1][0]
w_2 = w[2][0]
w_3 = w[3][0]
w_4 = w[4][0]
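# Report R^2 on the held-out test split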
st.write("Độ chính xác (R2 square) : ", r2_score(y_test, np.dot(x_test, w)))
vd = np.array([dt_name, cd_name, tn_name, kc_name, 1])
if st.sidebar.button('Dự đoán'):
y1 = w_1*float(dt_name)+w_2*float(cd_name)+w_3 * \
float(tn_name)+w_4*float(kc_name) + w_0
st.sidebar.write('Giá của ngôi nhà là : ', y1, 'tỷ đồng')
|
[
"streamlit.set_option",
"streamlit.sidebar.write",
"sklearn.model_selection.train_test_split",
"numpy.ones",
"streamlit.title",
"streamlit.sidebar.title",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.dot",
"streamlit.sidebar.text_input",
"streamlit.sidebar.button",
"numpy.linalg.pinv",
"numpy.concatenate"
] |
[((271, 323), 'streamlit.title', 'st.title', (['"""Mô hình dự đoán giá nhà đất tại hồ gươm """'], {}), "('Mô hình dự đoán giá nhà đất tại hồ gươm ')\n", (279, 323), True, 'import streamlit as st\n'), ((449, 2354), 'numpy.array', 'np.array', (['[[40, 8, 2, 1800], [36, 3.5, 6, 450], [35, 4.5, 6, 450], [39, 9, 2, 1800],\n [40, 9, 1, 1800], [36, 4.5, 5, 450], [36, 4.5, 6, 450], [40, 9, 2, 1800\n ], [36, 4.5, 7, 450], [40, 9, 3, 1800], [44, 4, 5, 350], [41, 9, 2, \n 1800], [37, 4.5, 6, 450], [36, 5.5, 6, 450], [40, 10, 2, 1800], [45, 3,\n 4, 350], [45, 4, 3, 350], [45, 4, 4, 350], [45, 4, 5, 350], [45, 5, 4, \n 350], [45, 3, 4, 350], [60, 2.3, 5, 450], [59, 3.3, 5, 450], [60, 3.3, \n 4, 450], [85, 4, 4, 950], [85, 4, 5, 950], [60, 3.3, 5, 450], [61, 6, 1,\n 800], [62, 5, 1, 800], [85, 4, 6, 950], [84, 6, 5, 950], [86, 2.5, 3, \n 900], [60, 3.3, 6, 450], [85, 5, 5, 950], [85, 3.5, 3, 900], [86, 3.5, \n 2, 900], [31.2, 3, 4, 450], [61, 3.3, 5, 450], [62, 6, 1, 800], [85, 6,\n 5, 950], [86, 3.5, 3, 900], [62, 6, 2, 800], [86, 3.5, 4, 900], [87, \n 3.5, 3, 900], [30.2, 4, 4, 450], [62, 6, 3, 800], [86, 4.5, 3, 900], [\n 86, 6, 5, 950], [60, 4.3, 5, 450], [62, 7, 1, 800], [63, 6, 1, 800], [\n 31.2, 4, 4, 450], [31.2, 4, 3, 450], [62, 4, 5, 550], [31.2, 4, 5, 450],\n [63, 5, 3, 550], [63, 4, 5, 550], [32.2, 4, 4, 450], [31.2, 5, 4, 450],\n [63, 5, 5, 550], [64, 4, 5, 550], [63, 5, 6, 550], [63, 6, 4, 550], [80,\n 5.8, 7, 1100], [80, 4.8, 8, 1100], [80, 5.8, 8, 1100], [79, 5.8, 8, \n 1100], [80, 5.8, 9, 1100], [81, 5.8, 8, 1100], [80, 6.8, 8, 1100], [80,\n 3.5, 6, 300], [80, 4.5, 5, 300], [80, 4.5, 6, 300], [79, 4.5, 6, 300],\n [81, 4.5, 6, 300], [88, 3.5, 4, 850], [88, 4.5, 3, 850], [88, 4.5, 4, \n 850], [87, 4.5, 4, 850], [88, 4.5, 5, 850], [89, 4.5, 4, 850], [88, 5.5,\n 4, 850], [80, 5.5, 7, 300], [63, 6, 4, 250], [62, 7, 4, 250], [63, 7, 3,\n 250], [63, 7, 4, 250], [63, 7, 5, 250], [64, 7, 4, 250], [63, 8, 4, 250\n ], [140, 4.5, 5, 500], [139, 5.5, 5, 500], [140, 5.5, 4, 500], [140, \n 5.5, 5, 500], [140, 5.5, 6, 500], [141, 5.5, 5, 500], [140, 6.5, 5, 500]]'], {}), '([[40, 8, 2, 1800], [36, 3.5, 6, 450], [35, 4.5, 6, 450], [39, 9, 2,\n 1800], [40, 9, 1, 1800], [36, 4.5, 5, 450], [36, 4.5, 6, 450], [40, 9, \n 2, 1800], [36, 4.5, 7, 450], [40, 9, 3, 1800], [44, 4, 5, 350], [41, 9,\n 2, 1800], [37, 4.5, 6, 450], [36, 5.5, 6, 450], [40, 10, 2, 1800], [45,\n 3, 4, 350], [45, 4, 3, 350], [45, 4, 4, 350], [45, 4, 5, 350], [45, 5, \n 4, 350], [45, 3, 4, 350], [60, 2.3, 5, 450], [59, 3.3, 5, 450], [60, \n 3.3, 4, 450], [85, 4, 4, 950], [85, 4, 5, 950], [60, 3.3, 5, 450], [61,\n 6, 1, 800], [62, 5, 1, 800], [85, 4, 6, 950], [84, 6, 5, 950], [86, 2.5,\n 3, 900], [60, 3.3, 6, 450], [85, 5, 5, 950], [85, 3.5, 3, 900], [86, \n 3.5, 2, 900], [31.2, 3, 4, 450], [61, 3.3, 5, 450], [62, 6, 1, 800], [\n 85, 6, 5, 950], [86, 3.5, 3, 900], [62, 6, 2, 800], [86, 3.5, 4, 900],\n [87, 3.5, 3, 900], [30.2, 4, 4, 450], [62, 6, 3, 800], [86, 4.5, 3, 900\n ], [86, 6, 5, 950], [60, 4.3, 5, 450], [62, 7, 1, 800], [63, 6, 1, 800],\n [31.2, 4, 4, 450], [31.2, 4, 3, 450], [62, 4, 5, 550], [31.2, 4, 5, 450\n ], [63, 5, 3, 550], [63, 4, 5, 550], [32.2, 4, 4, 450], [31.2, 5, 4, \n 450], [63, 5, 5, 550], [64, 4, 5, 550], [63, 5, 6, 550], [63, 6, 4, 550\n ], [80, 5.8, 7, 1100], [80, 4.8, 8, 1100], [80, 5.8, 8, 1100], [79, 5.8,\n 8, 1100], [80, 5.8, 9, 1100], [81, 5.8, 8, 1100], [80, 6.8, 8, 1100], [\n 80, 3.5, 6, 300], [80, 4.5, 5, 300], [80, 4.5, 6, 300], [79, 4.5, 6, \n 300], [81, 4.5, 6, 300], [88, 3.5, 4, 850], [88, 4.5, 3, 850], 
[88, 4.5,\n 4, 850], [87, 4.5, 4, 850], [88, 4.5, 5, 850], [89, 4.5, 4, 850], [88, \n 5.5, 4, 850], [80, 5.5, 7, 300], [63, 6, 4, 250], [62, 7, 4, 250], [63,\n 7, 3, 250], [63, 7, 4, 250], [63, 7, 5, 250], [64, 7, 4, 250], [63, 8, \n 4, 250], [140, 4.5, 5, 500], [139, 5.5, 5, 500], [140, 5.5, 4, 500], [\n 140, 5.5, 5, 500], [140, 5.5, 6, 500], [141, 5.5, 5, 500], [140, 6.5, 5,\n 500]])\n', (457, 2354), True, 'import numpy as np\n'), ((5056, 5111), 'streamlit.set_option', 'st.set_option', (['"""deprecation.showPyplotGlobalUse"""', '(False)'], {}), "('deprecation.showPyplotGlobalUse', False)\n", (5069, 5111), True, 'import streamlit as st\n'), ((5199, 5242), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""Dự đoán giá các mẫu nhà"""'], {}), "('Dự đoán giá các mẫu nhà')\n", (5215, 5242), True, 'import streamlit as st\n'), ((5253, 5301), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Nhập diện tích đất(m2) """'], {}), "('Nhập diện tích đất(m2) ')\n", (5274, 5301), True, 'import streamlit as st\n'), ((5312, 5364), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Nhập chiều dài mặt tiền(m) """'], {}), "('Nhập chiều dài mặt tiền(m) ')\n", (5333, 5364), True, 'import streamlit as st\n'), ((5375, 5423), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Nhập số tầng nhà(tầng) """'], {}), "('Nhập số tầng nhà(tầng) ')\n", (5396, 5423), True, 'import streamlit as st\n'), ((5434, 5495), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Nhập khoảng cách nhà tới hồ gươm(m) """'], {}), "('Nhập khoảng cách nhà tới hồ gươm(m) ')\n", (5455, 5495), True, 'import streamlit as st\n'), ((5502, 5526), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (5509, 5526), True, 'import numpy as np\n'), ((5534, 5566), 'numpy.concatenate', 'np.concatenate', (['(one, X)'], {'axis': '(1)'}), '((one, X), axis=1)\n', (5548, 5566), True, 'import numpy as np\n'), ((5603, 5643), 'sklearn.model_selection.train_test_split', 'train_test_split', (['Xbar', 'Y'], {'test_size': '(0.2)'}), '(Xbar, Y, test_size=0.2)\n', (5619, 5643), False, 'from sklearn.model_selection import train_test_split\n'), ((5649, 5669), 'numpy.dot', 'np.dot', (['Xbar.T', 'Xbar'], {}), '(Xbar.T, Xbar)\n', (5655, 5669), True, 'import numpy as np\n'), ((5674, 5691), 'numpy.dot', 'np.dot', (['Xbar.T', 'Y'], {}), '(Xbar.T, Y)\n', (5680, 5691), True, 'import numpy as np\n'), ((5881, 5930), 'numpy.array', 'np.array', (['[dt_name, cd_name, tn_name, kc_name, 1]'], {}), '([dt_name, cd_name, tn_name, kc_name, 1])\n', (5889, 5930), True, 'import numpy as np\n'), ((5934, 5962), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""Dự đoán"""'], {}), "('Dự đoán')\n", (5951, 5962), True, 'import streamlit as st\n'), ((3591, 4164), 'numpy.array', 'np.array', (['[[19, 19.3, 19.45, 19.48, 19.5, 19.7, 20, 20, 20.3, 20.5, 20.5, 20.52, \n 20.55, 20.7, 21, 21, 21.3, 21.5, 21.7, 22, 22.5, 29, 30, 30.5, 30.5, \n 30.8, 31, 31, 31, 31, 31.3, 31.35, 31.5, 31.5, 31.63, 31.7, 32, 32, 32,\n 32, 32, 32.3, 32.3, 32.37, 32.4, 32.5, 32.65, 32.7, 33, 33, 33, 33.5, \n 33.5, 33.6, 34, 34, 34.3, 34.6, 35, 35, 35, 35.5, 35.7, 42.5, 42.9, 43,\n 43.463, 43.5, 43.537, 44.1, 50, 52.3, 53, 53.38, 53.62, 54, 54.5, 55, \n 55.46, 55.5, 55.54, 56, 56.7, 60, 62.3, 62.5, 63, 63.5, 63.7, 66, 96.5,\n 97.3, 97.5, 98, 98.5, 98.7, 99.5]]'], {}), '([[19, 19.3, 19.45, 19.48, 19.5, 19.7, 20, 20, 20.3, 20.5, 20.5, \n 20.52, 20.55, 20.7, 21, 21, 21.3, 21.5, 21.7, 22, 22.5, 29, 30, 30.5, \n 30.5, 30.8, 31, 31, 31, 31, 31.3, 31.35, 31.5, 
31.5, 31.63, 31.7, 32, \n 32, 32, 32, 32, 32.3, 32.3, 32.37, 32.4, 32.5, 32.65, 32.7, 33, 33, 33,\n 33.5, 33.5, 33.6, 34, 34, 34.3, 34.6, 35, 35, 35, 35.5, 35.7, 42.5, \n 42.9, 43, 43.463, 43.5, 43.537, 44.1, 50, 52.3, 53, 53.38, 53.62, 54, \n 54.5, 55, 55.46, 55.5, 55.54, 56, 56.7, 60, 62.3, 62.5, 63, 63.5, 63.7,\n 66, 96.5, 97.3, 97.5, 98, 98.5, 98.7, 99.5]])\n', (3599, 4164), True, 'import numpy as np\n'), ((4206, 4233), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (4216, 4233), True, 'import matplotlib.pyplot as plt\n'), ((4648, 4675), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (4658, 4675), True, 'import matplotlib.pyplot as plt\n'), ((5703, 5720), 'numpy.linalg.pinv', 'np.linalg.pinv', (['A'], {}), '(A)\n', (5717, 5720), True, 'import numpy as np\n'), ((6071, 6128), 'streamlit.sidebar.write', 'st.sidebar.write', (['"""Giá của ngôi nhà là : """', 'y1', '"""tỷ đồng"""'], {}), "('Giá của ngôi nhà là : ', y1, 'tỷ đồng')\n", (6087, 6128), True, 'import streamlit as st\n'), ((5860, 5877), 'numpy.dot', 'np.dot', (['x_test', 'w'], {}), '(x_test, w)\n', (5866, 5877), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
#/usr/bin/python2
'''
June 2017 by <NAME>.
<EMAIL>.
https://www.github.com/kyubyong/transformer
'''
from __future__ import print_function
import codecs
import os
import tensorflow as tf
import numpy as np
from hyperparams import Hyperparams as hp
from data_load import load_test_data, load_de_vocab, load_en_vocab
from train import Graph
#from nltk.translate.bleu_score import corpus_bleu
def eval():
# Load graph
g = Graph(is_training=False)
print("Graph loaded")
# Load data
# X, Sources, Targets = load_test_data()
"""
x_list, y_list, Sources, Targets = [], [], [], []
for source_sent, target_sent in zip(source_sents, target_sents):
x = [de2idx.get(word, 1) for word in (source_sent + u" </S>").split()] # 1: OOV, </S>: End of Text
y = [en2idx.get(word, 1) for word in (target_sent + u" </S>").split()]
if max(len(x), len(y)) <=hp.maxlen:
x_list.append(np.array(x))
y_list.append(np.array(y))
Sources.append(source_sent)
Targets.append(target_sent)
# Pad
X = np.zeros([len(x_list), hp.maxlen], np.int32)
Y = np.zeros([len(y_list), hp.maxlen], np.int32)
for i, (x, y) in enumerate(zip(x_list, y_list)):
X[i] = np.lib.pad(x, [0, hp.maxlen-len(x)], 'constant', constant_values=(0, 0))
"""
en2idx, idx2en = load_en_vocab()
# Start session
with g.graph.as_default():
sv = tf.train.Supervisor()
with sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
## Restore parameters
sv.saver.restore(sess, tf.train.latest_checkpoint(hp.logdir))
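            # Interactive loop: read a sentence from stdin, encode it, and decode it greedily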
while(True):
prompt = raw_input()
xlist = []
                xval = [en2idx.get(word, 1) for word in (prompt + u" </S>").split()]
if (len(xval) <= hp.maxlen):
xlist.append(np.array(xval))
X = np.zeros([len(xlist), hp.maxlen], np.int32)
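                # Zero-pad each encoded sentence up to hp.maxlen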
for i, xi in enumerate(xlist):
                    X[i] = np.lib.pad(xi, [0, hp.maxlen - len(xi)], 'constant', constant_values=(0, 0))
list_of_refs, hypotheses = [], []
for i in range(len(X) // hp.batch_size):
### Get mini-batches
x = X[i*hp.batch_size: (i+1)*hp.batch_size]
prompt = raw_input()
### Autoregressive inference
preds = np.zeros((hp.batch_size, hp.maxlen), np.int32)
for j in range(hp.maxlen):
#print("j: " + str(j))
_preds = sess.run(g.preds, {g.x: x, g.y: preds})
preds[:, j] = _preds[:, j]
#print(pred) # pred should be length 1 each time due to the cycling of the while loop in main
for pred in preds:
got = " ".join(idx2en[idx] for idx in pred).split("</S>")[0].strip()
#return got
print(got)
if __name__ == '__main__':
eval()
|
[
"train.Graph",
"data_load.load_en_vocab",
"numpy.zeros",
"tensorflow.train.Supervisor",
"tensorflow.ConfigProto",
"tensorflow.train.latest_checkpoint",
"numpy.array"
] |
[((456, 480), 'train.Graph', 'Graph', ([], {'is_training': '(False)'}), '(is_training=False)\n', (461, 480), False, 'from train import Graph\n'), ((1395, 1410), 'data_load.load_en_vocab', 'load_en_vocab', ([], {}), '()\n', (1408, 1410), False, 'from data_load import load_test_data, load_de_vocab, load_en_vocab\n'), ((1494, 1515), 'tensorflow.train.Supervisor', 'tf.train.Supervisor', ([], {}), '()\n', (1513, 1515), True, 'import tensorflow as tf\n'), ((1676, 1713), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['hp.logdir'], {}), '(hp.logdir)\n', (1702, 1713), True, 'import tensorflow as tf\n'), ((1555, 1596), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (1569, 1596), True, 'import tensorflow as tf\n'), ((2533, 2579), 'numpy.zeros', 'np.zeros', (['(hp.batch_size, hp.maxlen)', 'np.int32'], {}), '((hp.batch_size, hp.maxlen), np.int32)\n', (2541, 2579), True, 'import numpy as np\n'), ((1973, 1987), 'numpy.array', 'np.array', (['xval'], {}), '(xval)\n', (1981, 1987), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
from __future__ import division
import unittest
import odelab
from odelab.scheme.stochastic import *
from odelab.system import *
from odelab.solver import *
import numpy as np
class Test_OU(unittest.TestCase):
def test_run(self):
sys = OrnsteinUhlenbeck()
scheme = EulerMaruyama()
scheme.h = .01
self.s = SingleStepSolver(scheme, sys)
self.s.initialize(u0=np.array([1.]))
self.s.run(time=1.)
class Test_Differentiator(unittest.TestCase):
t0 = 5e-9
V0 = .01
def test_run(self):
sys = Differentiator(LinBumpSignal(self.V0,self.t0))
## sys.kT = 0. # no noise
scheme = EulerMaruyama()
## scheme.h = 2.5e-11
scheme.h = self.t0
self.s = SingleStepSolver(scheme, sys)
self.s.initialize(u0 = np.array([0,0,0,0,0.]))
self.s.run(time=5*self.t0)
|
[
"numpy.array"
] |
[((397, 412), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (405, 412), True, 'import numpy as np\n'), ((745, 772), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0.0]'], {}), '([0, 0, 0, 0, 0.0])\n', (753, 772), True, 'import numpy as np\n')]
|
import numpy as np
from collections import namedtuple
import skimage.measure
#import matplotlib.pyplot as plt
#import ipdb
# could maybe turn this into a generic mutable namedtuple
class Point2D(object):
__slots__ = "x", "y"
def __init__(self, x, y):
self.x = x
self.y = y
def __iter__(self):
'''iterate over fields tuple/list style'''
for field_name in self.__slots__:
yield getattr(self, field_name)
def __getitem__(self, index):
'''tuple/list style getitem'''
return getattr(self, self.__slots__[index])
# NOTE IterateOverWindows and IterateOverSuperpixels must share the same iter() interface
# TODO create IterateOverOverlappingWindows(IterateOverWindows), which enforces
# pixel_stride <= pixels_per_cell
#
# NOTE if pixel_stride > pixels_per_cell/2, it is possible to leave data unseen on the
#   right/bottom border of an image
#
# this is similar to matlab's im2col
class IterateOverWindows(object):
def __init__(self, pixels_per_cell, pixel_stride=None, image=None,
mode='constant', cval=0,
start_pt=(0, 0), stop_pt=(None, None)):
''' Sliding window iterator.
Parameters
----------
pixels_per_cell : array_like
x,y - let x,y be odd so the window can be easily centered
pixel_stride : array_like, optional
x,y
image : array_like, optional
like numpy.array (ndim == 2 or 3)
mode : str, optional
Points outside the boundaries of the input are filled according to the
given mode. Only ``mode='constant'``, ``mode='discard'`` and
``mode='reflect'`` are currently supported, although others could be
added (e.g., 'nearest' and 'wrap')
cval : float, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
start_pt : array_like, optional
(x,y)
stop_pt : array_like, optional
(x,y)
>>> tot = 0; im = np.arange(100).reshape((10,10))
>>> for i,ret in enumerate(IterateOverWindows((5,5),(2,2),cval=1).iter(im)):
... tot += ret[0].sum()
... #print(i, ':\n', ret[0])
>>> print(tot) # weak test
22647
>>> tot = 0; im = np.arange(81).reshape((9,9)).T
>>> for i,ret in enumerate(IterateOverWindows((5,5),(2,2),mode='reflect').iter(im)):
... tot += ret[0].sum()
... #print(i, ':\n', ret[0])
>>> print(tot) # weak test
25000
'''
assert pixels_per_cell[0] % 2 == 1 and pixels_per_cell[1] % 2 == 1, \
'provide an odd number for pixels_per_cell to easily center the window'
self.pixels_per_cell = tuple(pixels_per_cell)
self.pixel_stride = self.pixels_per_cell if pixel_stride is None else pixel_stride
self.image = image
self.mode = mode
self.cval = cval
self.start_pt = Point2D(*(int(s) for s in start_pt))
self.stop_pt = Point2D(*(stop_pt))
def setImage(self, image):
'''
Parameters
----------
image : array_like
like numpy.array (ndim == 2 or 3)
'''
self.image = image
return self
def shape(self):
if self.image is None: raise TypeError("self.image cannot be of type NoneType")
nrows, ncols = self.image.shape[0:2]
stop_x = ncols if self.stop_pt.x is None else int(self.stop_pt.x)
stop_y = nrows if self.stop_pt.y is None else int(self.stop_pt.y)
roi_height = stop_y-self.start_pt.y
roi_width = stop_x-self.start_pt.x
#print(roi_width, roi_height, self.pixel_stride)
nrows = np.ceil(float(roi_height)/self.pixel_stride[1]).astype(int)
ncols = np.ceil(float(roi_width)/self.pixel_stride[0]).astype(int)
return (nrows, ncols)
def iter(self,image=None):
'''Next window generator
Parameters
----------
image : array_like
like numpy.array (ndim == 2 or 3)
Returns
-------
numpy.array, optional
chip : pixels within the current window. Points outside the
boundaries of the input are filled according to the given mode.
numpy.array
mask : the binary mask of the window within the chip
BoundingBox
bbox : the inclusive extents of the chip (which may exceed the bounds
of the image)
MODIFICATIONS
sgr : turned into a class
sgr : added mode='reflect'
'''
if image is not None: self.image = image
elif self.image is None: raise TypeError("self.image cannot be of type NoneType")
nrows, ncols = self.image.shape[0:2]
# NOTE could iterate over the interior of the image without bounds checking
# for additional speedup
BoundingBox = namedtuple("BoundingBox", "min_x max_x min_y max_y")
pixels_per_half_cell = self.pixels_per_cell[0]//2, self.pixels_per_cell[1]//2
ystrides_per_image, xstrides_per_image = self.shape()
    # iterate over the full grid of window positions, including windows that overlap the image border
    for r in range(ystrides_per_image):
      for c in range(xstrides_per_image):
# chip out pixels in this sliding window
min_x = self.start_pt.x + self.pixel_stride[0]*c - pixels_per_half_cell[0]
max_x = min_x+self.pixels_per_cell[0]
min_y = self.start_pt.y + self.pixel_stride[1]*r - pixels_per_half_cell[1]
max_y = min_y+self.pixels_per_cell[1]
bbox = BoundingBox(min_x,max_x,min_y,max_y)
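        # Clip the window extent to the image bounds before slicing out the chip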
min_x, max_x = max(0, bbox.min_x), min(ncols, bbox.max_x)
min_y, max_y = max(0, bbox.min_y), min(nrows, bbox.max_y)
#print('c=%d'%c, 'r=%d'%r, min_x, max_x, min_y, max_y)
chip = self.image[min_y:max_y, min_x:max_x, ...]
# couch chip in a fixed-size window
        # REVIEW I could refactor handling the border into pad_image(). then mode wouldn't
# be necessary here and I could simply loop over the image.
# RE this is more efficient though
if self.mode == 'constant' or self.mode == 'reflect':
chunk = np.empty(
self.pixels_per_cell + ((self.image.shape[2],) if self.image.ndim == 3 else ()),
dtype=self.image.dtype.type)
chunk[:] = self.cval
mask = np.zeros(self.pixels_per_cell)
min_x = self.start_pt.x + self.pixel_stride[0]*c - pixels_per_half_cell[0]
max_x = min(self.pixels_per_cell[0], ncols - min_x)
min_x = max(0, -min_x)
min_y = self.start_pt.y + self.pixel_stride[1]*r - pixels_per_half_cell[1]
max_y = min(self.pixels_per_cell[1], nrows - min_y)
min_y = max(0, -min_y)
#print('c=%d'%c, 'r=%d'%r, min_x, max_x, min_y, max_y)
#print()
chunk[min_y:max_y, min_x:max_x, ...] = chip
mask[min_y:max_y, min_x:max_x] = 1
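          # For 'reflect', fill the out-of-bounds margins by mirroring pixels from the chip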
if self.mode == 'reflect':
nrows_chunk, ncols_chunk = chunk.shape[0:2]
# NOTE assume the points outside the boundaries of input can be filled from chip.
# this seems harder than it should be...
chunk[:min_y, min_x:max_x, ...] = np.flipud(np.atleast_2d( # top border
chip[:min_y, :, ...]))
chunk[min_y:max_y, :min_x, ...] = np.fliplr(np.atleast_2d( # left border
chip[:, :min_x, ...]))
            # NOTE negative-index trickery (flipping first simplifies indexing)
chunk[max_y:, min_x:max_x, ...] = np.atleast_2d( # bottom border
np.flipud(chip)[:nrows_chunk-max_y, :, ...])
chunk[min_y:max_y, max_x:, ...] = np.atleast_2d( # right border
np.fliplr(chip)[:, :ncols_chunk-max_x, ...])
chunk[:min_y, :min_x, ...] = np.fliplr(np.flipud(np.atleast_2d( # top-left corner
chip[:min_y, :min_x, ...])))
chunk[:min_y, max_x:, ...] = np.flipud(np.atleast_2d( # top-right corner
np.fliplr(chip)[:min_y, :ncols_chunk-max_x, ...]))
chunk[max_y:, max_x:, ...] = np.atleast_2d( # bottom-right corner
np.flipud(np.fliplr(chip))[:nrows_chunk-max_y, :ncols_chunk-max_x, ...])
chunk[max_y:, :min_x, ...] = np.fliplr(np.atleast_2d( # bottom-left corner
np.flipud(chip)[:nrows_chunk-max_y, :min_x, ...]))
elif self.mode == 'discard':
mask = np.ones_like(chip)
chunk = chip
else:
assert False, 'unrecognized mode'
# FIXME should bbox be max-1 like in the superpixel version
yield chunk, mask, bbox
class IterateOverSuperpixels(object):
def __init__(self, segmented, image=None):
self.segmented = segmented
self.image = image
'''
Parameters
----------
segmented : array_like
Superpixel labeled segmentation (like numpy.array)
NOTE regionprops expects labels to be sequential and start
at 1: {1,2,...}. label 0 is treated as unlabeled.
image : array_like, optional
like numpy.array (ndim == 2 or 3)
'''
def setImage(self, image):
'''
Parameters
----------
image : array_like
like numpy.array (ndim == 2 or 3)
'''
self.image = image
return self
def iter(self, image=None):
'''Next superpixel generator
Parameters
----------
image : array_like, optional
like numpy.array (ndim == 2 or 3)
Returns
-------
numpy.array, optional
chip : pixels within the current window. Points outside the
boundaries of the input are filled according to the given mode.
numpy.array
mask : the binary mask of the window within the chip
BoundingBox
bbox : the inclusive extents of the chip (which may exceed the bounds
of the image)
MODIFICATIONS
sgr : optimized
sgr : turned into a class
'''
if image is not None: self.image = image
elif self.image is None: raise TypeError("self.image cannot be of type NoneType")
# regionprops() treats label zero (0) as unlabeled and ignores it
# TODO remove small, unconnected components
properties = skimage.measure.regionprops(self.segmented)
BoundingBox = namedtuple("BoundingBox", "min_x max_x min_y max_y")
for rp in properties:
if rp._slice is None: continue
(min_y,min_x,max_y,max_x) = rp.bbox
      chip = self.image[min_y:max_y, min_x:max_x,...]
mask = rp.filled_image
bbox = BoundingBox(min_x,max_x-1,min_y,max_y-1)
yield (chip, mask, bbox)
|
[
"numpy.ones_like",
"numpy.empty",
"numpy.zeros",
"numpy.flipud",
"numpy.fliplr",
"collections.namedtuple",
"numpy.atleast_2d"
] |
[((4577, 4629), 'collections.namedtuple', 'namedtuple', (['"""BoundingBox"""', '"""min_x max_x min_y max_y"""'], {}), "('BoundingBox', 'min_x max_x min_y max_y')\n", (4587, 4629), False, 'from collections import namedtuple\n'), ((9981, 10033), 'collections.namedtuple', 'namedtuple', (['"""BoundingBox"""', '"""min_x max_x min_y max_y"""'], {}), "('BoundingBox', 'min_x max_x min_y max_y')\n", (9991, 10033), False, 'from collections import namedtuple\n'), ((5839, 5961), 'numpy.empty', 'np.empty', (['(self.pixels_per_cell + ((self.image.shape[2],) if self.image.ndim == 3 else\n ()))'], {'dtype': 'self.image.dtype.type'}), '(self.pixels_per_cell + ((self.image.shape[2],) if self.image.ndim ==\n 3 else ()), dtype=self.image.dtype.type)\n', (5847, 5961), True, 'import numpy as np\n'), ((6035, 6065), 'numpy.zeros', 'np.zeros', (['self.pixels_per_cell'], {}), '(self.pixels_per_cell)\n', (6043, 6065), True, 'import numpy as np\n'), ((8168, 8186), 'numpy.ones_like', 'np.ones_like', (['chip'], {}), '(chip)\n', (8180, 8186), True, 'import numpy as np\n'), ((6908, 6943), 'numpy.atleast_2d', 'np.atleast_2d', (['chip[:min_y, :, ...]'], {}), '(chip[:min_y, :, ...])\n', (6921, 6943), True, 'import numpy as np\n'), ((7032, 7067), 'numpy.atleast_2d', 'np.atleast_2d', (['chip[:, :min_x, ...]'], {}), '(chip[:, :min_x, ...])\n', (7045, 7067), True, 'import numpy as np\n'), ((7280, 7295), 'numpy.flipud', 'np.flipud', (['chip'], {}), '(chip)\n', (7289, 7295), True, 'import numpy as np\n'), ((7428, 7443), 'numpy.fliplr', 'np.fliplr', (['chip'], {}), '(chip)\n', (7437, 7443), True, 'import numpy as np\n'), ((7534, 7574), 'numpy.atleast_2d', 'np.atleast_2d', (['chip[:min_y, :min_x, ...]'], {}), '(chip[:min_y, :min_x, ...])\n', (7547, 7574), True, 'import numpy as np\n'), ((7719, 7734), 'numpy.fliplr', 'np.fliplr', (['chip'], {}), '(chip)\n', (7728, 7734), True, 'import numpy as np\n'), ((7890, 7905), 'numpy.fliplr', 'np.fliplr', (['chip'], {}), '(chip)\n', (7899, 7905), True, 'import numpy as np\n'), ((8062, 8077), 'numpy.flipud', 'np.flipud', (['chip'], {}), '(chip)\n', (8071, 8077), True, 'import numpy as np\n')]
|
import mxnet as mx
from mxnet import nd, autograd
import numpy as np
##################################3
# X, y - training data
# n - number of data points in dataset
# Py - desired label distribution
###################################
def tweak_dist(X, y, num_labels, n, Py):
shape = (n, *X.shape[1:])
Xshift = np.zeros(shape)
yshift = np.zeros(n, dtype=np.int8)
# get indices for each label
    indices_by_label = [(y==k).nonzero()[0] for k in range(num_labels)]
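    # Draw n labels from the target distribution Py, then sample an example of each drawn label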
labels = np.argmax(
np.random.multinomial(1, Py, n), axis=1)
for i in range(n):
# sample an example from X with replacement
idx = np.random.choice(indices_by_label[labels[i]])
Xshift[i] = X[idx]
yshift[i] = y[idx]
return Xshift, yshift
def tweak_one(X, y, num_labels, n, knockout_label, p):
# create Py
# call down to tweak_dist
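    # Py: probability p on the knockout label, (1-p)/(num_labels-1) on each remaining label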
Py = np.full(num_labels, (1.-p)/(num_labels-1))
Py[knockout_label] = p
print(Py)
return tweak_dist(X, y, num_labels, n, Py)
|
[
"numpy.full",
"numpy.random.multinomial",
"numpy.zeros",
"numpy.random.choice"
] |
[((326, 341), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (334, 341), True, 'import numpy as np\n'), ((355, 381), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'np.int8'}), '(n, dtype=np.int8)\n', (363, 381), True, 'import numpy as np\n'), ((899, 948), 'numpy.full', 'np.full', (['num_labels', '((1.0 - p) / (num_labels - 1))'], {}), '(num_labels, (1.0 - p) / (num_labels - 1))\n', (906, 948), True, 'import numpy as np\n'), ((517, 548), 'numpy.random.multinomial', 'np.random.multinomial', (['(1)', 'Py', 'n'], {}), '(1, Py, n)\n', (538, 548), True, 'import numpy as np\n'), ((656, 701), 'numpy.random.choice', 'np.random.choice', (['indices_by_label[labels[i]]'], {}), '(indices_by_label[labels[i]])\n', (672, 701), True, 'import numpy as np\n')]
|
"""
Test functions for GEE
External comparisons are to R. The statsmodels GEE implementation
should generally agree with the R GEE implementation for the
independence and exchangeable correlation structures. For other
correlation structures, the details of the correlation estimation
differ among implementations and the results will not agree exactly.
"""
from __future__ import print_function
from statsmodels.compat import lrange
import numpy as np
import os
from numpy.testing import assert_almost_equal
from statsmodels.genmod.generalized_estimating_equations import (GEE,
GEEMargins, Multinomial)
from statsmodels.genmod.families import Gaussian, Binomial, Poisson
from statsmodels.genmod.dependence_structures import (Exchangeable,
Independence, GlobalOddsRatio, Autoregressive, Nested)
import pandas as pd
import statsmodels.formula.api as sm
def load_data(fname, icept=True):
"""
Load a data set from the results directory. The data set should
be a CSV file with the following format:
Column 0: Group indicator
Column 1: endog variable
Columns 2-end: exog variables
If `icept` is True, an intercept is prepended to the exog
variables.
"""
cur_dir = os.path.dirname(os.path.abspath(__file__))
Z = np.genfromtxt(os.path.join(cur_dir, 'results', fname),
delimiter=",")
group = Z[:,0]
endog = Z[:,1]
exog = Z[:,2:]
if icept:
exog = np.concatenate((np.ones((exog.shape[0],1)), exog),
axis=1)
return endog,exog,group
class TestGEE(object):
def test_margins(self):
n = 300
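        # Simulate clustered binary responses with a logistic mean structure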
exog = np.random.normal(size=(n, 4))
exog[:,0] = 1
exog[:,1] = 1*(exog[:,2] < 0)
group = np.kron(np.arange(n/4), np.ones(4))
time = np.zeros((n, 1))
beta = np.r_[0, 1, -1, 0.5]
lpr = np.dot(exog, beta)
prob = 1 / (1 + np.exp(-lpr))
endog = 1*(np.random.uniform(size=n) < prob)
fa = Binomial()
ex = Exchangeable()
md = GEE(endog, exog, group, time, fa, ex)
mdf = md.fit()
marg = GEEMargins(mdf, ())
marg.summary()
# This is in the release announcement for version 0.6.
def test_poisson_epil(self):
cur_dir = os.path.dirname(os.path.abspath(__file__))
fname = os.path.join(cur_dir, "results", "epil.csv")
data = pd.read_csv(fname)
fam = Poisson()
ind = Independence()
md1 = GEE.from_formula("y ~ age + trt + base", data,
groups=data["subject"], cov_struct=ind,
family=fam)
mdf1 = md1.fit()
# Coefficients should agree with GLM
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod import families
md2 = GLM.from_formula("y ~ age + trt + base", data,
family=families.Poisson())
mdf2 = md2.fit(scale="X2")
assert_almost_equal(mdf1.params, mdf2.params, decimal=6)
assert_almost_equal(mdf1.scale, mdf2.scale, decimal=6)
# TODO: why does this test fail?
def t_est_missing(self):
Y = np.random.normal(size=100)
X1 = np.random.normal(size=100)
X2 = np.random.normal(size=100)
X3 = np.random.normal(size=100)
groups = np.kron(lrange(20), np.ones(5))
Y[0] = np.nan
Y[5:7] = np.nan
X2[10:12] = np.nan
D = pd.DataFrame({"Y": Y, "X1": X1, "X2": X2, "X3": X3,
"groups": groups})
md = GEE.from_formula("Y ~ X1 + X2 + X3", D, None,
groups=D["groups"], missing='drop')
mdf = md.fit()
        assert len(md.endog) == 95
        assert md.exog.shape == (95, 4)
def test_default_time(self):
"""
Check that the time defaults work correctly.
"""
endog,exog,group = load_data("gee_logistic_1.csv")
# Time values for the autoregressive model
T = np.zeros(len(endog))
idx = set(group)
for ii in idx:
jj = np.flatnonzero(group == ii)
T[jj] = lrange(len(jj))
family = Binomial()
va = Autoregressive()
md1 = GEE(endog, exog, group, family=family, cov_struct=va)
mdf1 = md1.fit()
md2 = GEE(endog, exog, group, time=T, family=family,
cov_struct=va)
mdf2 = md2.fit()
assert_almost_equal(mdf1.params, mdf2.params, decimal=6)
assert_almost_equal(mdf1.standard_errors(),
mdf2.standard_errors(), decimal=6)
def test_logistic(self):
"""
R code for comparing results:
library(gee)
Z = read.csv("results/gee_logistic_1.csv", header=FALSE)
Y = Z[,2]
Id = Z[,1]
X1 = Z[,3]
X2 = Z[,4]
X3 = Z[,5]
mi = gee(Y ~ X1 + X2 + X3, id=Id, family=binomial,
corstr="independence")
smi = summary(mi)
u = coefficients(smi)
cfi = paste(u[,1], collapse=",")
sei = paste(u[,4], collapse=",")
me = gee(Y ~ X1 + X2 + X3, id=Id, family=binomial,
corstr="exchangeable")
sme = summary(me)
u = coefficients(sme)
cfe = paste(u[,1], collapse=",")
see = paste(u[,4], collapse=",")
ma = gee(Y ~ X1 + X2 + X3, id=Id, family=binomial,
corstr="AR-M")
sma = summary(ma)
u = coefficients(sma)
cfa = paste(u[,1], collapse=",")
sea = paste(u[,4], collapse=",")
sprintf("cf = [[%s],[%s],[%s]]", cfi, cfe, cfa)
sprintf("se = [[%s],[%s],[%s]]", sei, see, sea)
"""
endog,exog,group = load_data("gee_logistic_1.csv")
# Time values for the autoregressive model
T = np.zeros(len(endog))
idx = set(group)
for ii in idx:
jj = np.flatnonzero(group == ii)
T[jj] = lrange(len(jj))
family = Binomial()
ve = Exchangeable()
vi = Independence()
va = Autoregressive()
# From R gee
cf = [[0.0167272965285882,1.13038654425893,
-1.86896345082962,1.09397608331333],
[0.0178982283915449,1.13118798191788,
-1.86133518416017,1.08944256230299],
[0.0109621937947958,1.13226505028438,
-1.88278757333046,1.09954623769449]]
se = [[0.127291720283049,0.166725808326067,
0.192430061340865,0.173141068839597],
[0.127045031730155,0.165470678232842,
0.192052750030501,0.173174779369249],
[0.127240302296444,0.170554083928117,
0.191045527104503,0.169776150974586]]
for j,v in enumerate((vi,ve,va)):
md = GEE(endog, exog, group, T, family, v)
mdf = md.fit()
if id(v) != id(va):
assert_almost_equal(mdf.params, cf[j], decimal=6)
assert_almost_equal(mdf.standard_errors(), se[j],
decimal=6)
# Test with formulas
D = np.concatenate((endog[:,None], group[:,None], exog[:,1:]),
axis=1)
D = pd.DataFrame(D)
D.columns = ["Y","Id",] + ["X%d" % (k+1)
for k in range(exog.shape[1]-1)]
for j,v in enumerate((vi,ve)):
md = GEE.from_formula("Y ~ X1 + X2 + X3", D, None,
groups=D.loc[:,"Id"],
family=family, cov_struct=v)
mdf = md.fit()
assert_almost_equal(mdf.params, cf[j], decimal=6)
assert_almost_equal(mdf.standard_errors(), se[j],
decimal=6)
# Check for run-time exceptions in summary
# print(mdf.summary())
def test_autoregressive(self):
dep_params_true = [0, 0.589208623896, 0.559823804948]
params_true = [[1.08043787, 1.12709319, 0.90133927],
[0.9613677, 1.05826987, 0.90832055],
[1.05370439, 0.96084864, 0.93923374]]
np.random.seed(342837482)
num_group = 100
ar_param = 0.5
k = 3
ga = Gaussian()
for gsize in 1,2,3:
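            # AR(1) correlation: corr(e_i, e_j) = ar_param**|i-j|; its Cholesky factor is used to generate correlated errors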
ix = np.arange(gsize)[:,None] - np.arange(gsize)[None,:]
ix = np.abs(ix)
cmat = ar_param ** ix
cmat_r = np.linalg.cholesky(cmat)
endog = []
exog = []
groups = []
for i in range(num_group):
x = np.random.normal(size=(gsize,k))
exog.append(x)
expval = x.sum(1)
errors = np.dot(cmat_r, np.random.normal(size=gsize))
endog.append(expval + errors)
groups.append(i*np.ones(gsize))
endog = np.concatenate(endog)
groups = np.concatenate(groups)
exog = np.concatenate(exog, axis=0)
ar = Autoregressive()
md = GEE(endog, exog, groups, family=ga, cov_struct = ar)
mdf = md.fit()
assert_almost_equal(ar.dep_params, dep_params_true[gsize-1])
assert_almost_equal(mdf.params, params_true[gsize-1])
def test_post_estimation(self):
family = Gaussian()
endog,exog,group = load_data("gee_linear_1.csv")
ve = Exchangeable()
md = GEE(endog, exog, group, None, family, ve)
mdf = md.fit()
assert_almost_equal(np.dot(exog, mdf.params),
mdf.fittedvalues)
assert_almost_equal(endog - np.dot(exog, mdf.params),
mdf.resid)
def test_linear(self):
"""
library(gee)
Z = read.csv("results/gee_linear_1.csv", header=FALSE)
Y = Z[,2]
Id = Z[,1]
X1 = Z[,3]
X2 = Z[,4]
X3 = Z[,5]
mi = gee(Y ~ X1 + X2 + X3, id=Id, family=gaussian,
corstr="independence", tol=1e-8, maxit=100)
smi = summary(mi)
u = coefficients(smi)
cfi = paste(u[,1], collapse=",")
sei = paste(u[,4], collapse=",")
me = gee(Y ~ X1 + X2 + X3, id=Id, family=gaussian,
corstr="exchangeable", tol=1e-8, maxit=100)
sme = summary(me)
u = coefficients(sme)
cfe = paste(u[,1], collapse=",")
see = paste(u[,4], collapse=",")
sprintf("cf = [[%s],[%s]]", cfi, cfe)
sprintf("se = [[%s],[%s]]", sei, see)
"""
family = Gaussian()
endog,exog,group = load_data("gee_linear_1.csv")
vi = Independence()
ve = Exchangeable()
# From R gee
cf = [[-0.01850226507491,0.81436304278962,
-1.56167635393184,0.794239361055003],
[-0.0182920577154767,0.814898414022467,
-1.56194040106201,0.793499517527478]]
se = [[0.0440733554189401,0.0479993639119261,
0.0496045952071308,0.0479467597161284],
[0.0440369906460754,0.0480069787567662,
0.049519758758187,0.0479760443027526]]
for j,v in enumerate((vi, ve)):
md = GEE(endog, exog, group, None, family, v)
mdf = md.fit()
assert_almost_equal(mdf.params, cf[j], decimal=10)
assert_almost_equal(mdf.standard_errors(), se[j],
decimal=10)
# Test with formulas
D = np.concatenate((endog[:,None], group[:,None], exog[:,1:]),
axis=1)
D = pd.DataFrame(D)
D.columns = ["Y","Id",] + ["X%d" % (k+1)
for k in range(exog.shape[1]-1)]
for j,v in enumerate((vi,ve)):
md = GEE.from_formula("Y ~ X1 + X2 + X3", D, None,
groups=D.loc[:,"Id"],
family=family, cov_struct=v)
mdf = md.fit()
assert_almost_equal(mdf.params, cf[j], decimal=10)
assert_almost_equal(mdf.standard_errors(), se[j],
decimal=10)
def test_linear_constrained(self):
family = Gaussian()
exog = np.random.normal(size=(300,4))
exog[:,0] = 1
endog = np.dot(exog, np.r_[1, 1, 0, 0.2]) +\
np.random.normal(size=300)
group = np.kron(np.arange(100), np.r_[1,1,1])
vi = Independence()
ve = Exchangeable()
L = np.r_[[[0, 0, 0, 1]]]
R = np.r_[0,]
for j,v in enumerate((vi,ve)):
md = GEE(endog, exog, group, None, family, v,
constraint=(L,R))
mdf = md.fit()
assert_almost_equal(mdf.params[3], 0, decimal=10)
def test_nested_linear(self):
family = Gaussian()
endog,exog,group = load_data("gee_nested_linear_1.csv")
group_n = []
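        # Each top-level group of ten observations is split into two nested subgroups of five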
for i in range(endog.shape[0]//10):
group_n.extend([0,]*5)
group_n.extend([1,]*5)
group_n = np.array(group_n)[:,None]
dp = Independence()
md = GEE(endog, exog, group, None, family, dp)
mdf1 = md.fit()
# From statsmodels.GEE (not an independent test)
cf = np.r_[-0.1671073 , 1.00467426, -2.01723004, 0.97297106]
se = np.r_[0.08629606, 0.04058653, 0.04067038, 0.03777989]
assert_almost_equal(mdf1.params, cf, decimal=6)
assert_almost_equal(mdf1.standard_errors(), se,
decimal=6)
ne = Nested()
md = GEE(endog, exog, group, None, family, ne,
dep_data=group_n)
mdf2 = md.fit(start_params=mdf1.params)
# From statsmodels.GEE (not an independent test)
cf = np.r_[-0.16655319, 1.02183688, -2.00858719, 1.00101969]
se = np.r_[0.08632616, 0.02913582, 0.03114428, 0.02893991]
assert_almost_equal(mdf2.params, cf, decimal=6)
assert_almost_equal(mdf2.standard_errors(), se,
decimal=6)
def test_ordinal(self):
family = Binomial()
endog, exog, groups = load_data("gee_ordinal_1.csv",
icept=False)
v = GlobalOddsRatio("ordinal")
md = GEE(endog, exog, groups, None, family, v)
md.setup_ordinal()
mdf = md.fit()
cf = np.r_[1.09238131, 0.02148193, -0.39879146, -0.01855666,
0.02983409, 1.18123172, 0.01845318, -1.10233886]
se = np.r_[0.10878752, 0.10326078, 0.11171241, 0.05488705,
0.05995019, 0.0916574, 0.05951445, 0.08539281]
assert_almost_equal(mdf.params, cf, decimal=5)
assert_almost_equal(mdf.bse, se, decimal=5)
def test_nominal(self):
family = Multinomial(3)
endog, exog, groups = load_data("gee_nominal_1.csv",
icept=False)
# Test with independence correlation
v = Independence()
md = GEE(endog, exog, groups, None, family, v)
md.setup_nominal()
mdf1 = md.fit()
# From statsmodels.GEE (not an independent test)
cf1 = np.r_[0.44944752, 0.45569985, -0.92007064, -0.46766728]
se1 = np.r_[0.09801821, 0.07718842, 0.13229421, 0.08544553]
assert_almost_equal(mdf1.params, cf1, decimal=5)
assert_almost_equal(mdf1.standard_errors(), se1, decimal=5)
# Test with global odds ratio dependence
v = GlobalOddsRatio("nominal")
md = GEE(endog, exog, groups, None, family, v)
md.setup_nominal()
mdf2 = md.fit(start_params=mdf1.params)
# From statsmodels.GEE (not an independent test)
cf2 = np.r_[0.45397549, 0.42278345, -0.91997131, -0.50115943]
se2 = np.r_[0.09646057, 0.07405713, 0.1324629 , 0.09025019]
assert_almost_equal(mdf2.params, cf2, decimal=5)
assert_almost_equal(mdf2.standard_errors(), se2, decimal=5)
def test_poisson(self):
"""
library(gee)
Z = read.csv("results/gee_poisson_1.csv", header=FALSE)
Y = Z[,2]
Id = Z[,1]
X1 = Z[,3]
X2 = Z[,4]
X3 = Z[,5]
X4 = Z[,6]
X5 = Z[,7]
mi = gee(Y ~ X1 + X2 + X3 + X4 + X5, id=Id, family=poisson,
corstr="independence", scale.fix=TRUE)
smi = summary(mi)
u = coefficients(smi)
cfi = paste(u[,1], collapse=",")
sei = paste(u[,4], collapse=",")
me = gee(Y ~ X1 + X2 + X3 + X4 + X5, id=Id, family=poisson,
corstr="exchangeable", scale.fix=TRUE)
sme = summary(me)
u = coefficients(sme)
cfe = paste(u[,1], collapse=",")
see = paste(u[,4], collapse=",")
sprintf("cf = [[%s],[%s]]", cfi, cfe)
sprintf("se = [[%s],[%s]]", sei, see)
"""
family = Poisson()
endog,exog,group_n = load_data("gee_poisson_1.csv")
vi = Independence()
ve = Exchangeable()
# From R gee
cf = [[-0.0364450410793481,-0.0543209391301178,
0.0156642711741052,0.57628591338724,
-0.00465659951186211,-0.477093153099256],
[-0.0315615554826533,-0.0562589480840004,
0.0178419412298561,0.571512795340481,
-0.00363255566297332,-0.475971696727736]]
se = [[0.0611309237214186,0.0390680524493108,
0.0334234174505518,0.0366860768962715,
0.0304758505008105,0.0316348058881079],
[0.0610840153582275,0.0376887268649102,
0.0325168379415177,0.0369786751362213,
0.0296141014225009,0.0306115470200955]]
for j,v in enumerate((vi,ve)):
md = GEE(endog, exog, group_n, None, family, v)
mdf = md.fit()
assert_almost_equal(mdf.params, cf[j], decimal=5)
assert_almost_equal(mdf.standard_errors(), se[j],
decimal=6)
# Test with formulas
D = np.concatenate((endog[:,None], group_n[:,None],
exog[:,1:]), axis=1)
D = pd.DataFrame(D)
D.columns = ["Y","Id",] + ["X%d" % (k+1)
for k in range(exog.shape[1]-1)]
for j,v in enumerate((vi,ve)):
md = GEE.from_formula("Y ~ X1 + X2 + X3 + X4 + X5", D,
None, groups=D.loc[:,"Id"],
family=family, cov_struct=v)
mdf = md.fit()
assert_almost_equal(mdf.params, cf[j], decimal=5)
assert_almost_equal(mdf.standard_errors(), se[j],
decimal=6)
# print(mdf.params)
def test_compare_OLS(self):
"""
Gaussian GEE with independence correlation should agree
exactly with OLS for parameter estimates and standard errors
derived from the naive covariance estimate.
"""
vs = Independence()
family = Gaussian()
Y = np.random.normal(size=100)
X1 = np.random.normal(size=100)
X2 = np.random.normal(size=100)
X3 = np.random.normal(size=100)
groups = np.kron(lrange(20), np.ones(5))
D = pd.DataFrame({"Y": Y, "X1": X1, "X2": X2, "X3": X3})
md = GEE.from_formula("Y ~ X1 + X2 + X3", D, None,
groups=groups, family=family,
cov_struct=vs)
mdf = md.fit()
ols = sm.ols("Y ~ X1 + X2 + X3", data=D).fit()
assert_almost_equal(ols.params.values, mdf.params, decimal=10)
se = mdf.standard_errors(covariance_type="naive")
assert_almost_equal(ols.bse, se, decimal=10)
naive_tvalues = mdf.params / \
np.sqrt(np.diag(mdf.naive_covariance))
assert_almost_equal(naive_tvalues, ols.tvalues, decimal=10)
def test_compare_logit(self):
vs = Independence()
family = Binomial()
Y = 1*(np.random.normal(size=100) < 0)
X1 = np.random.normal(size=100)
X2 = np.random.normal(size=100)
X3 = np.random.normal(size=100)
groups = np.random.randint(0, 4, size=100)
D = pd.DataFrame({"Y": Y, "X1": X1, "X2": X2, "X3": X3})
md = GEE.from_formula("Y ~ X1 + X2 + X3", D, None, groups=groups,
family=family, cov_struct=vs).fit()
sml = sm.logit("Y ~ X1 + X2 + X3", data=D).fit(disp=False)
assert_almost_equal(sml.params.values, md.params, decimal=10)
def test_compare_poisson(self):
vs = Independence()
family = Poisson()
Y = np.ceil(-np.log(np.random.uniform(size=100)))
X1 = np.random.normal(size=100)
X2 = np.random.normal(size=100)
X3 = np.random.normal(size=100)
groups = np.random.randint(0, 4, size=100)
D = pd.DataFrame({"Y": Y, "X1": X1, "X2": X2, "X3": X3})
md = GEE.from_formula("Y ~ X1 + X2 + X3", D, None, groups=groups,
family=family, cov_struct=vs).fit()
sml = sm.poisson("Y ~ X1 + X2 + X3", data=D).fit(disp=False)
assert_almost_equal(sml.params.values, md.params, decimal=10)
if __name__=="__main__":
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
exit=False)
|
[
"numpy.random.seed",
"numpy.abs",
"pandas.read_csv",
"numpy.ones",
"statsmodels.genmod.generalized_estimating_equations.Multinomial",
"numpy.random.randint",
"numpy.arange",
"numpy.exp",
"numpy.random.normal",
"statsmodels.genmod.families.Gaussian",
"numpy.diag",
"os.path.join",
"pandas.DataFrame",
"os.path.abspath",
"numpy.testing.assert_almost_equal",
"statsmodels.compat.lrange",
"statsmodels.genmod.dependence_structures.Autoregressive",
"statsmodels.genmod.dependence_structures.GlobalOddsRatio",
"statsmodels.formula.api.ols",
"statsmodels.genmod.generalized_estimating_equations.GEEMargins",
"numpy.linalg.cholesky",
"statsmodels.genmod.dependence_structures.Independence",
"nose.runmodule",
"statsmodels.genmod.generalized_estimating_equations.GEE.from_formula",
"numpy.dot",
"statsmodels.genmod.generalized_estimating_equations.GEE",
"numpy.concatenate",
"numpy.random.uniform",
"statsmodels.formula.api.poisson",
"statsmodels.genmod.dependence_structures.Exchangeable",
"numpy.flatnonzero",
"numpy.zeros",
"statsmodels.genmod.families.Poisson",
"numpy.array",
"statsmodels.formula.api.logit",
"statsmodels.genmod.families.Binomial",
"statsmodels.genmod.dependence_structures.Nested"
] |
[((21322, 21409), 'nose.runmodule', 'nose.runmodule', ([], {'argv': "[__file__, '-vvs', '-x', '--pdb', '--pdb-failure']", 'exit': '(False)'}), "(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n", (21336, 21409), False, 'import nose\n'), ((1230, 1255), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1245, 1255), False, 'import os\n'), ((1279, 1318), 'os.path.join', 'os.path.join', (['cur_dir', '"""results"""', 'fname'], {}), "(cur_dir, 'results', fname)\n", (1291, 1318), False, 'import os\n'), ((1650, 1679), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(n, 4)'}), '(size=(n, 4))\n', (1666, 1679), True, 'import numpy as np\n'), ((1808, 1824), 'numpy.zeros', 'np.zeros', (['(n, 1)'], {}), '((n, 1))\n', (1816, 1824), True, 'import numpy as np\n'), ((1876, 1894), 'numpy.dot', 'np.dot', (['exog', 'beta'], {}), '(exog, beta)\n', (1882, 1894), True, 'import numpy as np\n'), ((2001, 2011), 'statsmodels.genmod.families.Binomial', 'Binomial', ([], {}), '()\n', (2009, 2011), False, 'from statsmodels.genmod.families import Gaussian, Binomial, Poisson\n'), ((2025, 2039), 'statsmodels.genmod.dependence_structures.Exchangeable', 'Exchangeable', ([], {}), '()\n', (2037, 2039), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((2054, 2091), 'statsmodels.genmod.generalized_estimating_equations.GEE', 'GEE', (['endog', 'exog', 'group', 'time', 'fa', 'ex'], {}), '(endog, exog, group, time, fa, ex)\n', (2057, 2091), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((2131, 2150), 'statsmodels.genmod.generalized_estimating_equations.GEEMargins', 'GEEMargins', (['mdf', '()'], {}), '(mdf, ())\n', (2141, 2150), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((2346, 2390), 'os.path.join', 'os.path.join', (['cur_dir', '"""results"""', '"""epil.csv"""'], {}), "(cur_dir, 'results', 'epil.csv')\n", (2358, 2390), False, 'import os\n'), ((2406, 2424), 'pandas.read_csv', 'pd.read_csv', (['fname'], {}), '(fname)\n', (2417, 2424), True, 'import pandas as pd\n'), ((2440, 2449), 'statsmodels.genmod.families.Poisson', 'Poisson', ([], {}), '()\n', (2447, 2449), False, 'from statsmodels.genmod.families import Gaussian, Binomial, Poisson\n'), ((2464, 2478), 'statsmodels.genmod.dependence_structures.Independence', 'Independence', ([], {}), '()\n', (2476, 2478), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((2493, 2595), 'statsmodels.genmod.generalized_estimating_equations.GEE.from_formula', 'GEE.from_formula', (['"""y ~ age + trt + base"""', 'data'], {'groups': "data['subject']", 'cov_struct': 'ind', 'family': 'fam'}), "('y ~ age + trt + base', data, groups=data['subject'],\n cov_struct=ind, family=fam)\n", (2509, 2595), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((3005, 3061), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf1.params', 'mdf2.params'], {'decimal': '(6)'}), '(mdf1.params, mdf2.params, decimal=6)\n', (3024, 3061), False, 'from numpy.testing import assert_almost_equal\n'), ((3070, 3124), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf1.scale', 'mdf2.scale'], {'decimal': '(6)'}), '(mdf1.scale, mdf2.scale, decimal=6)\n', (3089, 3124), False, 'from numpy.testing import 
assert_almost_equal\n'), ((3207, 3233), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (3223, 3233), True, 'import numpy as np\n'), ((3247, 3273), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (3263, 3273), True, 'import numpy as np\n'), ((3287, 3313), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (3303, 3313), True, 'import numpy as np\n'), ((3327, 3353), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (3343, 3353), True, 'import numpy as np\n'), ((3490, 3560), 'pandas.DataFrame', 'pd.DataFrame', (["{'Y': Y, 'X1': X1, 'X2': X2, 'X3': X3, 'groups': groups}"], {}), "({'Y': Y, 'X1': X1, 'X2': X2, 'X3': X3, 'groups': groups})\n", (3502, 3560), True, 'import pandas as pd\n'), ((3601, 3687), 'statsmodels.genmod.generalized_estimating_equations.GEE.from_formula', 'GEE.from_formula', (['"""Y ~ X1 + X2 + X3"""', 'D', 'None'], {'groups': "D['groups']", 'missing': '"""drop"""'}), "('Y ~ X1 + X2 + X3', D, None, groups=D['groups'], missing=\n 'drop')\n", (3617, 3687), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((4216, 4226), 'statsmodels.genmod.families.Binomial', 'Binomial', ([], {}), '()\n', (4224, 4226), False, 'from statsmodels.genmod.families import Gaussian, Binomial, Poisson\n'), ((4240, 4256), 'statsmodels.genmod.dependence_structures.Autoregressive', 'Autoregressive', ([], {}), '()\n', (4254, 4256), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((4273, 4326), 'statsmodels.genmod.generalized_estimating_equations.GEE', 'GEE', (['endog', 'exog', 'group'], {'family': 'family', 'cov_struct': 'va'}), '(endog, exog, group, family=family, cov_struct=va)\n', (4276, 4326), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((4367, 4428), 'statsmodels.genmod.generalized_estimating_equations.GEE', 'GEE', (['endog', 'exog', 'group'], {'time': 'T', 'family': 'family', 'cov_struct': 'va'}), '(endog, exog, group, time=T, family=family, cov_struct=va)\n', (4370, 4428), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((4481, 4537), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf1.params', 'mdf2.params'], {'decimal': '(6)'}), '(mdf1.params, mdf2.params, decimal=6)\n', (4500, 4537), False, 'from numpy.testing import assert_almost_equal\n'), ((6039, 6049), 'statsmodels.genmod.families.Binomial', 'Binomial', ([], {}), '()\n', (6047, 6049), False, 'from statsmodels.genmod.families import Gaussian, Binomial, Poisson\n'), ((6063, 6077), 'statsmodels.genmod.dependence_structures.Exchangeable', 'Exchangeable', ([], {}), '()\n', (6075, 6077), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((6091, 6105), 'statsmodels.genmod.dependence_structures.Independence', 'Independence', ([], {}), '()\n', (6103, 6105), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((6119, 6135), 'statsmodels.genmod.dependence_structures.Autoregressive', 'Autoregressive', ([], {}), '()\n', (6133, 6135), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((7163, 7232), 
'numpy.concatenate', 'np.concatenate', (['(endog[:, None], group[:, None], exog[:, 1:])'], {'axis': '(1)'}), '((endog[:, None], group[:, None], exog[:, 1:]), axis=1)\n', (7177, 7232), True, 'import numpy as np\n'), ((7269, 7284), 'pandas.DataFrame', 'pd.DataFrame', (['D'], {}), '(D)\n', (7281, 7284), True, 'import pandas as pd\n'), ((8198, 8223), 'numpy.random.seed', 'np.random.seed', (['(342837482)'], {}), '(342837482)\n', (8212, 8223), True, 'import numpy as np\n'), ((8300, 8310), 'statsmodels.genmod.families.Gaussian', 'Gaussian', ([], {}), '()\n', (8308, 8310), False, 'from statsmodels.genmod.families import Gaussian, Binomial, Poisson\n'), ((9371, 9381), 'statsmodels.genmod.families.Gaussian', 'Gaussian', ([], {}), '()\n', (9379, 9381), False, 'from statsmodels.genmod.families import Gaussian, Binomial, Poisson\n'), ((9453, 9467), 'statsmodels.genmod.dependence_structures.Exchangeable', 'Exchangeable', ([], {}), '()\n', (9465, 9467), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((9482, 9523), 'statsmodels.genmod.generalized_estimating_equations.GEE', 'GEE', (['endog', 'exog', 'group', 'None', 'family', 've'], {}), '(endog, exog, group, None, family, ve)\n', (9485, 9523), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((10612, 10622), 'statsmodels.genmod.families.Gaussian', 'Gaussian', ([], {}), '()\n', (10620, 10622), False, 'from statsmodels.genmod.families import Gaussian, Binomial, Poisson\n'), ((10695, 10709), 'statsmodels.genmod.dependence_structures.Independence', 'Independence', ([], {}), '()\n', (10707, 10709), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((10723, 10737), 'statsmodels.genmod.dependence_structures.Exchangeable', 'Exchangeable', ([], {}), '()\n', (10735, 10737), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((11527, 11596), 'numpy.concatenate', 'np.concatenate', (['(endog[:, None], group[:, None], exog[:, 1:])'], {'axis': '(1)'}), '((endog[:, None], group[:, None], exog[:, 1:]), axis=1)\n', (11541, 11596), True, 'import numpy as np\n'), ((11633, 11648), 'pandas.DataFrame', 'pd.DataFrame', (['D'], {}), '(D)\n', (11645, 11648), True, 'import pandas as pd\n'), ((12241, 12251), 'statsmodels.genmod.families.Gaussian', 'Gaussian', ([], {}), '()\n', (12249, 12251), False, 'from statsmodels.genmod.families import Gaussian, Binomial, Poisson\n'), ((12268, 12299), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(300, 4)'}), '(size=(300, 4))\n', (12284, 12299), True, 'import numpy as np\n'), ((12481, 12495), 'statsmodels.genmod.dependence_structures.Independence', 'Independence', ([], {}), '()\n', (12493, 12495), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((12509, 12523), 'statsmodels.genmod.dependence_structures.Exchangeable', 'Exchangeable', ([], {}), '()\n', (12521, 12523), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((12861, 12871), 'statsmodels.genmod.families.Gaussian', 'Gaussian', ([], {}), '()\n', (12869, 12871), False, 'from statsmodels.genmod.families import Gaussian, Binomial, Poisson\n'), ((13131, 13145), 
'statsmodels.genmod.dependence_structures.Independence', 'Independence', ([], {}), '()\n', (13143, 13145), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((13159, 13200), 'statsmodels.genmod.generalized_estimating_equations.GEE', 'GEE', (['endog', 'exog', 'group', 'None', 'family', 'dp'], {}), '(endog, exog, group, None, family, dp)\n', (13162, 13200), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((13432, 13479), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf1.params', 'cf'], {'decimal': '(6)'}), '(mdf1.params, cf, decimal=6)\n', (13451, 13479), False, 'from numpy.testing import assert_almost_equal\n'), ((13589, 13597), 'statsmodels.genmod.dependence_structures.Nested', 'Nested', ([], {}), '()\n', (13595, 13597), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((13611, 13670), 'statsmodels.genmod.generalized_estimating_equations.GEE', 'GEE', (['endog', 'exog', 'group', 'None', 'family', 'ne'], {'dep_data': 'group_n'}), '(endog, exog, group, None, family, ne, dep_data=group_n)\n', (13614, 13670), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((13943, 13990), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf2.params', 'cf'], {'decimal': '(6)'}), '(mdf2.params, cf, decimal=6)\n', (13962, 13990), False, 'from numpy.testing import assert_almost_equal\n'), ((14134, 14144), 'statsmodels.genmod.families.Binomial', 'Binomial', ([], {}), '()\n', (14142, 14144), False, 'from statsmodels.genmod.families import Gaussian, Binomial, Poisson\n'), ((14273, 14299), 'statsmodels.genmod.dependence_structures.GlobalOddsRatio', 'GlobalOddsRatio', (['"""ordinal"""'], {}), "('ordinal')\n", (14288, 14299), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((14314, 14355), 'statsmodels.genmod.generalized_estimating_equations.GEE', 'GEE', (['endog', 'exog', 'groups', 'None', 'family', 'v'], {}), '(endog, exog, groups, None, family, v)\n', (14317, 14355), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((14691, 14737), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf.params', 'cf'], {'decimal': '(5)'}), '(mdf.params, cf, decimal=5)\n', (14710, 14737), False, 'from numpy.testing import assert_almost_equal\n'), ((14746, 14789), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf.bse', 'se'], {'decimal': '(5)'}), '(mdf.bse, se, decimal=5)\n', (14765, 14789), False, 'from numpy.testing import assert_almost_equal\n'), ((14838, 14852), 'statsmodels.genmod.generalized_estimating_equations.Multinomial', 'Multinomial', (['(3)'], {}), '(3)\n', (14849, 14852), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((15026, 15040), 'statsmodels.genmod.dependence_structures.Independence', 'Independence', ([], {}), '()\n', (15038, 15040), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((15054, 15095), 'statsmodels.genmod.generalized_estimating_equations.GEE', 'GEE', (['endog', 'exog', 'groups', 'None', 'family', 'v'], {}), '(endog, exog, groups, None, family, v)\n', (15057, 15095), False, 'from 
statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((15355, 15403), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf1.params', 'cf1'], {'decimal': '(5)'}), '(mdf1.params, cf1, decimal=5)\n', (15374, 15403), False, 'from numpy.testing import assert_almost_equal\n'), ((15534, 15560), 'statsmodels.genmod.dependence_structures.GlobalOddsRatio', 'GlobalOddsRatio', (['"""nominal"""'], {}), "('nominal')\n", (15549, 15560), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((15574, 15615), 'statsmodels.genmod.generalized_estimating_equations.GEE', 'GEE', (['endog', 'exog', 'groups', 'None', 'family', 'v'], {}), '(endog, exog, groups, None, family, v)\n', (15577, 15615), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((15899, 15947), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf2.params', 'cf2'], {'decimal': '(5)'}), '(mdf2.params, cf2, decimal=5)\n', (15918, 15947), False, 'from numpy.testing import assert_almost_equal\n'), ((16924, 16933), 'statsmodels.genmod.families.Poisson', 'Poisson', ([], {}), '()\n', (16931, 16933), False, 'from statsmodels.genmod.families import Gaussian, Binomial, Poisson\n'), ((17009, 17023), 'statsmodels.genmod.dependence_structures.Independence', 'Independence', ([], {}), '()\n', (17021, 17023), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((17037, 17051), 'statsmodels.genmod.dependence_structures.Exchangeable', 'Exchangeable', ([], {}), '()\n', (17049, 17051), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((18071, 18142), 'numpy.concatenate', 'np.concatenate', (['(endog[:, None], group_n[:, None], exog[:, 1:])'], {'axis': '(1)'}), '((endog[:, None], group_n[:, None], exog[:, 1:]), axis=1)\n', (18085, 18142), True, 'import numpy as np\n'), ((18180, 18195), 'pandas.DataFrame', 'pd.DataFrame', (['D'], {}), '(D)\n', (18192, 18195), True, 'import pandas as pd\n'), ((19035, 19049), 'statsmodels.genmod.dependence_structures.Independence', 'Independence', ([], {}), '()\n', (19047, 19049), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((19067, 19077), 'statsmodels.genmod.families.Gaussian', 'Gaussian', ([], {}), '()\n', (19075, 19077), False, 'from statsmodels.genmod.families import Gaussian, Binomial, Poisson\n'), ((19091, 19117), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (19107, 19117), True, 'import numpy as np\n'), ((19131, 19157), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (19147, 19157), True, 'import numpy as np\n'), ((19171, 19197), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (19187, 19197), True, 'import numpy as np\n'), ((19211, 19237), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (19227, 19237), True, 'import numpy as np\n'), ((19300, 19352), 'pandas.DataFrame', 'pd.DataFrame', (["{'Y': Y, 'X1': X1, 'X2': X2, 'X3': X3}"], {}), "({'Y': Y, 'X1': X1, 'X2': X2, 'X3': X3})\n", (19312, 19352), True, 'import pandas as pd\n'), ((19367, 19461), 'statsmodels.genmod.generalized_estimating_equations.GEE.from_formula', 'GEE.from_formula', 
(['"""Y ~ X1 + X2 + X3"""', 'D', 'None'], {'groups': 'groups', 'family': 'family', 'cov_struct': 'vs'}), "('Y ~ X1 + X2 + X3', D, None, groups=groups, family=family,\n cov_struct=vs)\n", (19383, 19461), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((19606, 19668), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['ols.params.values', 'mdf.params'], {'decimal': '(10)'}), '(ols.params.values, mdf.params, decimal=10)\n', (19625, 19668), False, 'from numpy.testing import assert_almost_equal\n'), ((19736, 19780), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['ols.bse', 'se'], {'decimal': '(10)'}), '(ols.bse, se, decimal=10)\n', (19755, 19780), False, 'from numpy.testing import assert_almost_equal\n'), ((19880, 19939), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['naive_tvalues', 'ols.tvalues'], {'decimal': '(10)'}), '(naive_tvalues, ols.tvalues, decimal=10)\n', (19899, 19939), False, 'from numpy.testing import assert_almost_equal\n'), ((19990, 20004), 'statsmodels.genmod.dependence_structures.Independence', 'Independence', ([], {}), '()\n', (20002, 20004), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((20022, 20032), 'statsmodels.genmod.families.Binomial', 'Binomial', ([], {}), '()\n', (20030, 20032), False, 'from statsmodels.genmod.families import Gaussian, Binomial, Poisson\n'), ((20094, 20120), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (20110, 20120), True, 'import numpy as np\n'), ((20134, 20160), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (20150, 20160), True, 'import numpy as np\n'), ((20174, 20200), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (20190, 20200), True, 'import numpy as np\n'), ((20218, 20251), 'numpy.random.randint', 'np.random.randint', (['(0)', '(4)'], {'size': '(100)'}), '(0, 4, size=100)\n', (20235, 20251), True, 'import numpy as np\n'), ((20265, 20317), 'pandas.DataFrame', 'pd.DataFrame', (["{'Y': Y, 'X1': X1, 'X2': X2, 'X3': X3}"], {}), "({'Y': Y, 'X1': X1, 'X2': X2, 'X3': X3})\n", (20277, 20317), True, 'import pandas as pd\n'), ((20537, 20598), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sml.params.values', 'md.params'], {'decimal': '(10)'}), '(sml.params.values, md.params, decimal=10)\n', (20556, 20598), False, 'from numpy.testing import assert_almost_equal\n'), ((20651, 20665), 'statsmodels.genmod.dependence_structures.Independence', 'Independence', ([], {}), '()\n', (20663, 20665), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((20683, 20692), 'statsmodels.genmod.families.Poisson', 'Poisson', ([], {}), '()\n', (20690, 20692), False, 'from statsmodels.genmod.families import Gaussian, Binomial, Poisson\n'), ((20765, 20791), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (20781, 20791), True, 'import numpy as np\n'), ((20805, 20831), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (20821, 20831), True, 'import numpy as np\n'), ((20845, 20871), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (20861, 20871), True, 'import numpy as np\n'), ((20889, 20922), 'numpy.random.randint', 'np.random.randint', (['(0)', '(4)'], {'size': '(100)'}), '(0, 4, 
size=100)\n', (20906, 20922), True, 'import numpy as np\n'), ((20936, 20988), 'pandas.DataFrame', 'pd.DataFrame', (["{'Y': Y, 'X1': X1, 'X2': X2, 'X3': X3}"], {}), "({'Y': Y, 'X1': X1, 'X2': X2, 'X3': X3})\n", (20948, 20988), True, 'import pandas as pd\n'), ((21210, 21271), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sml.params.values', 'md.params'], {'decimal': '(10)'}), '(sml.params.values, md.params, decimal=10)\n', (21229, 21271), False, 'from numpy.testing import assert_almost_equal\n'), ((1765, 1781), 'numpy.arange', 'np.arange', (['(n / 4)'], {}), '(n / 4)\n', (1774, 1781), True, 'import numpy as np\n'), ((1781, 1791), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (1788, 1791), True, 'import numpy as np\n'), ((2303, 2328), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (2318, 2328), False, 'import os\n'), ((3379, 3389), 'statsmodels.compat.lrange', 'lrange', (['(20)'], {}), '(20)\n', (3385, 3389), False, 'from statsmodels.compat import lrange\n'), ((3391, 3401), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (3398, 3401), True, 'import numpy as np\n'), ((4134, 4161), 'numpy.flatnonzero', 'np.flatnonzero', (['(group == ii)'], {}), '(group == ii)\n', (4148, 4161), True, 'import numpy as np\n'), ((5957, 5984), 'numpy.flatnonzero', 'np.flatnonzero', (['(group == ii)'], {}), '(group == ii)\n', (5971, 5984), True, 'import numpy as np\n'), ((6845, 6882), 'statsmodels.genmod.generalized_estimating_equations.GEE', 'GEE', (['endog', 'exog', 'group', 'T', 'family', 'v'], {}), '(endog, exog, group, T, family, v)\n', (6848, 6882), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((7459, 7561), 'statsmodels.genmod.generalized_estimating_equations.GEE.from_formula', 'GEE.from_formula', (['"""Y ~ X1 + X2 + X3"""', 'D', 'None'], {'groups': "D.loc[:, 'Id']", 'family': 'family', 'cov_struct': 'v'}), "('Y ~ X1 + X2 + X3', D, None, groups=D.loc[:, 'Id'], family\n =family, cov_struct=v)\n", (7475, 7561), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((7667, 7716), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf.params', 'cf[j]'], {'decimal': '(6)'}), '(mdf.params, cf[j], decimal=6)\n', (7686, 7716), False, 'from numpy.testing import assert_almost_equal\n'), ((8427, 8437), 'numpy.abs', 'np.abs', (['ix'], {}), '(ix)\n', (8433, 8437), True, 'import numpy as np\n'), ((8493, 8517), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['cmat'], {}), '(cmat)\n', (8511, 8517), True, 'import numpy as np\n'), ((8930, 8951), 'numpy.concatenate', 'np.concatenate', (['endog'], {}), '(endog)\n', (8944, 8951), True, 'import numpy as np\n'), ((8973, 8995), 'numpy.concatenate', 'np.concatenate', (['groups'], {}), '(groups)\n', (8987, 8995), True, 'import numpy as np\n'), ((9015, 9043), 'numpy.concatenate', 'np.concatenate', (['exog'], {'axis': '(0)'}), '(exog, axis=0)\n', (9029, 9043), True, 'import numpy as np\n'), ((9062, 9078), 'statsmodels.genmod.dependence_structures.Autoregressive', 'Autoregressive', ([], {}), '()\n', (9076, 9078), False, 'from statsmodels.genmod.dependence_structures import Exchangeable, Independence, GlobalOddsRatio, Autoregressive, Nested\n'), ((9096, 9146), 'statsmodels.genmod.generalized_estimating_equations.GEE', 'GEE', (['endog', 'exog', 'groups'], {'family': 'ga', 'cov_struct': 'ar'}), '(endog, exog, groups, family=ga, cov_struct=ar)\n', (9099, 9146), False, 'from 
statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((9189, 9251), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['ar.dep_params', 'dep_params_true[gsize - 1]'], {}), '(ar.dep_params, dep_params_true[gsize - 1])\n', (9208, 9251), False, 'from numpy.testing import assert_almost_equal\n'), ((9262, 9317), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf.params', 'params_true[gsize - 1]'], {}), '(mdf.params, params_true[gsize - 1])\n', (9281, 9317), False, 'from numpy.testing import assert_almost_equal\n'), ((9576, 9600), 'numpy.dot', 'np.dot', (['exog', 'mdf.params'], {}), '(exog, mdf.params)\n', (9582, 9600), True, 'import numpy as np\n'), ((11248, 11288), 'statsmodels.genmod.generalized_estimating_equations.GEE', 'GEE', (['endog', 'exog', 'group', 'None', 'family', 'v'], {}), '(endog, exog, group, None, family, v)\n', (11251, 11288), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((11328, 11378), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf.params', 'cf[j]'], {'decimal': '(10)'}), '(mdf.params, cf[j], decimal=10)\n', (11347, 11378), False, 'from numpy.testing import assert_almost_equal\n'), ((11822, 11924), 'statsmodels.genmod.generalized_estimating_equations.GEE.from_formula', 'GEE.from_formula', (['"""Y ~ X1 + X2 + X3"""', 'D', 'None'], {'groups': "D.loc[:, 'Id']", 'family': 'family', 'cov_struct': 'v'}), "('Y ~ X1 + X2 + X3', D, None, groups=D.loc[:, 'Id'], family\n =family, cov_struct=v)\n", (11838, 11924), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((12026, 12076), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf.params', 'cf[j]'], {'decimal': '(10)'}), '(mdf.params, cf[j], decimal=10)\n', (12045, 12076), False, 'from numpy.testing import assert_almost_equal\n'), ((12337, 12370), 'numpy.dot', 'np.dot', (['exog', 'np.r_[1, 1, 0, 0.2]'], {}), '(exog, np.r_[1, 1, 0, 0.2])\n', (12343, 12370), True, 'import numpy as np\n'), ((12386, 12412), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(300)'}), '(size=300)\n', (12402, 12412), True, 'import numpy as np\n'), ((12437, 12451), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (12446, 12451), True, 'import numpy as np\n'), ((12638, 12697), 'statsmodels.genmod.generalized_estimating_equations.GEE', 'GEE', (['endog', 'exog', 'group', 'None', 'family', 'v'], {'constraint': '(L, R)'}), '(endog, exog, group, None, family, v, constraint=(L, R))\n', (12641, 12697), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((12757, 12806), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf.params[3]', '(0)'], {'decimal': '(10)'}), '(mdf.params[3], 0, decimal=10)\n', (12776, 12806), False, 'from numpy.testing import assert_almost_equal\n'), ((13091, 13108), 'numpy.array', 'np.array', (['group_n'], {}), '(group_n)\n', (13099, 13108), True, 'import numpy as np\n'), ((17792, 17834), 'statsmodels.genmod.generalized_estimating_equations.GEE', 'GEE', (['endog', 'exog', 'group_n', 'None', 'family', 'v'], {}), '(endog, exog, group_n, None, family, v)\n', (17795, 17834), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((17874, 17923), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf.params', 'cf[j]'], {'decimal': '(5)'}), '(mdf.params, cf[j], decimal=5)\n', (17893, 17923), 
False, 'from numpy.testing import assert_almost_equal\n'), ((18370, 18481), 'statsmodels.genmod.generalized_estimating_equations.GEE.from_formula', 'GEE.from_formula', (['"""Y ~ X1 + X2 + X3 + X4 + X5"""', 'D', 'None'], {'groups': "D.loc[:, 'Id']", 'family': 'family', 'cov_struct': 'v'}), "('Y ~ X1 + X2 + X3 + X4 + X5', D, None, groups=D.loc[:,\n 'Id'], family=family, cov_struct=v)\n", (18386, 18481), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((18588, 18637), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf.params', 'cf[j]'], {'decimal': '(5)'}), '(mdf.params, cf[j], decimal=5)\n', (18607, 18637), False, 'from numpy.testing import assert_almost_equal\n'), ((19263, 19273), 'statsmodels.compat.lrange', 'lrange', (['(20)'], {}), '(20)\n', (19269, 19273), False, 'from statsmodels.compat import lrange\n'), ((19275, 19285), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (19282, 19285), True, 'import numpy as np\n'), ((1461, 1488), 'numpy.ones', 'np.ones', (['(exog.shape[0], 1)'], {}), '((exog.shape[0], 1))\n', (1468, 1488), True, 'import numpy as np\n'), ((1919, 1931), 'numpy.exp', 'np.exp', (['(-lpr)'], {}), '(-lpr)\n', (1925, 1931), True, 'import numpy as np\n'), ((1953, 1978), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'n'}), '(size=n)\n', (1970, 1978), True, 'import numpy as np\n'), ((2941, 2959), 'statsmodels.genmod.families.Poisson', 'families.Poisson', ([], {}), '()\n', (2957, 2959), False, 'from statsmodels.genmod import families\n'), ((6958, 7007), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf.params', 'cf[j]'], {'decimal': '(6)'}), '(mdf.params, cf[j], decimal=6)\n', (6977, 7007), False, 'from numpy.testing import assert_almost_equal\n'), ((8647, 8680), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(gsize, k)'}), '(size=(gsize, k))\n', (8663, 8680), True, 'import numpy as np\n'), ((9684, 9708), 'numpy.dot', 'np.dot', (['exog', 'mdf.params'], {}), '(exog, mdf.params)\n', (9690, 9708), True, 'import numpy as np\n'), ((19556, 19590), 'statsmodels.formula.api.ols', 'sm.ols', (['"""Y ~ X1 + X2 + X3"""'], {'data': 'D'}), "('Y ~ X1 + X2 + X3', data=D)\n", (19562, 19590), True, 'import statsmodels.formula.api as sm\n'), ((19841, 19870), 'numpy.diag', 'np.diag', (['mdf.naive_covariance'], {}), '(mdf.naive_covariance)\n', (19848, 19870), True, 'import numpy as np\n'), ((20049, 20075), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (20065, 20075), True, 'import numpy as np\n'), ((20332, 20426), 'statsmodels.genmod.generalized_estimating_equations.GEE.from_formula', 'GEE.from_formula', (['"""Y ~ X1 + X2 + X3"""', 'D', 'None'], {'groups': 'groups', 'family': 'family', 'cov_struct': 'vs'}), "('Y ~ X1 + X2 + X3', D, None, groups=groups, family=family,\n cov_struct=vs)\n", (20348, 20426), False, 'from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((20475, 20511), 'statsmodels.formula.api.logit', 'sm.logit', (['"""Y ~ X1 + X2 + X3"""'], {'data': 'D'}), "('Y ~ X1 + X2 + X3', data=D)\n", (20483, 20511), True, 'import statsmodels.formula.api as sm\n'), ((21003, 21097), 'statsmodels.genmod.generalized_estimating_equations.GEE.from_formula', 'GEE.from_formula', (['"""Y ~ X1 + X2 + X3"""', 'D', 'None'], {'groups': 'groups', 'family': 'family', 'cov_struct': 'vs'}), "('Y ~ X1 + X2 + X3', D, None, groups=groups, family=family,\n cov_struct=vs)\n", (21019, 21097), False, 'from 
statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins, Multinomial\n'), ((21146, 21184), 'statsmodels.formula.api.poisson', 'sm.poisson', (['"""Y ~ X1 + X2 + X3"""'], {'data': 'D'}), "('Y ~ X1 + X2 + X3', data=D)\n", (21156, 21184), True, 'import statsmodels.formula.api as sm\n'), ((8358, 8374), 'numpy.arange', 'np.arange', (['gsize'], {}), '(gsize)\n', (8367, 8374), True, 'import numpy as np\n'), ((8385, 8401), 'numpy.arange', 'np.arange', (['gsize'], {}), '(gsize)\n', (8394, 8401), True, 'import numpy as np\n'), ((8785, 8813), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'gsize'}), '(size=gsize)\n', (8801, 8813), True, 'import numpy as np\n'), ((20722, 20749), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(100)'}), '(size=100)\n', (20739, 20749), True, 'import numpy as np\n'), ((8893, 8907), 'numpy.ones', 'np.ones', (['gsize'], {}), '(gsize)\n', (8900, 8907), True, 'import numpy as np\n')]
|
############################################## README #################################################
# This calculates threshold for an image depending upon its spiking activity.
########################################################################################################
import numpy as np
from snn.neuron import neuron
import random
from matplotlib import pyplot as plt
from snn.recep_field import rf
from snn.spike_train import encode
from snn.rl import rl
from snn.rl import update
from snn.reconstruct import reconst_weights
from snn.parameters import param as par
import os
def threshold(train):
tu = np.shape(train[0])[0]
thresh = 0
for i in range(tu):
simul_active = sum(train[:,i])
if simul_active>thresh:
thresh = simul_active
return (thresh/3)*par.scale
if __name__ == '__main__':
# img = cv2.imread("mnist1/" + str(1) + ".png", 0)
	from PIL import Image  # local import: the original code referenced Image without importing it
	img = np.array(Image.open("mnist1/" + str(1) + ".png").convert("L"))  # grayscale, matching cv2.imread(..., 0)
print(img)
# pot = rf(img)
# train = np.array(encode(pot))
# print threshold(train)
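	# Hedged sketch (not in the original file): the commented lines above show the intended
	# pipeline (receptive field -> spike-train encoding -> threshold()).  On a small synthetic
	# spike train the function returns (max simultaneous spikes / 3) * par.scale, e.g.:
	#     demo_train = np.array([[0, 1, 0, 1],
	#                            [1, 1, 0, 0],
	#                            [0, 1, 1, 0]])
	#     print(threshold(demo_train))  # column 1 has 3 simultaneous spikes -> par.scale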
|
[
"numpy.shape"
] |
[((630, 648), 'numpy.shape', 'np.shape', (['train[0]'], {}), '(train[0])\n', (638, 648), True, 'import numpy as np\n')]
|
# load the UCI HAR data and evaluate an exported ONNX model on the test split
import numpy as np
import onnxruntime
import torch
from pandas import read_csv
from tensorflow.python.keras.utils.np_utils import to_categorical
from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score
# load a single file as a numpy array
def load_file(filepath):
dataframe = read_csv(filepath, header=None, delim_whitespace=True)
return dataframe.values
# load a list of files into a 3D array of [samples, timesteps, features]
def load_group(filenames, prefix=''):
loaded = list()
for name in filenames:
data = load_file(prefix + name)
loaded.append(data)
# stack group so that features are the 3rd dimension
loaded = np.dstack(loaded)
return loaded
# load a dataset group, such as train or test
def load_dataset_group(group, prefix=''):
filepath = prefix + group + '/Inertial Signals/'
# load all 9 files as a single array
filenames = list()
# total acceleration
filenames += ['total_acc_x_' + group + '.txt', 'total_acc_y_' + group + '.txt', 'total_acc_z_' + group + '.txt']
# body acceleration
filenames += ['body_acc_x_' + group + '.txt', 'body_acc_y_' + group + '.txt', 'body_acc_z_' + group + '.txt']
# body gyroscope
filenames += ['body_gyro_x_' + group + '.txt', 'body_gyro_y_' + group + '.txt', 'body_gyro_z_' + group + '.txt']
# load input data
X = load_group(filenames, filepath)
# load class output
y = load_file(prefix + group + '/y_' + group + '.txt')
return X, y
# load the dataset, returns train and test X and y elements
def load_dataset(prefix=''):
# load all train
trainX, trainy = load_dataset_group('train', prefix + 'UCI HAR Dataset/')
# print(trainX.shape, trainy.shape)
# load all test
testX, testy = load_dataset_group('test', prefix + 'UCI HAR Dataset/')
# print(testX.shape, testy.shape)
# zero-offset class values
trainy = trainy - 1
testy = testy - 1
# one hot encode y
trainy = to_categorical(trainy)
testy = to_categorical(testy)
print(trainX.shape, trainy.shape, testX.shape, testy.shape)
return trainX, trainy, testX, testy
# summarize scores
def summarize_results(scores):
print('scores:', scores)
mean, std = np.mean(scores), np.std(scores)
return [mean, std]
# run an experiment
def run_experiment(repeats=10):
# load data
trainX, trainy, testX, testy = load_dataset()
# sess = onnxruntime.InferenceSession('./models/model1.onnx')
sess = onnxruntime.InferenceSession('./cnn-pytorch.onnx')
for i in sess.get_inputs():
print(i.name)
print(i.shape)
for i in sess.get_outputs():
print(i.name)
print(i.shape)
# y_predict = sess.run(None, {sess.get_inputs()[0].name: testX.astype(np.float32)})
testX = np.transpose(testX, (0, 2, 1))
	# keep shuffle off so that feature and label batches stay aligned when zipped below
	testX = torch.utils.data.DataLoader(testX, batch_size=32, shuffle=False, num_workers=0)
	testy = torch.utils.data.DataLoader(testy, batch_size=32, shuffle=False, num_workers=0)
for features, labels in zip(testX, testy):
y_predict = sess.run(None, {sess.get_inputs()[0].name: features.float().numpy()})
print('y_predict', y_predict)
# y_predict = np.array(y_predict)
# y_predict = np.argmax(y_predict, axis=2)
# testy = labels
# y_true = np.reshape(testy, [-1])
# y_pred = np.reshape(y_predict, [-1])
# accuracy = accuracy_score(y_true, y_pred)
# precision = precision_score(y_true, y_pred, average='macro')
# recall = recall_score(y_true, y_pred, average='macro')
# f1score = f1_score(y_true, y_pred, average='macro')
# print(accuracy, precision, recall, f1score)
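		# Hedged sketch (not in the original): with aligned batches one could turn the ONNX
		# logits and the one-hot labels into class indices and report per-batch accuracy,
		# assuming the first model output has shape [batch, n_classes]:
		#     batch_pred = np.argmax(np.asarray(y_predict[0]), axis=-1)
		#     batch_true = np.argmax(labels.numpy(), axis=-1)
		#     print('batch accuracy:', accuracy_score(batch_true, batch_pred))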
run_experiment()
|
[
"numpy.dstack",
"torch.utils.data.DataLoader",
"pandas.read_csv",
"numpy.std",
"numpy.transpose",
"tensorflow.python.keras.utils.np_utils.to_categorical",
"onnxruntime.InferenceSession",
"numpy.mean"
] |
[((365, 419), 'pandas.read_csv', 'read_csv', (['filepath'], {'header': 'None', 'delim_whitespace': '(True)'}), '(filepath, header=None, delim_whitespace=True)\n', (373, 419), False, 'from pandas import read_csv\n'), ((746, 763), 'numpy.dstack', 'np.dstack', (['loaded'], {}), '(loaded)\n', (755, 763), True, 'import numpy as np\n'), ((2044, 2066), 'tensorflow.python.keras.utils.np_utils.to_categorical', 'to_categorical', (['trainy'], {}), '(trainy)\n', (2058, 2066), False, 'from tensorflow.python.keras.utils.np_utils import to_categorical\n'), ((2079, 2100), 'tensorflow.python.keras.utils.np_utils.to_categorical', 'to_categorical', (['testy'], {}), '(testy)\n', (2093, 2100), False, 'from tensorflow.python.keras.utils.np_utils import to_categorical\n'), ((2554, 2604), 'onnxruntime.InferenceSession', 'onnxruntime.InferenceSession', (['"""./cnn-pytorch.onnx"""'], {}), "('./cnn-pytorch.onnx')\n", (2582, 2604), False, 'import onnxruntime\n'), ((2860, 2890), 'numpy.transpose', 'np.transpose', (['testX', '(0, 2, 1)'], {}), '(testX, (0, 2, 1))\n', (2872, 2890), True, 'import numpy as np\n'), ((2903, 2981), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testX'], {'batch_size': '(32)', 'shuffle': '(True)', 'num_workers': '(0)'}), '(testX, batch_size=32, shuffle=True, num_workers=0)\n', (2930, 2981), False, 'import torch\n'), ((2994, 3072), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testy'], {'batch_size': '(32)', 'shuffle': '(True)', 'num_workers': '(0)'}), '(testy, batch_size=32, shuffle=True, num_workers=0)\n', (3021, 3072), False, 'import torch\n'), ((2302, 2317), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (2309, 2317), True, 'import numpy as np\n'), ((2319, 2333), 'numpy.std', 'np.std', (['scores'], {}), '(scores)\n', (2325, 2333), True, 'import numpy as np\n')]
|
#! /usr/bin/enc python
# -*- coding: utf-8 -*-
# author: <NAME>
# email: <EMAIL>
"""
Swin Transformer
1. 类似CNN的层次化构建方法(Hierarchical Feature Maps),特征图尺寸中有对图像下采样4倍、8倍、以及16倍;
这样的Backbone有助于再此基础上构建目标检测、实例分割等任务。
2. 使用Windows Multi-Head Self-Attention (W-MSA)概念。减少计算量。计算复杂度从指数级降到线性级,Multi-head
Self-Attention只在每个Windows内部进行。相对于ViT直接对整个Global进行MSA,计算复杂度更低;但是会隔绝不同
窗口之间的信息传递,通过Shifted Windows Multi-head Self-Atten来让信息在相邻窗口进行传递。
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows`
- https://arxiv.org/pdf/2103.14030
Code/weights from https://github.com/microsoft/Swin-Transformer
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
import numpy as np
from typing import Optional
from BasicModule import PatchMerging, DropPath, PatchEmbed
from BasicModule import Mlp
from BasicModule import window_partition, window_reverse
"""SwinT
window_size = 7
img_size = 224
Trained ImageNet-1k
depths->2,2,6,2
"""
def swin_tiny_patch4_window7_224(num_classes: int = 1000, **kwargs):
# trained ImageNet-1K
# https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth
model = SwinTransformer(in_chans=3,
patch_size=4,
window_size=7,
embed_dim=96,
depths=(2, 2, 6, 2),
num_heads=(3, 6, 12, 24),
num_classes=num_classes,
**kwargs)
return model
"""Swin-S
depths->2,2,18,2
"""
def swin_small_patch4_window7_224(num_classes: int = 1000, **kwargs):
# trained ImageNet-1K
# https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth
model = SwinTransformer(in_chans=3,
patch_size=4,
window_size=7,
embed_dim=96,
depths=(2, 2, 18, 2),
num_heads=(3, 6, 12, 24),
num_classes=num_classes,
**kwargs)
return model
"""Swin-B"""
def swin_base_patch4_window7_224(num_classes: int = 1000, **kwargs):
# trained ImageNet-1K
# https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth
model = SwinTransformer(in_chans=3,
patch_size=4,
window_size=7,
embed_dim=128,
depths=(2, 2, 18, 2),
num_heads=(4, 8, 16, 32),
num_classes=num_classes,
**kwargs)
return model
def swin_base_patch4_window12_384(num_classes: int = 1000, **kwargs):
# trained ImageNet-1K
# https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth
model = SwinTransformer(in_chans=3,
patch_size=4,
window_size=12,
embed_dim=128,
depths=(2, 2, 18, 2),
num_heads=(4, 8, 16, 32),
num_classes=num_classes,
**kwargs)
return model
def swin_base_patch4_window7_224_in22k(num_classes: int = 21841, **kwargs):
# trained ImageNet-22K
# https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth
model = SwinTransformer(in_chans=3,
patch_size=4,
window_size=7,
embed_dim=128,
depths=(2, 2, 18, 2),
num_heads=(4, 8, 16, 32),
num_classes=num_classes,
**kwargs)
return model
def swin_base_patch4_window12_384_in22k(num_classes: int = 21841, **kwargs):
# trained ImageNet-22K
# https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth
model = SwinTransformer(in_chans=3,
patch_size=4,
window_size=12,
embed_dim=128,
depths=(2, 2, 18, 2),
num_heads=(4, 8, 16, 32),
num_classes=num_classes,
**kwargs)
return model
"""Swin-Large"""
def swin_large_patch4_window7_224_in22k(num_classes: int = 21841, **kwargs):
# trained ImageNet-22K
# https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth
model = SwinTransformer(in_chans=3,
patch_size=4,
window_size=7,
embed_dim=192,
depths=(2, 2, 18, 2),
num_heads=(6, 12, 24, 48),
num_classes=num_classes,
**kwargs)
return model
def swin_large_patch4_window12_384_in22k(num_classes: int = 21841, **kwargs):
# trained ImageNet-22K
# https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth
model = SwinTransformer(in_chans=3,
patch_size=4,
window_size=12,
embed_dim=192,
depths=(2, 2, 18, 2),
num_heads=(6, 12, 24, 48),
num_classes=num_classes,
**kwargs)
return model
"""Swin Transformer"""
class SwinTransformer(nn.Module):
"""Swin Transformer结构
这里有个不同之处,就是每个Stage Layer中,
"""
def __init__(self, patch_size=4, in_chans=3, num_classes=1000,
embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24),
window_size=7, mlp_ratio=4., qkv_bias=True,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
norm_layer=nn.LayerNorm, patch_norm=True,
use_checkpoint=False, **kwargs):
super().__init__()
self.num_classes = num_classes
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.patch_norm = patch_norm
        # number of channels (C) of the output feature matrix
# H/4 x W/4 x 48 -> H/4 x W/4 x C(Stage1) -> H/8 x W/8 x 2C(Stage2) -> H/16 x W/16 x 4C(stage3) ...
self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
self.mlp_ratio = mlp_ratio
        # split the image into non-overlapping patches
# input: (Bs, 224, 224, 3)
# output: (e.g patch_size=4: Bs, 56x56, 4x4x3)
self.patch_embed = PatchEmbed(
patch_size=patch_size, in_c=in_chans, embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None)
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
# Drop Path
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
# bulid layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
            # note: the stage built here differs slightly from the figure in the paper:
            # it does not contain its own patch_merging layer but the one of the next stage
layers = BasicLayer(dim=int(embed_dim * 2 ** i_layer),
depth=depths[i_layer],
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
use_checkpoint=use_checkpoint)
self.layers.append(layers)
self.norm = norm_layer(self.num_features)
self.avgpool = nn.AdaptiveAvgPool1d(1)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
nn.init.trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def forward(self,x):
# x:[B, L, C]
x,H,W = self.patch_embed(x)
x = self.pos_drop(x)
        # multi-scale hierarchical stages
for layer in self.layers:
x,H,W = layer(x,H,W)
x = self.norm(x) # [B, L, C]
x = self.avgpool(x.transpose(1, 2)) # [B, C, 1]
x = torch.flatten(x, 1)
        x = self.head(x)  # classification head
return x
"""一个Stage内的基本SwinTransformer模块"""
class BasicLayer(nn.Module):
"""
One Stage SwinTransformer Layer包括:
"""
def __init__(self, dim, depth, num_heads, window_size,
mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0.,
drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
"""
Args:
dim (int): Number of input channels.
            depth (int): Number of blocks.
num_heads (int): Number of attention heads.
window_size (int): Local window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
super(BasicLayer, self).__init__()
self.dim = dim
self.depth = depth
self.window_size = window_size
        self.use_checkpoint = use_checkpoint  # gradient checkpointing to save memory
self.shift_size = window_size // 2
        # build the SwinTransformer blocks of this stage
self.blocks = nn.ModuleList([
SwinTransformerBlock(
dim=dim,
num_heads=num_heads,
window_size=window_size,
                shift_size=0 if (i % 2 == 0) else self.shift_size,  # even i -> W-MSA, odd i -> SW-MSA (as in the paper), so neighbouring windows can communicate
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
drop=drop,
attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer)
for i in range(depth)])
        # patch merging layer, similar to pooling-style downsampling
if downsample is not None:
self.downsample = downsample(dim=dim, norm_layer=norm_layer)
else:
self.downsample = None
def create_mask(self,x,H,W):
"""
SW-MSA后,对于移位后左上角的窗口(也就是移位前最中间的窗口)来说,里面的元素都是互相紧挨着的,
他们之间可以互相两两做自注意力,但是对于剩下几个窗口来说,它们里面的元素是从别的很远的地方搬过来的,
所以他们之间,按道理来说是不应该去做自注意力,也就是说他们之间不应该有什么太大的联系
以14x14个patch为例进行
H: Feature Map Height
W: Feature Map Width
x: Feature Map
"""
        # compute the attention mask for SW-MSA
        # make sure Hp and Wp are integer multiples of window_size
Hp = int(np.ceil(H / self.window_size)) * self.window_size
Wp = int(np.ceil(W / self.window_size)) * self.window_size
        # same channel layout as the feature map, so window_partition below can be reused
img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # [1, Hp, Wp, 1]
        # slices that label the regions from which the mask will be generated
h_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
        # give every region its own id
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
        # partition the region-labelled map into (shifted) windows
mask_windows = window_partition(img_mask, self.window_size) # [nW, Mh, Mw, 1]
mask_windows = mask_windows.view(-1, self.window_size * self.window_size) # [nW, Mh*Mw]
        # build the mask from the region ids
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) # [nW, 1, Mh*Mw] - [nW, Mh*Mw, 1]
# [nW, Mh*Mw, Mh*Mw]
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
return attn_mask
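    # Worked example (illustration only, not in the original): for H = W = 14, window_size = 7
    # and shift_size = 3, Hp = Wp = 14 and the mask covers nW = 4 windows, so attn_mask has
    # shape [4, 49, 49]; an entry is 0 where both tokens come from the same pre-shift region
    # and -100 where they do not, which drives those attention weights to ~0 after softmax.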
def forward(self,x,H,W):
        # [nW, Mh*Mw, Mh*Mw]  nW: number of windows
attn_mask = self.create_mask(x,H,W)
for blk in self.blocks:
blk.H, blk.W = H, W # self.H = H, self.W = W
if not torch.jit.is_scripting() and self.use_checkpoint:
x = checkpoint.checkpoint(blk, x, attn_mask)
else:
x = blk(x, attn_mask)
if self.downsample is not None:
x = self.downsample(x, H, W)
            H, W = (H + 1) // 2, (W + 1) // 2  # after downsampling, H and W are halved
return x, H, W
"""一个基本的SwinTransformerBlock的构成Model"""
class SwinTransformerBlock(nn.Module):
"""
Swin Transformer Block包括:
Feature Map Input -> LayerNorm -> SW-MSA/W-MSA -> LayerNorm-> MLP -------->
|--------------------------------------||----------------------|
"""
def __init__(self, dim, num_heads, window_size=7, shift_size=0,
mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm):
"""
        Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
super(SwinTransformerBlock, self).__init__()
self.dim = dim
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
        # shift_size must be smaller than window_size
assert 0 <= self.shift_size < self.window_size, "shift_size must in 0~window_size"
# LN1
self.norm1 = norm_layer(dim)
# Windows_Multi-head Self Attention
self.attn = WindowsAttention(
dim, window_size=(self.window_size, self.window_size), num_heads=num_heads, qkv_bias=qkv_bias,
attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
# LN2
self.norm2 = norm_layer(dim)
# MLP Layer
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x, attn_mask):
        # height & width of the feature map
H, W = self.H, self.W
# Batch, length, channel
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
# Skip Connect
shortcut = x
x = self.norm1(x)
# reshape feature map
x = x.view(B, H, W, C)
        # pad the feature map up to integer multiples of the window size
pad_l = 0
pad_t = 0
pad_r = (self.window_size - W % self.window_size) % self.window_size
pad_b = (self.window_size - H % self.window_size) % self.window_size
x = F.pad(x,(0,0,pad_l,pad_r,pad_t,pad_b))
        # Hp, Wp are the height and width of the padded feature map
_, Hp, Wp, _ = x.shape
        # W-MSA or SW-MSA?
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_x = x
attn_mask = None
        # windows partition
x_windows = window_partition(shifted_x,self.window_size) #[nW*B, Mh, Mw, C]
x_windows = x_windows.view(-1, self.window_size*self.window_size,C) # [nW*B, Mh*Mw, C]
# W-MSA / SW-MSA
attn_windows = self.attn(x_windows, mask=attn_mask) # [nW*B, Mh*Mw, C]
        # merge the windows back into a full feature map
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) # [nW*B, Mh, Mw, C]
shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # [B, H', W', C]
        # for SW-MSA the cyclic shift has to be reversed
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
        # remove the padded data
if pad_r > 0 or pad_b > 0:
            # strip the padding added above
x = x[:, :H, :W, :].contiguous()
x = x.view(B,H*W,C)
# FFN
        # two skip connections
x = shortcut + self.drop_path(x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class WindowsAttention(nn.Module):
"""
Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both shifted and non-shifted windows.
    In ViT attention is global, so its cost grows quadratically with image size; for dense
    downstream vision tasks, or for very large images, this global self-attention becomes
    prohibitively expensive.  Swin Transformer instead uses window-based attention, which
    makes the complexity linear in the image size.
    General Model:
        W-MSA / SW-MSA
    With the shift operation a patch is no longer limited to the other patches of the window
    it sits in: after the shift it interacts with the patches of a new window, and all the
    patches of that new window come from different windows of the previous layer.  This is
    the cross-window connection the authors describe: windows can now exchange information.
    Combined with the later Patch Merging, by the last few Transformer layers the receptive
    field of each patch is already very large.
"""
def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.):
"""
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
# Mh: Windows Size Height
# Mw: Windows Size Width
# nH: num_heads
super(WindowsAttention, self).__init__()
self.dim = dim
self.window_size = window_size # [Mh, Mw]
self.num_heads = num_heads
        head_dim = dim // num_heads  # dim per head
self.scale = head_dim ** -0.5 # scale
        # parameter table that stores the relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # [2*Mh-1 * 2*Mw-1, nH]
        # get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w], indexing="ij")) # [2, Mh, Mw]
coords_flatten = torch.flatten(coords, 1) # [2, Mh*Mw]
# [2, Mh*Mw, 1] - [2, 1, Mh*Mw]
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # [2, Mh*Mw, Mh*Mw]
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # [Mh*Mw, Mh*Mw, 2]
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # [Mh*Mw, Mh*Mw]
        # register_buffer: stores a constant tensor with the module; it is written/read with
        # the model's state dict but is not a learnable parameter, so it needs no gradient
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
nn.init.trunc_normal_(self.relative_position_bias_table, std=.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self,x,mask=None):
"""
Args:
x: input features with shape of (num_windows*B, Mh*Mw, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
        The leading dimension of x is num_windows * batch_size; attention is computed
        inside each window.
"""
# [batch_size*num_windows, Mh*Mw, total_embed_dim]
B_, N, C = x.shape
# qkv(): -> [batch_size*num_windows, Mh*Mw, 3 * total_embed_dim]
# reshape: -> [batch_size*num_windows, Mh*Mw, 3, num_heads, embed_dim_per_head]
# permute: -> [3, batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head]
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
# [batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head]
q,k,v = qkv.unbind(0)
# QK^T/sqrt(d)
# transpose: -> [batch_size*num_windows, num_heads, embed_dim_per_head, Mh*Mw]
# @: multiply -> [batch_size*num_windows, num_heads, Mh*Mw, Mh*Mw]
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
# QK^T/sqrt(d) + B
# B:
# relative_position_bias_table.view: [Mh*Mw*Mh*Mw,nH] -> [Mh*Mw,Mh*Mw,nH]
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # [nH, Mh*Mw, Mh*Mw]
# [Bs*nW, nH, Mh*Mw, Mh*Mw]
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
            # SW-MSA needs the attention mask
# mask: [nW, Mh*Mw, Mh*Mw]
# attn.view: [batch_size, num_windows, num_heads, Mh*Mw, Mh*Mw]
# # mask.unsqueeze: [1, nW, 1, Mh*Mw, Mh*Mw]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
# @: multiply -> [batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head]
# transpose: -> [batch_size*num_windows, Mh*Mw, num_heads, embed_dim_per_head]
# reshape: -> [batch_size*num_windows, Mh*Mw, total_embed_dim]
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
if __name__ == "__main__":
pass
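    # Hedged usage sketch (not part of the original file): build the tiny variant and run a
    # dummy batch through it to sanity-check the output shape; assumes the BasicModule
    # dependencies imported at the top of this file are available.
    model = swin_tiny_patch4_window7_224(num_classes=1000)
    dummy = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        logits = model(dummy)
    print(logits.shape)  # expected: torch.Size([1, 1000])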
|
[
"torch.nn.Dropout",
"BasicModule.Mlp",
"torch.jit.is_scripting",
"torch.roll",
"torch.nn.init.constant_",
"torch.nn.Softmax",
"torch.arange",
"torch.nn.functional.pad",
"torch.flatten",
"torch.nn.Linear",
"BasicModule.window_reverse",
"torch.zeros",
"BasicModule.DropPath",
"numpy.ceil",
"BasicModule.window_partition",
"torch.nn.ModuleList",
"BasicModule.PatchEmbed",
"torch.utils.checkpoint.checkpoint",
"torch.nn.Identity",
"torch.nn.AdaptiveAvgPool1d",
"torch.nn.init.trunc_normal_",
"torch.meshgrid"
] |
[((6893, 7018), 'BasicModule.PatchEmbed', 'PatchEmbed', ([], {'patch_size': 'patch_size', 'in_c': 'in_chans', 'embed_dim': 'embed_dim', 'norm_layer': '(norm_layer if self.patch_norm else None)'}), '(patch_size=patch_size, in_c=in_chans, embed_dim=embed_dim,\n norm_layer=norm_layer if self.patch_norm else None)\n', (6903, 7018), False, 'from BasicModule import PatchMerging, DropPath, PatchEmbed\n'), ((7064, 7087), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'drop_rate'}), '(p=drop_rate)\n', (7074, 7087), True, 'import torch.nn as nn\n'), ((7294, 7309), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (7307, 7309), True, 'import torch.nn as nn\n'), ((8338, 8361), 'torch.nn.AdaptiveAvgPool1d', 'nn.AdaptiveAvgPool1d', (['(1)'], {}), '(1)\n', (8358, 8361), True, 'import torch.nn as nn\n'), ((9178, 9197), 'torch.flatten', 'torch.flatten', (['x', '(1)'], {}), '(x, 1)\n', (9191, 9197), False, 'import torch\n'), ((12159, 12203), 'torch.zeros', 'torch.zeros', (['(1, Hp, Wp, 1)'], {'device': 'x.device'}), '((1, Hp, Wp, 1), device=x.device)\n', (12170, 12203), False, 'import torch\n'), ((12794, 12838), 'BasicModule.window_partition', 'window_partition', (['img_mask', 'self.window_size'], {}), '(img_mask, self.window_size)\n', (12810, 12838), False, 'from BasicModule import window_partition, window_reverse\n'), ((15934, 16022), 'BasicModule.Mlp', 'Mlp', ([], {'in_features': 'dim', 'hidden_features': 'mlp_hidden_dim', 'act_layer': 'act_layer', 'drop': 'drop'}), '(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer,\n drop=drop)\n', (15937, 16022), False, 'from BasicModule import Mlp\n'), ((16625, 16669), 'torch.nn.functional.pad', 'F.pad', (['x', '(0, 0, pad_l, pad_r, pad_t, pad_b)'], {}), '(x, (0, 0, pad_l, pad_r, pad_t, pad_b))\n', (16630, 16669), True, 'import torch.nn.functional as F\n'), ((17057, 17102), 'BasicModule.window_partition', 'window_partition', (['shifted_x', 'self.window_size'], {}), '(shifted_x, self.window_size)\n', (17073, 17102), False, 'from BasicModule import window_partition, window_reverse\n'), ((17474, 17528), 'BasicModule.window_reverse', 'window_reverse', (['attn_windows', 'self.window_size', 'Hp', 'Wp'], {}), '(attn_windows, self.window_size, Hp, Wp)\n', (17488, 17528), False, 'from BasicModule import window_partition, window_reverse\n'), ((20037, 20070), 'torch.arange', 'torch.arange', (['self.window_size[0]'], {}), '(self.window_size[0])\n', (20049, 20070), False, 'import torch\n'), ((20090, 20123), 'torch.arange', 'torch.arange', (['self.window_size[1]'], {}), '(self.window_size[1])\n', (20102, 20123), False, 'import torch\n'), ((20246, 20270), 'torch.flatten', 'torch.flatten', (['coords', '(1)'], {}), '(coords, 1)\n', (20259, 20270), False, 'import torch\n'), ((20996, 21034), 'torch.nn.Linear', 'nn.Linear', (['dim', '(dim * 3)'], {'bias': 'qkv_bias'}), '(dim, dim * 3, bias=qkv_bias)\n', (21005, 21034), True, 'import torch.nn as nn\n'), ((21060, 21081), 'torch.nn.Dropout', 'nn.Dropout', (['attn_drop'], {}), '(attn_drop)\n', (21070, 21081), True, 'import torch.nn as nn\n'), ((21102, 21121), 'torch.nn.Linear', 'nn.Linear', (['dim', 'dim'], {}), '(dim, dim)\n', (21111, 21121), True, 'import torch.nn as nn\n'), ((21147, 21168), 'torch.nn.Dropout', 'nn.Dropout', (['proj_drop'], {}), '(proj_drop)\n', (21157, 21168), True, 'import torch.nn as nn\n'), ((21178, 21244), 'torch.nn.init.trunc_normal_', 'nn.init.trunc_normal_', (['self.relative_position_bias_table'], {'std': '(0.02)'}), '(self.relative_position_bias_table, std=0.02)\n', (21199, 21244), True, 
'import torch.nn as nn\n'), ((21267, 21285), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (21277, 21285), True, 'import torch.nn as nn\n'), ((8382, 8423), 'torch.nn.Linear', 'nn.Linear', (['self.num_features', 'num_classes'], {}), '(self.num_features, num_classes)\n', (8391, 8423), True, 'import torch.nn as nn\n'), ((8448, 8461), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (8459, 8461), True, 'import torch.nn as nn\n'), ((8584, 8625), 'torch.nn.init.trunc_normal_', 'nn.init.trunc_normal_', (['m.weight'], {'std': '(0.02)'}), '(m.weight, std=0.02)\n', (8605, 8625), True, 'import torch.nn as nn\n'), ((15739, 15758), 'BasicModule.DropPath', 'DropPath', (['drop_path'], {}), '(drop_path)\n', (15747, 15758), False, 'from BasicModule import PatchMerging, DropPath, PatchEmbed\n'), ((15782, 15795), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (15793, 15795), True, 'import torch.nn as nn\n'), ((16852, 16923), 'torch.roll', 'torch.roll', (['x'], {'shifts': '(-self.shift_size, -self.shift_size)', 'dims': '(1, 2)'}), '(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))\n', (16862, 16923), False, 'import torch\n'), ((17658, 17735), 'torch.roll', 'torch.roll', (['shifted_x'], {'shifts': '(self.shift_size, self.shift_size)', 'dims': '(1, 2)'}), '(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))\n', (17668, 17735), False, 'import torch\n'), ((19813, 19888), 'torch.zeros', 'torch.zeros', (['((2 * window_size[0] - 1) * (2 * window_size[1] - 1))', 'num_heads'], {}), '((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)\n', (19824, 19888), False, 'import torch\n'), ((20153, 20204), 'torch.meshgrid', 'torch.meshgrid', (['[coords_h, coords_w]'], {'indexing': '"""ij"""'}), "([coords_h, coords_w], indexing='ij')\n", (20167, 20204), False, 'import torch\n'), ((8705, 8733), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (8722, 8733), True, 'import torch.nn as nn\n'), ((8788, 8816), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (8805, 8816), True, 'import torch.nn as nn\n'), ((8829, 8861), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.weight', '(1.0)'], {}), '(m.weight, 1.0)\n', (8846, 8861), True, 'import torch.nn as nn\n'), ((11967, 11996), 'numpy.ceil', 'np.ceil', (['(H / self.window_size)'], {}), '(H / self.window_size)\n', (11974, 11996), True, 'import numpy as np\n'), ((12034, 12063), 'numpy.ceil', 'np.ceil', (['(W / self.window_size)'], {}), '(W / self.window_size)\n', (12041, 12063), True, 'import numpy as np\n'), ((13536, 13576), 'torch.utils.checkpoint.checkpoint', 'checkpoint.checkpoint', (['blk', 'x', 'attn_mask'], {}), '(blk, x, attn_mask)\n', (13557, 13576), True, 'import torch.utils.checkpoint as checkpoint\n'), ((13466, 13490), 'torch.jit.is_scripting', 'torch.jit.is_scripting', ([], {}), '()\n', (13488, 13490), False, 'import torch\n')]
|
# -*- coding:utf-8 -*-
import six
import numpy as np
from pyproj import Proj
import operator
from .exceptions import *
class NullProj(object):
"""
Similar to pyproj.Proj, but NullProj does not do actual conversion.
"""
@property
def srs(self):
return ''
def __call__(self, x, y, **kwargs):
return x, y
class GridderBase(object):
"""Gridder is a helper for i, j <-> x, y conversion, etc."""
def i2x(self, *args):
"""Convert i, j, ... -> x, y, ..."""
raise NotImplementedError
def x2i(self, *args, **kwargs):
"""Convert x, y, ... -> i, j, ..."""
raise NotImplementedError
def copy(self, **kwargs):
kws = self.dump()
kws.update(kwargs)
new_gridder = self.__class__(**kws)
return new_gridder
def calibrate(self, x0, y0, x1=None, y1=None):
return
def dump(self):
return {}
class XYGridderBase(GridderBase):
"""
Requires self.X & self.Y.
"""
@property
def bbox(self):
return (np.min(self.X), np.min(self.Y), np.max(self.X), np.max(self.Y))
def get_bounding_ij(self, x1, y1, x2, y2, **kwargs):
bbox = self.bbox
if x1 is None:
x1 = bbox[0]
if y1 is None:
y1 = bbox[1]
if x2 is None:
x2 = bbox[2]
if y2 is None:
y2 = bbox[3]
bad = ~((self.X >= x1) & (self.X <= x2) & (self.Y >= y1) & (self.Y <= y2))
        x_bad = np.all(bad, axis=0)  # np.all: np.alltrue was deprecated and removed in NumPy 2.0
        y_bad = np.all(bad, axis=1)
x_points = np.argwhere(np.diff(np.r_[True, x_bad, True])).reshape(-1, 2)
y_points = np.argwhere(np.diff(np.r_[True, y_bad, True])).reshape(-1, 2)
i1, i2 = (-1, -1) if x_points.shape[0] == 0 else x_points[0]
j1, j2 = (-1, -1) if y_points.shape[0] == 0 else y_points[0]
return i1, j1, i2, j2
def check_bound(self, i, j, int_index=True):
start = -0.5
subtracted = 1
if int_index:
start = 0
if int_index in ('lowerleft', 'll'):
subtracted = 2
if np.isscalar(i):
if (i >= start and i <= self.nx-subtracted) and (j >= start and j <= self.ny-subtracted):
return i, j
else:
raise OutOfGridBound("i: {}, j: {} is out of bound!".format(i, j))
else:
i = np.where((i >= start) & (i <= self.nx - subtracted), i, np.nan)
j = np.where((j >= start) & (j <= self.ny - subtracted), j, np.nan)
return i, j
class XYProjGridder(XYGridderBase):
def __init__(self, proj=None, x=None, y=None, nx=None, ny=None, dx=None, dy=None, x_orig=0.0, y_orig=0.0, **kwargs):
self.proj = proj
self._reset_raw_xy()
if x is not None and y is not None:
self.set_xy(x, y)
else:
self._init_with_para(nx, ny, dx, dy, x_orig, y_orig)
@property
def proj(self):
return self._proj
@proj.setter
def proj(self, p):
if p is None:
self._proj = NullProj()
elif isinstance(p, (Proj, NullProj)):
self._proj = p
elif isinstance(p, dict):
self._proj = Proj(**p)
else: # Treat as proj_string
self._proj = Proj(str(p)) # TODO: check PY3 compatibility.
self._reset_raw_xy()
if all([hasattr(self, attr) for attr in ('_nx', '_ny', '_dx', '_dy', '_x_orig', '_y_orig')]):
self._updateXY()
@property
def X(self):
return self._X
@X.setter
def X(self, x):
if self._raw_y is None:
raise ValueError("Cannot set x alone when no raw y presents.")
ndim_x = np.ndim(x)
if ndim_x == 1 and np.ndim(self._raw_y) == 1:
self.set_xy(x, self._raw_y)
elif ndim_x == 2 and np.shape(x) == np.shape(self.Y):
self.set_xy(x, self.Y)
else:
self._raise_invalid_shape(x, self.Y)
@property
def Y(self):
return self._Y
@Y.setter
def Y(self, y):
if self._raw_x is None:
raise ValueError("Cannot set y alone when no raw x presents.")
ndim_y = np.ndim(y)
if ndim_y == 1 and np.ndim(self._raw_x) == 1:
self.set_xy(self._raw_x, y)
elif ndim_y == 2 and np.shape(y) == np.shape(self.X):
self.set_xy(self.X, y)
else:
self._raise_invalid_shape(self.X, y)
@property
def CX(self):
return self._CX
@property
def CY(self):
return self._CY
@property
def x(self):
return self._raw_x if self._raw_x is not None else self._X
@property
def y(self):
return self._raw_y if self._raw_y is not None else self._Y
@property
def cx(self):
return self._raw_cx if self._raw_cx is not None else self._CX
@property
def cy(self):
return self._raw_cy if self._raw_cy is not None else self._CY
@property
def nx(self):
return self._nx
@nx.setter
def nx(self, value):
self._nx = value
self._reset_raw_xy()
self._updateXY()
@property
def ny(self):
return self._ny
@ny.setter
def ny(self, value):
self._ny = value
self._reset_raw_xy()
self._updateXY()
@property
def dx(self):
return self._dx
@dx.setter
def dx(self, value):
self._dx = value
self._reset_raw_xy()
self._updateXY()
@property
def dy(self):
return self._dy
@dy.setter
def dy(self, value):
self._dy = value
self._reset_raw_xy()
self._updateXY()
@property
def x_orig(self):
return self._x_orig
@x_orig.setter
def x_orig(self, value):
self._x_orig = value
self._reset_raw_xy()
self._updateXY()
@property
def y_orig(self):
return self._y_orig
@y_orig.setter
def y_orig(self, value):
self._y_orig = value
self._reset_raw_xy()
self._updateXY()
@property
def bbox(self):
return self._bbox
@property
def cbox(self):
"""corner box"""
return self._cbox
def _init_with_para(self, nx, ny, dx, dy, x_orig, y_orig):
self._nx = nx
self._ny = ny
self._dx = dx
self._dy = dy
self._x_orig = x_orig
self._y_orig = y_orig
self._updateXY()
@property
def has_null_proj(self):
return isinstance(self.proj, NullProj)
def set_xy(self, x, y):
ndim_x, ndim_y = np.ndim(x), np.ndim(y)
if ndim_x == 1 and ndim_y == 1:
self._nx, self._ny = len(x), len(y)
elif ndim_x == 2 and ndim_y == 2:
self._ny, self._nx = np.shape(x)
else:
self._raise_invalid_shape(x, y)
self._raw_x, self._raw_y = np.asarray(x), np.asarray(y)
self.calibrate(x, y)
def _raise_invalid_shape(self, x, y):
raise ValueError("Invalid x, y shape: {}, {}".format(np.shape(x), np.shape(y)))
def _reset_raw_xy(self):
self._raw_x, self._raw_y = None, None
def _updateXY(self):
jj, ii = np.mgrid[0:self.ny, 0:self.nx]
cjj, cii = np.mgrid[-0.5:self.ny, -0.5:self.nx]
xx, yy = self.i2x(ii, jj)
cxx, cyy = self.i2x(cii, cjj)
self._X, self._Y = xx, yy
self._CX, self._CY = cxx, cyy
if self._raw_x is not None and self._raw_x.ndim == 1:
self._raw_cx = self._CX[0]
else:
self._raw_cx = None
if self._raw_y is not None and self._raw_y.ndim == 1:
self._raw_cy = self._CY[:, 0]
else:
self._raw_cy = None
self._bbox = (np.min(self._X), np.min(self._Y), np.max(self._X), np.max(self._Y))
self._cbox = (np.min(self._CX), np.min(self._CY), np.max(self._CX), np.max(self._CY))
return xx, yy
def i2x(self, i, j):
px = i * self.dx + self.x_orig
py = j * self.dy + self.y_orig
return self.proj(px, py, inverse=True)
def x2i(self, x, y, int_index=True, check_bound=None):
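        # Inverse of i2x: project (x, y) and convert to fractional grid
        # indices.  int_index='lowerleft'/'ll' floors to the lower-left cell,
        # any other truthy value rounds to the nearest cell center.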
px, py = self.proj(x, y)
i = (px - self.x_orig) / self.dx
j = (py - self.y_orig) / self.dy
if int_index:
if int_index in ('lowerleft', 'll'):
i = np.floor(i)
j = np.floor(j)
else:
i = np.round(i)
j = np.round(j)
if np.isscalar(i):
i = int(i)
j = int(j)
else:
i = i.astype('i')
j = j.astype('i')
if check_bound:
return self.check_bound(i, j, int_index=int_index)
else:
return i, j
def calibrate(self, x, y, x1=None, y1=None):
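        # Recompute the projected origin (and, when a second reference point
        # is available, the cell sizes dx/dy) from coordinates given as
        # scalars, 1-D axes or 2-D grids.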
ndim_x, ndim_y = np.ndim(x), np.ndim(y)
if ndim_x == 0 and ndim_y == 0:
x0, y0 = x, y
        elif ndim_x == 1 and ndim_y == 1:
x0, x1 = x[0], x[1]
y0, y1 = y[0], y[1]
elif ndim_x == 2 and ndim_y == 2:
x0, x1 = x[0, 0], x[1, 1]
y0, y1 = y[0, 0], y[1, 1]
else:
self._raise_invalid_shape(x, y)
px0, py0 = self.proj(x0, y0)
self._x_orig = px0
self._y_orig = py0
if x1 is not None and y1 is not None:
px1, py1 = self.proj(x1, y1)
self._dx = px1 - px0
self._dy = py1 - py0
self._updateXY()
def dump(self):
return {
"proj": self.proj.srs,
"nx": self.nx, "ny": self.ny, "dx": self.dx, "dy": self.dy,
"x_orig": self.x_orig, "y_orig": self.y_orig
}
class LonLatSurroundingGridder(XYGridderBase):
def __init__(self, lon0, lat0, rmin, rmax, nr, ntheta, theta0=0.0, r_earth=6371):
self.lon0 = lon0
self.lat0 = lat0
self.rmin = rmin
self.rmax = rmax
self.nr = nr
self.ntheta = ntheta
self.theta0 = theta0
self.r_earth = r_earth
self.dtheta = np.pi * 2 / self.ntheta
self.dr = (self.rmax - self.rmin) / (self.nr - 1)
self._updateXY()
def _updateXY(self):
r = np.linspace(self.rmin, self.rmax, self.nr)
theta = np.arange(self.ntheta) * self.dtheta + self.theta0
THETA, R = np.meshgrid(theta, r)
LON, LAT = self.r_theta_to_lon_lat(R, THETA)
self._X = LON
self._Y = LAT
return self._X, self._Y
def r_theta_to_lon_lat(self, r, theta):
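        # Spherical "destination point" formula: starting from (lon0, lat0),
        # travel great-circle distance r (same unit as r_earth) along initial
        # bearing theta and return the end point's lon/lat in degrees.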
r_ = r / self.r_earth
sin_r = np.sin(r_)
cos_r = np.cos(r_)
lat0_ = np.deg2rad(self.lat0)
lon0_ = np.deg2rad(self.lon0)
sin_lat0 = np.sin(lat0_)
cos_lat0 = np.cos(lat0_)
sin_lat = sin_lat0 * cos_r + cos_lat0 * sin_r * np.cos(theta)
lat_ = np.arcsin(sin_lat)
lon_ = lon0_ + np.arctan2(np.sin(theta) * sin_r * cos_lat0, cos_r - sin_lat0 * sin_lat)
lon = np.rad2deg(lon_)
lat = np.rad2deg(lat_)
return lon, lat
@property
def nx(self):
return self.ntheta
@property
def ny(self):
return self.nr
@property
def X(self):
return self._X
@property
def Y(self):
return self._Y
@property
def x(self):
return self._X
@property
def y(self):
return self._Y
def i2x(self, i, j):
theta = self.theta0 + i * self.dtheta
r = self.rmin + j * self.dr
lon, lat = self.r_theta_to_lon_lat(r, theta)
return lon, lat
def x2i(self, x, y, int_index=True, check_bound=None):
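        # Inverse mapping: the initial-bearing formula gives the angular
        # (theta) index and the haversine great-circle distance gives the
        # radial index, both then discretised onto the (ntheta, nr) grid.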
lon2, lat2 = np.deg2rad(x), np.deg2rad(y)
lon1, lat1 = np.deg2rad(self.lon0), np.deg2rad(self.lat0)
dlon = lon2 - lon1
dlat = lat2 - lat1
sin_dlon = np.sin(dlon)
cos_dlon = np.cos(dlon)
sin_lat1 = np.sin(lat1)
cos_lat1 = np.cos(lat1)
sin_lat2 = np.sin(lat2)
cos_lat2 = np.cos(lat2)
a = cos_lat2 * sin_dlon
b = cos_lat1 * sin_lat2 - sin_lat1 * cos_lat2 * cos_dlon
theta = np.arctan2(a, b)
c = np.sin(dlat / 2) ** 2 + cos_lat1 * cos_lat2 * np.sin(dlon / 2) ** 2
d = 2 * np.arcsin(np.sqrt(c))
r = d * self.r_earth
i = (theta - self.theta0) / self.dtheta % self.ntheta
j = (r - self.rmin) / self.dr
if int_index:
i = np.round(i)
j = np.round(j)
if np.isscalar(i):
i = int(i)
j = int(j)
else:
i = i.astype('i')
j = j.astype('i')
if check_bound:
return self.check_bound(i, j, int_index=int_index)
else:
return i, j
class XYIrregularGridder(XYGridderBase):
# TODO: use kdtree.
def __init__(self, X, Y):
X = np.array(X)
Y = np.array(Y)
if X.ndim == 1:
self.X, self.Y = np.meshgrid(X, Y)
else:
self.X, self.Y = X, Y
        self.ny, self.nx = self.X.shape
def i2x(self, i, j, *args, **kwargs):
return self.X[j, i], self.Y[j, i]
def x2i(self, x, y, *args, **kwargs):
distances = np.hypot(self.X-x, self.Y-y)
flat_i = np.argmin(distances)
        return flat_i // self.nx, flat_i % self.nx
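    def x2i_kdtree(self, x, y):
        # Hypothetical KD-tree variant of x2i (a sketch for the TODO above,
        # not part of the original class); assumes scipy is available and
        # that X/Y live in a space where Euclidean distance is meaningful.
        from scipy.spatial import cKDTree
        points = np.column_stack([self.X.ravel(), self.Y.ravel()])
        _, flat_i = cKDTree(points).query([x, y])
        return flat_i // self.nx, flat_i % self.nx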
def dump(self):
return {
"X": self.X,
"Y": self.Y,
"nx": self.nx,
"ny": self.ny,
}
|
[
"numpy.arctan2",
"numpy.floor",
"numpy.argmin",
"numpy.shape",
"numpy.sin",
"numpy.arange",
"numpy.round",
"numpy.meshgrid",
"numpy.ndim",
"numpy.arcsin",
"numpy.max",
"numpy.linspace",
"numpy.asarray",
"numpy.hypot",
"numpy.min",
"numpy.cos",
"numpy.alltrue",
"numpy.deg2rad",
"numpy.isscalar",
"numpy.rad2deg",
"numpy.where",
"numpy.array",
"numpy.diff",
"pyproj.Proj",
"numpy.sqrt"
] |
[((1494, 1517), 'numpy.alltrue', 'np.alltrue', (['bad'], {'axis': '(0)'}), '(bad, axis=0)\n', (1504, 1517), True, 'import numpy as np\n'), ((1534, 1557), 'numpy.alltrue', 'np.alltrue', (['bad'], {'axis': '(1)'}), '(bad, axis=1)\n', (1544, 1557), True, 'import numpy as np\n'), ((2117, 2131), 'numpy.isscalar', 'np.isscalar', (['i'], {}), '(i)\n', (2128, 2131), True, 'import numpy as np\n'), ((3714, 3724), 'numpy.ndim', 'np.ndim', (['x'], {}), '(x)\n', (3721, 3724), True, 'import numpy as np\n'), ((4193, 4203), 'numpy.ndim', 'np.ndim', (['y'], {}), '(y)\n', (4200, 4203), True, 'import numpy as np\n'), ((10223, 10265), 'numpy.linspace', 'np.linspace', (['self.rmin', 'self.rmax', 'self.nr'], {}), '(self.rmin, self.rmax, self.nr)\n', (10234, 10265), True, 'import numpy as np\n'), ((10353, 10374), 'numpy.meshgrid', 'np.meshgrid', (['theta', 'r'], {}), '(theta, r)\n', (10364, 10374), True, 'import numpy as np\n'), ((10597, 10607), 'numpy.sin', 'np.sin', (['r_'], {}), '(r_)\n', (10603, 10607), True, 'import numpy as np\n'), ((10624, 10634), 'numpy.cos', 'np.cos', (['r_'], {}), '(r_)\n', (10630, 10634), True, 'import numpy as np\n'), ((10651, 10672), 'numpy.deg2rad', 'np.deg2rad', (['self.lat0'], {}), '(self.lat0)\n', (10661, 10672), True, 'import numpy as np\n'), ((10689, 10710), 'numpy.deg2rad', 'np.deg2rad', (['self.lon0'], {}), '(self.lon0)\n', (10699, 10710), True, 'import numpy as np\n'), ((10730, 10743), 'numpy.sin', 'np.sin', (['lat0_'], {}), '(lat0_)\n', (10736, 10743), True, 'import numpy as np\n'), ((10763, 10776), 'numpy.cos', 'np.cos', (['lat0_'], {}), '(lat0_)\n', (10769, 10776), True, 'import numpy as np\n'), ((10863, 10881), 'numpy.arcsin', 'np.arcsin', (['sin_lat'], {}), '(sin_lat)\n', (10872, 10881), True, 'import numpy as np\n'), ((10993, 11009), 'numpy.rad2deg', 'np.rad2deg', (['lon_'], {}), '(lon_)\n', (11003, 11009), True, 'import numpy as np\n'), ((11024, 11040), 'numpy.rad2deg', 'np.rad2deg', (['lat_'], {}), '(lat_)\n', (11034, 11040), True, 'import numpy as np\n'), ((11836, 11848), 'numpy.sin', 'np.sin', (['dlon'], {}), '(dlon)\n', (11842, 11848), True, 'import numpy as np\n'), ((11868, 11880), 'numpy.cos', 'np.cos', (['dlon'], {}), '(dlon)\n', (11874, 11880), True, 'import numpy as np\n'), ((11900, 11912), 'numpy.sin', 'np.sin', (['lat1'], {}), '(lat1)\n', (11906, 11912), True, 'import numpy as np\n'), ((11932, 11944), 'numpy.cos', 'np.cos', (['lat1'], {}), '(lat1)\n', (11938, 11944), True, 'import numpy as np\n'), ((11964, 11976), 'numpy.sin', 'np.sin', (['lat2'], {}), '(lat2)\n', (11970, 11976), True, 'import numpy as np\n'), ((11996, 12008), 'numpy.cos', 'np.cos', (['lat2'], {}), '(lat2)\n', (12002, 12008), True, 'import numpy as np\n'), ((12124, 12140), 'numpy.arctan2', 'np.arctan2', (['a', 'b'], {}), '(a, b)\n', (12134, 12140), True, 'import numpy as np\n'), ((12877, 12888), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (12885, 12888), True, 'import numpy as np\n'), ((12901, 12912), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (12909, 12912), True, 'import numpy as np\n'), ((13216, 13248), 'numpy.hypot', 'np.hypot', (['(self.X - x)', '(self.Y - y)'], {}), '(self.X - x, self.Y - y)\n', (13224, 13248), True, 'import numpy as np\n'), ((13262, 13282), 'numpy.argmin', 'np.argmin', (['distances'], {}), '(distances)\n', (13271, 13282), True, 'import numpy as np\n'), ((1055, 1069), 'numpy.min', 'np.min', (['self.X'], {}), '(self.X)\n', (1061, 1069), True, 'import numpy as np\n'), ((1071, 1085), 'numpy.min', 'np.min', (['self.Y'], {}), '(self.Y)\n', (1077, 1085), True, 
'import numpy as np\n'), ((1087, 1101), 'numpy.max', 'np.max', (['self.X'], {}), '(self.X)\n', (1093, 1101), True, 'import numpy as np\n'), ((1103, 1117), 'numpy.max', 'np.max', (['self.Y'], {}), '(self.Y)\n', (1109, 1117), True, 'import numpy as np\n'), ((2394, 2457), 'numpy.where', 'np.where', (['((i >= start) & (i <= self.nx - subtracted))', 'i', 'np.nan'], {}), '((i >= start) & (i <= self.nx - subtracted), i, np.nan)\n', (2402, 2457), True, 'import numpy as np\n'), ((2474, 2537), 'numpy.where', 'np.where', (['((j >= start) & (j <= self.ny - subtracted))', 'j', 'np.nan'], {}), '((j >= start) & (j <= self.ny - subtracted), j, np.nan)\n', (2482, 2537), True, 'import numpy as np\n'), ((6602, 6612), 'numpy.ndim', 'np.ndim', (['x'], {}), '(x)\n', (6609, 6612), True, 'import numpy as np\n'), ((6614, 6624), 'numpy.ndim', 'np.ndim', (['y'], {}), '(y)\n', (6621, 6624), True, 'import numpy as np\n'), ((6894, 6907), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (6904, 6907), True, 'import numpy as np\n'), ((6909, 6922), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (6919, 6922), True, 'import numpy as np\n'), ((7756, 7771), 'numpy.min', 'np.min', (['self._X'], {}), '(self._X)\n', (7762, 7771), True, 'import numpy as np\n'), ((7773, 7788), 'numpy.min', 'np.min', (['self._Y'], {}), '(self._Y)\n', (7779, 7788), True, 'import numpy as np\n'), ((7790, 7805), 'numpy.max', 'np.max', (['self._X'], {}), '(self._X)\n', (7796, 7805), True, 'import numpy as np\n'), ((7807, 7822), 'numpy.max', 'np.max', (['self._Y'], {}), '(self._Y)\n', (7813, 7822), True, 'import numpy as np\n'), ((7846, 7862), 'numpy.min', 'np.min', (['self._CX'], {}), '(self._CX)\n', (7852, 7862), True, 'import numpy as np\n'), ((7864, 7880), 'numpy.min', 'np.min', (['self._CY'], {}), '(self._CY)\n', (7870, 7880), True, 'import numpy as np\n'), ((7882, 7898), 'numpy.max', 'np.max', (['self._CX'], {}), '(self._CX)\n', (7888, 7898), True, 'import numpy as np\n'), ((7900, 7916), 'numpy.max', 'np.max', (['self._CY'], {}), '(self._CY)\n', (7906, 7916), True, 'import numpy as np\n'), ((8499, 8513), 'numpy.isscalar', 'np.isscalar', (['i'], {}), '(i)\n', (8510, 8513), True, 'import numpy as np\n'), ((8856, 8866), 'numpy.ndim', 'np.ndim', (['x'], {}), '(x)\n', (8863, 8866), True, 'import numpy as np\n'), ((8868, 8878), 'numpy.ndim', 'np.ndim', (['y'], {}), '(y)\n', (8875, 8878), True, 'import numpy as np\n'), ((11667, 11680), 'numpy.deg2rad', 'np.deg2rad', (['x'], {}), '(x)\n', (11677, 11680), True, 'import numpy as np\n'), ((11682, 11695), 'numpy.deg2rad', 'np.deg2rad', (['y'], {}), '(y)\n', (11692, 11695), True, 'import numpy as np\n'), ((11717, 11738), 'numpy.deg2rad', 'np.deg2rad', (['self.lon0'], {}), '(self.lon0)\n', (11727, 11738), True, 'import numpy as np\n'), ((11740, 11761), 'numpy.deg2rad', 'np.deg2rad', (['self.lat0'], {}), '(self.lat0)\n', (11750, 11761), True, 'import numpy as np\n'), ((12430, 12441), 'numpy.round', 'np.round', (['i'], {}), '(i)\n', (12438, 12441), True, 'import numpy as np\n'), ((12458, 12469), 'numpy.round', 'np.round', (['j'], {}), '(j)\n', (12466, 12469), True, 'import numpy as np\n'), ((12486, 12500), 'numpy.isscalar', 'np.isscalar', (['i'], {}), '(i)\n', (12497, 12500), True, 'import numpy as np\n'), ((12966, 12983), 'numpy.meshgrid', 'np.meshgrid', (['X', 'Y'], {}), '(X, Y)\n', (12977, 12983), True, 'import numpy as np\n'), ((3752, 3772), 'numpy.ndim', 'np.ndim', (['self._raw_y'], {}), '(self._raw_y)\n', (3759, 3772), True, 'import numpy as np\n'), ((4231, 4251), 'numpy.ndim', 'np.ndim', 
(['self._raw_x'], {}), '(self._raw_x)\n', (4238, 4251), True, 'import numpy as np\n'), ((6788, 6799), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (6796, 6799), True, 'import numpy as np\n'), ((7056, 7067), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (7064, 7067), True, 'import numpy as np\n'), ((7069, 7080), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (7077, 7080), True, 'import numpy as np\n'), ((8358, 8369), 'numpy.floor', 'np.floor', (['i'], {}), '(i)\n', (8366, 8369), True, 'import numpy as np\n'), ((8390, 8401), 'numpy.floor', 'np.floor', (['j'], {}), '(j)\n', (8398, 8401), True, 'import numpy as np\n'), ((8440, 8451), 'numpy.round', 'np.round', (['i'], {}), '(i)\n', (8448, 8451), True, 'import numpy as np\n'), ((8472, 8483), 'numpy.round', 'np.round', (['j'], {}), '(j)\n', (8480, 8483), True, 'import numpy as np\n'), ((10282, 10304), 'numpy.arange', 'np.arange', (['self.ntheta'], {}), '(self.ntheta)\n', (10291, 10304), True, 'import numpy as np\n'), ((10834, 10847), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (10840, 10847), True, 'import numpy as np\n'), ((12154, 12170), 'numpy.sin', 'np.sin', (['(dlat / 2)'], {}), '(dlat / 2)\n', (12160, 12170), True, 'import numpy as np\n'), ((12248, 12258), 'numpy.sqrt', 'np.sqrt', (['c'], {}), '(c)\n', (12255, 12258), True, 'import numpy as np\n'), ((1589, 1622), 'numpy.diff', 'np.diff', (['np.r_[True, x_bad, True]'], {}), '(np.r_[True, x_bad, True])\n', (1596, 1622), True, 'import numpy as np\n'), ((1670, 1703), 'numpy.diff', 'np.diff', (['np.r_[True, y_bad, True]'], {}), '(np.r_[True, y_bad, True])\n', (1677, 1703), True, 'import numpy as np\n'), ((3219, 3228), 'pyproj.Proj', 'Proj', ([], {}), '(**p)\n', (3223, 3228), False, 'from pyproj import Proj\n'), ((3848, 3859), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (3856, 3859), True, 'import numpy as np\n'), ((3863, 3879), 'numpy.shape', 'np.shape', (['self.Y'], {}), '(self.Y)\n', (3871, 3879), True, 'import numpy as np\n'), ((4327, 4338), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (4335, 4338), True, 'import numpy as np\n'), ((4342, 4358), 'numpy.shape', 'np.shape', (['self.X'], {}), '(self.X)\n', (4350, 4358), True, 'import numpy as np\n'), ((12200, 12216), 'numpy.sin', 'np.sin', (['(dlon / 2)'], {}), '(dlon / 2)\n', (12206, 12216), True, 'import numpy as np\n'), ((10916, 10929), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (10922, 10929), True, 'import numpy as np\n')]
|
# coding=utf-8
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common code for the simple cnn example."""
import functools
import os
from absl import logging
from flax import serialization
import haiku as hk
import jax
import jax.numpy as jnp
from learned_optimization import filesystem
import numpy as onp
import optax
import tensorflow_datasets as tfds
HKTree = hk.data_structures.to_immutable_dict({}).__class__
# We use flax for serialization but haiku's data struct is not registered.
def _ty_to_state_dict(v):
return serialization.to_state_dict(
{k: v for k, v in hk.data_structures.to_mutable_dict(v).items()})
def _ty_from_state_dict(target, d):
return HKTree(
**
{k: serialization.from_state_dict(target[k], v) for (k, v) in d.items()})
serialization.register_serialization_state(
HKTree, _ty_to_state_dict, _ty_from_state_dict, override=True)
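# Illustrative round-trip (added sketch, not part of the original module):
# with the handlers registered above, haiku parameter trees survive flax
# serialization, e.g.
#   params = hk.transform(hk_forward_fn).init(jax.random.PRNGKey(0), batch)
#   restored = serialization.from_bytes(params, serialization.to_bytes(params))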
def hk_forward_fn(batch):
"""Forward function for haiku."""
x = batch["image"].astype(jnp.float32) / 255.
mlp = hk.Sequential([
hk.Conv2D(64, (3, 3), stride=2),
jax.nn.relu,
hk.Conv2D(64, (3, 3), stride=1),
jax.nn.relu,
hk.Conv2D(64, (3, 3), stride=2),
jax.nn.relu,
hk.Conv2D(64, (3, 3), stride=1),
jax.nn.relu,
functools.partial(jnp.mean, axis=(1, 2)),
hk.Linear(10),
])
return mlp(x)
@jax.jit
def loss(params, key, batch):
net = hk.transform(hk_forward_fn)
logits = net.apply(params, key, batch)
labels = jax.nn.one_hot(batch["label"], 10)
softmax_xent = -jnp.sum(labels * jax.nn.log_softmax(logits))
softmax_xent /= labels.shape[0]
return softmax_xent
@jax.jit
def update(params, key, state, batch, meta_params):
opt = optax.adam(meta_params["learning_rate"])
l, grad = jax.value_and_grad(loss)(params, key, batch)
updates, new_state = opt.update(grad, state, params)
new_params = optax.apply_updates(params, updates)
return new_params, new_state, l
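# Minimal usage sketch (added; assumes a CIFAR-10 style batch dict as produced
# by get_data_iterators below):
#   key = jax.random.PRNGKey(0)
#   params = hk.transform(hk_forward_fn).init(key, batch)
#   opt_state = optax.adam(1e-3).init(params)
#   params, opt_state, l = update(params, key, opt_state, batch,
#                                  {"learning_rate": 1e-3})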
def save_state(path, state):
filesystem.make_dirs(os.path.dirname(path))
with filesystem.file_open(path, "wb") as fp:
fp.write(serialization.to_bytes(state))
def load_state(path, state):
logging.info("Restoring state %s:", path)
with filesystem.file_open(path, "rb") as fp:
state_new = serialization.from_bytes(state, fp.read())
tree = jax.tree_structure(state)
leaves_new = jax.tree_leaves(state_new)
return jax.tree_unflatten(tree, leaves_new)
def get_data_iterators(fake_data=False):
"""Get training and test data iterators."""
batch_size = 128
if not fake_data:
remap_label = lambda x: {"image": x["image"], "label": x["label"]}
def data(split):
dataset = tfds.load("cifar10", split=split)
iterator = iter(
tfds.as_numpy(
dataset.repeat(-1).shuffle(
batch_size * 10).batch(batch_size).map(remap_label)))
return iterator
return data("train"), data("test")
else:
def data():
while True:
yield {
"image": onp.zeros([batch_size, 32, 32, 3]),
"label": onp.zeros([batch_size], dtype=onp.int32)
}
return data(), data()
|
[
"optax.adam",
"tensorflow_datasets.load",
"jax.nn.log_softmax",
"absl.logging.info",
"flax.serialization.register_serialization_state",
"flax.serialization.from_state_dict",
"jax.nn.one_hot",
"os.path.dirname",
"optax.apply_updates",
"haiku.data_structures.to_immutable_dict",
"haiku.Conv2D",
"haiku.data_structures.to_mutable_dict",
"haiku.Linear",
"functools.partial",
"haiku.transform",
"jax.tree_leaves",
"jax.tree_unflatten",
"jax.tree_structure",
"numpy.zeros",
"jax.value_and_grad",
"flax.serialization.to_bytes",
"learned_optimization.filesystem.file_open"
] |
[((1307, 1416), 'flax.serialization.register_serialization_state', 'serialization.register_serialization_state', (['HKTree', '_ty_to_state_dict', '_ty_from_state_dict'], {'override': '(True)'}), '(HKTree, _ty_to_state_dict,\n _ty_from_state_dict, override=True)\n', (1349, 1416), False, 'from flax import serialization\n'), ((897, 937), 'haiku.data_structures.to_immutable_dict', 'hk.data_structures.to_immutable_dict', (['{}'], {}), '({})\n', (933, 937), True, 'import haiku as hk\n'), ((1925, 1952), 'haiku.transform', 'hk.transform', (['hk_forward_fn'], {}), '(hk_forward_fn)\n', (1937, 1952), True, 'import haiku as hk\n'), ((2005, 2039), 'jax.nn.one_hot', 'jax.nn.one_hot', (["batch['label']", '(10)'], {}), "(batch['label'], 10)\n", (2019, 2039), False, 'import jax\n'), ((2231, 2271), 'optax.adam', 'optax.adam', (["meta_params['learning_rate']"], {}), "(meta_params['learning_rate'])\n", (2241, 2271), False, 'import optax\n'), ((2399, 2435), 'optax.apply_updates', 'optax.apply_updates', (['params', 'updates'], {}), '(params, updates)\n', (2418, 2435), False, 'import optax\n'), ((2672, 2713), 'absl.logging.info', 'logging.info', (['"""Restoring state %s:"""', 'path'], {}), "('Restoring state %s:', path)\n", (2684, 2713), False, 'from absl import logging\n'), ((2829, 2854), 'jax.tree_structure', 'jax.tree_structure', (['state'], {}), '(state)\n', (2847, 2854), False, 'import jax\n'), ((2870, 2896), 'jax.tree_leaves', 'jax.tree_leaves', (['state_new'], {}), '(state_new)\n', (2885, 2896), False, 'import jax\n'), ((2906, 2942), 'jax.tree_unflatten', 'jax.tree_unflatten', (['tree', 'leaves_new'], {}), '(tree, leaves_new)\n', (2924, 2942), False, 'import jax\n'), ((2284, 2308), 'jax.value_and_grad', 'jax.value_and_grad', (['loss'], {}), '(loss)\n', (2302, 2308), False, 'import jax\n'), ((2525, 2546), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (2540, 2546), False, 'import os\n'), ((2555, 2587), 'learned_optimization.filesystem.file_open', 'filesystem.file_open', (['path', '"""wb"""'], {}), "(path, 'wb')\n", (2575, 2587), False, 'from learned_optimization import filesystem\n'), ((2721, 2753), 'learned_optimization.filesystem.file_open', 'filesystem.file_open', (['path', '"""rb"""'], {}), "(path, 'rb')\n", (2741, 2753), False, 'from learned_optimization import filesystem\n'), ((1560, 1591), 'haiku.Conv2D', 'hk.Conv2D', (['(64)', '(3, 3)'], {'stride': '(2)'}), '(64, (3, 3), stride=2)\n', (1569, 1591), True, 'import haiku as hk\n'), ((1618, 1649), 'haiku.Conv2D', 'hk.Conv2D', (['(64)', '(3, 3)'], {'stride': '(1)'}), '(64, (3, 3), stride=1)\n', (1627, 1649), True, 'import haiku as hk\n'), ((1676, 1707), 'haiku.Conv2D', 'hk.Conv2D', (['(64)', '(3, 3)'], {'stride': '(2)'}), '(64, (3, 3), stride=2)\n', (1685, 1707), True, 'import haiku as hk\n'), ((1734, 1765), 'haiku.Conv2D', 'hk.Conv2D', (['(64)', '(3, 3)'], {'stride': '(1)'}), '(64, (3, 3), stride=1)\n', (1743, 1765), True, 'import haiku as hk\n'), ((1792, 1832), 'functools.partial', 'functools.partial', (['jnp.mean'], {'axis': '(1, 2)'}), '(jnp.mean, axis=(1, 2))\n', (1809, 1832), False, 'import functools\n'), ((1840, 1853), 'haiku.Linear', 'hk.Linear', (['(10)'], {}), '(10)\n', (1849, 1853), True, 'import haiku as hk\n'), ((2608, 2637), 'flax.serialization.to_bytes', 'serialization.to_bytes', (['state'], {}), '(state)\n', (2630, 2637), False, 'from flax import serialization\n'), ((3180, 3213), 'tensorflow_datasets.load', 'tfds.load', (['"""cifar10"""'], {'split': 'split'}), "('cifar10', split=split)\n", (3189, 3213), True, 'import 
tensorflow_datasets as tfds\n'), ((1235, 1278), 'flax.serialization.from_state_dict', 'serialization.from_state_dict', (['target[k]', 'v'], {}), '(target[k], v)\n', (1264, 1278), False, 'from flax import serialization\n'), ((2076, 2102), 'jax.nn.log_softmax', 'jax.nn.log_softmax', (['logits'], {}), '(logits)\n', (2094, 2102), False, 'import jax\n'), ((1113, 1150), 'haiku.data_structures.to_mutable_dict', 'hk.data_structures.to_mutable_dict', (['v'], {}), '(v)\n', (1147, 1150), True, 'import haiku as hk\n'), ((3518, 3552), 'numpy.zeros', 'onp.zeros', (['[batch_size, 32, 32, 3]'], {}), '([batch_size, 32, 32, 3])\n', (3527, 3552), True, 'import numpy as onp\n'), ((3575, 3615), 'numpy.zeros', 'onp.zeros', (['[batch_size]'], {'dtype': 'onp.int32'}), '([batch_size], dtype=onp.int32)\n', (3584, 3615), True, 'import numpy as onp\n')]
|
import os
import math
import time
import functools
import random
from tqdm import tqdm
import cv2
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw
from pylab import rcParams
rcParams['figure.figsize'] = 20, 20 # noqa
from consts import FONT_SIZE
from utils import (
make_contours,
get_centers,
get_labels,
vis_pred_bbox,
filter_polygons_points_intersection,
vis_pred_bbox_polygon,
vis_pred_center,
font
)
from grpc_utils import (
KuzuSegment,
KuzuClassify
)
if __name__ == '__main__':
img_dir = "./images"
img_fp = os.path.join(img_dir, random.choice(os.listdir(img_dir)))
print(img_fp)
filter_polygon = True
kuzu_seg = KuzuSegment()
kuzu_cls = KuzuClassify()
img, origin_image, origin_h, origin_w = kuzu_seg.load_image(img_fp)
pred_bbox, pred_center = kuzu_seg.predict(img)
# get all polygon area in image
polygon_contours = make_contours(pred_bbox)
# get all center points by contour method
center_coords = get_centers(pred_center.astype(np.uint8))
no_center_points = len(center_coords)
final_center = vis_pred_center(center_coords, rad=2)
# filter polygon
if filter_polygon:
filtered_contours = filter_polygons_points_intersection(polygon_contours, center_coords) # noqa
pred_bbox = vis_pred_bbox_polygon(pred_bbox, filtered_contours)
final_bbox = vis_pred_bbox(pred_bbox, center_coords, width=2)
y_ratio = origin_h / 512
x_ratio = origin_w / 512
pil_img = Image.fromarray(origin_image).convert('RGBA')
char_canvas = Image.new('RGBA', pil_img.size)
char_draw = ImageDraw.Draw(char_canvas)
print(">>> {}".format(no_center_points))
if no_center_points > 0:
bbox_cluster = get_labels(center_coords, pred_bbox)
# ignore background hex color (=0)
for cluster_index in tqdm(range(len(center_coords))[1:]):
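            # mask of pixels assigned to this character cluster; its bounding
            # box is the span of rows/columns that contain any positive pixel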
char_pixel = (bbox_cluster == cluster_index).astype(np.float32)
try:
horizontal_indicies = np.where(np.any(char_pixel, axis=0))[0]
vertical_indicies = np.where(np.any(char_pixel, axis=1))[0]
x_min, x_max = horizontal_indicies[[0, -1]]
y_min, y_max = vertical_indicies[[0, -1]]
except IndexError:
continue
x = x_min
y = y_min
w = x_max - x_min
h = y_max - y_min
# convert to original coordinates
x = int(x * x_ratio)
w = int(w * x_ratio)
y = int(y * y_ratio)
h = int(h * y_ratio)
# set offset to crop character
offset = 5 # percentage
y_diff = math.ceil(h * offset / 100)
x_diff = math.ceil(w * offset / 100)
# expand area
y_from = y - y_diff
y_to = y + h + y_diff
x_from = x - x_diff
x_to = x + w + x_diff
            # clamp negative coordinates to zero so the crop stays inside the image
y_from, y_to, x_from, x_to = \
list(map(functools.partial(np.maximum, 0),
[y_from, y_to, x_from, x_to]))
try:
char_img = origin_image[y_from:y_to, x_from:x_to]
char_img = kuzu_cls.load_image(char_img)
pred_label = kuzu_cls.predict(char_img)
# print(pred_label)
char_draw.text(
(x + w + FONT_SIZE / 4, y + h / 2 - FONT_SIZE),
pred_label, fill=(0, 0, 255, 255),
font=font
)
except Exception as e:
print(e)
continue
char_img = Image.alpha_composite(pil_img, char_canvas)
char_img = char_img.convert("RGB")
char_img = np.asarray(char_img)
final_bbox = cv2.resize(final_bbox, (origin_w, origin_h))
final_center = cv2.resize(final_center, (origin_w, origin_h))
plt.imshow(char_img)
plt.imshow(final_bbox, cmap="jet", alpha=0.50)
plt.savefig("./assets/{}.jpg".format(time.time()), bbox_inches='tight')
|
[
"PIL.Image.new",
"grpc_utils.KuzuClassify",
"utils.make_contours",
"matplotlib.pyplot.imshow",
"utils.filter_polygons_points_intersection",
"utils.vis_pred_center",
"utils.vis_pred_bbox_polygon",
"PIL.ImageDraw.Draw",
"cv2.resize",
"functools.partial",
"math.ceil",
"numpy.asarray",
"os.listdir",
"time.time",
"numpy.any",
"PIL.Image.alpha_composite",
"utils.get_labels",
"PIL.Image.fromarray",
"grpc_utils.KuzuSegment",
"utils.vis_pred_bbox"
] |
[((720, 733), 'grpc_utils.KuzuSegment', 'KuzuSegment', ([], {}), '()\n', (731, 733), False, 'from grpc_utils import KuzuSegment, KuzuClassify\n'), ((749, 763), 'grpc_utils.KuzuClassify', 'KuzuClassify', ([], {}), '()\n', (761, 763), False, 'from grpc_utils import KuzuSegment, KuzuClassify\n'), ((947, 971), 'utils.make_contours', 'make_contours', (['pred_bbox'], {}), '(pred_bbox)\n', (960, 971), False, 'from utils import make_contours, get_centers, get_labels, vis_pred_bbox, filter_polygons_points_intersection, vis_pred_bbox_polygon, vis_pred_center, font\n'), ((1142, 1179), 'utils.vis_pred_center', 'vis_pred_center', (['center_coords'], {'rad': '(2)'}), '(center_coords, rad=2)\n', (1157, 1179), False, 'from utils import make_contours, get_centers, get_labels, vis_pred_bbox, filter_polygons_points_intersection, vis_pred_bbox_polygon, vis_pred_center, font\n'), ((1419, 1467), 'utils.vis_pred_bbox', 'vis_pred_bbox', (['pred_bbox', 'center_coords'], {'width': '(2)'}), '(pred_bbox, center_coords, width=2)\n', (1432, 1467), False, 'from utils import make_contours, get_centers, get_labels, vis_pred_bbox, filter_polygons_points_intersection, vis_pred_bbox_polygon, vis_pred_center, font\n'), ((1606, 1637), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', 'pil_img.size'], {}), "('RGBA', pil_img.size)\n", (1615, 1637), False, 'from PIL import Image, ImageDraw\n'), ((1654, 1681), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['char_canvas'], {}), '(char_canvas)\n', (1668, 1681), False, 'from PIL import Image, ImageDraw\n'), ((3689, 3732), 'PIL.Image.alpha_composite', 'Image.alpha_composite', (['pil_img', 'char_canvas'], {}), '(pil_img, char_canvas)\n', (3710, 3732), False, 'from PIL import Image, ImageDraw\n'), ((3787, 3807), 'numpy.asarray', 'np.asarray', (['char_img'], {}), '(char_img)\n', (3797, 3807), True, 'import numpy as np\n'), ((3826, 3870), 'cv2.resize', 'cv2.resize', (['final_bbox', '(origin_w, origin_h)'], {}), '(final_bbox, (origin_w, origin_h))\n', (3836, 3870), False, 'import cv2\n'), ((3890, 3936), 'cv2.resize', 'cv2.resize', (['final_center', '(origin_w, origin_h)'], {}), '(final_center, (origin_w, origin_h))\n', (3900, 3936), False, 'import cv2\n'), ((3942, 3962), 'matplotlib.pyplot.imshow', 'plt.imshow', (['char_img'], {}), '(char_img)\n', (3952, 3962), True, 'import matplotlib.pyplot as plt\n'), ((3967, 4012), 'matplotlib.pyplot.imshow', 'plt.imshow', (['final_bbox'], {'cmap': '"""jet"""', 'alpha': '(0.5)'}), "(final_bbox, cmap='jet', alpha=0.5)\n", (3977, 4012), True, 'import matplotlib.pyplot as plt\n'), ((1253, 1321), 'utils.filter_polygons_points_intersection', 'filter_polygons_points_intersection', (['polygon_contours', 'center_coords'], {}), '(polygon_contours, center_coords)\n', (1288, 1321), False, 'from utils import make_contours, get_centers, get_labels, vis_pred_bbox, filter_polygons_points_intersection, vis_pred_bbox_polygon, vis_pred_center, font\n'), ((1350, 1401), 'utils.vis_pred_bbox_polygon', 'vis_pred_bbox_polygon', (['pred_bbox', 'filtered_contours'], {}), '(pred_bbox, filtered_contours)\n', (1371, 1401), False, 'from utils import make_contours, get_centers, get_labels, vis_pred_bbox, filter_polygons_points_intersection, vis_pred_bbox_polygon, vis_pred_center, font\n'), ((1780, 1816), 'utils.get_labels', 'get_labels', (['center_coords', 'pred_bbox'], {}), '(center_coords, pred_bbox)\n', (1790, 1816), False, 'from utils import make_contours, get_centers, get_labels, vis_pred_bbox, filter_polygons_points_intersection, vis_pred_bbox_polygon, vis_pred_center, font\n'), 
((639, 658), 'os.listdir', 'os.listdir', (['img_dir'], {}), '(img_dir)\n', (649, 658), False, 'import os\n'), ((1542, 1571), 'PIL.Image.fromarray', 'Image.fromarray', (['origin_image'], {}), '(origin_image)\n', (1557, 1571), False, 'from PIL import Image, ImageDraw\n'), ((2736, 2763), 'math.ceil', 'math.ceil', (['(h * offset / 100)'], {}), '(h * offset / 100)\n', (2745, 2763), False, 'import math\n'), ((2785, 2812), 'math.ceil', 'math.ceil', (['(w * offset / 100)'], {}), '(w * offset / 100)\n', (2794, 2812), False, 'import math\n'), ((4055, 4066), 'time.time', 'time.time', ([], {}), '()\n', (4064, 4066), False, 'import time\n'), ((3060, 3092), 'functools.partial', 'functools.partial', (['np.maximum', '(0)'], {}), '(np.maximum, 0)\n', (3077, 3092), False, 'import functools\n'), ((2069, 2095), 'numpy.any', 'np.any', (['char_pixel'], {'axis': '(0)'}), '(char_pixel, axis=0)\n', (2075, 2095), True, 'import numpy as np\n'), ((2145, 2171), 'numpy.any', 'np.any', (['char_pixel'], {'axis': '(1)'}), '(char_pixel, axis=1)\n', (2151, 2171), True, 'import numpy as np\n')]
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy.testing import assert_allclose, assert_equal
import astropy.units as u
from astropy.table import Table
from gammapy.astro.population import (
add_observed_parameters,
add_pulsar_parameters,
add_pwn_parameters,
add_snr_parameters,
make_base_catalog_galactic,
make_catalog_random_positions_cube,
make_catalog_random_positions_sphere,
)
def test_make_catalog_random_positions_cube():
table = make_catalog_random_positions_cube(random_state=0)
d = table[0]
assert len(table) == 100
assert len(table.colnames) == 3
assert table["x"].unit == "pc"
assert_allclose(d["x"], 0.0976270078546495)
assert table["y"].unit == "pc"
assert_allclose(d["y"], 0.3556330735924602)
assert table["z"].unit == "pc"
assert_allclose(d["z"], -0.37640823601179485)
table = make_catalog_random_positions_cube(dimension=2, random_state=0)
assert_equal(table["z"], 0)
table = make_catalog_random_positions_cube(dimension=1, random_state=0)
assert_equal(table["y"], 0)
assert_equal(table["z"], 0)
def test_make_catalog_random_positions_sphere():
table = make_catalog_random_positions_sphere(random_state=0)
d = table[0]
assert len(table) == 100
assert len(table.colnames) == 3
assert table["lon"].unit == "rad"
assert_allclose(d["lon"], 3.4482969442579128)
assert table["lat"].unit == "rad"
assert_allclose(d["lat"], 0.36359133530192267)
assert table["distance"].unit == "pc"
assert_allclose(d["distance"], 0.6780943487897606)
def test_make_base_catalog_galactic():
table = make_base_catalog_galactic(n_sources=10, random_state=0)
d = table[0]
assert len(table) == 10
assert len(table.colnames) == 13
assert table["age"].unit == "yr"
assert_allclose(d["age"], 548813.50392732478)
assert table["n_ISM"].unit == "cm-3"
assert_allclose(d["n_ISM"], 1.0)
assert table["spiralarm"].unit is None
assert d["spiralarm"] == "Crux Scutum"
assert table["x_birth"].unit == "kpc"
assert_allclose(d["x_birth"], -5.856461, atol=1e-5)
assert table["y_birth"].unit == "kpc"
assert_allclose(d["y_birth"], 3.017292, atol=1e-5)
assert table["z_birth"].unit == "kpc"
assert_allclose(d["z_birth"], 0.049088, atol=1e-5)
assert table["x"].unit == "kpc"
assert_allclose(d["x"], -5.941061, atol=1e-5)
assert table["y"].unit == "kpc"
assert_allclose(d["y"], 3.081642, atol=1e-5)
assert table["z"].unit == "kpc"
assert_allclose(d["z"], 0.023161, atol=1e-5)
assert table["vx"].unit == "km/s"
assert_allclose(d["vx"], -150.727104, atol=1e-5)
assert table["vy"].unit == "km/s"
assert_allclose(d["vy"], 114.648494, atol=1e-5)
assert table["vz"].unit == "km/s"
assert_allclose(d["vz"], -46.193814, atol=1e-5)
assert table["v_abs"].unit == "km/s"
assert_allclose(d["v_abs"], 194.927693, atol=1e-5)
def test_add_snr_parameters():
table = Table()
table["age"] = [100, 1000] * u.yr
table["n_ISM"] = u.Quantity(1, "cm-3")
table = add_snr_parameters(table)
assert len(table) == 2
assert table.colnames == ["age", "n_ISM", "E_SN", "r_out", "r_in", "L_SNR"]
assert table["E_SN"].unit == "erg"
assert_allclose(table["E_SN"], 1e51)
assert table["r_out"].unit == "pc"
assert_allclose(table["r_out"], [1, 3.80730787743])
assert table["r_in"].unit == "pc"
assert_allclose(table["r_in"], [0.9086, 3.45931993743])
assert table["L_SNR"].unit == "1 / s"
assert_allclose(table["L_SNR"], [0, 1.0768e33])
def test_add_pulsar_parameters():
table = Table()
table["age"] = [100, 1000] * u.yr
table = add_pulsar_parameters(table, random_state=0)
assert len(table) == 2
assert len(table.colnames) == 10
assert table["age"].unit == "yr"
assert_allclose(table["age"], [100, 1000])
assert table["P0"].unit == "s"
assert_allclose(table["P0"], [0.214478, 0.246349], atol=1e-5)
assert table["P1"].unit == ""
assert_allclose(table["P1"], [6.310423e-13, 4.198294e-16], atol=1e-5)
assert table["P0_birth"].unit == "s"
assert_allclose(table["P0_birth"], [0.212418, 0.246336], atol=1e-5)
assert table["P1_birth"].unit == ""
assert_allclose(table["P1_birth"], [6.558773e-13, 4.199198e-16], atol=1e-5)
assert table["CharAge"].unit == "yr"
assert_allclose(table["CharAge"], [2.207394e-21, 1.638930e-24], atol=1e-5)
assert table["Tau0"].unit == "yr"
assert_allclose(table["Tau0"], [5.131385e03, 9.294538e06], atol=1e-5)
assert table["L_PSR"].unit == "erg / s"
assert_allclose(table["L_PSR"], [2.599229e36, 1.108788e33], rtol=1e-5)
assert table["L0_PSR"].unit == "erg / s"
assert_allclose(table["L0_PSR"], [2.701524e36, 1.109026e33], rtol=1e-5)
assert table["B_PSR"].unit == "G"
assert_allclose(table["B_PSR"], [1.194420e13, 3.254597e11], rtol=1e-5)
def test_add_pwn_parameters():
table = make_base_catalog_galactic(n_sources=10, random_state=0)
# To compute PWN parameters we need PSR and SNR parameters first
table = add_snr_parameters(table)
table = add_pulsar_parameters(table, random_state=0)
table = add_pwn_parameters(table)
d = table[0]
assert len(table) == 10
assert len(table.colnames) == 27
assert table["r_out_PWN"].unit == "pc"
assert_allclose(d["r_out_PWN"], 1.378224, atol=1e-4)
def test_add_observed_parameters():
table = make_base_catalog_galactic(n_sources=10, random_state=0)
table = add_observed_parameters(table)
d = table[0]
assert len(table) == 10
assert len(table.colnames) == 20
assert table["distance"].unit == "pc"
assert_allclose(d["distance"], 13016.572756, atol=1e-5)
assert table["GLON"].unit == "deg"
assert_allclose(d["GLON"], -27.156565, atol=1e-5)
assert table["GLAT"].unit == "deg"
assert_allclose(d["GLAT"], 0.101948, atol=1e-5)
assert table["VGLON"].unit == "deg / Myr"
assert_allclose(d["VGLON"], 0.368166, atol=1e-5)
assert table["VGLAT"].unit == "deg / Myr"
assert_allclose(d["VGLAT"], -0.209514, atol=1e-5)
assert table["RA"].unit == "deg"
assert_allclose(d["RA"], 244.347149, atol=1e-5)
assert table["DEC"].unit == "deg"
assert_allclose(d["DEC"], -50.410142, atol=1e-5)
def test_chain_all():
# Test that running the simulation functions in chain works
table = make_base_catalog_galactic(n_sources=10, random_state=0)
table = add_snr_parameters(table)
table = add_pulsar_parameters(table, random_state=0)
table = add_pwn_parameters(table)
table = add_observed_parameters(table)
d = table[0]
# Note: the individual functions are tested above.
# Here we just run them in a chain and do very basic asserts
# on the output so that we make sure we notice changes.
assert len(table) == 10
assert len(table.colnames) == 34
assert table["r_out_PWN"].unit == "pc"
assert_allclose(d["r_out_PWN"], 1.378224, atol=1e-4)
assert table["RA"].unit == "deg"
assert_allclose(d["RA"], 244.347149, atol=1e-5)
|
[
"gammapy.astro.population.make_catalog_random_positions_cube",
"astropy.table.Table",
"astropy.units.Quantity",
"gammapy.astro.population.add_observed_parameters",
"gammapy.astro.population.make_base_catalog_galactic",
"gammapy.astro.population.add_pulsar_parameters",
"gammapy.astro.population.add_pwn_parameters",
"gammapy.astro.population.make_catalog_random_positions_sphere",
"numpy.testing.assert_equal",
"numpy.testing.assert_allclose",
"gammapy.astro.population.add_snr_parameters"
] |
[((498, 548), 'gammapy.astro.population.make_catalog_random_positions_cube', 'make_catalog_random_positions_cube', ([], {'random_state': '(0)'}), '(random_state=0)\n', (532, 548), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((672, 715), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['x']", '(0.0976270078546495)'], {}), "(d['x'], 0.0976270078546495)\n", (687, 715), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((755, 798), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['y']", '(0.3556330735924602)'], {}), "(d['y'], 0.3556330735924602)\n", (770, 798), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((838, 883), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['z']", '(-0.37640823601179485)'], {}), "(d['z'], -0.37640823601179485)\n", (853, 883), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((897, 960), 'gammapy.astro.population.make_catalog_random_positions_cube', 'make_catalog_random_positions_cube', ([], {'dimension': '(2)', 'random_state': '(0)'}), '(dimension=2, random_state=0)\n', (931, 960), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((965, 992), 'numpy.testing.assert_equal', 'assert_equal', (["table['z']", '(0)'], {}), "(table['z'], 0)\n", (977, 992), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((1006, 1069), 'gammapy.astro.population.make_catalog_random_positions_cube', 'make_catalog_random_positions_cube', ([], {'dimension': '(1)', 'random_state': '(0)'}), '(dimension=1, random_state=0)\n', (1040, 1069), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((1074, 1101), 'numpy.testing.assert_equal', 'assert_equal', (["table['y']", '(0)'], {}), "(table['y'], 0)\n", (1086, 1101), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((1106, 1133), 'numpy.testing.assert_equal', 'assert_equal', (["table['z']", '(0)'], {}), "(table['z'], 0)\n", (1118, 1133), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((1197, 1249), 'gammapy.astro.population.make_catalog_random_positions_sphere', 'make_catalog_random_positions_sphere', ([], {'random_state': '(0)'}), '(random_state=0)\n', (1233, 1249), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((1376, 1421), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['lon']", '(3.4482969442579128)'], {}), "(d['lon'], 3.4482969442579128)\n", (1391, 1421), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((1464, 1510), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['lat']", '(0.36359133530192267)'], {}), "(d['lat'], 0.36359133530192267)\n", (1479, 1510), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((1557, 1607), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['distance']", 
'(0.6780943487897606)'], {}), "(d['distance'], 0.6780943487897606)\n", (1572, 1607), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((1661, 1717), 'gammapy.astro.population.make_base_catalog_galactic', 'make_base_catalog_galactic', ([], {'n_sources': '(10)', 'random_state': '(0)'}), '(n_sources=10, random_state=0)\n', (1687, 1717), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((1843, 1887), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['age']", '(548813.5039273248)'], {}), "(d['age'], 548813.5039273248)\n", (1858, 1887), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((1934, 1966), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['n_ISM']", '(1.0)'], {}), "(d['n_ISM'], 1.0)\n", (1949, 1966), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((2099, 2151), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['x_birth']", '(-5.856461)'], {'atol': '(1e-05)'}), "(d['x_birth'], -5.856461, atol=1e-05)\n", (2114, 2151), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((2197, 2248), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['y_birth']", '(3.017292)'], {'atol': '(1e-05)'}), "(d['y_birth'], 3.017292, atol=1e-05)\n", (2212, 2248), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((2294, 2345), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['z_birth']", '(0.049088)'], {'atol': '(1e-05)'}), "(d['z_birth'], 0.049088, atol=1e-05)\n", (2309, 2345), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((2385, 2431), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['x']", '(-5.941061)'], {'atol': '(1e-05)'}), "(d['x'], -5.941061, atol=1e-05)\n", (2400, 2431), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((2471, 2516), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['y']", '(3.081642)'], {'atol': '(1e-05)'}), "(d['y'], 3.081642, atol=1e-05)\n", (2486, 2516), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((2556, 2601), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['z']", '(0.023161)'], {'atol': '(1e-05)'}), "(d['z'], 0.023161, atol=1e-05)\n", (2571, 2601), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((2643, 2692), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['vx']", '(-150.727104)'], {'atol': '(1e-05)'}), "(d['vx'], -150.727104, atol=1e-05)\n", (2658, 2692), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((2734, 2782), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['vy']", '(114.648494)'], {'atol': '(1e-05)'}), "(d['vy'], 114.648494, atol=1e-05)\n", (2749, 2782), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((2824, 2872), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['vz']", '(-46.193814)'], {'atol': '(1e-05)'}), "(d['vz'], -46.193814, atol=1e-05)\n", (2839, 2872), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((2917, 2968), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['v_abs']", '(194.927693)'], {'atol': '(1e-05)'}), "(d['v_abs'], 194.927693, atol=1e-05)\n", (2932, 2968), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((3013, 3020), 'astropy.table.Table', 'Table', ([], {}), '()\n', (3018, 3020), False, 'from astropy.table 
import Table\n'), ((3080, 3101), 'astropy.units.Quantity', 'u.Quantity', (['(1)', '"""cm-3"""'], {}), "(1, 'cm-3')\n", (3090, 3101), True, 'import astropy.units as u\n'), ((3115, 3140), 'gammapy.astro.population.add_snr_parameters', 'add_snr_parameters', (['table'], {}), '(table)\n', (3133, 3140), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((3293, 3330), 'numpy.testing.assert_allclose', 'assert_allclose', (["table['E_SN']", '(1e+51)'], {}), "(table['E_SN'], 1e+51)\n", (3308, 3330), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((3373, 3424), 'numpy.testing.assert_allclose', 'assert_allclose', (["table['r_out']", '[1, 3.80730787743]'], {}), "(table['r_out'], [1, 3.80730787743])\n", (3388, 3424), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((3467, 3522), 'numpy.testing.assert_allclose', 'assert_allclose', (["table['r_in']", '[0.9086, 3.45931993743]'], {}), "(table['r_in'], [0.9086, 3.45931993743])\n", (3482, 3522), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((3569, 3617), 'numpy.testing.assert_allclose', 'assert_allclose', (["table['L_SNR']", '[0, 1.0768e+33]'], {}), "(table['L_SNR'], [0, 1.0768e+33])\n", (3584, 3617), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((3665, 3672), 'astropy.table.Table', 'Table', ([], {}), '()\n', (3670, 3672), False, 'from astropy.table import Table\n'), ((3724, 3768), 'gammapy.astro.population.add_pulsar_parameters', 'add_pulsar_parameters', (['table'], {'random_state': '(0)'}), '(table, random_state=0)\n', (3745, 3768), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((3876, 3918), 'numpy.testing.assert_allclose', 'assert_allclose', (["table['age']", '[100, 1000]'], {}), "(table['age'], [100, 1000])\n", (3891, 3918), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((3958, 4020), 'numpy.testing.assert_allclose', 'assert_allclose', (["table['P0']", '[0.214478, 0.246349]'], {'atol': '(1e-05)'}), "(table['P0'], [0.214478, 0.246349], atol=1e-05)\n", (3973, 4020), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4058, 4128), 'numpy.testing.assert_allclose', 'assert_allclose', (["table['P1']", '[6.310423e-13, 4.198294e-16]'], {'atol': '(1e-05)'}), "(table['P1'], [6.310423e-13, 4.198294e-16], atol=1e-05)\n", (4073, 4128), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4173, 4241), 'numpy.testing.assert_allclose', 'assert_allclose', (["table['P0_birth']", '[0.212418, 0.246336]'], {'atol': '(1e-05)'}), "(table['P0_birth'], [0.212418, 0.246336], atol=1e-05)\n", (4188, 4241), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4285, 4361), 'numpy.testing.assert_allclose', 'assert_allclose', (["table['P1_birth']", '[6.558773e-13, 4.199198e-16]'], {'atol': '(1e-05)'}), "(table['P1_birth'], [6.558773e-13, 4.199198e-16], atol=1e-05)\n", (4300, 4361), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4406, 4480), 'numpy.testing.assert_allclose', 'assert_allclose', (["table['CharAge']", '[2.207394e-21, 1.63893e-24]'], {'atol': '(1e-05)'}), "(table['CharAge'], [2.207394e-21, 1.63893e-24], 
atol=1e-05)\n", (4421, 4480), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4523, 4588), 'numpy.testing.assert_allclose', 'assert_allclose', (["table['Tau0']", '[5131.385, 9294538.0]'], {'atol': '(1e-05)'}), "(table['Tau0'], [5131.385, 9294538.0], atol=1e-05)\n", (4538, 4588), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4641, 4714), 'numpy.testing.assert_allclose', 'assert_allclose', (["table['L_PSR']", '[2.599229e+36, 1.108788e+33]'], {'rtol': '(1e-05)'}), "(table['L_PSR'], [2.599229e+36, 1.108788e+33], rtol=1e-05)\n", (4656, 4714), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4761, 4835), 'numpy.testing.assert_allclose', 'assert_allclose', (["table['L0_PSR']", '[2.701524e+36, 1.109026e+33]'], {'rtol': '(1e-05)'}), "(table['L0_PSR'], [2.701524e+36, 1.109026e+33], rtol=1e-05)\n", (4776, 4835), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4875, 4954), 'numpy.testing.assert_allclose', 'assert_allclose', (["table['B_PSR']", '[11944200000000.0, 325459700000.0]'], {'rtol': '(1e-05)'}), "(table['B_PSR'], [11944200000000.0, 325459700000.0], rtol=1e-05)\n", (4890, 4954), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4991, 5047), 'gammapy.astro.population.make_base_catalog_galactic', 'make_base_catalog_galactic', ([], {'n_sources': '(10)', 'random_state': '(0)'}), '(n_sources=10, random_state=0)\n', (5017, 5047), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((5129, 5154), 'gammapy.astro.population.add_snr_parameters', 'add_snr_parameters', (['table'], {}), '(table)\n', (5147, 5154), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((5167, 5211), 'gammapy.astro.population.add_pulsar_parameters', 'add_pulsar_parameters', (['table'], {'random_state': '(0)'}), '(table, random_state=0)\n', (5188, 5211), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((5224, 5249), 'gammapy.astro.population.add_pwn_parameters', 'add_pwn_parameters', (['table'], {}), '(table)\n', (5242, 5249), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((5381, 5435), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['r_out_PWN']", '(1.378224)'], {'atol': '(0.0001)'}), "(d['r_out_PWN'], 1.378224, atol=0.0001)\n", (5396, 5435), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((5484, 5540), 'gammapy.astro.population.make_base_catalog_galactic', 'make_base_catalog_galactic', ([], {'n_sources': '(10)', 'random_state': '(0)'}), '(n_sources=10, random_state=0)\n', (5510, 5540), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((5553, 5583), 
'gammapy.astro.population.add_observed_parameters', 'add_observed_parameters', (['table'], {}), '(table)\n', (5576, 5583), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((5714, 5770), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['distance']", '(13016.572756)'], {'atol': '(1e-05)'}), "(d['distance'], 13016.572756, atol=1e-05)\n", (5729, 5770), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((5813, 5863), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['GLON']", '(-27.156565)'], {'atol': '(1e-05)'}), "(d['GLON'], -27.156565, atol=1e-05)\n", (5828, 5863), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((5906, 5954), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['GLAT']", '(0.101948)'], {'atol': '(1e-05)'}), "(d['GLAT'], 0.101948, atol=1e-05)\n", (5921, 5954), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6004, 6053), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['VGLON']", '(0.368166)'], {'atol': '(1e-05)'}), "(d['VGLON'], 0.368166, atol=1e-05)\n", (6019, 6053), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6103, 6153), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['VGLAT']", '(-0.209514)'], {'atol': '(1e-05)'}), "(d['VGLAT'], -0.209514, atol=1e-05)\n", (6118, 6153), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6194, 6242), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['RA']", '(244.347149)'], {'atol': '(1e-05)'}), "(d['RA'], 244.347149, atol=1e-05)\n", (6209, 6242), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6284, 6333), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['DEC']", '(-50.410142)'], {'atol': '(1e-05)'}), "(d['DEC'], -50.410142, atol=1e-05)\n", (6299, 6333), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6433, 6489), 'gammapy.astro.population.make_base_catalog_galactic', 'make_base_catalog_galactic', ([], {'n_sources': '(10)', 'random_state': '(0)'}), '(n_sources=10, random_state=0)\n', (6459, 6489), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((6502, 6527), 'gammapy.astro.population.add_snr_parameters', 'add_snr_parameters', (['table'], {}), '(table)\n', (6520, 6527), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((6540, 6584), 'gammapy.astro.population.add_pulsar_parameters', 'add_pulsar_parameters', (['table'], {'random_state': '(0)'}), '(table, random_state=0)\n', (6561, 6584), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((6597, 6622), 'gammapy.astro.population.add_pwn_parameters', 'add_pwn_parameters', (['table'], {}), '(table)\n', (6615, 6622), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, 
add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((6635, 6665), 'gammapy.astro.population.add_observed_parameters', 'add_observed_parameters', (['table'], {}), '(table)\n', (6658, 6665), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere\n'), ((6977, 7031), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['r_out_PWN']", '(1.378224)'], {'atol': '(0.0001)'}), "(d['r_out_PWN'], 1.378224, atol=0.0001)\n", (6992, 7031), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((7071, 7119), 'numpy.testing.assert_allclose', 'assert_allclose', (["d['RA']", '(244.347149)'], {'atol': '(1e-05)'}), "(d['RA'], 244.347149, atol=1e-05)\n", (7086, 7119), False, 'from numpy.testing import assert_allclose, assert_equal\n')]
|
import numpy as np
import matplotlib.pyplot as plt
PI = np.pi
# =========================define sinc
# ---------------normalized
def sinc1(x):
PI = np.pi
x = np.array(x)
y = np.where(np.abs(PI * x) < 1e-38, 1.0, np.sin(PI * x) / (PI * x))
return y
def sinc_interpolation(x, t, T):
ns = np.arange(x.size)
print(ns, "============")
y = []
for tt in t:
y.append(np.sum(x * sinc1((tt - ns * T) / T)))
return np.array(y)
# =========================test sinc definition
f0 = 100
Ns = 2000
Tp = 20.0 / Ns
t = np.linspace(-10, 10, Ns)
t2 = np.linspace(-10, 10, Ns * 2)
y1 = sinc1(t / Tp)
x = np.sin(2 * PI * f0 * t)
print(x.shape)
y = sinc_interpolation(x, t2, Tp)
print(y.shape, "===")
yfft = np.fft.fftshift(np.fft.fft(y))
plt.figure()
plt.subplot(131)
plt.plot(t, x, '^b')
plt.plot(t2, y, '+r')
plt.legend(['original', 'sinc interpolated'])
plt.title('sinc(t/Tp), ' + "Tp=" + str(Tp))
plt.xlabel('Time/s')
plt.ylabel('Amplitude')
plt.grid()
plt.show()
|
[
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"numpy.abs",
"matplotlib.pyplot.plot",
"numpy.fft.fft",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.array",
"numpy.arange",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid"
] |
[((556, 580), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', 'Ns'], {}), '(-10, 10, Ns)\n', (567, 580), True, 'import numpy as np\n'), ((586, 614), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(Ns * 2)'], {}), '(-10, 10, Ns * 2)\n', (597, 614), True, 'import numpy as np\n'), ((640, 663), 'numpy.sin', 'np.sin', (['(2 * PI * f0 * t)'], {}), '(2 * PI * f0 * t)\n', (646, 663), True, 'import numpy as np\n'), ((777, 789), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (787, 789), True, 'import matplotlib.pyplot as plt\n'), ((790, 806), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(131)'], {}), '(131)\n', (801, 806), True, 'import matplotlib.pyplot as plt\n'), ((807, 827), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'x', '"""^b"""'], {}), "(t, x, '^b')\n", (815, 827), True, 'import matplotlib.pyplot as plt\n'), ((828, 849), 'matplotlib.pyplot.plot', 'plt.plot', (['t2', 'y', '"""+r"""'], {}), "(t2, y, '+r')\n", (836, 849), True, 'import matplotlib.pyplot as plt\n'), ((850, 895), 'matplotlib.pyplot.legend', 'plt.legend', (["['original', 'sinc interpolated']"], {}), "(['original', 'sinc interpolated'])\n", (860, 895), True, 'import matplotlib.pyplot as plt\n'), ((940, 960), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time/s"""'], {}), "('Time/s')\n", (950, 960), True, 'import matplotlib.pyplot as plt\n'), ((961, 984), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitude"""'], {}), "('Amplitude')\n", (971, 984), True, 'import matplotlib.pyplot as plt\n'), ((985, 995), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (993, 995), True, 'import matplotlib.pyplot as plt\n'), ((998, 1008), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1006, 1008), True, 'import matplotlib.pyplot as plt\n'), ((169, 180), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (177, 180), True, 'import numpy as np\n'), ((311, 328), 'numpy.arange', 'np.arange', (['x.size'], {}), '(x.size)\n', (320, 328), True, 'import numpy as np\n'), ((455, 466), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (463, 466), True, 'import numpy as np\n'), ((761, 774), 'numpy.fft.fft', 'np.fft.fft', (['y'], {}), '(y)\n', (771, 774), True, 'import numpy as np\n'), ((198, 212), 'numpy.abs', 'np.abs', (['(PI * x)'], {}), '(PI * x)\n', (204, 212), True, 'import numpy as np\n'), ((227, 241), 'numpy.sin', 'np.sin', (['(PI * x)'], {}), '(PI * x)\n', (233, 241), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(0, 2*np.pi, 10)
y = np.sin(x)  # Original function
xvals = np.linspace(0, 2*np.pi, 50)
yinterp = np.interp(xvals, x, y)
plt.plot(x, y, 'o')
plt.plot(xvals, yinterp, '-x')
plt.show()
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.sin",
"numpy.linspace",
"numpy.interp"
] |
[((56, 85), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(10)'], {}), '(0, 2 * np.pi, 10)\n', (67, 85), True, 'import numpy as np\n'), ((88, 97), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (94, 97), True, 'import numpy as np\n'), ((124, 153), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(50)'], {}), '(0, 2 * np.pi, 50)\n', (135, 153), True, 'import numpy as np\n'), ((162, 184), 'numpy.interp', 'np.interp', (['xvals', 'x', 'y'], {}), '(xvals, x, y)\n', (171, 184), True, 'import numpy as np\n'), ((185, 204), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""o"""'], {}), "(x, y, 'o')\n", (193, 204), True, 'import matplotlib.pyplot as plt\n'), ((205, 235), 'matplotlib.pyplot.plot', 'plt.plot', (['xvals', 'yinterp', '"""-x"""'], {}), "(xvals, yinterp, '-x')\n", (213, 235), True, 'import matplotlib.pyplot as plt\n'), ((236, 246), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (244, 246), True, 'import matplotlib.pyplot as plt\n')]
|
# Copyright 2019 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
import argparse
import sys
import itertools
import numpy as np
sphere_radius = 5
#Take output of cell detect step, split into two streams- one list of cells, the other the map of cells
def split_cells(args):
cells = np.load(args.input)
cell_map = cells[1]
cell_list = cells[0]
with open(args.map_output, 'wb') as f:
np.save(f, cell_map)
# Make volume out of cell_list
cell_centroid_volume = np.zeros(cell_map.shape)
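    # For each detected cell, mark a sphere of radius `sphere_size` around its centroid:
    # clip a per-axis bounding box to the volume, then keep voxels within Euclidean distance.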
for cell in cell_list:
axes_range = [[],[],[]]
for i,axes in enumerate(cell[:3]):
min_range = max(int(axes-args.sphere_size), 0)
max_range = min(int(axes+args.sphere_size), cell_map.shape[i]-1)
axes_range[i]=range(min_range, max_range)
coords = list(itertools.product(*axes_range))
for pixel in coords:
if np.linalg.norm(np.array(cell[:3])-np.array(pixel)) <= args.sphere_size:
cell_centroid_volume[pixel] = 1
with open(args.list_output, 'wb') as f:
np.save(f, cell_list)
with open(args.centroid_volume_output, 'wb') as f:
np.save(f, cell_centroid_volume)
def main():
parser = argparse.ArgumentParser(description='cell results splitting script')
parser.set_defaults(func=lambda _: parser.print_help())
parser.add_argument('-i', '--input', required=True, help='Input file')
parser.add_argument('--map_output', required=True, help='Map Output file')
parser.add_argument('--list_output', required=True, help='List Output file')
parser.add_argument('--centroid_volume_output', required=True, help='Output volume with spheres')
parser.add_argument('--sphere_size', required=False, help='Size of the spheres in the centroids volume', default=5, type=int)
args = parser.parse_args()
split_cells(args)
if __name__ == '__main__':
main()
|
[
"numpy.load",
"numpy.save",
"argparse.ArgumentParser",
"numpy.zeros",
"numpy.array",
"itertools.product"
] |
[((862, 881), 'numpy.load', 'np.load', (['args.input'], {}), '(args.input)\n', (869, 881), True, 'import numpy as np\n'), ((1066, 1090), 'numpy.zeros', 'np.zeros', (['cell_map.shape'], {}), '(cell_map.shape)\n', (1074, 1090), True, 'import numpy as np\n'), ((1804, 1872), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""cell results splitting script"""'}), "(description='cell results splitting script')\n", (1827, 1872), False, 'import argparse\n'), ((982, 1002), 'numpy.save', 'np.save', (['f', 'cell_map'], {}), '(f, cell_map)\n', (989, 1002), True, 'import numpy as np\n'), ((1655, 1676), 'numpy.save', 'np.save', (['f', 'cell_list'], {}), '(f, cell_list)\n', (1662, 1676), True, 'import numpy as np\n'), ((1740, 1772), 'numpy.save', 'np.save', (['f', 'cell_centroid_volume'], {}), '(f, cell_centroid_volume)\n', (1747, 1772), True, 'import numpy as np\n'), ((1406, 1436), 'itertools.product', 'itertools.product', (['*axes_range'], {}), '(*axes_range)\n', (1423, 1436), False, 'import itertools\n'), ((1497, 1515), 'numpy.array', 'np.array', (['cell[:3]'], {}), '(cell[:3])\n', (1505, 1515), True, 'import numpy as np\n'), ((1516, 1531), 'numpy.array', 'np.array', (['pixel'], {}), '(pixel)\n', (1524, 1531), True, 'import numpy as np\n')]
|
__copyright__ = """This code is licensed under the 3-clause BSD license.
Copyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group.
See LICENSE.txt for details.
"""
import pytest
import scine_utilities as scine
import numpy as np
import os
class SigmaVectorEvaluatorPython(scine.SigmaVectorEvaluator):
def __init__(self, matrix):
scine.SigmaVectorEvaluator.__init__(self)
self.matrix = matrix
def evaluate(self, guess_vectors):
return np.dot(self.matrix, guess_vectors)
def collapsed(self, newSubspaceDimension):
return
def swap(self, i, j):
return
def create_matrix():
# create a selfadjoint matrix
matrix = np.random.rand(100,100)
matrix = 0.5*(matrix + np.transpose(matrix))
matrix[np.diag_indices_from(matrix)] += 1
return matrix
def initialize_diagonalizer(matrix):
# Create sigma vector evaluator and preconditioner
sve = scine.IndirectSigmaVectorEvaluator(matrix)
prec = scine.IndirectPreconditionerEvaluator(matrix[np.diag_indices_from(matrix)])
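    # The preconditioner only needs the matrix diagonal: Davidson corrections are roughly
    # -residual / (diag - current eigenvalue), cf. test_Preconditioner below.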
# Create and fill Non Orthogonal Davidson
diag = scine.NonOrthogonalDavidson(5,100)
diag.sigma_vector_evaluator = sve
diag.set_preconditioner(prec)
return diag
def test_SigmaVectorEvaluator():
ref = create_matrix()
sve = scine.IndirectSigmaVectorEvaluator(ref)
result = sve.evaluate(2.0 * np.identity(100))
assert np.all(2.0 * ref[:,:] == result[:,:])
def test_Preconditioner():
'''
Test that if you try to precondition a vector of ones, you just get
-1.0 / (difference btw the diagonal and the current eigenvalue)
'''
ref = create_matrix()
diag = ref[np.diag_indices_from(ref)]
ones_vector = np.ones(100)
arbitrary_eigenvalue = 3.5
prec = scine.IndirectPreconditionerEvaluator(diag)
result = prec.evaluate(ones_vector, arbitrary_eigenvalue)
assert np.all(result[:] == -1.0 / (diag - arbitrary_eigenvalue))
def test_InitializeDiagonalizer():
diag = initialize_diagonalizer(create_matrix())
def test_DiagonalizeWithNonOrthogonalDavidson():
ref = create_matrix()
diag = initialize_diagonalizer(ref)
result = diag.solve(scine.core.Log.silent())
# Get reference numbers
w, v = np.linalg.eig(ref)
assert np.all(result.eigenvalues[:] - sorted(w)[:5] <= 1.0e-5)
def test_DiagonalizeWithOrthogonalDavidson():
ref = create_matrix()
# Create sigma vector evaluator and preconditioner
sve = scine.IndirectSigmaVectorEvaluator(ref)
prec = scine.IndirectPreconditionerEvaluator(ref[np.diag_indices_from(ref)])
# Create and fill Non Orthogonal Davidson
diag = scine.OrthogonalDavidson(5,100)
diag.sigma_vector_evaluator = sve
diag.set_preconditioner(prec)
result = diag.solve(scine.core.Log.silent())
# Get reference numbers
w, v = np.linalg.eig(ref)
assert np.all(result.eigenvalues[:] - sorted(w)[:5] <= 1.0e-5)
def test_DiagonalizeWithPythonSigmaVectorEvaluator():
ref = create_matrix()
diag = initialize_diagonalizer(ref)
# Set python specific sigma vector evaluator
# Note: first initialize, then assign to prevent auto casting.
# If I write diag.sigma_vector_evaluator = SigmaVectorEvaluatorPython(ref)
# then it it tried to look for the method SigmaVectorEvaluator::evaluate()
# instead of SigmaVectorEvaluatorPython::evaluate()
sve = SigmaVectorEvaluatorPython(ref)
diag.sigma_vector_evaluator = sve
result = diag.solve(scine.core.Log.silent())
# Get reference numbers
w, v = np.linalg.eig(ref)
assert np.all(result.eigenvalues[:] - sorted(w)[:5] <= 1.0e-5)
|
[
"scine_utilities.NonOrthogonalDavidson",
"scine_utilities.OrthogonalDavidson",
"numpy.diag_indices_from",
"scine_utilities.core.Log.silent",
"numpy.transpose",
"numpy.ones",
"numpy.linalg.eig",
"scine_utilities.SigmaVectorEvaluator.__init__",
"numpy.identity",
"scine_utilities.IndirectSigmaVectorEvaluator",
"numpy.random.rand",
"numpy.dot",
"scine_utilities.IndirectPreconditionerEvaluator",
"numpy.all"
] |
[((687, 711), 'numpy.random.rand', 'np.random.rand', (['(100)', '(100)'], {}), '(100, 100)\n', (701, 711), True, 'import numpy as np\n'), ((927, 969), 'scine_utilities.IndirectSigmaVectorEvaluator', 'scine.IndirectSigmaVectorEvaluator', (['matrix'], {}), '(matrix)\n', (961, 969), True, 'import scine_utilities as scine\n'), ((1115, 1150), 'scine_utilities.NonOrthogonalDavidson', 'scine.NonOrthogonalDavidson', (['(5)', '(100)'], {}), '(5, 100)\n', (1142, 1150), True, 'import scine_utilities as scine\n'), ((1309, 1348), 'scine_utilities.IndirectSigmaVectorEvaluator', 'scine.IndirectSigmaVectorEvaluator', (['ref'], {}), '(ref)\n', (1343, 1348), True, 'import scine_utilities as scine\n'), ((1410, 1449), 'numpy.all', 'np.all', (['(2.0 * ref[:, :] == result[:, :])'], {}), '(2.0 * ref[:, :] == result[:, :])\n', (1416, 1449), True, 'import numpy as np\n'), ((1718, 1730), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (1725, 1730), True, 'import numpy as np\n'), ((1773, 1816), 'scine_utilities.IndirectPreconditionerEvaluator', 'scine.IndirectPreconditionerEvaluator', (['diag'], {}), '(diag)\n', (1810, 1816), True, 'import scine_utilities as scine\n'), ((1890, 1947), 'numpy.all', 'np.all', (['(result[:] == -1.0 / (diag - arbitrary_eigenvalue))'], {}), '(result[:] == -1.0 / (diag - arbitrary_eigenvalue))\n', (1896, 1947), True, 'import numpy as np\n'), ((2240, 2258), 'numpy.linalg.eig', 'np.linalg.eig', (['ref'], {}), '(ref)\n', (2253, 2258), True, 'import numpy as np\n'), ((2464, 2503), 'scine_utilities.IndirectSigmaVectorEvaluator', 'scine.IndirectSigmaVectorEvaluator', (['ref'], {}), '(ref)\n', (2498, 2503), True, 'import scine_utilities as scine\n'), ((2643, 2675), 'scine_utilities.OrthogonalDavidson', 'scine.OrthogonalDavidson', (['(5)', '(100)'], {}), '(5, 100)\n', (2667, 2675), True, 'import scine_utilities as scine\n'), ((2835, 2853), 'numpy.linalg.eig', 'np.linalg.eig', (['ref'], {}), '(ref)\n', (2848, 2853), True, 'import numpy as np\n'), ((3558, 3576), 'numpy.linalg.eig', 'np.linalg.eig', (['ref'], {}), '(ref)\n', (3571, 3576), True, 'import numpy as np\n'), ((355, 396), 'scine_utilities.SigmaVectorEvaluator.__init__', 'scine.SigmaVectorEvaluator.__init__', (['self'], {}), '(self)\n', (390, 396), True, 'import scine_utilities as scine\n'), ((480, 514), 'numpy.dot', 'np.dot', (['self.matrix', 'guess_vectors'], {}), '(self.matrix, guess_vectors)\n', (486, 514), True, 'import numpy as np\n'), ((771, 799), 'numpy.diag_indices_from', 'np.diag_indices_from', (['matrix'], {}), '(matrix)\n', (791, 799), True, 'import numpy as np\n'), ((1673, 1698), 'numpy.diag_indices_from', 'np.diag_indices_from', (['ref'], {}), '(ref)\n', (1693, 1698), True, 'import numpy as np\n'), ((2176, 2199), 'scine_utilities.core.Log.silent', 'scine.core.Log.silent', ([], {}), '()\n', (2197, 2199), True, 'import scine_utilities as scine\n'), ((2771, 2794), 'scine_utilities.core.Log.silent', 'scine.core.Log.silent', ([], {}), '()\n', (2792, 2794), True, 'import scine_utilities as scine\n'), ((3494, 3517), 'scine_utilities.core.Log.silent', 'scine.core.Log.silent', ([], {}), '()\n', (3515, 3517), True, 'import scine_utilities as scine\n'), ((738, 758), 'numpy.transpose', 'np.transpose', (['matrix'], {}), '(matrix)\n', (750, 758), True, 'import numpy as np\n'), ((1026, 1054), 'numpy.diag_indices_from', 'np.diag_indices_from', (['matrix'], {}), '(matrix)\n', (1046, 1054), True, 'import numpy as np\n'), ((1381, 1397), 'numpy.identity', 'np.identity', (['(100)'], {}), '(100)\n', (1392, 1397), True, 'import numpy as np\n'), 
((2557, 2582), 'numpy.diag_indices_from', 'np.diag_indices_from', (['ref'], {}), '(ref)\n', (2577, 2582), True, 'import numpy as np\n')]
|
# coding: utf-8
# # Fine-tune a CNN using a pre-trained VGG model
# In[1]:
# Import packs
import numpy as np
import os
import scipy.io
from scipy.misc import imread, imresize
import matplotlib.pyplot as plt
import skimage.io
import skimage.transform
import tensorflow as tf
get_ipython().magic(u'matplotlib inline')
cwd = os.getcwd()
print ("Package loaded")
print ("Current folder is %s" % (cwd) )
# In[2]:
# Download the pre-trained VGG-19 model (Matlab .mat format); it is read with scipy below
# (Note: this model version differs from the latest one at http://www.vlfeat.org/matconvnet/pretrained/)
import os.path
if not os.path.isfile('./data/imagenet-vgg-verydeep-19.mat'):
get_ipython().system(u'wget -O data/imagenet-vgg-verydeep-19.mat http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat')
# # Load the images, resize them, and build the dataset
# In[3]:
# Configure the locations of the images and reshaping sizes
# ------------------------------------------------------------------- #
paths = {"images/cats", "images/dogs"}
imgsize = [64, 64] # The reshape size
use_gray = 0 # Grayscale
data_name = "data4vgg" # Save name
valid_exts = [".jpg",".gif",".png",".tga", ".jpeg"]
# ------------------------------------------------------------------- #
imgcnt = 0
nclass = len(paths)
for relpath in paths:
fullpath = cwd + "/" + relpath
flist = os.listdir(fullpath)
for f in flist:
if os.path.splitext(f)[1].lower() not in valid_exts:
continue
        filepath = os.path.join(fullpath, f)  # do not overwrite `fullpath`; it is the directory being scanned
imgcnt = imgcnt + 1
# Grayscale
def rgb2gray(rgb):
    if len(rgb.shape) == 3:
return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
else:
print ("Current Image is GRAY!")
return rgb
if use_gray:
totalimg = np.ndarray((imgcnt, imgsize[0]*imgsize[1]))
else:
totalimg = np.ndarray((imgcnt, imgsize[0]*imgsize[1]*3))
totallabel = np.ndarray((imgcnt, nclass))
imgcnt = 0
for i, relpath in zip(range(nclass), paths):
path = cwd + "/" + relpath
flist = os.listdir(path)
for f in flist:
if os.path.splitext(f)[1].lower() not in valid_exts:
continue
fullpath = os.path.join(path, f)
currimg = imread(fullpath)
# Convert to grayscale
if use_gray:
grayimg = rgb2gray(currimg)
else:
grayimg = currimg
# Reshape
graysmall = imresize(grayimg, [imgsize[0], imgsize[1]])/255.
grayvec = np.reshape(graysmall, (1, -1))
# Save
totalimg[imgcnt, :] = grayvec
totallabel[imgcnt, :] = np.eye(nclass, nclass)[i]
imgcnt = imgcnt + 1
# Divide total data into training and test set
randidx = np.random.randint(imgcnt, size=imgcnt)
trainidx = randidx[0:int(4*imgcnt/5)]
testidx = randidx[int(4*imgcnt/5):imgcnt]
trainimg = totalimg[trainidx, :]
trainlabel = totallabel[trainidx, :]
testimg = totalimg[testidx, :]
testlabel = totallabel[testidx, :]
ntrain = trainimg.shape[0]
nclass = trainlabel.shape[1]
dim = trainimg.shape[1]
ntest = testimg.shape[0]
print ("Number of total images is %d (train: %d, test: %d)"
% (imgcnt, ntrain, ntest))
print ("Shape of an image is (%d, %d, %d)" % (imgsize[0], imgsize[1], 3))
# # Define the VGG network structure
# In[4]:
def net(data_path, input_image):
layers = (
'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
'relu3_3', 'conv3_4', 'relu3_4', 'pool3',
'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
'relu4_3', 'conv4_4', 'relu4_4', 'pool4',
'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
'relu5_3', 'conv5_4', 'relu5_4'
)
data = scipy.io.loadmat(data_path)
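    # 'normalization' stores the mean image of the VGG training data; averaging over the
    # spatial axes gives the per-channel mean pixel used to center the network input.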
mean = data['normalization'][0][0][0]
mean_pixel = np.mean(mean, axis=(0, 1))
weights = data['layers'][0]
net = {}
current = input_image
for i, name in enumerate(layers):
kind = name[:4]
if kind == 'conv':
kernels, bias = weights[i][0][0][0][0]
# matconvnet: weights are [width, height, in_channels, out_channels]
# tensorflow: weights are [height, width, in_channels, out_channels]
kernels = np.transpose(kernels, (1, 0, 2, 3))
bias = bias.reshape(-1)
current = _conv_layer(current, kernels, bias)
elif kind == 'relu':
current = tf.nn.relu(current)
elif kind == 'pool':
current = _pool_layer(current)
net[name] = current
assert len(net) == len(layers)
return net, mean_pixel
def _conv_layer(input, weights, bias):
conv = tf.nn.conv2d(input, tf.constant(weights), strides=(1, 1, 1, 1),
padding='SAME')
return tf.nn.bias_add(conv, bias)
def _pool_layer(input):
return tf.nn.max_pool(input, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1),
padding='SAME')
def preprocess(image, mean_pixel):
return image - mean_pixel
def unprocess(image, mean_pixel):
return image + mean_pixel
print ("VGG net ready")
# # Compute convolutional feature maps with VGG
# In[5]:
# Preprocess
trainimg_tensor = np.ndarray((ntrain, imgsize[0], imgsize[1], 3))
testimg_tensor = np.ndarray((ntest, imgsize[0], imgsize[1], 3))
for i in range(ntrain):
currimg = trainimg[i, :]
currimg = np.reshape(currimg, [imgsize[0], imgsize[1], 3])
trainimg_tensor[i, :, :, :] = currimg
print ("Shape of trainimg_tensor is %s" % (trainimg_tensor.shape,))
for i in range(ntest):
currimg = testimg[i, :]
currimg = np.reshape(currimg, [imgsize[0], imgsize[1], 3])
testimg_tensor[i, :, :, :] = currimg
print ("Shape of trainimg_tensor is %s" % (testimg_tensor.shape,))
# Get conv features
VGG_PATH = cwd + "/data/imagenet-vgg-verydeep-19.mat"
with tf.Graph().as_default(), tf.Session() as sess:
with tf.device("/cpu:0"):
img_placeholder = tf.placeholder(tf.float32
, shape=(None, imgsize[0], imgsize[1], 3))
nets, mean_pixel = net(VGG_PATH, img_placeholder)
train_features = nets['relu5_4'].eval(feed_dict={img_placeholder: trainimg_tensor})
test_features = nets['relu5_4'].eval(feed_dict={img_placeholder: testimg_tensor})
print("Convolutional map extraction done")
# # Shape of the convolutional feature maps
# In[6]:
print ("Shape of 'train_features' is %s" % (train_features.shape,))
print ("Shape of 'test_features' is %s" % (test_features.shape,))
# # Vectorization
# In[7]:
# Vectorize
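# relu5_4 is 4x4x512 for a 64x64 input (four stride-2 pools: 64 -> 32 -> 16 -> 8 -> 4),
# so each image yields a 4*4*512-dimensional feature vector.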
train_vectorized = np.ndarray((ntrain, 4*4*512))
test_vectorized = np.ndarray((ntest, 4*4*512))
for i in range(ntrain):
curr_feat = train_features[i, :, :, :]
curr_feat_vec = np.reshape(curr_feat, (1, -1))
train_vectorized[i, :] = curr_feat_vec
for i in range(ntest):
curr_feat = test_features[i, :, :, :]
curr_feat_vec = np.reshape(curr_feat, (1, -1))
test_vectorized[i, :] = curr_feat_vec
print ("Shape of 'train_vectorized' is %s" % (train_features.shape,))
print ("Shape of 'test_vectorized' is %s" % (test_features.shape,))
# # Define the fine-tuning network structure
# In[8]:
# Parameters
learning_rate = 0.0001
training_epochs = 100
batch_size = 100
display_step = 10
# tf Graph input
x = tf.placeholder(tf.float32, [None, 4*4*512])
y = tf.placeholder(tf.float32, [None, nclass])
keepratio = tf.placeholder(tf.float32)
# Network
with tf.device("/cpu:0"):
n_input = dim
n_output = nclass
weights = {
'wd1': tf.Variable(tf.random_normal([4*4*512, 1024], stddev=0.1)),
'wd2': tf.Variable(tf.random_normal([1024, n_output], stddev=0.1))
}
biases = {
'bd1': tf.Variable(tf.random_normal([1024], stddev=0.1)),
'bd2': tf.Variable(tf.random_normal([n_output], stddev=0.1))
}
def conv_basic(_input, _w, _b, _keepratio):
# Input
_input_r = _input
# Vectorize
_dense1 = tf.reshape(_input_r, [-1, _w['wd1'].get_shape().as_list()[0]])
# Fc1
_fc1 = tf.nn.relu(tf.add(tf.matmul(_dense1, _w['wd1']), _b['bd1']))
_fc_dr1 = tf.nn.dropout(_fc1, _keepratio)
# Fc2
_out = tf.add(tf.matmul(_fc_dr1, _w['wd2']), _b['bd2'])
# Return everything
out = {'input_r': _input_r, 'dense1': _dense1,
'fc1': _fc1, 'fc_dr1': _fc_dr1, 'out': _out }
return out
# Functions!
_pred = conv_basic(x, weights, biases, keepratio)['out']
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=_pred, labels=y))
optm = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
_corr = tf.equal(tf.argmax(_pred,1), tf.argmax(y,1))
accr = tf.reduce_mean(tf.cast(_corr, tf.float32))
init = tf.initialize_all_variables()
print ("Network Ready to Go!")
# # Optimization
# In[9]:
# Launch the graph
sess = tf.Session()
sess.run(init)
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.
num_batch = int(ntrain/batch_size)+1
# Loop over all batches
for i in range(num_batch):
randidx = np.random.randint(ntrain, size=batch_size)
batch_xs = train_vectorized[randidx, :]
batch_ys = trainlabel[randidx, :]
# Fit training using batch data
sess.run(optm, feed_dict={x: batch_xs, y: batch_ys, keepratio:0.7})
# Compute average loss
avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keepratio:1.})/num_batch
# Display logs per epoch step
if epoch % display_step == 0:
print ("Epoch: %03d/%03d cost: %.9f" % (epoch, training_epochs, avg_cost))
train_acc = sess.run(accr, feed_dict={x: batch_xs, y: batch_ys, keepratio:1.})
print (" Training accuracy: %.3f" % (train_acc))
test_acc = sess.run(accr, feed_dict={x: test_vectorized, y: testlabel, keepratio:1.})
print (" Test accuracy: %.3f" % (test_acc))
print ("Optimization Finished!")
|
[
"tensorflow.matmul",
"os.path.isfile",
"numpy.random.randint",
"numpy.mean",
"os.path.join",
"numpy.ndarray",
"tensorflow.nn.relu",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"numpy.transpose",
"tensorflow.placeholder",
"tensorflow.cast",
"numpy.reshape",
"tensorflow.initialize_all_variables",
"tensorflow.nn.bias_add",
"tensorflow.Session",
"tensorflow.constant",
"tensorflow.nn.max_pool",
"tensorflow.random_normal",
"tensorflow.Graph",
"scipy.misc.imresize",
"numpy.dot",
"os.listdir",
"scipy.misc.imread",
"os.getcwd",
"tensorflow.argmax",
"tensorflow.device",
"os.path.splitext",
"numpy.eye",
"tensorflow.train.AdamOptimizer",
"tensorflow.nn.dropout"
] |
[((305, 316), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (314, 316), False, 'import os\n'), ((1812, 1840), 'numpy.ndarray', 'np.ndarray', (['(imgcnt, nclass)'], {}), '((imgcnt, nclass))\n', (1822, 1840), True, 'import numpy as np\n'), ((2630, 2668), 'numpy.random.randint', 'np.random.randint', (['imgcnt'], {'size': 'imgcnt'}), '(imgcnt, size=imgcnt)\n', (2647, 2668), True, 'import numpy as np\n'), ((5128, 5175), 'numpy.ndarray', 'np.ndarray', (['(ntrain, imgsize[0], imgsize[1], 3)'], {}), '((ntrain, imgsize[0], imgsize[1], 3))\n', (5138, 5175), True, 'import numpy as np\n'), ((5193, 5239), 'numpy.ndarray', 'np.ndarray', (['(ntest, imgsize[0], imgsize[1], 3)'], {}), '((ntest, imgsize[0], imgsize[1], 3))\n', (5203, 5239), True, 'import numpy as np\n'), ((6489, 6522), 'numpy.ndarray', 'np.ndarray', (['(ntrain, 4 * 4 * 512)'], {}), '((ntrain, 4 * 4 * 512))\n', (6499, 6522), True, 'import numpy as np\n'), ((6538, 6570), 'numpy.ndarray', 'np.ndarray', (['(ntest, 4 * 4 * 512)'], {}), '((ntest, 4 * 4 * 512))\n', (6548, 6570), True, 'import numpy as np\n'), ((7186, 7233), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 4 * 4 * 512]'], {}), '(tf.float32, [None, 4 * 4 * 512])\n', (7200, 7233), True, 'import tensorflow as tf\n'), ((7234, 7276), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, nclass]'], {}), '(tf.float32, [None, nclass])\n', (7248, 7276), True, 'import tensorflow as tf\n'), ((7289, 7315), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (7303, 7315), True, 'import tensorflow as tf\n'), ((8773, 8785), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (8783, 8785), True, 'import tensorflow as tf\n'), ((527, 580), 'os.path.isfile', 'os.path.isfile', (['"""./data/imagenet-vgg-verydeep-19.mat"""'], {}), "('./data/imagenet-vgg-verydeep-19.mat')\n", (541, 580), False, 'import os\n'), ((1273, 1293), 'os.listdir', 'os.listdir', (['fullpath'], {}), '(fullpath)\n', (1283, 1293), False, 'import os\n'), ((1686, 1731), 'numpy.ndarray', 'np.ndarray', (['(imgcnt, imgsize[0] * imgsize[1])'], {}), '((imgcnt, imgsize[0] * imgsize[1]))\n', (1696, 1731), True, 'import numpy as np\n'), ((1753, 1802), 'numpy.ndarray', 'np.ndarray', (['(imgcnt, imgsize[0] * imgsize[1] * 3)'], {}), '((imgcnt, imgsize[0] * imgsize[1] * 3))\n', (1763, 1802), True, 'import numpy as np\n'), ((1944, 1960), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1954, 1960), False, 'import os\n'), ((3822, 3848), 'numpy.mean', 'np.mean', (['mean'], {'axis': '(0, 1)'}), '(mean, axis=(0, 1))\n', (3829, 3848), True, 'import numpy as np\n'), ((4760, 4786), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['conv', 'bias'], {}), '(conv, bias)\n', (4774, 4786), True, 'import tensorflow as tf\n'), ((4822, 4901), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['input'], {'ksize': '(1, 2, 2, 1)', 'strides': '(1, 2, 2, 1)', 'padding': '"""SAME"""'}), "(input, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1), padding='SAME')\n", (4836, 4901), True, 'import tensorflow as tf\n'), ((5307, 5355), 'numpy.reshape', 'np.reshape', (['currimg', '[imgsize[0], imgsize[1], 3]'], {}), '(currimg, [imgsize[0], imgsize[1], 3])\n', (5317, 5355), True, 'import numpy as np\n'), ((5537, 5585), 'numpy.reshape', 'np.reshape', (['currimg', '[imgsize[0], imgsize[1], 3]'], {}), '(currimg, [imgsize[0], imgsize[1], 3])\n', (5547, 5585), True, 'import numpy as np\n'), ((5804, 5816), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5814, 5816), True, 'import tensorflow as tf\n'), ((6654, 6684), 
'numpy.reshape', 'np.reshape', (['curr_feat', '(1, -1)'], {}), '(curr_feat, (1, -1))\n', (6664, 6684), True, 'import numpy as np\n'), ((6813, 6843), 'numpy.reshape', 'np.reshape', (['curr_feat', '(1, -1)'], {}), '(curr_feat, (1, -1))\n', (6823, 6843), True, 'import numpy as np\n'), ((7331, 7350), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (7340, 7350), True, 'import tensorflow as tf\n'), ((8665, 8694), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (8692, 8694), True, 'import tensorflow as tf\n'), ((1415, 1440), 'os.path.join', 'os.path.join', (['fullpath', 'f'], {}), '(fullpath, f)\n', (1427, 1440), False, 'import os\n'), ((1543, 1586), 'numpy.dot', 'np.dot', (['rgb[..., :3]', '[0.299, 0.587, 0.114]'], {}), '(rgb[..., :3], [0.299, 0.587, 0.114])\n', (1549, 1586), True, 'import numpy as np\n'), ((2082, 2103), 'os.path.join', 'os.path.join', (['path', 'f'], {}), '(path, f)\n', (2094, 2103), False, 'import os\n'), ((2123, 2139), 'scipy.misc.imread', 'imread', (['fullpath'], {}), '(fullpath)\n', (2129, 2139), False, 'from scipy.misc import imread, imresize\n'), ((2387, 2417), 'numpy.reshape', 'np.reshape', (['graysmall', '(1, -1)'], {}), '(graysmall, (1, -1))\n', (2397, 2417), True, 'import numpy as np\n'), ((4677, 4697), 'tensorflow.constant', 'tf.constant', (['weights'], {}), '(weights)\n', (4688, 4697), True, 'import tensorflow as tf\n'), ((5835, 5854), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (5844, 5854), True, 'import tensorflow as tf\n'), ((5882, 5949), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, imgsize[0], imgsize[1], 3)'}), '(tf.float32, shape=(None, imgsize[0], imgsize[1], 3))\n', (5896, 5949), True, 'import tensorflow as tf\n'), ((8023, 8054), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['_fc1', '_keepratio'], {}), '(_fc1, _keepratio)\n', (8036, 8054), True, 'import tensorflow as tf\n'), ((8398, 8461), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': '_pred', 'labels': 'y'}), '(logits=_pred, labels=y)\n', (8437, 8461), True, 'import tensorflow as tf\n'), ((8562, 8581), 'tensorflow.argmax', 'tf.argmax', (['_pred', '(1)'], {}), '(_pred, 1)\n', (8571, 8581), True, 'import tensorflow as tf\n'), ((8582, 8597), 'tensorflow.argmax', 'tf.argmax', (['y', '(1)'], {}), '(y, 1)\n', (8591, 8597), True, 'import tensorflow as tf\n'), ((8625, 8651), 'tensorflow.cast', 'tf.cast', (['_corr', 'tf.float32'], {}), '(_corr, tf.float32)\n', (8632, 8651), True, 'import tensorflow as tf\n'), ((8994, 9036), 'numpy.random.randint', 'np.random.randint', (['ntrain'], {'size': 'batch_size'}), '(ntrain, size=batch_size)\n', (9011, 9036), True, 'import numpy as np\n'), ((2318, 2361), 'scipy.misc.imresize', 'imresize', (['grayimg', '[imgsize[0], imgsize[1]]'], {}), '(grayimg, [imgsize[0], imgsize[1]])\n', (2326, 2361), False, 'from scipy.misc import imread, imresize\n'), ((2504, 2526), 'numpy.eye', 'np.eye', (['nclass', 'nclass'], {}), '(nclass, nclass)\n', (2510, 2526), True, 'import numpy as np\n'), ((4244, 4279), 'numpy.transpose', 'np.transpose', (['kernels', '(1, 0, 2, 3)'], {}), '(kernels, (1, 0, 2, 3))\n', (4256, 4279), True, 'import numpy as np\n'), ((5779, 5789), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (5787, 5789), True, 'import tensorflow as tf\n'), ((7437, 7486), 'tensorflow.random_normal', 'tf.random_normal', (['[4 * 4 * 512, 1024]'], {'stddev': '(0.1)'}), '([4 * 4 * 512, 1024], stddev=0.1)\n', 
(7453, 7486), True, 'import tensorflow as tf\n'), ((7512, 7558), 'tensorflow.random_normal', 'tf.random_normal', (['[1024, n_output]'], {'stddev': '(0.1)'}), '([1024, n_output], stddev=0.1)\n', (7528, 7558), True, 'import tensorflow as tf\n'), ((7610, 7646), 'tensorflow.random_normal', 'tf.random_normal', (['[1024]'], {'stddev': '(0.1)'}), '([1024], stddev=0.1)\n', (7626, 7646), True, 'import tensorflow as tf\n'), ((7676, 7716), 'tensorflow.random_normal', 'tf.random_normal', (['[n_output]'], {'stddev': '(0.1)'}), '([n_output], stddev=0.1)\n', (7692, 7716), True, 'import tensorflow as tf\n'), ((8091, 8120), 'tensorflow.matmul', 'tf.matmul', (['_fc_dr1', "_w['wd2']"], {}), "(_fc_dr1, _w['wd2'])\n", (8100, 8120), True, 'import tensorflow as tf\n'), ((8474, 8525), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (8496, 8525), True, 'import tensorflow as tf\n'), ((4425, 4444), 'tensorflow.nn.relu', 'tf.nn.relu', (['current'], {}), '(current)\n', (4435, 4444), True, 'import tensorflow as tf\n'), ((7962, 7991), 'tensorflow.matmul', 'tf.matmul', (['_dense1', "_w['wd1']"], {}), "(_dense1, _w['wd1'])\n", (7971, 7991), True, 'import tensorflow as tf\n'), ((1325, 1344), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (1341, 1344), False, 'import os\n'), ((1992, 2011), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (2008, 2011), False, 'import os\n')]
|
"""A population model that creates samples with more and more variants. Suitable for the aligner paper experiments
^ = intersection
E = subset
vx ^ v0 = v0
vx ^ v1 = v0
...
vx ^ vn = v0
v0 E v1
v1 E v2
v2 E v3
...
v(n-1) E vn
This plugin does not honor the site frequency spectrum model and ignores the original 'p' values
"""
import numpy as np
__example_param_text = """
{
"vn": {
"p_vx": 0.2,
"p_vn": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7],
}
}
"""
_description = __doc__ + '\nExample parameters:\n' + __example_param_text
#_example_params = json.loads(__example_param_text)
_example_params = eval(__example_param_text)
class Model:
def __init__(self, p_vx, p_vn):
"""A population model that creates samples with more and more variants. Suitable for the aligner paper experiments
:param p_vx: probability value for vx set
:param p_vn: probability values for v0, v1, v2, v3 .... set
"""
self.p_vx, self.p_vn = p_vx, p_vn
def samples(self, chrom_no=None, ml=None, rng_seed=1, **kwargs):
"""This returns an iterator
:param chrom_no: number of the chromosome being considered [1,2,3 ...] (ignored here)
:param ml: VariantList. master list of variants as created by genomes program
:param rng_seed: seed for random number generators
:return: A generator returning (generation no, serial_no, chromosome, % samples done) for each sample in population
Algorithm: (Repeat for each chromosome copy)
Generate random numbers r same size as variants list
Select vx <= r < p_vx
Pick a random subset of v0 as v1 size(v1)/size(v0) = p_v1/p_v0
Set all r corresponding to v0 - v1 as 1.0 so we never select these again
Pick v2, v3 ... by comparing r to p_v2, p_v3 and so on
"""
assert 0 <= self.p_vx <= 1.0, 'p_vx needs to be >= 0 and <= 1.0'
assert self.p_vx > self.p_vn[0], 'p_vx needs to be > p_vn[0]'
for n in range(len(self.p_vn) - 1):
assert self.p_vn[n] < self.p_vn[n + 1], 'p_vn needs to be in ascending order'
assert 0 <= self.p_vn[n] <= 1.0, 'p_vn needs to be >= 0 and <= 1.0'
rng = np.random.RandomState(rng_seed)
r = rng.rand(ml.variants.shape[0], 2)
idx_vx = [None, None]
for cpy in [0, 1]:
idx_vx[cpy] = np.sort(rng.choice(ml.variants.shape[0], size=int(ml.variants.shape[0] * self.p_vx), replace=False))
# Take elements in vx that are not going to be in v0 completely out of circulation
r[idx_vx[cpy][(r[idx_vx[cpy]] >= self.p_vn[0]).nonzero()[0]], cpy] = 1.1
# Now all elements for r < 1.0 are either in vx ^ v0 or not in vx
for n in range(len(self.p_vn) + 1):
if n == 0:
this_idx, sample_name = idx_vx, 'vx'
else:
this_idx, sample_name = [(r[:, cpy] < self.p_vn[n - 1]).nonzero()[0] for cpy in [0, 1]], 'v{:d}'.format(n - 1)
yield sample_name, ml.zip_up_chromosome(*this_idx), float(n + 1) / self.get_sample_count_estimate()
def get_sample_count_estimate(self):
"""Give us an as exact as possible estimate of how many samples we will produce"""
return 1 + len(self.p_vn)
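# --- Minimal usage sketch (assumption: not part of the original plugin) ---------------
# `_MockVariantList` is a hypothetical stand-in for the genomes VariantList; it exposes
# only the two members that Model.samples() touches (`variants` and `zip_up_chromosome`),
# so the sampling loop can be demonstrated without the real genomes program.
class _MockVariantList:
    def __init__(self, n_variants):
        self.variants = np.zeros((n_variants, 3))
    def zip_up_chromosome(self, idx0, idx1):
        # the real VariantList merges the two per-copy index lists into one chromosome
        return idx0, idx1
if __name__ == '__main__':
    model = Model(p_vx=0.5, p_vn=[0.1, 0.2, 0.3])
    for sample_name, chrom, frac_done in model.samples(chrom_no=1, ml=_MockVariantList(1000), rng_seed=1):
        print(sample_name, frac_done)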
|
[
"numpy.random.RandomState"
] |
[((2112, 2143), 'numpy.random.RandomState', 'np.random.RandomState', (['rng_seed'], {}), '(rng_seed)\n', (2133, 2143), True, 'import numpy as np\n')]
|
"""
Functions to rotate a point by a known euler pole.
"""
import numpy as np
from . import fault_vector_functions
def point_rotation_by_Euler_Pole(Point, Euler_Pole):
"""
Compute the velocity of rotation of a point about an Euler pole on a spherical earth.
This function is useful for computing the velocity of a stationary point in one reference frame
with respect to another reference frame.
The resulting velocity is assumed to be horizontal.
:param Point: [longitude, latitude] of observation point, in degrees
:type Point: array_like
:param Euler_Pole: [longitude, latitude, omega] of Euler Pole, in degrees and degrees/Ma
:type Euler_Pole: array_like
:returns: [e_velocity, n_velocity, u_velocity] of point in rotated reference frame, in mm/yr
:rtype: array_like
"""
R_point = get_r(Point[0], Point[1]);
R_ep = get_r(Euler_Pole[0], Euler_Pole[1]);
unit_ep = fault_vector_functions.get_unit_vector(R_ep);
omega_raw = degma2radyr(Euler_Pole[2]);
omega = omega_raw * unit_ep; # in radians per year
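    # Rigid rotation: v = omega x r, with omega in rad/yr and r in meters, giving m/yr.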
velocity_of_transformation = np.cross(omega, R_point); # velocity at the station from the euler pole rotation
velocity_of_transformation = velocity_of_transformation * 1000; # mm/yr in x, y, z
xvel = velocity_of_transformation[0];
yvel = velocity_of_transformation[1];
zvel = velocity_of_transformation[2];
[east_transform, north_transform] = xyz2en(xvel, yvel, zvel, Point[0]);
up_transform = 0; # by definition the velocity will be horizontal
return [east_transform, north_transform, up_transform];
def degma2radyr(omega):
"""Convert omega from degrees/Ma to radians/yr"""
radyr = omega * (np.pi / 180) * 1e-6;
return radyr;
def get_r(lon, lat):
"""
Vector from center of earth to the point in question assuming a spherical earth.
The XYZ coordinate system has x=0 at longitude=0 and z=0 at the equator with positive to the north.
:param lon: Longitude of initial point, in degrees
:type lon: float
:param lat: Latitude of initial point, in degrees
:type lat: float
:returns: [X, Y, Z] coordinates in meters.
:rtype: [float, float, float]
"""
R_fixed = 6378000; # In meters
R_equatorial_disk = R_fixed * np.cos(np.deg2rad(lat));
T_equatorial_disk = np.deg2rad(lon);
X = R_equatorial_disk * np.cos(T_equatorial_disk);
Y = R_equatorial_disk * np.sin(T_equatorial_disk);
Z = np.sqrt(R_fixed * R_fixed - X * X - Y * Y);
if lat < 0:
Z = Z * -1;
return [X, Y, Z];
def get_unit_east(lon):
"""
Unit east vector from a point on earth's surface in XYZ coordinates.
The XYZ coordinate system has x=0 at longitude=0 and z=0 at the equator with positive to the north.
The return value of Z is zero for eastward motion.
:param lon: Longitude of initial point, in degrees
:type lon: float
:returns: [X, Y, Z] components
:rtype: [float, float, float]
"""
T_equatorial_disk = np.deg2rad(lon);
x = -np.sin(T_equatorial_disk);
y = np.cos(T_equatorial_disk);
return [x, y, 0];
def xyz2en(x, y, z, lon):
"""
Convert velocities from xyz to horizontal east and north, assuming spherical earth and no vertical motion.
We take the dot product of the velocity with the unit east vector and the north component is the remainder.
A more complex function xyz2enu(X, Y, Z, lon, lat) could be written later.
:param x: x velocity at observation point
:type x: float
:param y: y velocity at observation point
:type y: float
:param z: z velocity at observation point
:type z: float
:param lon: Longitude of observation point, in degrees
:type lon: float
:returns: [east_vel, north_vel]
:rtype: [float, float]
"""
vel_vector = [x, y, z];
unit_east = get_unit_east(lon);
e = np.dot(vel_vector, unit_east);
n = np.sqrt(x * x + y * y + z * z - e * e);
if z < 0:
n = n * -1;
return [e, n];
if __name__ == "__main__":
Euler_Pole = [69.9, -12.3, 0.55]; # Lon, Lat, Deg/Ma
Point = [-124, 40.5]; # Lon, Lat
[east_transform, north_transform, up_transform] = point_rotation_by_Euler_Pole(Point, Euler_Pole);
total = np.sqrt(east_transform * east_transform + north_transform * north_transform);
print("%.2f east, %.2f north, %.2f up, %.2f mm/yr total" % (east_transform, north_transform, up_transform, total));
|
[
"numpy.deg2rad",
"numpy.cross",
"numpy.sin",
"numpy.cos",
"numpy.dot",
"numpy.sqrt"
] |
[((1109, 1133), 'numpy.cross', 'np.cross', (['omega', 'R_point'], {}), '(omega, R_point)\n', (1117, 1133), True, 'import numpy as np\n'), ((2334, 2349), 'numpy.deg2rad', 'np.deg2rad', (['lon'], {}), '(lon)\n', (2344, 2349), True, 'import numpy as np\n'), ((2469, 2511), 'numpy.sqrt', 'np.sqrt', (['(R_fixed * R_fixed - X * X - Y * Y)'], {}), '(R_fixed * R_fixed - X * X - Y * Y)\n', (2476, 2511), True, 'import numpy as np\n'), ((3015, 3030), 'numpy.deg2rad', 'np.deg2rad', (['lon'], {}), '(lon)\n', (3025, 3030), True, 'import numpy as np\n'), ((3076, 3101), 'numpy.cos', 'np.cos', (['T_equatorial_disk'], {}), '(T_equatorial_disk)\n', (3082, 3101), True, 'import numpy as np\n'), ((3882, 3911), 'numpy.dot', 'np.dot', (['vel_vector', 'unit_east'], {}), '(vel_vector, unit_east)\n', (3888, 3911), True, 'import numpy as np\n'), ((3921, 3959), 'numpy.sqrt', 'np.sqrt', (['(x * x + y * y + z * z - e * e)'], {}), '(x * x + y * y + z * z - e * e)\n', (3928, 3959), True, 'import numpy as np\n'), ((4254, 4330), 'numpy.sqrt', 'np.sqrt', (['(east_transform * east_transform + north_transform * north_transform)'], {}), '(east_transform * east_transform + north_transform * north_transform)\n', (4261, 4330), True, 'import numpy as np\n'), ((2379, 2404), 'numpy.cos', 'np.cos', (['T_equatorial_disk'], {}), '(T_equatorial_disk)\n', (2385, 2404), True, 'import numpy as np\n'), ((2434, 2459), 'numpy.sin', 'np.sin', (['T_equatorial_disk'], {}), '(T_equatorial_disk)\n', (2440, 2459), True, 'import numpy as np\n'), ((3041, 3066), 'numpy.sin', 'np.sin', (['T_equatorial_disk'], {}), '(T_equatorial_disk)\n', (3047, 3066), True, 'import numpy as np\n'), ((2292, 2307), 'numpy.deg2rad', 'np.deg2rad', (['lat'], {}), '(lat)\n', (2302, 2307), True, 'import numpy as np\n')]
|
import numpy as np
import random
import os, sys
from scipy import ndimage
import healpy as hp
from astropy.io import fits
import matplotlib.pyplot as plt
from scipy.signal import savgol_filter
from astropy.io import fits
from importlib import reload
from pycs.misc.cosmostat_init import *
from pycs.misc.mr_prog import *
# Explicit stdlib imports used by mrs_prog below; these names may also be re-exported by
# the star imports above, but importing them here keeps the module self-contained.
import shlex
from datetime import datetime
from subprocess import call
from os import remove
def make_healpix_map(ra, dec, weights, nside):
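    # Per-pixel weighted average: count hits per HEALPix pixel and the weighted sum,
    # then divide; pixels with no hits are set to hp.UNSEEN.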
pixels= hp.ang2pix(nside,theta = 0.5*np.pi - np.deg2rad(dec), phi = np.deg2rad(ra))
bincount = np.bincount(pixels, minlength = hp.nside2npix(nside))
bincount_weighted = np.bincount(pixels, minlength = hp.nside2npix(nside), weights=weights)
return np.where(bincount>0.5, bincount_weighted/bincount, hp.UNSEEN)
def get_bincount(ra, dec, nside):
pixels= hp.ang2pix(nside,theta = 0.5*np.pi - np.deg2rad(dec), phi = np.deg2rad(ra))
bincount = np.bincount(pixels, minlength = hp.nside2npix(nside))
return bincount
def mrs_read(FN):
return hp.read_map(FN)
def mrs_write(FN, mapin):
hp.write_map(FN, mapin, overwrite=True)
def rims(FN):
return hp.read_map(FN)
def mrs_resize(mapin, nsideout):
k = hp.ud_grade(mapin, nsideout)
return k
# smoothing with sigma in arcmin
def smooth(map, sigma):
    s = hp.smoothing(map, sigma=sigma / (360. * 60.) * (np.pi * 2), pol=False)
    return s
# lut='rainbow' # 'inferno' 'gist_stern'
def tvs(mapin,min=None,max=None,title=None,sigma=None,lut=None):
if sigma is None:
hp.mollview(mapin,max=max,min=min, title=title,cmap=lut)
else:
s= hp.smoothing(mapin, sigma=sigma/(360.*60.) * (np.pi*2),pol=False)
hp.mollview(s,max=max,min=min, title=title,cmap=lut)
hp.mollview
def get_nside(Npix):
return hp.npix2nside(Npix)
def gnside(data):
npix = data.shape[0]
nside = hp.npix2nside(npix)
return nside
def pixel_size(nside):
# Return the pixel size of a healpix map in arc minutes
# SKI_SURFACE IN SQUARE DEGREES = 4. * !PI * (360. / (2*!PI))^2 = 41253
psize = 41253. / (float(nside)**2.*12.) * 60.**2.
return np.sqrt(psize)
def l2amin(l):
a = 1. / l
a = a * 180.* 60. / np.pi
return a
def amin2l(a):
ar = a / (180.* 60.) * np.pi
l = 1. / ar
return l
def g2eb(g1,g2):
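    # Spin-2 harmonic transform of the shear (g1, g2); the E-mode alm reconstructs the
    # convergence-like (E) map and the B-mode alm the parity-odd component.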
nside = gnside(g1)
(ae,ab) = hp.map2alm_spin((g1,g2), 2)
ke= hp.alm2map(ae, nside, pol=False)
kb= hp.alm2map(ab, nside, pol=False)
return ke,kb
def g2k(g1,g2):
nside = gnside(g1)
(ae,ab) = hp.map2alm_spin((g1,g2), 2)
ke= hp.alm2map(ae, nside, pol=False)
return ke
def k2g(ke):
nside = gnside(ke)
    lmax = nside * 3 - 1
    ae = hp.map2alm(ke, 1, pol=False)
    ab = np.copy(ae) * 0.
    (g1, g2) = hp.alm2map_spin((ae, ab), nside, 2, lmax)
return g1,g2
# it seems that hp.alm2map_spin crashes.
def eb2g(ke,kb):
nside = gnside(ke)
lmax=nside*3 - 1
ae = hp.map2alm(ke, 1, pol=False)
ab = hp.map2alm(kb, 1, pol=False)
(g1,g2) = hp.alm2map_spin( (ae,ab), nside, 2, lmax)
return g1,g2
def mrs_prog(data, prog="mrs_powspec", opt=None, path='./', remove_files=True, verbose=False, FileOut=None, InputFormatisHealpix=True, OutputFormatisHealpix=True):
# Create a unique string using the current date and time.
# print('mr_filter ', opt)
unique_string = datetime.now().strftime('%Y.%m.%d_%H.%M.%S')
result=0
# Set the ouput file names.
file_name = path + 'mr_temp_' + unique_string
file_fits = file_name + '.fits'
if FileOut is not None:
file_out = FileOut
else:
file_out = file_name + '_out.fits'
# Write the input data to a fits file.
if InputFormatisHealpix:
mrs_write(file_fits, data)
else:
writefits(file_fits, data)
# print("PROG: ", prog)
cmd = prog
if isinstance(opt, type(None)):
optF=' '
else:
optF= opt
if verbose:
optF = optF + " -v "
cmd = cmd + " " + optF + " " + file_fits + " " + file_out
if verbose:
print ('CMD = ', cmd)
args = shlex.split(cmd)
# print('args ', args)
call(args)
# Retrieve wavelet filtered data.
if OutputFormatisHealpix:
result = mrs_read(file_out)
else:
result = readfits(file_out)
# Return the mr_transform results (and the output file names).
if remove_files:
remove(file_fits)
remove(file_out)
return result
else:
return result
def mrs_powspec(map, verbose=False):
p = mrs_prog(map, prog="mrs_powspec", verbose=verbose, OutputFormatisHealpix=False)
return p
def mrs_smooth(map, opt=None, verbose=False):
p = mrs_prog(map, prog="mrs_smooth", verbose=verbose, opt=opt, OutputFormatisHealpix=True)
return p
def mrs_almtrans(map, lmax=None, opt=None, verbose=False):
optParam = ' -T '
if opt is not None:
optParam = ' -T ' + opt
if lmax is not None:
optParam = ' -l ' + str(lmax) + optParam
p = mrs_prog(map, prog="mrs_almtrans", verbose=verbose, opt=optParam, OutputFormatisHealpix=False)
return p
def mrs_almrec(map, opt=None, verbose=False,nside=None):
optParam = ' -T '
if opt is not None:
optParam = ' -T ' + opt
if nside is not None:
optParam = ' -n ' + str(nside) + optParam
p = mrs_prog(map, prog="mrs_almrec", verbose=verbose, opt=optParam, InputFormatisHealpix=False, OutputFormatisHealpix=True)
return p
def tol(map,lmax_amin,amin=False):
ns= gnside(map)
lmax=lmax_amin
if amin is True:
lmax=amin2l(lmax_amin)
a = mrs_almtrans(map, lmax=lmax)
b = mrs_almrec(a, nside=ns)
return b
def mrs_uwttrans(map, lmax=None, opt=None, verbose=False, path='./',progpath=None):
optParam = ' '
if opt is not None:
optParam = ' ' + opt
if lmax is not None:
optParam = ' -l ' + str(lmax) + optParam
if progpath is None:
prog="mrs_uwttrans"
else:
prog=progpath+"mrs_uwttrans"
p = mrs_prog(map, prog=prog, verbose=verbose, opt=optParam, OutputFormatisHealpix=False,path=path)
return p
def mrs_uwtrecons(Tmap, lmax=None, opt=None, verbose=False, path='./',progpath=None):
optParam = ' '
if opt is not None:
optParam = ' ' + opt
if lmax is not None:
optParam = ' -l ' + str(lmax) + optParam
if progpath is None:
prog="mrs_uwttrans"
else:
prog=progpath+"mrs_uwttrans -r "
p = mrs_prog(Tmap, prog=prog, verbose=verbose, opt=optParam, InputFormatisHealpix=False, OutputFormatisHealpix=True,path=path)
return p
|
[
"healpy.write_map",
"healpy.alm2map",
"healpy.mollview",
"numpy.copy",
"healpy.map2alm",
"numpy.deg2rad",
"healpy.ud_grade",
"healpy.nside2npix",
"numpy.where",
"healpy.npix2nside",
"healpy.map2alm_spin",
"healpy.alm2map_spin",
"healpy.smoothing",
"healpy.read_map",
"numpy.sqrt"
] |
[((634, 699), 'numpy.where', 'np.where', (['(bincount > 0.5)', '(bincount_weighted / bincount)', 'hp.UNSEEN'], {}), '(bincount > 0.5, bincount_weighted / bincount, hp.UNSEEN)\n', (642, 699), True, 'import numpy as np\n'), ((938, 953), 'healpy.read_map', 'hp.read_map', (['FN'], {}), '(FN)\n', (949, 953), True, 'import healpy as hp\n'), ((985, 1024), 'healpy.write_map', 'hp.write_map', (['FN', 'mapin'], {'overwrite': '(True)'}), '(FN, mapin, overwrite=True)\n', (997, 1024), True, 'import healpy as hp\n'), ((1051, 1066), 'healpy.read_map', 'hp.read_map', (['FN'], {}), '(FN)\n', (1062, 1066), True, 'import healpy as hp\n'), ((1109, 1137), 'healpy.ud_grade', 'hp.ud_grade', (['mapin', 'nsideout'], {}), '(mapin, nsideout)\n', (1120, 1137), True, 'import healpy as hp\n'), ((1216, 1290), 'healpy.smoothing', 'hp.smoothing', (['mapin'], {'sigma': '(sigma / (360.0 * 60.0) * (np.pi * 2))', 'pol': '(False)'}), '(mapin, sigma=sigma / (360.0 * 60.0) * (np.pi * 2), pol=False)\n', (1228, 1290), True, 'import healpy as hp\n'), ((1685, 1704), 'healpy.npix2nside', 'hp.npix2nside', (['Npix'], {}), '(Npix)\n', (1698, 1704), True, 'import healpy as hp\n'), ((1761, 1780), 'healpy.npix2nside', 'hp.npix2nside', (['npix'], {}), '(npix)\n', (1774, 1780), True, 'import healpy as hp\n'), ((2024, 2038), 'numpy.sqrt', 'np.sqrt', (['psize'], {}), '(psize)\n', (2031, 2038), True, 'import numpy as np\n'), ((2249, 2277), 'healpy.map2alm_spin', 'hp.map2alm_spin', (['(g1, g2)', '(2)'], {}), '((g1, g2), 2)\n', (2264, 2277), True, 'import healpy as hp\n'), ((2285, 2317), 'healpy.alm2map', 'hp.alm2map', (['ae', 'nside'], {'pol': '(False)'}), '(ae, nside, pol=False)\n', (2295, 2317), True, 'import healpy as hp\n'), ((2326, 2358), 'healpy.alm2map', 'hp.alm2map', (['ab', 'nside'], {'pol': '(False)'}), '(ab, nside, pol=False)\n', (2336, 2358), True, 'import healpy as hp\n'), ((2430, 2458), 'healpy.map2alm_spin', 'hp.map2alm_spin', (['(g1, g2)', '(2)'], {}), '((g1, g2), 2)\n', (2445, 2458), True, 'import healpy as hp\n'), ((2466, 2498), 'healpy.alm2map', 'hp.alm2map', (['ae', 'nside'], {'pol': '(False)'}), '(ae, nside, pol=False)\n', (2476, 2498), True, 'import healpy as hp\n'), ((2559, 2587), 'healpy.map2alm', 'hp.map2alm', (['ke', '(1)'], {'pol': '(False)'}), '(ke, 1, pol=False)\n', (2569, 2587), True, 'import healpy as hp\n'), ((2627, 2666), 'healpy.alm2map_spin', 'hp.alm2map_spin', (['(ae, ab)', '(2)'], {'lmax': 'lmax'}), '((ae, ab), 2, lmax=lmax)\n', (2642, 2666), True, 'import healpy as hp\n'), ((2795, 2823), 'healpy.map2alm', 'hp.map2alm', (['ke', '(1)'], {'pol': '(False)'}), '(ke, 1, pol=False)\n', (2805, 2823), True, 'import healpy as hp\n'), ((2833, 2861), 'healpy.map2alm', 'hp.map2alm', (['kb', '(1)'], {'pol': '(False)'}), '(kb, 1, pol=False)\n', (2843, 2861), True, 'import healpy as hp\n'), ((2876, 2917), 'healpy.alm2map_spin', 'hp.alm2map_spin', (['(ae, ab)', 'nside', '(2)', 'lmax'], {}), '((ae, ab), nside, 2, lmax)\n', (2891, 2917), True, 'import healpy as hp\n'), ((1430, 1489), 'healpy.mollview', 'hp.mollview', (['mapin'], {'max': 'max', 'min': 'min', 'title': 'title', 'cmap': 'lut'}), '(mapin, max=max, min=min, title=title, cmap=lut)\n', (1441, 1489), True, 'import healpy as hp\n'), ((1507, 1581), 'healpy.smoothing', 'hp.smoothing', (['mapin'], {'sigma': '(sigma / (360.0 * 60.0) * (np.pi * 2))', 'pol': '(False)'}), '(mapin, sigma=sigma / (360.0 * 60.0) * (np.pi * 2), pol=False)\n', (1519, 1581), True, 'import healpy as hp\n'), ((1580, 1635), 'healpy.mollview', 'hp.mollview', (['s'], {'max': 'max', 'min': 'min', 'title': 
'title', 'cmap': 'lut'}), '(s, max=max, min=min, title=title, cmap=lut)\n', (1591, 1635), True, 'import healpy as hp\n'), ((2596, 2607), 'numpy.copy', 'np.copy', (['ae'], {}), '(ae)\n', (2603, 2607), True, 'import numpy as np\n'), ((443, 457), 'numpy.deg2rad', 'np.deg2rad', (['ra'], {}), '(ra)\n', (453, 457), True, 'import numpy as np\n'), ((506, 526), 'healpy.nside2npix', 'hp.nside2npix', (['nside'], {}), '(nside)\n', (519, 526), True, 'import healpy as hp\n'), ((584, 604), 'healpy.nside2npix', 'hp.nside2npix', (['nside'], {}), '(nside)\n', (597, 604), True, 'import healpy as hp\n'), ((803, 817), 'numpy.deg2rad', 'np.deg2rad', (['ra'], {}), '(ra)\n', (813, 817), True, 'import numpy as np\n'), ((866, 886), 'healpy.nside2npix', 'hp.nside2npix', (['nside'], {}), '(nside)\n', (879, 886), True, 'import healpy as hp\n'), ((420, 435), 'numpy.deg2rad', 'np.deg2rad', (['dec'], {}), '(dec)\n', (430, 435), True, 'import numpy as np\n'), ((780, 795), 'numpy.deg2rad', 'np.deg2rad', (['dec'], {}), '(dec)\n', (790, 795), True, 'import numpy as np\n')]
|
import mobula.layers as L
import numpy as np
def test_sigmoid():
X = ((np.arange(10000) - 5000) / 1000.0).reshape((-1, 1, 1, 1))
data = L.Data(X, "data")
data.reshape()
l = L.Sigmoid(data)
l.reshape()
assert l.Y.shape == X.shape
l.forward()
l.dY = np.random.random(l.Y.shape) * 10
l.backward()
enx = np.exp(-X)
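    # analytic reference: sigmoid(x) = 1 / (1 + e^-x), so dX = dY * e^-x / (1 + e^-x)^2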
assert np.allclose(l.Y.ravel(), (1.0 / (1.0 + enx)).ravel())
assert np.allclose(l.dX.ravel(), (enx / np.square(1 + enx) * l.dY).ravel())
def test_relu():
X = ((np.arange(10000) - 5000) / 1000.0).reshape((-1, 1, 1, 1))
data = L.Data(X, "data")
data.reshape()
l = L.ReLU(data)
l.reshape()
assert l.Y.shape == X.shape
l.forward()
l.dY = np.random.random(l.Y.shape) * 10
l.backward()
Y = np.zeros(X.shape)
b = (X > 0)
Y[b] = X[b]
dX = np.zeros(X.shape)
dX[b] = l.dY[b]
'''
d = (l.dX != dX)
print (l.dX[d], dX[d])
'''
assert np.allclose(l.Y.ravel(), Y.ravel())
assert np.allclose(l.dX.ravel(), dX.ravel())
def test_selu():
X = ((np.arange(10000) - 5000) / 1000.0).reshape((-1, 1, 1, 1))
data = L.Data(X, "data")
data.reshape()
l = L.SELU(data)
y = l.eval()
ty = np.zeros(X.shape)
ty[X > 0] = l.scale * X[X>0]
ty[X<=0] = l.scale * (l.alpha * np.exp(X[X<=0]) - l.alpha)
assert np.allclose(y, ty)
l.dY = np.random.random(l.Y.shape)
l.backward()
dX = np.zeros(X.shape)
dX[X > 0] = l.scale
dX[X <= 0] = l.scale * l.alpha * np.exp(X[X<=0])
dX *= l.dY
assert np.allclose(dX, l.dX)
def test_PReLU():
X = ((np.arange(10000) - 5000) / 1000.0).reshape((-1, 1, 1, 1))
data = L.Data(X, "data")
data.reshape()
l = L.PReLU(data)
y = l.eval()
ty = np.zeros(X.shape)
ty[X>0] = X[X>0]
ty[X<=0] = l.alpha * X[X<=0]
assert np.allclose(y, ty)
l.dY = np.random.random(l.Y.shape)
l.backward()
dX = np.zeros(X.shape)
dX[X>0] = 1
dX[X<=0] = l.alpha
dX *= l.dY
print (dX, l.dX)
assert np.allclose(dX, l.dX)
def test_tanh():
X = ((np.arange(10000) - 5000) / 1000.0).reshape((-1, 1, 1, 1))
data = L.Data(X, "data")
data.reshape()
l = L.Tanh(data)
y = l.eval()
p = np.exp(X)
n = np.exp(-X)
ty = (p - n) / (p + n)
assert np.allclose(y, ty)
l.dY = np.random.random(l.Y.shape)
l.backward()
dX = 1.0 - np.square(p - n) / np.square(p + n)
dX *= l.dY
assert np.allclose(dX, l.dX)
|
[
"mobula.layers.PReLU",
"mobula.layers.Tanh",
"numpy.allclose",
"mobula.layers.ReLU",
"numpy.zeros",
"numpy.square",
"numpy.random.random",
"numpy.arange",
"numpy.exp",
"mobula.layers.Data",
"mobula.layers.SELU",
"mobula.layers.Sigmoid"
] |
[((145, 162), 'mobula.layers.Data', 'L.Data', (['X', '"""data"""'], {}), "(X, 'data')\n", (151, 162), True, 'import mobula.layers as L\n'), ((190, 205), 'mobula.layers.Sigmoid', 'L.Sigmoid', (['data'], {}), '(data)\n', (199, 205), True, 'import mobula.layers as L\n'), ((342, 352), 'numpy.exp', 'np.exp', (['(-X)'], {}), '(-X)\n', (348, 352), True, 'import numpy as np\n'), ((595, 612), 'mobula.layers.Data', 'L.Data', (['X', '"""data"""'], {}), "(X, 'data')\n", (601, 612), True, 'import mobula.layers as L\n'), ((640, 652), 'mobula.layers.ReLU', 'L.ReLU', (['data'], {}), '(data)\n', (646, 652), True, 'import mobula.layers as L\n'), ((786, 803), 'numpy.zeros', 'np.zeros', (['X.shape'], {}), '(X.shape)\n', (794, 803), True, 'import numpy as np\n'), ((845, 862), 'numpy.zeros', 'np.zeros', (['X.shape'], {}), '(X.shape)\n', (853, 862), True, 'import numpy as np\n'), ((1140, 1157), 'mobula.layers.Data', 'L.Data', (['X', '"""data"""'], {}), "(X, 'data')\n", (1146, 1157), True, 'import mobula.layers as L\n'), ((1185, 1197), 'mobula.layers.SELU', 'L.SELU', (['data'], {}), '(data)\n', (1191, 1197), True, 'import mobula.layers as L\n'), ((1224, 1241), 'numpy.zeros', 'np.zeros', (['X.shape'], {}), '(X.shape)\n', (1232, 1241), True, 'import numpy as np\n'), ((1350, 1368), 'numpy.allclose', 'np.allclose', (['y', 'ty'], {}), '(y, ty)\n', (1361, 1368), True, 'import numpy as np\n'), ((1380, 1407), 'numpy.random.random', 'np.random.random', (['l.Y.shape'], {}), '(l.Y.shape)\n', (1396, 1407), True, 'import numpy as np\n'), ((1434, 1451), 'numpy.zeros', 'np.zeros', (['X.shape'], {}), '(X.shape)\n', (1442, 1451), True, 'import numpy as np\n'), ((1555, 1576), 'numpy.allclose', 'np.allclose', (['dX', 'l.dX'], {}), '(dX, l.dX)\n', (1566, 1576), True, 'import numpy as np\n'), ((1675, 1692), 'mobula.layers.Data', 'L.Data', (['X', '"""data"""'], {}), "(X, 'data')\n", (1681, 1692), True, 'import mobula.layers as L\n'), ((1720, 1733), 'mobula.layers.PReLU', 'L.PReLU', (['data'], {}), '(data)\n', (1727, 1733), True, 'import mobula.layers as L\n'), ((1760, 1777), 'numpy.zeros', 'np.zeros', (['X.shape'], {}), '(X.shape)\n', (1768, 1777), True, 'import numpy as np\n'), ((1843, 1861), 'numpy.allclose', 'np.allclose', (['y', 'ty'], {}), '(y, ty)\n', (1854, 1861), True, 'import numpy as np\n'), ((1873, 1900), 'numpy.random.random', 'np.random.random', (['l.Y.shape'], {}), '(l.Y.shape)\n', (1889, 1900), True, 'import numpy as np\n'), ((1927, 1944), 'numpy.zeros', 'np.zeros', (['X.shape'], {}), '(X.shape)\n', (1935, 1944), True, 'import numpy as np\n'), ((2031, 2052), 'numpy.allclose', 'np.allclose', (['dX', 'l.dX'], {}), '(dX, l.dX)\n', (2042, 2052), True, 'import numpy as np\n'), ((2150, 2167), 'mobula.layers.Data', 'L.Data', (['X', '"""data"""'], {}), "(X, 'data')\n", (2156, 2167), True, 'import mobula.layers as L\n'), ((2195, 2207), 'mobula.layers.Tanh', 'L.Tanh', (['data'], {}), '(data)\n', (2201, 2207), True, 'import mobula.layers as L\n'), ((2233, 2242), 'numpy.exp', 'np.exp', (['X'], {}), '(X)\n', (2239, 2242), True, 'import numpy as np\n'), ((2251, 2261), 'numpy.exp', 'np.exp', (['(-X)'], {}), '(-X)\n', (2257, 2261), True, 'import numpy as np\n'), ((2300, 2318), 'numpy.allclose', 'np.allclose', (['y', 'ty'], {}), '(y, ty)\n', (2311, 2318), True, 'import numpy as np\n'), ((2330, 2357), 'numpy.random.random', 'np.random.random', (['l.Y.shape'], {}), '(l.Y.shape)\n', (2346, 2357), True, 'import numpy as np\n'), ((2452, 2473), 'numpy.allclose', 'np.allclose', (['dX', 'l.dX'], {}), '(dX, l.dX)\n', (2463, 2473), True, 'import 
numpy as np\n'), ((281, 308), 'numpy.random.random', 'np.random.random', (['l.Y.shape'], {}), '(l.Y.shape)\n', (297, 308), True, 'import numpy as np\n'), ((728, 755), 'numpy.random.random', 'np.random.random', (['l.Y.shape'], {}), '(l.Y.shape)\n', (744, 755), True, 'import numpy as np\n'), ((1513, 1530), 'numpy.exp', 'np.exp', (['X[X <= 0]'], {}), '(X[X <= 0])\n', (1519, 1530), True, 'import numpy as np\n'), ((2390, 2406), 'numpy.square', 'np.square', (['(p - n)'], {}), '(p - n)\n', (2399, 2406), True, 'import numpy as np\n'), ((2409, 2425), 'numpy.square', 'np.square', (['(p + n)'], {}), '(p + n)\n', (2418, 2425), True, 'import numpy as np\n'), ((1312, 1329), 'numpy.exp', 'np.exp', (['X[X <= 0]'], {}), '(X[X <= 0])\n', (1318, 1329), True, 'import numpy as np\n'), ((76, 92), 'numpy.arange', 'np.arange', (['(10000)'], {}), '(10000)\n', (85, 92), True, 'import numpy as np\n'), ((526, 542), 'numpy.arange', 'np.arange', (['(10000)'], {}), '(10000)\n', (535, 542), True, 'import numpy as np\n'), ((1071, 1087), 'numpy.arange', 'np.arange', (['(10000)'], {}), '(10000)\n', (1080, 1087), True, 'import numpy as np\n'), ((1606, 1622), 'numpy.arange', 'np.arange', (['(10000)'], {}), '(10000)\n', (1615, 1622), True, 'import numpy as np\n'), ((2081, 2097), 'numpy.arange', 'np.arange', (['(10000)'], {}), '(10000)\n', (2090, 2097), True, 'import numpy as np\n'), ((462, 480), 'numpy.square', 'np.square', (['(1 + enx)'], {}), '(1 + enx)\n', (471, 480), True, 'import numpy as np\n')]
|
import model3 as M
import numpy as np
import tensorflow as tf
params = np.load('lstmpm_d1.npy').item()
params2 = np.load('lstmpm_d2.npy').item()
def get_conv(name):
res = []
# print(params[name])
res.append(params[name]['weights'])
res.append(params[name]['bias'])
# print(res[0].shape)
return res
def get_conv2(name):
res = []
# print(params[name])
res.append(params2[name]['weights'])
res.append(params2[name]['bias'])
# print(res[0].shape)
return res
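# Both .npy files hold a dict of {layer_name: {'weights': ..., 'bias': ...}}
# (LSTM Pose Machine weights, presumably exported from the reference model:
# lstmpm_d1 feeds Stage0 / "deploy1", lstmpm_d2 feeds Stage1 / "deploy2");
# get_conv / get_conv2 simply repackage one entry as the [weights, bias] list
# that M.ConvLayer(values=...) expects.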
class Stage0(M.Model):
def initialize(self):
# init encoding
self.c1_s1 = M.ConvLayer(9, 128, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv('conv1_stage1'))
self.p1_s1 = M.MaxPool(3, 2, pad='VALID')
self.c2_s1 = M.ConvLayer(9, 128, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv('conv2_stage1'))
self.p2_s1 = M.MaxPool(3, 2, pad='VALID')
self.c3_s1 = M.ConvLayer(9, 128, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv('conv3_stage1'))
self.p3_s1 = M.MaxPool(3, 2, pad='VALID')
self.c4_s1 = M.ConvLayer(5, 32, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv('conv4_stage1'))
self.c5_s1 = M.ConvLayer(9, 512, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv('conv5_stage1'))
self.c6_s1 = M.ConvLayer(1, 512, activation=M.PARAM_RELU, values=get_conv('conv6_stage1'))
self.c7_s1 = M.ConvLayer(1, 15, values=get_conv('conv7_stage1'))
# frame encoding
self.c1_s2 = M.ConvLayer(9, 128, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv('conv1_stage2'))
self.p1_s2 = M.MaxPool(3, 2, pad='VALID')
self.c2_s2 = M.ConvLayer(9, 128, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv('conv2_stage2'))
self.p2_s2 = M.MaxPool(3, 2, pad='VALID')
self.c3_s2 = M.ConvLayer(9, 128, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv('conv3_stage2'))
self.p3_s2 = M.MaxPool(3, 2, pad='VALID')
self.c4_s2 = M.ConvLayer(5, 32, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv('conv4_stage2'))
# center map
self.pool = M.AvgPool(9,8, pad='VALID')
# LSTM0
self.g = M.ConvLayer(3, 48, pad='SAME_LEFT', values=get_conv('g_x_stage2'))
self.gb = tf.convert_to_tensor(params['g_stage2'][1].astype(np.float32))
self.gb = tf.Variable(self.gb)
self.i = M.ConvLayer(3, 48, pad='SAME_LEFT', values=get_conv('i_x_stage2'))
self.ib = tf.convert_to_tensor(params['i_stage2'][1].astype(np.float32))
self.ib = tf.Variable(self.ib)
self.o = M.ConvLayer(3, 48, pad='SAME_LEFT', values=get_conv('o_x_stage2'))
self.ob = tf.convert_to_tensor(params['o_stage2'][1].astype(np.float32))
self.ob = tf.Variable(self.ob)
# decoder branch
self.mc1 = M.ConvLayer(11, 128, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv('Mconv1_stage2'))
self.mc2 = M.ConvLayer(11, 128, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv('Mconv2_stage2'))
self.mc3 = M.ConvLayer(11, 128, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv('Mconv3_stage2'))
self.mc4 = M.ConvLayer(1, 128, activation=M.PARAM_RELU, values=get_conv('Mconv4_stage2'))
self.mc5 = M.ConvLayer(1, 15, values=get_conv('Mconv5_stage2'))
def forward(self, dt1, dt2, centermap):
#init enc
e = dt1
e = self.c1_s1(e)
e = tf.pad(e, [[0,0],[0,1],[0,1],[0,0]], mode='SYMMETRIC')
e = self.p1_s1(e)
e = self.c2_s1(e)
e = tf.pad(e, [[0,0],[0,1],[0,1],[0,0]], mode='SYMMETRIC')
e = self.p2_s1(e)
e = self.c3_s1(e)
e = tf.pad(e, [[0,0],[0,1],[0,1],[0,0]], mode='SYMMETRIC')
e = self.p3_s1(e)
e = self.c4_s1(e)
e = self.c5_s1(e)
e = self.c6_s1(e)
e = self.c7_s1(e)
# frame encoding
f = dt2
f = self.c1_s2(f)
f = tf.pad(f, [[0,0],[0,1],[0,1],[0,0]], mode='SYMMETRIC')
f = self.p1_s2(f)
f = self.c2_s2(f)
f = tf.pad(f, [[0,0],[0,1],[0,1],[0,0]], mode='SYMMETRIC')
f = self.p2_s2(f)
f = self.c3_s2(f)
f = tf.pad(f, [[0,0],[0,1],[0,1],[0,0]], mode='SYMMETRIC')
f = self.p3_s2(f)
f = self.c4_s2(f)
# centermap pooling
x = tf.pad(centermap, [[0,0],[0,1],[0,1],[0,0]], mode='SYMMETRIC')
x = self.pool(x)
# LSTM branch
x = tf.concat([f, e, x], axis=-1)
g = self.g(x) + self.gb
i = self.i(x) + self.ib
o = self.o(x) + self.ob
g = tf.tanh(g)
i = tf.sigmoid(i)
o = tf.sigmoid(o)
c = g * i
h = o * tf.tanh(c)
# decoder branch
x = self.mc1(h)
x = self.mc2(x)
x = self.mc3(x)
x = self.mc4(x)
out = self.mc5(x)
return out
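# With no previous hidden or cell state, the stage-0 cell above degenerates to
#   g = tanh(Wg*x + bg),  i = sigmoid(Wi*x + bi),  o = sigmoid(Wo*x + bo)
#   c = g * i,            h = o * tanh(c)
# i.e. the forget path drops out entirely. A minimal NumPy sketch of the same
# gate arithmetic (illustrative only; the helper name is not part of the model):
def _lstm_stage0_cell_np(gx, ix, ox):
    # gx / ix / ox: pre-activation gate maps (conv responses plus biases)
    g = np.tanh(gx)
    i = 1.0 / (1.0 + np.exp(-ix))
    o = 1.0 / (1.0 + np.exp(-ox))
    c = g * i
    h = o * np.tanh(c)
    return h, c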
class Stage1(M.Model):
def initialize(self):
# frame encoding
self.c1_s2 = M.ConvLayer(9, 128, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv2('conv1_stage2'))
self.p1_s2 = M.MaxPool(3, 2, pad='VALID')
self.c2_s2 = M.ConvLayer(9, 128, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv2('conv2_stage2'))
self.p2_s2 = M.MaxPool(3, 2, pad='VALID')
self.c3_s2 = M.ConvLayer(9, 128, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv2('conv3_stage2'))
self.p3_s2 = M.MaxPool(3, 2, pad='VALID')
self.c4_s2 = M.ConvLayer(5, 32, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv2('conv4_stage2'))
# center map
self.pool = M.AvgPool(9,8, pad='VALID')
# lstm
self.gx = M.ConvLayer(3, 48, pad='SAME_LEFT', values=get_conv2('g_x_stage3'))
self.gh = M.ConvLayer(3, 48, pad='SAME_LEFT', values=get_conv2('g_h_stage3'))
self.gb = tf.convert_to_tensor(params2['g_stage3'][1].astype(np.float32))
self.gb = tf.Variable(self.gb)
self.fx = M.ConvLayer(3, 48, pad='SAME_LEFT', values=get_conv2('f_x_stage3'))
self.fh = M.ConvLayer(3, 48, pad='SAME_LEFT', values=get_conv2('f_h_stage3'))
self.fb = tf.convert_to_tensor(params2['f_stage3'][1].astype(np.float32))
self.fb = tf.Variable(self.fb)
self.ox = M.ConvLayer(3, 48, pad='SAME_LEFT', values=get_conv2('o_x_stage3'))
self.oh = M.ConvLayer(3, 48, pad='SAME_LEFT', values=get_conv2('o_h_stage3'))
self.ob = tf.convert_to_tensor(params2['o_stage3'][1].astype(np.float32))
self.ob = tf.Variable(self.ob)
self.ix = M.ConvLayer(3, 48, pad='SAME_LEFT', values=get_conv2('i_x_stage3'))
self.ih = M.ConvLayer(3, 48, pad='SAME_LEFT', values=get_conv2('i_h_stage3'))
self.ib = tf.convert_to_tensor(params2['i_stage3'][1].astype(np.float32))
self.ib = tf.Variable(self.ib)
# decoder branch
self.mc1 = M.ConvLayer(11, 128, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv2('Mres1_stage3'))
self.mc2 = M.ConvLayer(11, 128, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv2('Mres2_stage3'))
self.mc3 = M.ConvLayer(11, 128, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv2('Mres3_stage3'))
self.mc4 = M.ConvLayer(1, 128, activation=M.PARAM_RELU, values=get_conv2('Mres4_stage3'))
self.mc5 = M.ConvLayer(1, 15, values=get_conv2('Mres5_stage3'))
def forward(self, x, hmap, centermap, h, c):
# frame encoding
f = x
f = self.c1_s2(f)
f = tf.pad(f, [[0,0],[0,1],[0,1],[0,0]], mode='SYMMETRIC')
f = self.p1_s2(f)
f = self.c2_s2(f)
f = tf.pad(f, [[0,0],[0,1],[0,1],[0,0]], mode='SYMMETRIC')
f = self.p2_s2(f)
f = self.c3_s2(f)
f = tf.pad(f, [[0,0],[0,1],[0,1],[0,0]], mode='SYMMETRIC')
f = self.p3_s2(f)
f = self.c4_s2(f)
# centermap pooling
ce = tf.pad(centermap, [[0,0],[0,1],[0,1],[0,0]], mode='SYMMETRIC')
ce = self.pool(ce)
# lstm branch
x = tf.concat([f, hmap, ce], axis=-1)
gx = self.gx(x)
gh = self.gh(h)
ox = self.ox(x)
oh = self.oh(h)
fx = self.fx(x)
fh = self.fh(h)
ix = self.ix(x)
ih = self.ih(h)
g = tf.tanh(gx + gh + self.gb)
o = tf.sigmoid(ox + oh + self.ob)
i = tf.sigmoid(ix + ih + self.ib)
f = tf.sigmoid(fx + fh + self.fb)
c = f*c + i*g
h = o * tf.tanh(c)
# decoder branch
x = self.mc1(h)
x = self.mc2(x)
x = self.mc3(x)
x = self.mc4(x)
out = self.mc5(x)
return out
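# For the recurrent stages each gate sums an input conv (self.*x), a hidden
# state conv (self.*h) and a bias, and the cell follows the standard LSTM
# update on the 46x46x48 feature maps:
#   c_t = f * c_{t-1} + i * g,   h_t = o * tanh(c_t)
# which is exactly what forward() computes above.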
class ModelBundle(M.Model):
def initialize(self):
self.s0 = Stage0()
self.s1 = Stage1()
if __name__=='__main__':
mods = ModelBundle()
mod = mods.s0
x = np.ones([1,368,368,3]).astype(np.float32)
cent = np.ones([1,368,368,1]).astype(np.float32)
x = mod(x, x, cent)
out = np.transpose(x,[0,3,1,2])
print(out)
print(out.shape)
input('Test deploy1 finished. Input for testing deploy2')
mod = mods.s1
x = np.ones([1,368,368,3]).astype(np.float32)
cent = np.ones([1,368,368,1]).astype(np.float32)
h = c = np.ones([1,46,46, 48]).astype(np.float32)
hmap = np.ones([1,46,46, 15]).astype(np.float32)
x[:,-1] = 0
x = mod(x, hmap, cent, h, c)
out = np.transpose(x,[0,3,1,2])
print(out)
print(out.shape)
input('Test deploy2 finished. Input for saving converted weights ')
saver = M.Saver(mods)
saver.save('./LSTMPM/lstmpm.ckpt')
|
[
"model3.Saver",
"numpy.load",
"model3.AvgPool",
"model3.MaxPool",
"tensorflow.pad",
"numpy.transpose",
"tensorflow.concat",
"numpy.ones",
"tensorflow.Variable",
"tensorflow.tanh",
"tensorflow.sigmoid"
] |
[((7984, 8013), 'numpy.transpose', 'np.transpose', (['x', '[0, 3, 1, 2]'], {}), '(x, [0, 3, 1, 2])\n', (7996, 8013), True, 'import numpy as np\n'), ((8365, 8394), 'numpy.transpose', 'np.transpose', (['x', '[0, 3, 1, 2]'], {}), '(x, [0, 3, 1, 2])\n', (8377, 8394), True, 'import numpy as np\n'), ((8500, 8513), 'model3.Saver', 'M.Saver', (['mods'], {}), '(mods)\n', (8507, 8513), True, 'import model3 as M\n'), ((75, 99), 'numpy.load', 'np.load', (['"""lstmpm_d1.npy"""'], {}), "('lstmpm_d1.npy')\n", (82, 99), True, 'import numpy as np\n'), ((117, 141), 'numpy.load', 'np.load', (['"""lstmpm_d2.npy"""'], {}), "('lstmpm_d2.npy')\n", (124, 141), True, 'import numpy as np\n'), ((665, 693), 'model3.MaxPool', 'M.MaxPool', (['(3)', '(2)'], {'pad': '"""VALID"""'}), "(3, 2, pad='VALID')\n", (674, 693), True, 'import model3 as M\n'), ((819, 847), 'model3.MaxPool', 'M.MaxPool', (['(3)', '(2)'], {'pad': '"""VALID"""'}), "(3, 2, pad='VALID')\n", (828, 847), True, 'import model3 as M\n'), ((973, 1001), 'model3.MaxPool', 'M.MaxPool', (['(3)', '(2)'], {'pad': '"""VALID"""'}), "(3, 2, pad='VALID')\n", (982, 1001), True, 'import model3 as M\n'), ((1527, 1555), 'model3.MaxPool', 'M.MaxPool', (['(3)', '(2)'], {'pad': '"""VALID"""'}), "(3, 2, pad='VALID')\n", (1536, 1555), True, 'import model3 as M\n'), ((1681, 1709), 'model3.MaxPool', 'M.MaxPool', (['(3)', '(2)'], {'pad': '"""VALID"""'}), "(3, 2, pad='VALID')\n", (1690, 1709), True, 'import model3 as M\n'), ((1835, 1863), 'model3.MaxPool', 'M.MaxPool', (['(3)', '(2)'], {'pad': '"""VALID"""'}), "(3, 2, pad='VALID')\n", (1844, 1863), True, 'import model3 as M\n'), ((2003, 2031), 'model3.AvgPool', 'M.AvgPool', (['(9)', '(8)'], {'pad': '"""VALID"""'}), "(9, 8, pad='VALID')\n", (2012, 2031), True, 'import model3 as M\n'), ((2207, 2227), 'tensorflow.Variable', 'tf.Variable', (['self.gb'], {}), '(self.gb)\n', (2218, 2227), True, 'import tensorflow as tf\n'), ((2393, 2413), 'tensorflow.Variable', 'tf.Variable', (['self.ib'], {}), '(self.ib)\n', (2404, 2413), True, 'import tensorflow as tf\n'), ((2579, 2599), 'tensorflow.Variable', 'tf.Variable', (['self.ob'], {}), '(self.ob)\n', (2590, 2599), True, 'import tensorflow as tf\n'), ((3201, 3262), 'tensorflow.pad', 'tf.pad', (['e', '[[0, 0], [0, 1], [0, 1], [0, 0]]'], {'mode': '"""SYMMETRIC"""'}), "(e, [[0, 0], [0, 1], [0, 1], [0, 0]], mode='SYMMETRIC')\n", (3207, 3262), True, 'import tensorflow as tf\n'), ((3302, 3363), 'tensorflow.pad', 'tf.pad', (['e', '[[0, 0], [0, 1], [0, 1], [0, 0]]'], {'mode': '"""SYMMETRIC"""'}), "(e, [[0, 0], [0, 1], [0, 1], [0, 0]], mode='SYMMETRIC')\n", (3308, 3363), True, 'import tensorflow as tf\n'), ((3403, 3464), 'tensorflow.pad', 'tf.pad', (['e', '[[0, 0], [0, 1], [0, 1], [0, 0]]'], {'mode': '"""SYMMETRIC"""'}), "(e, [[0, 0], [0, 1], [0, 1], [0, 0]], mode='SYMMETRIC')\n", (3409, 3464), True, 'import tensorflow as tf\n'), ((3616, 3677), 'tensorflow.pad', 'tf.pad', (['f', '[[0, 0], [0, 1], [0, 1], [0, 0]]'], {'mode': '"""SYMMETRIC"""'}), "(f, [[0, 0], [0, 1], [0, 1], [0, 0]], mode='SYMMETRIC')\n", (3622, 3677), True, 'import tensorflow as tf\n'), ((3717, 3778), 'tensorflow.pad', 'tf.pad', (['f', '[[0, 0], [0, 1], [0, 1], [0, 0]]'], {'mode': '"""SYMMETRIC"""'}), "(f, [[0, 0], [0, 1], [0, 1], [0, 0]], mode='SYMMETRIC')\n", (3723, 3778), True, 'import tensorflow as tf\n'), ((3818, 3879), 'tensorflow.pad', 'tf.pad', (['f', '[[0, 0], [0, 1], [0, 1], [0, 0]]'], {'mode': '"""SYMMETRIC"""'}), "(f, [[0, 0], [0, 1], [0, 1], [0, 0]], mode='SYMMETRIC')\n", (3824, 3879), True, 'import tensorflow as tf\n'), 
((3943, 4012), 'tensorflow.pad', 'tf.pad', (['centermap', '[[0, 0], [0, 1], [0, 1], [0, 0]]'], {'mode': '"""SYMMETRIC"""'}), "(centermap, [[0, 0], [0, 1], [0, 1], [0, 0]], mode='SYMMETRIC')\n", (3949, 4012), True, 'import tensorflow as tf\n'), ((4049, 4078), 'tensorflow.concat', 'tf.concat', (['[f, e, x]'], {'axis': '(-1)'}), '([f, e, x], axis=-1)\n', (4058, 4078), True, 'import tensorflow as tf\n'), ((4165, 4175), 'tensorflow.tanh', 'tf.tanh', (['g'], {}), '(g)\n', (4172, 4175), True, 'import tensorflow as tf\n'), ((4182, 4195), 'tensorflow.sigmoid', 'tf.sigmoid', (['i'], {}), '(i)\n', (4192, 4195), True, 'import tensorflow as tf\n'), ((4202, 4215), 'tensorflow.sigmoid', 'tf.sigmoid', (['o'], {}), '(o)\n', (4212, 4215), True, 'import tensorflow as tf\n'), ((4572, 4600), 'model3.MaxPool', 'M.MaxPool', (['(3)', '(2)'], {'pad': '"""VALID"""'}), "(3, 2, pad='VALID')\n", (4581, 4600), True, 'import model3 as M\n'), ((4727, 4755), 'model3.MaxPool', 'M.MaxPool', (['(3)', '(2)'], {'pad': '"""VALID"""'}), "(3, 2, pad='VALID')\n", (4736, 4755), True, 'import model3 as M\n'), ((4882, 4910), 'model3.MaxPool', 'M.MaxPool', (['(3)', '(2)'], {'pad': '"""VALID"""'}), "(3, 2, pad='VALID')\n", (4891, 4910), True, 'import model3 as M\n'), ((5051, 5079), 'model3.AvgPool', 'M.AvgPool', (['(9)', '(8)'], {'pad': '"""VALID"""'}), "(9, 8, pad='VALID')\n", (5060, 5079), True, 'import model3 as M\n'), ((5337, 5357), 'tensorflow.Variable', 'tf.Variable', (['self.gb'], {}), '(self.gb)\n', (5348, 5357), True, 'import tensorflow as tf\n'), ((5606, 5626), 'tensorflow.Variable', 'tf.Variable', (['self.fb'], {}), '(self.fb)\n', (5617, 5626), True, 'import tensorflow as tf\n'), ((5875, 5895), 'tensorflow.Variable', 'tf.Variable', (['self.ob'], {}), '(self.ob)\n', (5886, 5895), True, 'import tensorflow as tf\n'), ((6144, 6164), 'tensorflow.Variable', 'tf.Variable', (['self.ib'], {}), '(self.ib)\n', (6155, 6164), True, 'import tensorflow as tf\n'), ((6776, 6837), 'tensorflow.pad', 'tf.pad', (['f', '[[0, 0], [0, 1], [0, 1], [0, 0]]'], {'mode': '"""SYMMETRIC"""'}), "(f, [[0, 0], [0, 1], [0, 1], [0, 0]], mode='SYMMETRIC')\n", (6782, 6837), True, 'import tensorflow as tf\n'), ((6877, 6938), 'tensorflow.pad', 'tf.pad', (['f', '[[0, 0], [0, 1], [0, 1], [0, 0]]'], {'mode': '"""SYMMETRIC"""'}), "(f, [[0, 0], [0, 1], [0, 1], [0, 0]], mode='SYMMETRIC')\n", (6883, 6938), True, 'import tensorflow as tf\n'), ((6978, 7039), 'tensorflow.pad', 'tf.pad', (['f', '[[0, 0], [0, 1], [0, 1], [0, 0]]'], {'mode': '"""SYMMETRIC"""'}), "(f, [[0, 0], [0, 1], [0, 1], [0, 0]], mode='SYMMETRIC')\n", (6984, 7039), True, 'import tensorflow as tf\n'), ((7104, 7173), 'tensorflow.pad', 'tf.pad', (['centermap', '[[0, 0], [0, 1], [0, 1], [0, 0]]'], {'mode': '"""SYMMETRIC"""'}), "(centermap, [[0, 0], [0, 1], [0, 1], [0, 0]], mode='SYMMETRIC')\n", (7110, 7173), True, 'import tensorflow as tf\n'), ((7212, 7245), 'tensorflow.concat', 'tf.concat', (['[f, hmap, ce]'], {'axis': '(-1)'}), '([f, hmap, ce], axis=-1)\n', (7221, 7245), True, 'import tensorflow as tf\n'), ((7400, 7426), 'tensorflow.tanh', 'tf.tanh', (['(gx + gh + self.gb)'], {}), '(gx + gh + self.gb)\n', (7407, 7426), True, 'import tensorflow as tf\n'), ((7433, 7462), 'tensorflow.sigmoid', 'tf.sigmoid', (['(ox + oh + self.ob)'], {}), '(ox + oh + self.ob)\n', (7443, 7462), True, 'import tensorflow as tf\n'), ((7469, 7498), 'tensorflow.sigmoid', 'tf.sigmoid', (['(ix + ih + self.ib)'], {}), '(ix + ih + self.ib)\n', (7479, 7498), True, 'import tensorflow as tf\n'), ((7505, 7534), 'tensorflow.sigmoid', 
'tf.sigmoid', (['(fx + fh + self.fb)'], {}), '(fx + fh + self.fb)\n', (7515, 7534), True, 'import tensorflow as tf\n'), ((4240, 4250), 'tensorflow.tanh', 'tf.tanh', (['c'], {}), '(c)\n', (4247, 4250), True, 'import tensorflow as tf\n'), ((7563, 7573), 'tensorflow.tanh', 'tf.tanh', (['c'], {}), '(c)\n', (7570, 7573), True, 'import tensorflow as tf\n'), ((7864, 7889), 'numpy.ones', 'np.ones', (['[1, 368, 368, 3]'], {}), '([1, 368, 368, 3])\n', (7871, 7889), True, 'import numpy as np\n'), ((7914, 7939), 'numpy.ones', 'np.ones', (['[1, 368, 368, 1]'], {}), '([1, 368, 368, 1])\n', (7921, 7939), True, 'import numpy as np\n'), ((8120, 8145), 'numpy.ones', 'np.ones', (['[1, 368, 368, 3]'], {}), '([1, 368, 368, 3])\n', (8127, 8145), True, 'import numpy as np\n'), ((8170, 8195), 'numpy.ones', 'np.ones', (['[1, 368, 368, 1]'], {}), '([1, 368, 368, 1])\n', (8177, 8195), True, 'import numpy as np\n'), ((8221, 8245), 'numpy.ones', 'np.ones', (['[1, 46, 46, 48]'], {}), '([1, 46, 46, 48])\n', (8228, 8245), True, 'import numpy as np\n'), ((8271, 8295), 'numpy.ones', 'np.ones', (['[1, 46, 46, 15]'], {}), '([1, 46, 46, 15])\n', (8278, 8295), True, 'import numpy as np\n')]
|
import gpflow
import matplotlib.pyplot as plt
import numpy as np
from robustgp import ConditionalVariance
X = np.random.rand(150, 1)
Y = 0.8 * np.cos(10 * X) + 1.2 * np.sin(8 * X + 0.3) + np.cos(17 * X) * 1.2 + np.random.randn(*X.shape) * 0.1
gpr = gpflow.models.GPR((X, Y), gpflow.kernels.SquaredExponential())
opt = gpflow.optimizers.Scipy()
opt_logs = opt.minimize(gpr.training_loss, gpr.trainable_variables, options=dict(maxiter=100))
k = gpflow.kernels.SquaredExponential()
gpflow.utilities.multiple_assign(k, gpflow.utilities.read_values(gpr.kernel))
Z_initer = ConditionalVariance()
sp = gpflow.models.SGPR((X, Y), k, Z_initer.compute_initialisation(X, 6, k)[0])
gpflow.utilities.multiple_assign(sp, gpflow.utilities.read_values(gpr))
pX = np.linspace(0, 1, 3000)[:, None]
m, v = sp.predict_f(pX)
ipm, _ = sp.predict_f(sp.inducing_variable.Z.value())
fig, (ax1, ax2) = plt.subplots(2, 1)
ax1.plot(X, Y, 'x')
ax1.plot(pX, m)
ax1.plot(sp.inducing_variable.Z.value(), ipm, 'o', color='C3')
deviation = (2 * (v + sp.likelihood.variance.value()) ** 0.5).numpy().flatten()
ax1.fill_between(pX.flatten(), m.numpy().flatten() - deviation, m.numpy().flatten() + deviation, alpha=0.3)
ax1.axvline(pX[np.argmax(v)].item(), color='C2')
ax1.set_ylabel("y")
ax2.plot(pX, v ** 0.5)
ax2.plot(sp.inducing_variable.Z.value(), sp.inducing_variable.Z.value() * 0.0, 'o', color='C3')
ax2.axvline(pX[np.argmax(v)].item(), color='C2')
ax2.set_xlabel("input $x$")
ax2.set_ylabel(r"$\mathbb{V}\,[p(f(x) \mid \mathbf{u})]^{0.5}$")
plt.show()
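# The script fits an exact GPR to the toy 1-D data, copies the trained kernel
# hyperparameters into a 6-inducing-point SGPR initialised with robustgp's
# ConditionalVariance, and plots the sparse posterior; the vertical line marks
# the input with the largest remaining predictive variance, and the lower
# panel shows sqrt(V[f(x) | u]).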
|
[
"robustgp.ConditionalVariance",
"gpflow.kernels.SquaredExponential",
"matplotlib.pyplot.show",
"numpy.random.randn",
"numpy.argmax",
"gpflow.optimizers.Scipy",
"gpflow.utilities.read_values",
"numpy.sin",
"numpy.linspace",
"numpy.cos",
"numpy.random.rand",
"matplotlib.pyplot.subplots"
] |
[((111, 133), 'numpy.random.rand', 'np.random.rand', (['(150)', '(1)'], {}), '(150, 1)\n', (125, 133), True, 'import numpy as np\n'), ((320, 345), 'gpflow.optimizers.Scipy', 'gpflow.optimizers.Scipy', ([], {}), '()\n', (343, 345), False, 'import gpflow\n'), ((446, 481), 'gpflow.kernels.SquaredExponential', 'gpflow.kernels.SquaredExponential', ([], {}), '()\n', (479, 481), False, 'import gpflow\n'), ((572, 593), 'robustgp.ConditionalVariance', 'ConditionalVariance', ([], {}), '()\n', (591, 593), False, 'from robustgp import ConditionalVariance\n'), ((882, 900), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (894, 900), True, 'import matplotlib.pyplot as plt\n'), ((1513, 1523), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1521, 1523), True, 'import matplotlib.pyplot as plt\n'), ((277, 312), 'gpflow.kernels.SquaredExponential', 'gpflow.kernels.SquaredExponential', ([], {}), '()\n', (310, 312), False, 'import gpflow\n'), ((518, 558), 'gpflow.utilities.read_values', 'gpflow.utilities.read_values', (['gpr.kernel'], {}), '(gpr.kernel)\n', (546, 558), False, 'import gpflow\n'), ((711, 744), 'gpflow.utilities.read_values', 'gpflow.utilities.read_values', (['gpr'], {}), '(gpr)\n', (739, 744), False, 'import gpflow\n'), ((752, 775), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(3000)'], {}), '(0, 1, 3000)\n', (763, 775), True, 'import numpy as np\n'), ((212, 237), 'numpy.random.randn', 'np.random.randn', (['*X.shape'], {}), '(*X.shape)\n', (227, 237), True, 'import numpy as np\n'), ((189, 203), 'numpy.cos', 'np.cos', (['(17 * X)'], {}), '(17 * X)\n', (195, 203), True, 'import numpy as np\n'), ((144, 158), 'numpy.cos', 'np.cos', (['(10 * X)'], {}), '(10 * X)\n', (150, 158), True, 'import numpy as np\n'), ((167, 186), 'numpy.sin', 'np.sin', (['(8 * X + 0.3)'], {}), '(8 * X + 0.3)\n', (173, 186), True, 'import numpy as np\n'), ((1203, 1215), 'numpy.argmax', 'np.argmax', (['v'], {}), '(v)\n', (1212, 1215), True, 'import numpy as np\n'), ((1391, 1403), 'numpy.argmax', 'np.argmax', (['v'], {}), '(v)\n', (1400, 1403), True, 'import numpy as np\n')]
|
import pdb
import time
import lib.tf_silent
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.colors import Normalize
from matplotlib.gridspec import GridSpec
import os
import pickle
import argparse
from lib.pinn import PINN
from lib.network import Network
from lib.optimizer import Optimizer
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--maxiter', type=int, default=2000)
parser.add_argument('-ntr', '--num-train-samples', type=int, default=10000)
parser.add_argument('-nte', '--num-test-samples', type=int, default=100)
parser.add_argument('-n', '--network', type=str, default='pinn')
parser.add_argument('-l', '--loss', type=str, default='l2')
parser.add_argument('-gi', '--gradient-interval', type=int, default=100)
parser.add_argument('--gt-path', type=str, default='data/pinn.pkl')
return parser.parse_known_args()[0]
def uv(network, xy):
"""
Compute flow velocities (u, v) for the network with output (psi, p).
Args:
xy: network input variables as ndarray.
Returns:
(u, v) as ndarray.
"""
xy = tf.constant(xy)
with tf.GradientTape() as g:
g.watch(xy)
psi_p = network(xy)
psi_p_j = g.batch_jacobian(psi_p, xy)
u = psi_p_j[..., 0, 1]
v = -psi_p_j[..., 0, 0]
return u.numpy(), v.numpy()
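# u and v above are read off the stream function psi (the network's first
# output channel) via the batch Jacobian: u = d(psi)/dy, v = -d(psi)/dx, so the
# velocity field is divergence-free by construction. A rough central-difference
# cross-check (illustrative only; the helper name and eps are arbitrary):
def _uv_fd_check(network, xy, eps=1e-4):
    """Approximate (u, v) by finite differences of psi for comparison."""
    dx = np.array([[eps, 0.0]])
    dy = np.array([[0.0, eps]])
    psi = lambda p: network.predict(p, batch_size=len(p))[..., 0]
    u = (psi(xy + dy) - psi(xy - dy)) / (2 * eps)
    v = -(psi(xy + dx) - psi(xy - dx)) / (2 * eps)
    return u, v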
def contour(grid, x, y, z, title, levels=50):
"""
Contour plot.
Args:
grid: plot position.
x: x-array.
y: y-array.
z: z-array.
title: title string.
levels: number of contour lines.
"""
# get the value range
vmin = -2e-1
vmax = 2e-1
if (title == 'psi'):
vmax = 1.2e-1
vmin = -1e-1
if (title == 'p'):
vmax = 6.1e-1
vmin = -5e-1
if (title == 'u'):
vmax = 1.1e+0
vmin = -2e-1
if (title == 'v'):
vmax = 2.1e-1
vmin = -2e-1
if (title == 'dpsi'):
vmax = 1.1e-2
vmin = 0.0
if (title == 'dp'):
vmax = 4.1e-1
vmin = 0.0
if (title == 'du'):
vmax = 1.1e-1
vmin = 0.0
if (title == 'dv'):
vmax = 8.1e-2
vmin = 0.0
# plot a contour
plt.subplot(grid)
print(title, vmin, vmax)
plt.contour(x, y, z, colors='k', linewidths=0.2, levels=levels, vmin=vmin, vmax=vmax)
plt.contourf(x, y, z, cmap='rainbow', levels=levels, vmin=vmin, vmax=vmax)
plt.title(title)
m = plt.cm.ScalarMappable(cmap='rainbow', norm=Normalize(vmin=vmin, vmax=vmax))
m.set_array(z)
m.set_clim(vmin, vmax)
cbar = plt.colorbar(m, pad=0.03, aspect=25, format='%.0e')
cbar.mappable.set_clim(vmin, vmax)
if __name__ == '__main__':
"""
Test the physics informed neural network (PINN) model
for the cavity flow governed by the steady Navier-Stokes equation.
"""
args = parse_args()
# number of training samples
num_train_samples = args.num_train_samples
# number of test samples
num_test_samples = args.num_test_samples
# inlet flow velocity
u0 = 1
# density
rho = 1
# viscosity
nu = 0.01
# build a core network model
network = Network().build()
network.summary()
# build a PINN model
model = PINN(network, rho=rho, nu=nu).build()
# create training input
xy_eqn = np.random.rand(num_train_samples, 2)
xy_ub = np.random.rand(num_train_samples//2, 2) # top-bottom boundaries
xy_ub[..., 1] = np.round(xy_ub[..., 1]) # y-position is 0 or 1
xy_lr = np.random.rand(num_train_samples//2, 2) # left-right boundaries
xy_lr[..., 0] = np.round(xy_lr[..., 0]) # x-position is 0 or 1
xy_bnd = np.random.permutation(np.concatenate([xy_ub, xy_lr]))
x_train = [xy_eqn, xy_bnd]
# create training output
zeros = np.zeros((num_train_samples, 2))
uv_bnd = np.zeros((num_train_samples, 2))
uv_bnd[..., 0] = u0 * np.floor(xy_bnd[..., 1])
y_train = [zeros, zeros, uv_bnd]
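    # Boundary conditions encoded above: np.floor(y) equals 1 only on the top
    # lid (y == 1) and 0 elsewhere, so uv_bnd imposes u = u0 on the moving lid
    # and u = v = 0 on the other walls, the standard lid-driven cavity setup.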
# train the model using L-BFGS-B algorithm
optimizer = Optimizer(model=model, x_train=x_train, y_train=y_train, dict_params=args.__dict__)
optimizer.fit()
# create meshgrid coordinates (x, y) for test plots
x = np.linspace(0, 1, num_test_samples)
y = np.linspace(0, 1, num_test_samples)
x, y = np.meshgrid(x, y)
xy = np.stack([x.flatten(), y.flatten()], axis=-1)
# predict (psi, p)
psi_p = network.predict(xy, batch_size=len(xy))
psi, p = [ psi_p[..., i].reshape(x.shape) for i in range(psi_p.shape[-1]) ]
# compute (u, v)
u, v = uv(network, xy)
u = u.reshape(x.shape)
v = v.reshape(x.shape)
if os.path.isfile(args.gt_path):
with open(args.gt_path, 'rb') as f:
data = pickle.load(f)
x_gt, y_gt, psi_gt, p_gt, u_gt, v_gt = data
fig = plt.figure(figsize=(6, 5))
gs = GridSpec(2, 2)
contour(gs[0, 0], x, y, np.abs(psi - psi_gt), 'dpsi')
contour(gs[0, 1], x, y, np.abs(p - p_gt), 'dp')
contour(gs[1, 0], x, y, np.abs(u - u_gt), 'du')
contour(gs[1, 1], x, y, np.abs(v - v_gt), 'dv')
plt.tight_layout()
plt.savefig(os.path.join('figures', list(args.__dict__.values())[:-1].__str__() + str(time.time()) +
'_error.png'))
plt.show()
plt.close()
fig = plt.figure(figsize=(6, 5))
gs = GridSpec(2, 2)
contour(gs[0, 0], x, y, psi, 'psi')
contour(gs[0, 1], x, y, p, 'p')
contour(gs[1, 0], x, y, u, 'u')
contour(gs[1, 1], x, y, v, 'v')
plt.tight_layout()
plt.savefig(os.path.join('figures', list(args.__dict__.values())[:-1].__str__() + str(time.time()) + '.png'))
plt.show()
plt.close()
else:
# plot test results
fig = plt.figure(figsize=(6, 5))
gs = GridSpec(2, 2)
contour(gs[0, 0], x, y, psi, 'psi')
contour(gs[0, 1], x, y, p, 'p')
contour(gs[1, 0], x, y, u, 'u')
contour(gs[1, 1], x, y, v, 'v')
data = [x, y, psi, p, u, v]
with open(args.gt_path, 'wb') as f:
pickle.dump(data, f)
plt.tight_layout()
plt.savefig(os.path.join('figures', list(args.__dict__.values())[:-1].__str__() + str(time.time()) + '.png'))
plt.show()
plt.close()
|
[
"matplotlib.pyplot.title",
"pickle.dump",
"numpy.abs",
"argparse.ArgumentParser",
"numpy.floor",
"os.path.isfile",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.contourf",
"matplotlib.pyplot.contour",
"pickle.load",
"numpy.round",
"matplotlib.pyplot.tight_layout",
"numpy.meshgrid",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.close",
"matplotlib.pyplot.colorbar",
"numpy.linspace",
"lib.pinn.PINN",
"matplotlib.pyplot.show",
"lib.network.Network",
"tensorflow.constant",
"numpy.concatenate",
"matplotlib.pyplot.subplot",
"lib.optimizer.Optimizer",
"numpy.zeros",
"time.time",
"numpy.random.rand",
"matplotlib.gridspec.GridSpec",
"tensorflow.GradientTape"
] |
[((395, 420), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (418, 420), False, 'import argparse\n'), ((1189, 1204), 'tensorflow.constant', 'tf.constant', (['xy'], {}), '(xy)\n', (1200, 1204), True, 'import tensorflow as tf\n'), ((2285, 2302), 'matplotlib.pyplot.subplot', 'plt.subplot', (['grid'], {}), '(grid)\n', (2296, 2302), True, 'import matplotlib.pyplot as plt\n'), ((2336, 2425), 'matplotlib.pyplot.contour', 'plt.contour', (['x', 'y', 'z'], {'colors': '"""k"""', 'linewidths': '(0.2)', 'levels': 'levels', 'vmin': 'vmin', 'vmax': 'vmax'}), "(x, y, z, colors='k', linewidths=0.2, levels=levels, vmin=vmin,\n vmax=vmax)\n", (2347, 2425), True, 'import matplotlib.pyplot as plt\n'), ((2426, 2500), 'matplotlib.pyplot.contourf', 'plt.contourf', (['x', 'y', 'z'], {'cmap': '"""rainbow"""', 'levels': 'levels', 'vmin': 'vmin', 'vmax': 'vmax'}), "(x, y, z, cmap='rainbow', levels=levels, vmin=vmin, vmax=vmax)\n", (2438, 2500), True, 'import matplotlib.pyplot as plt\n'), ((2505, 2521), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2514, 2521), True, 'import matplotlib.pyplot as plt\n'), ((2666, 2717), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['m'], {'pad': '(0.03)', 'aspect': '(25)', 'format': '"""%.0e"""'}), "(m, pad=0.03, aspect=25, format='%.0e')\n", (2678, 2717), True, 'import matplotlib.pyplot as plt\n'), ((3409, 3445), 'numpy.random.rand', 'np.random.rand', (['num_train_samples', '(2)'], {}), '(num_train_samples, 2)\n', (3423, 3445), True, 'import numpy as np\n'), ((3458, 3499), 'numpy.random.rand', 'np.random.rand', (['(num_train_samples // 2)', '(2)'], {}), '(num_train_samples // 2, 2)\n', (3472, 3499), True, 'import numpy as np\n'), ((3543, 3566), 'numpy.round', 'np.round', (['xy_ub[..., 1]'], {}), '(xy_ub[..., 1])\n', (3551, 3566), True, 'import numpy as np\n'), ((3611, 3652), 'numpy.random.rand', 'np.random.rand', (['(num_train_samples // 2)', '(2)'], {}), '(num_train_samples // 2, 2)\n', (3625, 3652), True, 'import numpy as np\n'), ((3696, 3719), 'numpy.round', 'np.round', (['xy_lr[..., 0]'], {}), '(xy_lr[..., 0])\n', (3704, 3719), True, 'import numpy as np\n'), ((3892, 3924), 'numpy.zeros', 'np.zeros', (['(num_train_samples, 2)'], {}), '((num_train_samples, 2))\n', (3900, 3924), True, 'import numpy as np\n'), ((3938, 3970), 'numpy.zeros', 'np.zeros', (['(num_train_samples, 2)'], {}), '((num_train_samples, 2))\n', (3946, 3970), True, 'import numpy as np\n'), ((4123, 4211), 'lib.optimizer.Optimizer', 'Optimizer', ([], {'model': 'model', 'x_train': 'x_train', 'y_train': 'y_train', 'dict_params': 'args.__dict__'}), '(model=model, x_train=x_train, y_train=y_train, dict_params=args.\n __dict__)\n', (4132, 4211), False, 'from lib.optimizer import Optimizer\n'), ((4292, 4327), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'num_test_samples'], {}), '(0, 1, num_test_samples)\n', (4303, 4327), True, 'import numpy as np\n'), ((4336, 4371), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'num_test_samples'], {}), '(0, 1, num_test_samples)\n', (4347, 4371), True, 'import numpy as np\n'), ((4383, 4400), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (4394, 4400), True, 'import numpy as np\n'), ((4720, 4748), 'os.path.isfile', 'os.path.isfile', (['args.gt_path'], {}), '(args.gt_path)\n', (4734, 4748), False, 'import os\n'), ((1214, 1231), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (1229, 1231), True, 'import tensorflow as tf\n'), ((3787, 3817), 'numpy.concatenate', 'np.concatenate', (['[xy_ub, xy_lr]'], {}), 
'([xy_ub, xy_lr])\n', (3801, 3817), True, 'import numpy as np\n'), ((3997, 4021), 'numpy.floor', 'np.floor', (['xy_bnd[..., 1]'], {}), '(xy_bnd[..., 1])\n', (4005, 4021), True, 'import numpy as np\n'), ((4894, 4920), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 5)'}), '(figsize=(6, 5))\n', (4904, 4920), True, 'import matplotlib.pyplot as plt\n'), ((4934, 4948), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['(2)', '(2)'], {}), '(2, 2)\n', (4942, 4948), False, 'from matplotlib.gridspec import GridSpec\n'), ((5187, 5205), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5203, 5205), True, 'import matplotlib.pyplot as plt\n'), ((5371, 5381), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5379, 5381), True, 'import matplotlib.pyplot as plt\n'), ((5390, 5401), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5399, 5401), True, 'import matplotlib.pyplot as plt\n'), ((5417, 5443), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 5)'}), '(figsize=(6, 5))\n', (5427, 5443), True, 'import matplotlib.pyplot as plt\n'), ((5457, 5471), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['(2)', '(2)'], {}), '(2, 2)\n', (5465, 5471), False, 'from matplotlib.gridspec import GridSpec\n'), ((5644, 5662), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5660, 5662), True, 'import matplotlib.pyplot as plt\n'), ((5789, 5799), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5797, 5799), True, 'import matplotlib.pyplot as plt\n'), ((5808, 5819), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5817, 5819), True, 'import matplotlib.pyplot as plt\n'), ((5872, 5898), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 5)'}), '(figsize=(6, 5))\n', (5882, 5898), True, 'import matplotlib.pyplot as plt\n'), ((5912, 5926), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['(2)', '(2)'], {}), '(2, 2)\n', (5920, 5926), False, 'from matplotlib.gridspec import GridSpec\n'), ((6212, 6230), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6228, 6230), True, 'import matplotlib.pyplot as plt\n'), ((6357, 6367), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6365, 6367), True, 'import matplotlib.pyplot as plt\n'), ((6376, 6387), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6385, 6387), True, 'import matplotlib.pyplot as plt\n'), ((2573, 2604), 'matplotlib.colors.Normalize', 'Normalize', ([], {'vmin': 'vmin', 'vmax': 'vmax'}), '(vmin=vmin, vmax=vmax)\n', (2582, 2604), False, 'from matplotlib.colors import Normalize\n'), ((3252, 3261), 'lib.network.Network', 'Network', ([], {}), '()\n', (3259, 3261), False, 'from lib.network import Network\n'), ((3329, 3358), 'lib.pinn.PINN', 'PINN', (['network'], {'rho': 'rho', 'nu': 'nu'}), '(network, rho=rho, nu=nu)\n', (3333, 3358), False, 'from lib.pinn import PINN\n'), ((4813, 4827), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4824, 4827), False, 'import pickle\n'), ((4981, 5001), 'numpy.abs', 'np.abs', (['(psi - psi_gt)'], {}), '(psi - psi_gt)\n', (4987, 5001), True, 'import numpy as np\n'), ((5043, 5059), 'numpy.abs', 'np.abs', (['(p - p_gt)'], {}), '(p - p_gt)\n', (5049, 5059), True, 'import numpy as np\n'), ((5099, 5115), 'numpy.abs', 'np.abs', (['(u - u_gt)'], {}), '(u - u_gt)\n', (5105, 5115), True, 'import numpy as np\n'), ((5155, 5171), 'numpy.abs', 'np.abs', (['(v - v_gt)'], {}), '(v - v_gt)\n', (5161, 5171), True, 'import numpy as np\n'), ((6183, 6203), 'pickle.dump', 'pickle.dump', (['data', 'f'], {}), 
'(data, f)\n', (6194, 6203), False, 'import pickle\n'), ((5300, 5311), 'time.time', 'time.time', ([], {}), '()\n', (5309, 5311), False, 'import time\n'), ((5757, 5768), 'time.time', 'time.time', ([], {}), '()\n', (5766, 5768), False, 'import time\n'), ((6325, 6336), 'time.time', 'time.time', ([], {}), '()\n', (6334, 6336), False, 'import time\n')]
|
import gzip
import pandas as pd
import numpy as np
import io
import os
import re
import torch
import torch.utils.data as data_utils
import subprocess
import zipfile
import zlib
from Bio import AlignIO
from Bio.SeqIO.FastaIO import FastaIterator, as_fasta
from Bio.Align.Applications import MuscleCommandline
class IndexTensorDataset:
"""
Identical to torch.utils.data.Dataset.TensorDataset, but __getitem__
also returns indices as last value in tuple
"""
def __init__(self, *tensors):
assert all(tensors[0].size(0) == tensor.size(0) for tensor in tensors)
self.tensors = tensors
def __getitem__(self, index):
t = [tensor[index] for tensor in self.tensors]
t.append(index)
return(tuple(t))
def __len__(self):
return self.tensors[0].size(0)
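# Returning the index alongside the tensors makes it easy to map a batch back
# to metadata such as the isolate IDs stored next to X and y. A minimal usage
# sketch (shapes are made up for illustration):
#
#   ds = IndexTensorDataset(torch.zeros(4, 3), torch.zeros(4))
#   X_i, y_i, i = ds[2]   # i == 2, usable as data['isolates'][i]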
class GeneDataset:
"""
Container object that provides access to the PyTorch Dataset and
Dataloader objects needed for one experiment
"""
def __init__(self, data_file, batch_size, test_split, shuffle_dataset,
random_seed, validation_split=0):
# Load tensor data
data = torch.load(data_file)
dataset = IndexTensorDataset(data['X'], data['y'])
# Test / train split
dataset_size = len(dataset)
indices = list(range(dataset_size))
split = int(np.floor(test_split * dataset_size))
if shuffle_dataset:
np.random.seed(random_seed)
np.random.shuffle(indices)
train_indices, test_indices = indices[split:], indices[:split]
# Initialize Dataloaders
train_sampler = data_utils.SubsetRandomSampler(train_indices)
test_sampler = data_utils.SubsetRandomSampler(test_indices)
self.train_loader = data_utils.DataLoader(dataset,
batch_size=batch_size,
sampler=train_sampler)
self.test_loader = data_utils.DataLoader(dataset,
batch_size=batch_size,
sampler=test_sampler)
self.isolates = data['isolates']
def transform(input, output):
"""Snakemake function
Split and transform input data
"""
genesdf = pd.read_csv(input[1], index_col=0, header=0)
metadf = pd.read_csv(input[0])
all_isolates = metadf["Isolate"].to_numpy('U')
encoding = {
'S': 0,
'I': 0.5,
'R': 1
}
    pattern = re.compile(r"(\w{3})\.pt$")
for f in output:
m = pattern.match(f, len(f)-6)
d = m.group(1)
# print(d)
y = metadf[d]
omit = pd.isnull(y)
isolates = all_isolates[~omit]
y = y.loc[~omit]
X = genesdf.loc[isolates].to_numpy()
ylabels = np.array([ encoding[v] for v in y ])
# print(ylabels.shape)
# print(X.shape)
# print(isolates.shape)
# print(isolates[0])
# print(isolates.dtype)
y_tensor = torch.from_numpy(ylabels)
X_tensor = torch.from_numpy(X)
torch.save({'y': y_tensor, 'X': X_tensor, 'isolates': isolates}, f)
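# Each drug column is turned into a float label via `encoding`
# (S -> 0, I -> 0.5, R -> 1) after dropping null calls, e.g. a column
# ['S', NaN, 'R'] yields ylabels == array([0.0, 1.0]) for the two kept isolates.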
def align(fh, transl=True):
"""
Translate and align pangenome cluster fasta file
"""
align_exe = MuscleCommandline(
r'C:\Users\matthewwhiteside\workspace\b_ecoli\muscle\muscle3.8.31_i86win32.exe',
clwstrict=True)
# Align on stdin/stdout
proc = subprocess.Popen(str(align_exe),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
shell=False)
sequences = FastaIterator(fh)
inp = [ ">"+record.id+"\n"+str(record.translate(table="Bacterial").seq)+"\n" for record in sequences ]
inp = "".join(inp)
align, err = proc.communicate(input=inp)
return(align)
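# align() never touches disk: records are translated with the "Bacterial"
# codon table, re-serialised as FASTA onto MUSCLE's stdin, and the strict
# CLUSTAL-format alignment (clwstrict=True) is returned from stdout.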
def decompress(zipf, transl=True):
"""
Decompress gzipped fasta files in zip archive
"""
with zipfile.ZipFile(zipf, "r") as zh:
i = 0
for z in zh.infolist():
if not z.is_dir():
print(z.filename)
gz = zh.read(z.filename)
fh = io.BytesIO(gz)
with gzip.open(fh, 'rb') as gz:
fn = gz.read()
yield fn.decode('utf-8')
if __name__ == "__main__":
for fn in decompress("data/raw/ecoli/pan_genome_sequences.zip"):
with io.StringIO(fn) as ifh:
with open('data/tmp/test.aln', 'w') as ofh:
ofh.write(align(ifh))
break
|
[
"torch.from_numpy",
"io.StringIO",
"zipfile.ZipFile",
"numpy.random.seed",
"torch.utils.data.DataLoader",
"io.BytesIO",
"pandas.read_csv",
"Bio.SeqIO.FastaIO.FastaIterator",
"torch.load",
"numpy.floor",
"gzip.open",
"pandas.isnull",
"Bio.Align.Applications.MuscleCommandline",
"torch.save",
"numpy.array",
"torch.utils.data.SubsetRandomSampler",
"numpy.random.shuffle",
"re.compile"
] |
[((2335, 2379), 'pandas.read_csv', 'pd.read_csv', (['input[1]'], {'index_col': '(0)', 'header': '(0)'}), '(input[1], index_col=0, header=0)\n', (2346, 2379), True, 'import pandas as pd\n'), ((2393, 2414), 'pandas.read_csv', 'pd.read_csv', (['input[0]'], {}), '(input[0])\n', (2404, 2414), True, 'import pandas as pd\n'), ((2549, 2575), 're.compile', 're.compile', (['"""(\\\\w{3}).pt$"""'], {}), "('(\\\\w{3}).pt$')\n", (2559, 2575), False, 'import re\n'), ((3328, 3457), 'Bio.Align.Applications.MuscleCommandline', 'MuscleCommandline', (['"""C:\\\\Users\\\\matthewwhiteside\\\\workspace\\\\b_ecoli\\\\muscle\\\\muscle3.8.31_i86win32.exe"""'], {'clwstrict': '(True)'}), "(\n 'C:\\\\Users\\\\matthewwhiteside\\\\workspace\\\\b_ecoli\\\\muscle\\\\muscle3.8.31_i86win32.exe'\n , clwstrict=True)\n", (3345, 3457), False, 'from Bio.Align.Applications import MuscleCommandline\n'), ((3699, 3716), 'Bio.SeqIO.FastaIO.FastaIterator', 'FastaIterator', (['fh'], {}), '(fh)\n', (3712, 3716), False, 'from Bio.SeqIO.FastaIO import FastaIterator, as_fasta\n'), ((1166, 1187), 'torch.load', 'torch.load', (['data_file'], {}), '(data_file)\n', (1176, 1187), False, 'import torch\n'), ((1650, 1695), 'torch.utils.data.SubsetRandomSampler', 'data_utils.SubsetRandomSampler', (['train_indices'], {}), '(train_indices)\n', (1680, 1695), True, 'import torch.utils.data as data_utils\n'), ((1719, 1763), 'torch.utils.data.SubsetRandomSampler', 'data_utils.SubsetRandomSampler', (['test_indices'], {}), '(test_indices)\n', (1749, 1763), True, 'import torch.utils.data as data_utils\n'), ((1793, 1869), 'torch.utils.data.DataLoader', 'data_utils.DataLoader', (['dataset'], {'batch_size': 'batch_size', 'sampler': 'train_sampler'}), '(dataset, batch_size=batch_size, sampler=train_sampler)\n', (1814, 1869), True, 'import torch.utils.data as data_utils\n'), ((1997, 2072), 'torch.utils.data.DataLoader', 'data_utils.DataLoader', (['dataset'], {'batch_size': 'batch_size', 'sampler': 'test_sampler'}), '(dataset, batch_size=batch_size, sampler=test_sampler)\n', (2018, 2072), True, 'import torch.utils.data as data_utils\n'), ((2715, 2727), 'pandas.isnull', 'pd.isnull', (['y'], {}), '(y)\n', (2724, 2727), True, 'import pandas as pd\n'), ((2856, 2890), 'numpy.array', 'np.array', (['[encoding[v] for v in y]'], {}), '([encoding[v] for v in y])\n', (2864, 2890), True, 'import numpy as np\n'), ((3063, 3088), 'torch.from_numpy', 'torch.from_numpy', (['ylabels'], {}), '(ylabels)\n', (3079, 3088), False, 'import torch\n'), ((3108, 3127), 'torch.from_numpy', 'torch.from_numpy', (['X'], {}), '(X)\n', (3124, 3127), False, 'import torch\n'), ((3137, 3204), 'torch.save', 'torch.save', (["{'y': y_tensor, 'X': X_tensor, 'isolates': isolates}", 'f'], {}), "({'y': y_tensor, 'X': X_tensor, 'isolates': isolates}, f)\n", (3147, 3204), False, 'import torch\n'), ((4029, 4055), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zipf', '"""r"""'], {}), "(zipf, 'r')\n", (4044, 4055), False, 'import zipfile\n'), ((1377, 1412), 'numpy.floor', 'np.floor', (['(test_split * dataset_size)'], {}), '(test_split * dataset_size)\n', (1385, 1412), True, 'import numpy as np\n'), ((1454, 1481), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (1468, 1481), True, 'import numpy as np\n'), ((1494, 1520), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (1511, 1520), True, 'import numpy as np\n'), ((4491, 4506), 'io.StringIO', 'io.StringIO', (['fn'], {}), '(fn)\n', (4502, 4506), False, 'import io\n'), ((4236, 4250), 'io.BytesIO', 
'io.BytesIO', (['gz'], {}), '(gz)\n', (4246, 4250), False, 'import io\n'), ((4272, 4291), 'gzip.open', 'gzip.open', (['fh', '"""rb"""'], {}), "(fh, 'rb')\n", (4281, 4291), False, 'import gzip\n')]
|
#!/usr/bin/python
# This file is licensed under MIT license.
# See the LICENSE file in the project root for more information.
import unittest
import rostest
import rosunit
import numpy as np
from numpy.testing import assert_almost_equal
from std_msgs.msg import Header
from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion
from nav_msgs.msg import Path
from car_core.common import msgs_helpers, geom_helpers
def get_poses_helper(points):
poses = []
for p in points:
pose = PoseStamped()
pose.pose.position = Point(p[0], p[1], p[2])
poses.append(pose)
return poses
class TestMsgsHelpers(unittest.TestCase):
def test_quaterion_to_array_ok(self):
q = Quaternion(1,2,3,4)
arr = msgs_helpers.quaterion_to_array(q)
assert_almost_equal(arr, np.array([1,2, 3, 4]))
self.assertTrue(True)
def test_point_to_array_ok(self):
p = Point(1,2,3)
arr = msgs_helpers.point_to_array(p)
assert_almost_equal(arr, np.array([1,2]))
self.assertTrue(True)
def test_path_poses_to_array_ok(self):
poses = get_poses_helper([[1,2,3],
[4,5,6],
[7,8,9]])
arr = msgs_helpers.path_poses_to_array(poses)
assert_almost_equal(arr, np.array([[1,2],
[4,5],
[7,8]]))
self.assertTrue(True)
def test_array_to_point_ok(self):
arr = np.array([1,2])
point = msgs_helpers.array_to_point(arr)
self.assertEqual(point, Point(1,2,0))
def test_array_to_path_poses_ok(self):
arr = np.array([[1,2],
[4,5],
[6,7]])
poses = msgs_helpers.array_to_path_poses(arr)
poses_true = get_poses_helper([[1,2,0],
[4,5,0],
[6,7,0]])
        self.assertEqual(poses, poses_true)
class TestGeomHelpers(unittest.TestCase):
def test_get_closest_path_point_regular(self):
poses = np.array([[0,0],
[1,1],
[2,2],
[3,3]])
point = np.array([0.9, 0.9])
index = geom_helpers.get_closest_path_point(poses, point)
self.assertEqual(index, 1)
def test_get_closest_path_point_far(self):
poses = np.array([[0,0],
[1,1],
[2,2],
[3,3]])
point = np.array([-1, 3])
index = geom_helpers.get_closest_path_point(poses, point)
self.assertEqual(index, 1)
def test_get_closest_path_point_first(self):
poses = np.array([[0,0],
[1,1],
[2,2],
[3,3]])
point = np.array([-1, 1])
index = geom_helpers.get_closest_path_point(poses, point)
self.assertEqual(index, 0)
def test_get_closest_path_point_last(self):
poses = np.array([[0,0],
[1,1],
[2,2],
[3,3]])
point = np.array([4, 4])
index = geom_helpers.get_closest_path_point(poses, point)
self.assertEqual(index, 3)
def test_get_closest_path_point_single_point(self):
poses = np.array([[0,0]])
point = np.array([4, 4])
index = geom_helpers.get_closest_path_point(poses, point)
self.assertEqual(index, 0)
def test_get_closest_path_point_matching_points(self):
poses = np.array([[0,0],
[1,1],
[1,1],
[3,3]])
point = np.array([1.1, 1.1])
index = geom_helpers.get_closest_path_point(poses, point)
self.assertEqual(index, 1)
if __name__ == '__main__':
import rosunit
rosunit.unitrun("car_core", 'test_msgs_helpers', TestMsgsHelpers)
rosunit.unitrun("car_core", 'test_geom_helpers', TestGeomHelpers)
|
[
"geometry_msgs.msg.PoseStamped",
"car_core.common.msgs_helpers.path_poses_to_array",
"car_core.common.msgs_helpers.array_to_point",
"rosunit.unitrun",
"car_core.common.msgs_helpers.array_to_path_poses",
"car_core.common.geom_helpers.get_closest_path_point",
"car_core.common.msgs_helpers.point_to_array",
"geometry_msgs.msg.Quaternion",
"geometry_msgs.msg.Point",
"numpy.array",
"car_core.common.msgs_helpers.quaterion_to_array"
] |
[((3932, 3997), 'rosunit.unitrun', 'rosunit.unitrun', (['"""car_core"""', '"""test_msgs_helpers"""', 'TestMsgsHelpers'], {}), "('car_core', 'test_msgs_helpers', TestMsgsHelpers)\n", (3947, 3997), False, 'import rosunit\n'), ((4002, 4067), 'rosunit.unitrun', 'rosunit.unitrun', (['"""car_core"""', '"""test_geom_helpers"""', 'TestGeomHelpers'], {}), "('car_core', 'test_geom_helpers', TestGeomHelpers)\n", (4017, 4067), False, 'import rosunit\n'), ((506, 519), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (517, 519), False, 'from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion\n'), ((549, 572), 'geometry_msgs.msg.Point', 'Point', (['p[0]', 'p[1]', 'p[2]'], {}), '(p[0], p[1], p[2])\n', (554, 572), False, 'from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion\n'), ((715, 737), 'geometry_msgs.msg.Quaternion', 'Quaternion', (['(1)', '(2)', '(3)', '(4)'], {}), '(1, 2, 3, 4)\n', (725, 737), False, 'from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion\n'), ((749, 783), 'car_core.common.msgs_helpers.quaterion_to_array', 'msgs_helpers.quaterion_to_array', (['q'], {}), '(q)\n', (780, 783), False, 'from car_core.common import msgs_helpers, geom_helpers\n'), ((921, 935), 'geometry_msgs.msg.Point', 'Point', (['(1)', '(2)', '(3)'], {}), '(1, 2, 3)\n', (926, 935), False, 'from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion\n'), ((948, 978), 'car_core.common.msgs_helpers.point_to_array', 'msgs_helpers.point_to_array', (['p'], {}), '(p)\n', (975, 978), False, 'from car_core.common import msgs_helpers, geom_helpers\n'), ((1247, 1286), 'car_core.common.msgs_helpers.path_poses_to_array', 'msgs_helpers.path_poses_to_array', (['poses'], {}), '(poses)\n', (1279, 1286), False, 'from car_core.common import msgs_helpers, geom_helpers\n'), ((1522, 1538), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (1530, 1538), True, 'import numpy as np\n'), ((1554, 1586), 'car_core.common.msgs_helpers.array_to_point', 'msgs_helpers.array_to_point', (['arr'], {}), '(arr)\n', (1581, 1586), False, 'from car_core.common import msgs_helpers, geom_helpers\n'), ((1691, 1725), 'numpy.array', 'np.array', (['[[1, 2], [4, 5], [6, 7]]'], {}), '([[1, 2], [4, 5], [6, 7]])\n', (1699, 1725), True, 'import numpy as np\n'), ((1787, 1824), 'car_core.common.msgs_helpers.array_to_path_poses', 'msgs_helpers.array_to_path_poses', (['arr'], {}), '(arr)\n', (1819, 1824), False, 'from car_core.common import msgs_helpers, geom_helpers\n'), ((2119, 2161), 'numpy.array', 'np.array', (['[[0, 0], [1, 1], [2, 2], [3, 3]]'], {}), '([[0, 0], [1, 1], [2, 2], [3, 3]])\n', (2127, 2161), True, 'import numpy as np\n'), ((2252, 2272), 'numpy.array', 'np.array', (['[0.9, 0.9]'], {}), '([0.9, 0.9])\n', (2260, 2272), True, 'import numpy as np\n'), ((2289, 2338), 'car_core.common.geom_helpers.get_closest_path_point', 'geom_helpers.get_closest_path_point', (['poses', 'point'], {}), '(poses, point)\n', (2324, 2338), False, 'from car_core.common import msgs_helpers, geom_helpers\n'), ((2438, 2480), 'numpy.array', 'np.array', (['[[0, 0], [1, 1], [2, 2], [3, 3]]'], {}), '([[0, 0], [1, 1], [2, 2], [3, 3]])\n', (2446, 2480), True, 'import numpy as np\n'), ((2571, 2588), 'numpy.array', 'np.array', (['[-1, 3]'], {}), '([-1, 3])\n', (2579, 2588), True, 'import numpy as np\n'), ((2605, 2654), 'car_core.common.geom_helpers.get_closest_path_point', 'geom_helpers.get_closest_path_point', (['poses', 'point'], {}), '(poses, point)\n', (2640, 2654), False, 'from car_core.common import msgs_helpers, 
geom_helpers\n'), ((2756, 2798), 'numpy.array', 'np.array', (['[[0, 0], [1, 1], [2, 2], [3, 3]]'], {}), '([[0, 0], [1, 1], [2, 2], [3, 3]])\n', (2764, 2798), True, 'import numpy as np\n'), ((2889, 2906), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (2897, 2906), True, 'import numpy as np\n'), ((2923, 2972), 'car_core.common.geom_helpers.get_closest_path_point', 'geom_helpers.get_closest_path_point', (['poses', 'point'], {}), '(poses, point)\n', (2958, 2972), False, 'from car_core.common import msgs_helpers, geom_helpers\n'), ((3073, 3115), 'numpy.array', 'np.array', (['[[0, 0], [1, 1], [2, 2], [3, 3]]'], {}), '([[0, 0], [1, 1], [2, 2], [3, 3]])\n', (3081, 3115), True, 'import numpy as np\n'), ((3206, 3222), 'numpy.array', 'np.array', (['[4, 4]'], {}), '([4, 4])\n', (3214, 3222), True, 'import numpy as np\n'), ((3239, 3288), 'car_core.common.geom_helpers.get_closest_path_point', 'geom_helpers.get_closest_path_point', (['poses', 'point'], {}), '(poses, point)\n', (3274, 3288), False, 'from car_core.common import msgs_helpers, geom_helpers\n'), ((3397, 3415), 'numpy.array', 'np.array', (['[[0, 0]]'], {}), '([[0, 0]])\n', (3405, 3415), True, 'import numpy as np\n'), ((3431, 3447), 'numpy.array', 'np.array', (['[4, 4]'], {}), '([4, 4])\n', (3439, 3447), True, 'import numpy as np\n'), ((3464, 3513), 'car_core.common.geom_helpers.get_closest_path_point', 'geom_helpers.get_closest_path_point', (['poses', 'point'], {}), '(poses, point)\n', (3499, 3513), False, 'from car_core.common import msgs_helpers, geom_helpers\n'), ((3625, 3667), 'numpy.array', 'np.array', (['[[0, 0], [1, 1], [1, 1], [3, 3]]'], {}), '([[0, 0], [1, 1], [1, 1], [3, 3]])\n', (3633, 3667), True, 'import numpy as np\n'), ((3758, 3778), 'numpy.array', 'np.array', (['[1.1, 1.1]'], {}), '([1.1, 1.1])\n', (3766, 3778), True, 'import numpy as np\n'), ((3795, 3844), 'car_core.common.geom_helpers.get_closest_path_point', 'geom_helpers.get_closest_path_point', (['poses', 'point'], {}), '(poses, point)\n', (3830, 3844), False, 'from car_core.common import msgs_helpers, geom_helpers\n'), ((817, 839), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (825, 839), True, 'import numpy as np\n'), ((1012, 1028), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (1020, 1028), True, 'import numpy as np\n'), ((1320, 1354), 'numpy.array', 'np.array', (['[[1, 2], [4, 5], [7, 8]]'], {}), '([[1, 2], [4, 5], [7, 8]])\n', (1328, 1354), True, 'import numpy as np\n'), ((1619, 1633), 'geometry_msgs.msg.Point', 'Point', (['(1)', '(2)', '(0)'], {}), '(1, 2, 0)\n', (1624, 1633), False, 'from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion\n')]
|
import cv2
import numpy as np
imagen = cv2.imread('imagen.jpg')
imagen = cv2.cvtColor(imagen,cv2.COLOR_BGR2RGB)
print(imagen.shape)
print(imagen[0][0][0])
imagen = cv2.resize(imagen,(256, 256))
imagen = cv2.imread('imagen.jpg')
imagen = cv2.cvtColor(imagen,cv2.COLOR_BGR2GRAY)
print(imagen.shape)
print(imagen[0][0])
imagen[0][0] = 0
imagen[0][1] = 0
imagen[0][2] = 0
cv2.imwrite('grayimagen.jpg',imagen)
matriz = np.zeros((256,256),np.float32)
print(matriz.shape)
cv2.imwrite('matrizImagen.jpg',matriz)
imagen = cv2.cvtColor(matriz,cv2.COLOR_GRAY2BGR)
print(imagen.shape)
cv2.imwrite('matrizColorImagen.jpg',imagen)
#cv2.imwrite('resizeImagen.jpg',imagen)
#cv2.imshow('image',imagen)
#cv2.waitKey(0)
|
[
"cv2.cvtColor",
"cv2.imwrite",
"numpy.zeros",
"cv2.imread",
"cv2.resize"
] |
[((40, 64), 'cv2.imread', 'cv2.imread', (['"""imagen.jpg"""'], {}), "('imagen.jpg')\n", (50, 64), False, 'import cv2\n'), ((74, 113), 'cv2.cvtColor', 'cv2.cvtColor', (['imagen', 'cv2.COLOR_BGR2RGB'], {}), '(imagen, cv2.COLOR_BGR2RGB)\n', (86, 113), False, 'import cv2\n'), ((165, 195), 'cv2.resize', 'cv2.resize', (['imagen', '(256, 256)'], {}), '(imagen, (256, 256))\n', (175, 195), False, 'import cv2\n'), ((205, 229), 'cv2.imread', 'cv2.imread', (['"""imagen.jpg"""'], {}), "('imagen.jpg')\n", (215, 229), False, 'import cv2\n'), ((239, 279), 'cv2.cvtColor', 'cv2.cvtColor', (['imagen', 'cv2.COLOR_BGR2GRAY'], {}), '(imagen, cv2.COLOR_BGR2GRAY)\n', (251, 279), False, 'import cv2\n'), ((372, 409), 'cv2.imwrite', 'cv2.imwrite', (['"""grayimagen.jpg"""', 'imagen'], {}), "('grayimagen.jpg', imagen)\n", (383, 409), False, 'import cv2\n'), ((418, 450), 'numpy.zeros', 'np.zeros', (['(256, 256)', 'np.float32'], {}), '((256, 256), np.float32)\n', (426, 450), True, 'import numpy as np\n'), ((469, 508), 'cv2.imwrite', 'cv2.imwrite', (['"""matrizImagen.jpg"""', 'matriz'], {}), "('matrizImagen.jpg', matriz)\n", (480, 508), False, 'import cv2\n'), ((517, 557), 'cv2.cvtColor', 'cv2.cvtColor', (['matriz', 'cv2.COLOR_GRAY2BGR'], {}), '(matriz, cv2.COLOR_GRAY2BGR)\n', (529, 557), False, 'import cv2\n'), ((577, 621), 'cv2.imwrite', 'cv2.imwrite', (['"""matrizColorImagen.jpg"""', 'imagen'], {}), "('matrizColorImagen.jpg', imagen)\n", (588, 621), False, 'import cv2\n')]
|
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME> and <NAME>
# --------------------------------------------------------
# --------------------------------------------------------
# Reorganized and modified by <NAME> and <NAME>
# --------------------------------------------------------
import torch
import torch.nn as nn
import numpy as np
import numpy.random as npr
from ..utils.config import cfg
from bbox_transform import bbox_transform, bbox_overlaps, co_bbox_overlaps_batch2, bbox_transform_batch2, bbox_overlaps_batch2
import pdb
DEBUG = False
class _RelProposalTargetLayer(nn.Module):
"""
Assign object detection proposals to ground-truth targets. Produces proposal
classification labels and bounding-box regression targets.
"""
def __init__(self, nclasses_rel):
super(_RelProposalTargetLayer, self).__init__()
self._num_classes_rel = nclasses_rel
self.BBOX_NORMALIZE_MEANS = torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS)
self.BBOX_NORMALIZE_STDS = torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS)
self.BBOX_INSIDE_WEIGHTS = torch.FloatTensor(cfg.TRAIN.BBOX_INSIDE_WEIGHTS)
def forward(self, roi_pairs, gt_boxes, num_boxes):
batch_size = gt_boxes.size(0)
# compute overlap between gt rel pairs and all roi pairs
gt_box_pairs = roi_pairs.new(batch_size, cfg.MAX_ROI_PAIR_NUMBER, 9).zero_()
for i in range(batch_size):
if (gt_boxes[i, :, 21:] > 0).sum() == 0: # no relation
continue
gt_pairs_i = (gt_boxes[i, :, 21:] > 0).nonzero()
n_rel = min(gt_box_pairs[i].size(0), gt_pairs_i.size(0))
gt_box_pairs[i][:n_rel, 0:4] = gt_boxes[i][gt_pairs_i[:n_rel, 0]][:, :4]
gt_box_pairs[i][:n_rel, 4:8] = gt_boxes[i][gt_pairs_i[:n_rel, 1]][:, :4]
gt_box_pairs[i][:n_rel, 8] = gt_boxes[i][gt_pairs_i[:n_rel, 0], 21 + gt_pairs_i[:n_rel, 1]]
# Include ground-truth boxes in the set of candidate rois
# gt_box_pairs_append = roi_pairs.new(batch_size, gt_box_pairs.size(1), roi_pairs.size(2)).zero_()
# gt_box_pairs_append[:,:,1:9] = gt_box_pairs[:,:,:8]
# for i in range(batch_size):
# gt_box_pairs_append[i, :, 0] = i
#
# roi_pairs = torch.cat([roi_pairs, gt_box_pairs_append], 1)
roi_pairs = roi_pairs.contiguous()
num_images = 1
rois_per_image = int(cfg.TRAIN.BATCH_SIZE / num_images)
fg_rois_per_image = int(np.round(cfg.TRAIN.FG_FRACTION * rois_per_image))
labels, rois, keeps = self._sample_roi_pairs_pytorch(roi_pairs, gt_box_pairs, fg_rois_per_image,
rois_per_image, self._num_classes_rel)
return rois, labels, keeps
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def _sample_roi_pairs_pytorch(self, all_roi_pairs, gt_box_pairs, fg_rois_per_image, rois_per_image, num_classes):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
# overlaps: (rois x gt_boxes)
overlaps = co_bbox_overlaps_batch2(all_roi_pairs[:,:,1:].contiguous(),
gt_box_pairs[:,:,:8].contiguous())
max_overlaps, gt_assignment = torch.max(overlaps, 2)
batch_size = overlaps.size(0)
num_proposal = overlaps.size(1)
num_boxes_per_img = overlaps.size(2)
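        # Gather each proposal pair's predicate label from its matched GT pair: add a per-image
        # offset so the flattened gt_box_pairs tensor can be indexed across the whole batch.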
offset = torch.arange(0, batch_size) * gt_box_pairs.size(1)
offset = offset.view(-1, 1).type_as(gt_assignment) + gt_assignment
labels = gt_box_pairs[:,:,8].contiguous().view(-1).index(offset.view(-1))\
.view(batch_size, -1)
fg_mask = max_overlaps >= cfg.TRAIN.RELPN_FG_THRESH
keep_inds_batch = labels.new(batch_size, rois_per_image).zero_()
labels_rel_batch = labels.new(batch_size, rois_per_image).zero_()
roi_pairs_batch = all_roi_pairs.new(batch_size, rois_per_image, 9).zero_()
# Guard against the case when an image has fewer than max_fg_rois_per_image
# foreground RoIs
for i in range(batch_size):
fg_inds = torch.nonzero(max_overlaps[i] >= cfg.TRAIN.RELPN_FG_THRESH).view(-1)
fg_num_rois = fg_inds.numel()
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = torch.nonzero((max_overlaps[i] < cfg.TRAIN.RELPN_BG_THRESH_HI) &
(max_overlaps[i] >= cfg.TRAIN.RELPN_BG_THRESH_LO)).view(-1)
bg_num_rois = bg_inds.numel()
# print(fg_num_rois, bg_num_rois)
# pdb.set_trace()
if fg_num_rois > 0 and bg_num_rois > 0:
# sampling fg
fg_rois_per_this_image = min(fg_rois_per_image, fg_num_rois)
# rand_num = torch.randperm(fg_num_rois).long().cuda()
rand_num = torch.from_numpy(np.random.permutation(fg_num_rois)).long().cuda()
fg_inds = fg_inds[rand_num[:fg_rois_per_this_image]]
# sampling bg
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
                # torch.rand seems to have a bug: it can generate very large numbers and raise an error,
                # so we use numpy's rand instead.
#rand_num = (torch.rand(bg_rois_per_this_image) * bg_num_rois).long().cuda()
rand_num = np.floor(np.random.rand(bg_rois_per_this_image) * bg_num_rois)
rand_num = torch.from_numpy(rand_num).long().cuda()
bg_inds = bg_inds[rand_num]
elif fg_num_rois > 0 and bg_num_rois == 0:
# sampling fg
#rand_num = torch.floor(torch.rand(rois_per_image) * fg_num_rois).long().cuda()
rand_num = np.floor(np.random.rand(rois_per_image) * fg_num_rois)
rand_num = torch.from_numpy(rand_num).long().cuda()
fg_inds = fg_inds[rand_num]
fg_rois_per_this_image = rois_per_image
bg_rois_per_this_image = 0
elif bg_num_rois > 0 and fg_num_rois == 0:
# sampling bg
#rand_num = torch.floor(torch.rand(rois_per_image) * bg_num_rois).long().cuda()
rand_num = np.floor(np.random.rand(rois_per_image) * bg_num_rois)
rand_num = torch.from_numpy(rand_num).long().cuda()
bg_inds = bg_inds[rand_num]
bg_rois_per_this_image = rois_per_image
fg_rois_per_this_image = 0
            else:
                raise ValueError("relpn: bg_num_rois = 0 and fg_num_rois = 0, this should not happen!")
# The indices that we're selecting (both fg and bg)
keep_inds = torch.cat([fg_inds, bg_inds], 0)
keep_inds_batch[i].copy_(keep_inds)
# Select sampled values from various arrays:
labels_rel_batch[i].copy_(labels[i][keep_inds])
# Clamp relation labels for the background RoIs to 0
labels_rel_batch[i][fg_rois_per_this_image:] = 0
roi_pairs_batch[i].copy_(all_roi_pairs[i][keep_inds])
roi_pairs_batch[i,:,0] = i
return labels_rel_batch, roi_pairs_batch, keep_inds_batch
|
[
"torch.FloatTensor",
"torch.cat",
"torch.nonzero",
"torch.max",
"torch.arange",
"numpy.random.permutation",
"numpy.random.rand",
"numpy.round",
"torch.from_numpy"
] |
[((1065, 1114), 'torch.FloatTensor', 'torch.FloatTensor', (['cfg.TRAIN.BBOX_NORMALIZE_MEANS'], {}), '(cfg.TRAIN.BBOX_NORMALIZE_MEANS)\n', (1082, 1114), False, 'import torch\n'), ((1150, 1198), 'torch.FloatTensor', 'torch.FloatTensor', (['cfg.TRAIN.BBOX_NORMALIZE_STDS'], {}), '(cfg.TRAIN.BBOX_NORMALIZE_STDS)\n', (1167, 1198), False, 'import torch\n'), ((1234, 1282), 'torch.FloatTensor', 'torch.FloatTensor', (['cfg.TRAIN.BBOX_INSIDE_WEIGHTS'], {}), '(cfg.TRAIN.BBOX_INSIDE_WEIGHTS)\n', (1251, 1282), False, 'import torch\n'), ((3497, 3519), 'torch.max', 'torch.max', (['overlaps', '(2)'], {}), '(overlaps, 2)\n', (3506, 3519), False, 'import torch\n'), ((2625, 2673), 'numpy.round', 'np.round', (['(cfg.TRAIN.FG_FRACTION * rois_per_image)'], {}), '(cfg.TRAIN.FG_FRACTION * rois_per_image)\n', (2633, 2673), True, 'import numpy as np\n'), ((3662, 3689), 'torch.arange', 'torch.arange', (['(0)', 'batch_size'], {}), '(0, batch_size)\n', (3674, 3689), False, 'import torch\n'), ((7012, 7044), 'torch.cat', 'torch.cat', (['[fg_inds, bg_inds]', '(0)'], {}), '([fg_inds, bg_inds], 0)\n', (7021, 7044), False, 'import torch\n'), ((4418, 4477), 'torch.nonzero', 'torch.nonzero', (['(max_overlaps[i] >= cfg.TRAIN.RELPN_FG_THRESH)'], {}), '(max_overlaps[i] >= cfg.TRAIN.RELPN_FG_THRESH)\n', (4431, 4477), False, 'import torch\n'), ((4634, 4754), 'torch.nonzero', 'torch.nonzero', (['((max_overlaps[i] < cfg.TRAIN.RELPN_BG_THRESH_HI) & (max_overlaps[i] >= cfg\n .TRAIN.RELPN_BG_THRESH_LO))'], {}), '((max_overlaps[i] < cfg.TRAIN.RELPN_BG_THRESH_HI) & (\n max_overlaps[i] >= cfg.TRAIN.RELPN_BG_THRESH_LO))\n', (4647, 4754), False, 'import torch\n'), ((5696, 5734), 'numpy.random.rand', 'np.random.rand', (['bg_rois_per_this_image'], {}), '(bg_rois_per_this_image)\n', (5710, 5734), True, 'import numpy as np\n'), ((6080, 6110), 'numpy.random.rand', 'np.random.rand', (['rois_per_image'], {}), '(rois_per_image)\n', (6094, 6110), True, 'import numpy as np\n'), ((5777, 5803), 'torch.from_numpy', 'torch.from_numpy', (['rand_num'], {}), '(rand_num)\n', (5793, 5803), False, 'import torch\n'), ((6554, 6584), 'numpy.random.rand', 'np.random.rand', (['rois_per_image'], {}), '(rois_per_image)\n', (6568, 6584), True, 'import numpy as np\n'), ((5190, 5224), 'numpy.random.permutation', 'np.random.permutation', (['fg_num_rois'], {}), '(fg_num_rois)\n', (5211, 5224), True, 'import numpy as np\n'), ((6153, 6179), 'torch.from_numpy', 'torch.from_numpy', (['rand_num'], {}), '(rand_num)\n', (6169, 6179), False, 'import torch\n'), ((6627, 6653), 'torch.from_numpy', 'torch.from_numpy', (['rand_num'], {}), '(rand_num)\n', (6643, 6653), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
"""Model.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1QPnK5YOh8kRYPOOue6txwrgUqwKOMS0I
"""
# # Use seaborn for pairplot
# !pip install -q seaborn
# !pip install tensorflow==2.0.0
# # Use some functions from tensorflow_docs
# !pip install -q git+https://github.com/tensorflow/docs
# !pip install h5py pyyaml
from __future__ import absolute_import, division, print_function, unicode_literals
import pathlib
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
from tensorflow import keras
import tensorflow_docs as tfdocs
import tensorflow_docs.plots
import tensorflow_docs.modeling
# Commented out IPython magic to ensure Python compatibility.
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.patches import ConnectionPatch
from collections import OrderedDict
from matplotlib.gridspec import GridSpec
from sklearn import metrics, linear_model
from sklearn.preprocessing import PolynomialFeatures, StandardScaler, normalize
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict
from scipy.optimize import curve_fit
import warnings
plt.rcParams["patch.force_edgecolor"] = True
plt.style.use('fivethirtyeight')
mpl.rc('patch', edgecolor = 'dimgray', linewidth=1)
# from IPython.core.interactiveshell import InteractiveShell
# InteractiveShell.ast_node_interactivity = "last_expr"
pd.options.display.max_columns = 50
# %matplotlib inline
warnings.filterwarnings("ignore")
# import pickle
# create and save all the models
airlines = pd.read_csv('airlines.csv')
carriers = list(airlines['IATA_CODE'])
# print(carriers)
global train_stats
def norm(x):
global train_stats
return (x - train_stats['mean']) / train_stats['std']
def ret_stats():
return train_stats
def build_model(train_ds):
model = keras.Sequential([
tf.keras.layers.Dense(64, activation='relu', input_shape=[len(train_ds.keys())]),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(1)
])
optimizer = tf.keras.optimizers.RMSprop(0.001)
model.compile(loss='mse',
optimizer=optimizer,
metrics=['mae', 'mse'])
return model
def do_create_models():
for carrier in carriers:
# create a model and save it for each carrier
global train_stats
df = pd.read_csv('carriers/carrier' + str(carrier) + 'data.csv')
df.drop(['Unnamed: 0'], axis=1, inplace=True)
# encode the origin
encoder = LabelEncoder()
encoder.fit(df['ORIGIN_AIRPORT'])
encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_)))
df['ORIGIN_AIRPORT'] = encoder.fit_transform(df['ORIGIN_AIRPORT'])
# create the train and test dataset
train_dataset = df.sample(frac=0.8,random_state=0)
test_dataset = df.drop(train_dataset.index)
# getting the stats
train_stats = train_dataset.describe()
train_stats.pop("ARRIVAL_DELAY")
train_stats = train_stats.transpose()
train_stats.to_csv('stats/train_stats' + str(carrier) + '.csv')
# defining the train and test labels
train_labels = train_dataset.pop('ARRIVAL_DELAY')
test_labels = test_dataset.pop('ARRIVAL_DELAY')
# normalize the data
normed_train_data = norm(train_dataset)
normed_test_data = norm(test_dataset)
# # define the model
# model = build_model(train_dataset)
# # train the model
# EPOCHS = 100
# # The patience parameter is the amount of epochs to check for improvement
# early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
# early_history = model.fit(normed_train_data, train_labels,
# epochs=EPOCHS, validation_split = 0.2, verbose=0,
# callbacks=[early_stop, tfdocs.modeling.EpochDots()])
# # calculating the loss
# loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=2)
# # weights = model.get_weights()
# # fpkl = open('drive/My Drive/pickle_models/model-' + str(carrier) + '-weights.pkl', 'wb')
# # pickle.dump(weights, fpkl, protocol=pickle.HIGHEST_PROTOCOL)
# print("Testing set Mean Abs Error: {:5.2f} minutes".format(mae))
# model.save('models/model-' + str(carrier) + '.h5')
print('OK ' + str(carrier))
# let's create the input pipeline
from datetime import datetime
def conv_to_datetime(str_):
return datetime.strptime(str_, '%Y-%m-%d %H:%M:%S')
def conv_to_time(str_):
return datetime.strptime(str_, '%H:%M:%S')
import datetime
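# Note: this module import shadows the earlier `from datetime import datetime`, so the
# strptime helpers above would need `datetime.datetime.strptime` if called from here on.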
def string_to_time(time_string):
if pd.isnull(time_string):
return np.nan
else:
if time_string == 2400:
time_string = 0
time_string = "{0:04d}".format(int(time_string))
time_ = datetime.time(int(time_string[0:2]), int(time_string[2:4]))
return time_
def func(x):
return x.hour * 3600 + x.minute * 60 + x.second
dayOfWeek = 6
airline = 'AA'
origin = 'LAX'
dest = 'SEA'
sd = 200
ddelay = -10
sa = 800
dist = 1200
do_create_models()
# global train_stats
# stats = ret_stats()
# print(stats)
def processInput(input_):
global train_stats
processed = []
time_sd = string_to_time(np.int64(input_["sd"]))
time_sa = string_to_time(np.int64(input_["sa"]))
time_sd = func(time_sd)
time_sa = func(time_sa)
# encode airlines to their numbers
df = pd.read_csv('carriers/carrier' + str(input_["carrier"]) + 'data.csv')
df.drop(['Unnamed: 0'], axis=1, inplace=True)
encoder = LabelEncoder()
encoder.fit(df['ORIGIN_AIRPORT'])
encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_)))
carrier = input_["carrier"]
for carr_ in carriers:
# create a model and save it for each carrier
if carr_ == carrier:
df = pd.read_csv('carriers/carrier' + str(carr_) + 'data.csv')
df.drop(['Unnamed: 0'], axis=1, inplace=True)
# encode the origin
encoder = LabelEncoder()
encoder.fit(df['ORIGIN_AIRPORT'])
encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_)))
# print(encoded_data_map)
df['ORIGIN_AIRPORT'] = encoder.fit_transform(df['ORIGIN_AIRPORT'])
# # create the train and test dataset
# train_dataset = df.sample(frac=0.8,random_state=0)
# test_dataset = df.drop(train_dataset.index)
# # getting the stats
# train_stats = train_dataset.describe()
# train_stats.pop("ARRIVAL_DELAY")
# train_stats = train_stats.transpose()
# # defining the train and test labels
# train_labels = train_dataset.pop('ARRIVAL_DELAY')
# test_labels = test_dataset.pop('ARRIVAL_DELAY')
# # normalize the data
# normed_train_data = norm(train_dataset)
# normed_test_data = norm(test_dataset)
# # define the model
# model = build_model(train_dataset)
# # train the model
# EPOCHS = 100
# # The patience parameter is the amount of epochs to check for improvement
# early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
# early_history = model.fit(normed_train_data, train_labels,
# epochs=EPOCHS, validation_split = 0.2, verbose=0,
# callbacks=[early_stop, tfdocs.modeling.EpochDots()])
# # calculating the loss
# loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=2)
# print("Testing set Mean Abs Error: {:5.2f} minutes".format(mae))
# model.save('models/model-' + str(carrier) + '.h5')
# weights = model.get_weights()
# fpkl = open('model-' + str(carrier) + '-weights.pkl', 'wb')
# pickle.dump(weights, fpkl, protocol=pickle.HIGHEST_PROTOCOL)
# print('OK ' + str(carrier))
origin = input_["origin"]
ddelay = input_["ddelay"]
origin_ = encoded_data_map[origin]
dist = input_["dist"]
weekday = input_["dayOfWeek"]
input_ = {"time_insec_dep" : time_sd, "time_insec_arr": time_sa,
"ORIGIN_AIRPORT": origin_, "DEPARTURE_DELAY": ddelay,
"DISTANCE": dist, "weekday": weekday }
df = pd.DataFrame([input_])
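    # norm() uses the module-level train_stats, which at this point holds the statistics of the
    # last carrier processed by do_create_models(), not necessarily the requested carrier;
    # loading stats/train_stats<carrier>.csv for that carrier would likely be needed here.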
df = norm(df)
model = keras.models.load_model('models/model-' + str(carrier) +'.h5')
print("OK")
return df, model
# input_ = {
# "dayOfWeek": dayOfWeek,
# "carrier": airline,
# "origin": origin,
# "sd": sd,
# "ddelay": ddelay,
# "sa": sa,
# "dist": dist
# }
# test_input, model = processInput(input_)
# from google.colab import drive
# drive.mount('/content/drive')
# !ls
# test_predictions_input = model.predict(test_input).flatten()
# print("The delay is: ", test_predictions_input[0], " minutes")
|
[
"pandas.DataFrame",
"matplotlib.rc",
"warnings.filterwarnings",
"pandas.read_csv",
"datetime.strptime",
"tensorflow.keras.layers.Dense",
"pandas.isnull",
"sklearn.preprocessing.LabelEncoder",
"matplotlib.pyplot.style.use",
"numpy.int64",
"tensorflow.keras.optimizers.RMSprop"
] |
[((1373, 1405), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (1386, 1405), True, 'import matplotlib.pyplot as plt\n'), ((1406, 1455), 'matplotlib.rc', 'mpl.rc', (['"""patch"""'], {'edgecolor': '"""dimgray"""', 'linewidth': '(1)'}), "('patch', edgecolor='dimgray', linewidth=1)\n", (1412, 1455), True, 'import matplotlib as mpl\n'), ((1633, 1666), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (1656, 1666), False, 'import warnings\n'), ((1728, 1755), 'pandas.read_csv', 'pd.read_csv', (['"""airlines.csv"""'], {}), "('airlines.csv')\n", (1739, 1755), True, 'import pandas as pd\n'), ((2252, 2286), 'tensorflow.keras.optimizers.RMSprop', 'tf.keras.optimizers.RMSprop', (['(0.001)'], {}), '(0.001)\n', (2279, 2286), True, 'import tensorflow as tf\n'), ((4598, 4642), 'datetime.strptime', 'datetime.strptime', (['str_', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(str_, '%Y-%m-%d %H:%M:%S')\n", (4615, 4642), False, 'import datetime\n'), ((4679, 4714), 'datetime.strptime', 'datetime.strptime', (['str_', '"""%H:%M:%S"""'], {}), "(str_, '%H:%M:%S')\n", (4696, 4714), False, 'import datetime\n'), ((4773, 4795), 'pandas.isnull', 'pd.isnull', (['time_string'], {}), '(time_string)\n', (4782, 4795), True, 'import pandas as pd\n'), ((5703, 5717), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (5715, 5717), False, 'from sklearn.preprocessing import LabelEncoder, OneHotEncoder\n'), ((8420, 8442), 'pandas.DataFrame', 'pd.DataFrame', (['[input_]'], {}), '([input_])\n', (8432, 8442), True, 'import pandas as pd\n'), ((2691, 2705), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (2703, 2705), False, 'from sklearn.preprocessing import LabelEncoder, OneHotEncoder\n'), ((5388, 5410), 'numpy.int64', 'np.int64', (["input_['sd']"], {}), "(input_['sd'])\n", (5396, 5410), True, 'import numpy as np\n'), ((5441, 5463), 'numpy.int64', 'np.int64', (["input_['sa']"], {}), "(input_['sa'])\n", (5449, 5463), True, 'import numpy as np\n'), ((2107, 2151), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (2128, 2151), True, 'import tensorflow as tf\n'), ((2157, 2201), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (2178, 2201), True, 'import tensorflow as tf\n'), ((2207, 2231), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {}), '(1)\n', (2228, 2231), True, 'import tensorflow as tf\n'), ((6162, 6176), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (6174, 6176), False, 'from sklearn.preprocessing import LabelEncoder, OneHotEncoder\n')]
|
from __future__ import absolute_import
# --------------------------------------------------------
# Spatial Attention Network withFeature Mimicking
# Copyright (c) 2018 University of Illinois
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
# --------------------------------------------------------
# Reorganized and modified Modified by <NAME>
# -------------------------------------------------------
import torch
import torch.nn as nn
import numpy as np
import math
import yaml
from model.utils.config import cfg
from model.rpn.generate_anchors import generate_anchors
# from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes, clip_boxes_batch
from .bbox.bbox_transform import bbox_pred, clip_boxes, bbox_overlaps
# from model.nms.nms_wrapper import nms
from model.roi_layers import nms
import pdb
DEBUG = False
class _DCRProposalLayer(nn.Module):
def __init__(self, class_agnostic):
super(_DCRProposalLayer, self).__init__()
self.class_agnostic = class_agnostic
self._top = cfg.DCR.TOP
def forward(self, rois, cls_prob, bbox_pred_tensor, im_info):
num_keep_index = int(rois.shape[0] * self._top)
rois = rois[0].cpu().detach().numpy()[:, 1:]
bbox_deltas = bbox_pred_tensor.cpu().detach().numpy()[:, 4:8]
im_info = im_info.cpu().detach().numpy()[0, :]
cls_prob = cls_prob.cpu().detach().numpy()[:, 1:] # ignore bg
# sort scores
max_scores = np.amax(cls_prob, axis=1)
# keep top scores
keep_index = np.argsort(-max_scores)[:num_keep_index]
proposals = bbox_pred(rois, bbox_deltas)
proposals = clip_boxes(proposals, im_info[:2])
batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)
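        # Prepend a zero batch-index column so each row becomes (batch_idx, x1, y1, x2, y2).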
blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))
return blob[keep_index, :], keep_index
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
|
[
"numpy.amax",
"numpy.argsort",
"numpy.zeros"
] |
[((1534, 1559), 'numpy.amax', 'np.amax', (['cls_prob'], {'axis': '(1)'}), '(cls_prob, axis=1)\n', (1541, 1559), True, 'import numpy as np\n'), ((1775, 1826), 'numpy.zeros', 'np.zeros', (['(proposals.shape[0], 1)'], {'dtype': 'np.float32'}), '((proposals.shape[0], 1), dtype=np.float32)\n', (1783, 1826), True, 'import numpy as np\n'), ((1607, 1630), 'numpy.argsort', 'np.argsort', (['(-max_scores)'], {}), '(-max_scores)\n', (1617, 1630), True, 'import numpy as np\n')]
|
"""SentencePiece Tokenization for Wiki Dataset
Example:
* python scripts/wiki_sp_tokenize_json.py --word --unigram
"""
import gzip
import json
import subprocess
from pathlib import Path
import sentencepiece as spm
import joblib
import numpy as np
import click
from tqdm import tqdm
from opencc import OpenCC
from wiki_tokenize_json import clean_text, filter_texts, SECTION_BLACKLIST
DATAPATH = "/mnt/Intel/zhwiki.json.gz"
TMPPATH = "/mnt/Intel/tmp_texts.txt"
TMPPATH_WORD = "/mnt/Intel/tmp_words.txt"
MODEL_PREFIX = "data/{algorithm}_{seg_word}_model"
CC = OpenCC('t2s')
VOC_SIZE = 7500
PAD = 1
UNK = 0
def json_to_txt():
with gzip.open(DATAPATH) as f:
with open(TMPPATH, "w") as fw:
for _, line in tqdm(enumerate(f.readlines())):
article = json.loads(line)
if "年表" in article["title"] or "列表" in article["title"]:
continue
for title, section in zip(article["section_titles"], article["section_texts"]):
title = CC.convert(title)
if title in SECTION_BLACKLIST:
continue
for paragraph in [x for x in section.split("\n") if len(x) > 50]:
paragraph = clean_text(paragraph)
if len(paragraph) < 200 or filter_texts(paragraph):
continue
for sentence in [x for x in paragraph.split("。") if len(x) > 10]:
fw.write(sentence + "。\n")
def fit_model(seg_word=True, algorithm="bpe"):
if not Path(TMPPATH).exists():
json_to_txt()
if seg_word:
print("Performing word segmentation...")
res = subprocess.run([
"thulac", "-model_dir", "/mnt/SSD_Data/openai_nlp/THULAC/models/",
"-seg_only", "-input", TMPPATH, "-output", TMPPATH_WORD
], stdout=subprocess.PIPE)
print(res)
# Train Model
print("Training model...")
spm.SentencePieceTrainer.Train(
'--input={} --model_prefix={} --vocab_size={} '
'--input_sentence_size=20000000 '
'--character_coverage=0.995 --model_type={algorithm}'.format(
TMPPATH_WORD if seg_word else TMPPATH,
MODEL_PREFIX.format(algorithm=algorithm, seg_word=seg_word),
VOC_SIZE, algorithm="unigram"
)
)
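    # Note: the format call above pins model_type to 'unigram', so the `algorithm` argument only
    # affects the output file prefix; passing algorithm=algorithm would let the --bpe flag take effect.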
def tokenize(seg_word=True, algorithm="bpe"):
print("Tokenizing...")
sp = spm.SentencePieceProcessor()
sp.Load(MODEL_PREFIX.format(
algorithm=algorithm, seg_word=seg_word) + ".model")
tokens = []
with open(TMPPATH_WORD if seg_word else TMPPATH) as f:
for _, sentence in tqdm(enumerate(f.readlines())):
tokens.append(
np.array(sp.EncodeAsIds(sentence))
)
joblib.dump(np.array(tokens), f"data/tokens_{algorithm}_{seg_word}.pkl")
@click.command()
@click.option("--word", is_flag=True)
@click.option("--bpe/--unigram", default=True)
def main(word, bpe):
seg_word = True if word else False
algorithm = "bpe" if bpe else "unigram"
# fit_model(seg_word, algorithm)
tokenize(seg_word, algorithm)
if __name__ == "__main__":
# pylint: disable=no-value-for-parameter
main()
|
[
"subprocess.run",
"gzip.open",
"sentencepiece.SentencePieceProcessor",
"json.loads",
"wiki_tokenize_json.clean_text",
"click.option",
"click.command",
"opencc.OpenCC",
"pathlib.Path",
"numpy.array",
"wiki_tokenize_json.filter_texts"
] |
[((564, 577), 'opencc.OpenCC', 'OpenCC', (['"""t2s"""'], {}), "('t2s')\n", (570, 577), False, 'from opencc import OpenCC\n'), ((2890, 2905), 'click.command', 'click.command', ([], {}), '()\n', (2903, 2905), False, 'import click\n'), ((2907, 2943), 'click.option', 'click.option', (['"""--word"""'], {'is_flag': '(True)'}), "('--word', is_flag=True)\n", (2919, 2943), False, 'import click\n'), ((2945, 2990), 'click.option', 'click.option', (['"""--bpe/--unigram"""'], {'default': '(True)'}), "('--bpe/--unigram', default=True)\n", (2957, 2990), False, 'import click\n'), ((2462, 2490), 'sentencepiece.SentencePieceProcessor', 'spm.SentencePieceProcessor', ([], {}), '()\n', (2488, 2490), True, 'import sentencepiece as spm\n'), ((640, 659), 'gzip.open', 'gzip.open', (['DATAPATH'], {}), '(DATAPATH)\n', (649, 659), False, 'import gzip\n'), ((1724, 1896), 'subprocess.run', 'subprocess.run', (["['thulac', '-model_dir', '/mnt/SSD_Data/openai_nlp/THULAC/models/',\n '-seg_only', '-input', TMPPATH, '-output', TMPPATH_WORD]"], {'stdout': 'subprocess.PIPE'}), "(['thulac', '-model_dir',\n '/mnt/SSD_Data/openai_nlp/THULAC/models/', '-seg_only', '-input',\n TMPPATH, '-output', TMPPATH_WORD], stdout=subprocess.PIPE)\n", (1738, 1896), False, 'import subprocess\n'), ((2826, 2842), 'numpy.array', 'np.array', (['tokens'], {}), '(tokens)\n', (2834, 2842), True, 'import numpy as np\n'), ((790, 806), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (800, 806), False, 'import json\n'), ((1597, 1610), 'pathlib.Path', 'Path', (['TMPPATH'], {}), '(TMPPATH)\n', (1601, 1610), False, 'from pathlib import Path\n'), ((1257, 1278), 'wiki_tokenize_json.clean_text', 'clean_text', (['paragraph'], {}), '(paragraph)\n', (1267, 1278), False, 'from wiki_tokenize_json import clean_text, filter_texts, SECTION_BLACKLIST\n'), ((1330, 1353), 'wiki_tokenize_json.filter_texts', 'filter_texts', (['paragraph'], {}), '(paragraph)\n', (1342, 1353), False, 'from wiki_tokenize_json import clean_text, filter_texts, SECTION_BLACKLIST\n')]
|
"""
Tests with the Izhikevich neuron model.
"""
import numpy as np
import matplotlib.pyplot as plt
import pyNN.nest as sim
from pyNN.utility.plotting import Figure, Panel
# === Configure the simulator ================================================
duration = 100
dt = 0.01
sim.setup(timestep=dt, min_delay=0.1)
# === Build and instrument the network =======================================
phasic_spiking = {'a': 0.02, 'b': 0.25, 'c': -65, 'd': 6}
class_2 = {'a': 0.2, 'b': 0.26, 'c': -65, 'd': 0}
params = class_2
n = 100
v_init = -64
input_currents = 0.0005 * np.logspace(-4, 6, n, base=np.e)
neurons = sim.Population(n, sim.Izhikevich(i_offset=input_currents, **params))
neurons.record(['v', 'u', 'spikes'])
neurons.initialize(v=v_init, u=-params['b']*v_init)
# === Run the simulation =====================================================
sim.run(duration)
# === Save the results, optionally plot a figure =============================
data = neurons.get_data().segments[0]
first_spiketimes = []
rates = []
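# For each neuron, record the time of its first spike (infinity if it never fired)
# and its mean firing rate over the simulation.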
for spiketrain in data.spiketrains:
if len(spiketrain) == 0:
first_spiketimes.append(np.infty)
else:
first_spiketimes.append(spiketrain[0])
rates.append(np.count_nonzero(spiketrain) / duration)
plt.scatter(input_currents, 1 / np.array(first_spiketimes),
label='inverse ttfs')
plt.scatter(input_currents, rates, label='avg spikerate')
plt.legend()
plt.savefig('FI')
v = data.filter(name="v")[0]
u = data.filter(name="u")[0]
Figure(Panel(v, ylabel="Membrane potential (mV)", xticks=True,
xlabel="Time (ms)", yticks=True),
Panel(u, ylabel="u variable (units?)")).save('mem')
# === Clean up and quit ========================================================
sim.end()
|
[
"pyNN.nest.run",
"numpy.count_nonzero",
"pyNN.nest.setup",
"pyNN.nest.end",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"numpy.logspace",
"numpy.array",
"pyNN.utility.plotting.Panel",
"matplotlib.pyplot.savefig",
"pyNN.nest.Izhikevich"
] |
[((281, 318), 'pyNN.nest.setup', 'sim.setup', ([], {'timestep': 'dt', 'min_delay': '(0.1)'}), '(timestep=dt, min_delay=0.1)\n', (290, 318), True, 'import pyNN.nest as sim\n'), ((857, 874), 'pyNN.nest.run', 'sim.run', (['duration'], {}), '(duration)\n', (864, 874), True, 'import pyNN.nest as sim\n'), ((1345, 1402), 'matplotlib.pyplot.scatter', 'plt.scatter', (['input_currents', 'rates'], {'label': '"""avg spikerate"""'}), "(input_currents, rates, label='avg spikerate')\n", (1356, 1402), True, 'import matplotlib.pyplot as plt\n'), ((1403, 1415), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1413, 1415), True, 'import matplotlib.pyplot as plt\n'), ((1416, 1433), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""FI"""'], {}), "('FI')\n", (1427, 1433), True, 'import matplotlib.pyplot as plt\n'), ((1745, 1754), 'pyNN.nest.end', 'sim.end', ([], {}), '()\n', (1752, 1754), True, 'import pyNN.nest as sim\n'), ((574, 606), 'numpy.logspace', 'np.logspace', (['(-4)', '(6)', 'n'], {'base': 'np.e'}), '(-4, 6, n, base=np.e)\n', (585, 606), True, 'import numpy as np\n'), ((635, 684), 'pyNN.nest.Izhikevich', 'sim.Izhikevich', ([], {'i_offset': 'input_currents'}), '(i_offset=input_currents, **params)\n', (649, 684), True, 'import pyNN.nest as sim\n'), ((1283, 1309), 'numpy.array', 'np.array', (['first_spiketimes'], {}), '(first_spiketimes)\n', (1291, 1309), True, 'import numpy as np\n'), ((1209, 1237), 'numpy.count_nonzero', 'np.count_nonzero', (['spiketrain'], {}), '(spiketrain)\n', (1225, 1237), True, 'import numpy as np\n'), ((1500, 1592), 'pyNN.utility.plotting.Panel', 'Panel', (['v'], {'ylabel': '"""Membrane potential (mV)"""', 'xticks': '(True)', 'xlabel': '"""Time (ms)"""', 'yticks': '(True)'}), "(v, ylabel='Membrane potential (mV)', xticks=True, xlabel='Time (ms)',\n yticks=True)\n", (1505, 1592), False, 'from pyNN.utility.plotting import Figure, Panel\n'), ((1610, 1648), 'pyNN.utility.plotting.Panel', 'Panel', (['u'], {'ylabel': '"""u variable (units?)"""'}), "(u, ylabel='u variable (units?)')\n", (1615, 1648), False, 'from pyNN.utility.plotting import Figure, Panel\n')]
|
'''
utility functions
'''
__author__ = '<NAME>'
import os
from os.path import join
from os.path import abspath
import json
import pandas as pd
import numpy as np
from configs import config as cf
def is_available(filename):
'''
[filename] : str
'''
return os.path.isfile(filename)
def chunks(lst, n):
'''
Yield successive n-sized chunks from list
[lst] : python list
[n] : int
'''
for i in range(0, len(lst), n):
yield lst[i:i + n]
def read_intent_dataset(verbose=True):
'''
Load 'Intent' dataset
[verbose] : bool, verbosity level
'''
# read as a pandas dataframe
data = []
for lang in ['en', 'es', 'fr']:
for ds in ['train', 'test', 'eval']:
path = abspath(join(cf.INTENT_DIR, lang, '{}.tsv'.format(ds)))
df = pd.read_csv(path, header=None, sep='\t',
names=['text', 'class'])
data.append(df)
data = pd.concat(data)
# merge certain categories (see configs.py) and rename columns
data['class'] = data['class'].replace(cf.intent_label_map)
# remove trivial (too easy) categories
for cat in ['hi', 'okay_thanks']:
data = data[data['class'] != 'intent:{}'.format(cat)]
if verbose:
print('\t"Intent" data shape={}'.format(data.shape))
return data
def read_questions_dataset(verbose=True):
'''
Load 'Questions' dataset
[verbose] : bool, verbosity level
'''
# read as a pandas dataframe
data_path = abspath(join(cf.QUESTIONS_DIR, 'final_master_dataset.csv'))
data = pd.read_csv(data_path, delimiter=',',
usecols=['Question', 'Category'])
data.rename(columns={'Question': 'text', 'Category': 'class'},
inplace=True)
data = data[~data['class'].isna()] # remove unannotated rows
# split label into class and subclass, keep only class
data[['class', 'subclass']] = data['class'].str.split('-', 1, expand=True)
data['class'] = data['class'].str.strip()
data.drop(['subclass'], axis=1, inplace=True)
data = data[[i in cf.questions_relevant_categories for i in data['class']]]
if verbose:
print('\t"Questions" data shape={}'.format(data.shape))
return data
def merge_datasets(embeddings='labse', verbose=True):
'''
Merge 'Intent' and 'Questions' datasets
[embeddings] : str, type of embeddings to load ('bert' or 'labse')
[verbose] : bool, verbosity level
'''
# load datasets
intent = read_intent_dataset(verbose=False)
questions = read_questions_dataset(verbose=False)
merged = pd.concat([intent, questions])
# load corresponding embeddings
if embeddings == 'labse':
emb_to_load = (cf.intent_embeddings, cf.questions_embeddings)
elif embeddings == 'bert':
emb_to_load = (cf.intent_embeddings_bert, cf.questions_embeddings_bert)
else:
raise ValueError("embeddings argument can be 'bert' or 'labse'")
print(f'{embeddings} embeddings loaded.')
intent_embeddings = np.load(abspath(join(cf.INTENT_DIR, emb_to_load[0])))
questions_embeddings = np.load(abspath(join(cf.QUESTIONS_DIR,
emb_to_load[1])))
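    # Embedding rows follow the same order as the concatenated dataframes;
    # the assert below checks that the row counts match.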
merged_embeddings = np.vstack([intent_embeddings, questions_embeddings])
assert merged.shape[0] == merged_embeddings.shape[0]
if verbose:
print('Full data shape={}'.format(merged.shape))
return merged, merged_embeddings
# _____________ Logging related functions _____________
def convert(o):
if isinstance(o, np.int64):
return int(o)
raise TypeError
def save_logs(logs_dict, dict_name):
'''
Save best hyperparameters dictionary to "logs" directory
[logs_dict] : dict
[dict_name] : str
'''
json.dump(logs_dict,
open('{}/{}.json'.format(cf.LOGS_DIR,
dict_name),
'w'), default=convert)
print('Best hyper-parameters saved...')
return None
def load_logs(dict_name):
'''
Load best hyperparameters dictionary from "logs" directory
[dict_name] : str
'''
log_path = '{}/{}.json'.format(cf.LOGS_DIR, dict_name)
if not is_available(log_path):
raise ValueError('Hyperparameters are not available. '
'Please run train.py in "hyper_opt" mode before full '
'training.')
    with open(log_path) as logs_json:
logs = json.load(logs_json)
print('Best hyperparameters loaded...')
return logs
|
[
"json.load",
"pandas.read_csv",
"os.path.isfile",
"os.path.join",
"pandas.concat",
"numpy.vstack"
] |
[((278, 302), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (292, 302), False, 'import os\n'), ((965, 980), 'pandas.concat', 'pd.concat', (['data'], {}), '(data)\n', (974, 980), True, 'import pandas as pd\n'), ((1598, 1669), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {'delimiter': '""","""', 'usecols': "['Question', 'Category']"}), "(data_path, delimiter=',', usecols=['Question', 'Category'])\n", (1609, 1669), True, 'import pandas as pd\n'), ((2630, 2660), 'pandas.concat', 'pd.concat', (['[intent, questions]'], {}), '([intent, questions])\n', (2639, 2660), True, 'import pandas as pd\n'), ((3273, 3325), 'numpy.vstack', 'np.vstack', (['[intent_embeddings, questions_embeddings]'], {}), '([intent_embeddings, questions_embeddings])\n', (3282, 3325), True, 'import numpy as np\n'), ((1535, 1585), 'os.path.join', 'join', (['cf.QUESTIONS_DIR', '"""final_master_dataset.csv"""'], {}), "(cf.QUESTIONS_DIR, 'final_master_dataset.csv')\n", (1539, 1585), False, 'from os.path import join\n'), ((4490, 4510), 'json.load', 'json.load', (['logs_json'], {}), '(logs_json)\n', (4499, 4510), False, 'import json\n'), ((831, 896), 'pandas.read_csv', 'pd.read_csv', (['path'], {'header': 'None', 'sep': '"""\t"""', 'names': "['text', 'class']"}), "(path, header=None, sep='\\t', names=['text', 'class'])\n", (842, 896), True, 'import pandas as pd\n'), ((3079, 3114), 'os.path.join', 'join', (['cf.INTENT_DIR', 'emb_to_load[0]'], {}), '(cf.INTENT_DIR, emb_to_load[0])\n', (3083, 3114), False, 'from os.path import join\n'), ((3160, 3198), 'os.path.join', 'join', (['cf.QUESTIONS_DIR', 'emb_to_load[1]'], {}), '(cf.QUESTIONS_DIR, emb_to_load[1])\n', (3164, 3198), False, 'from os.path import join\n')]
|
"""
Test princomp extraction from CLI
"""
import argparse
import os
import numpy as np
from demo_utils import get_random_data
from hebbnets.networks import MultilayerHahNetwork
np.set_printoptions(suppress=True)
def _argparse():
parser = argparse.ArgumentParser(
prog="Testing HebbNet principal components",
description="Testing HebbNet principal components by decomposing random data"
)
parser.add_argument(
"--num_samples",
help="Number of samples for synthetic data",
default=25,
type=int,
required=False
)
parser.add_argument(
"--data_dimension",
help="Dimension of synthetic data",
default=100,
type=int,
required=False
)
parser.add_argument(
"--data_latent_dimension",
help="Latent dimension of synthetic data",
default=3,
type=int,
required=False
)
parser.add_argument(
"--num_pc",
        help="Number of principal components to extract",
default=2,
type=int,
required=False
)
return parser.parse_args()
def get_top_princomps(data_array, num_pcs):
U, S, V = np.linalg.svd(np.array(data_array))
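    # np.argsort is ascending, so the last num_pcs indices pick the largest singular values;
    # the corresponding rows of V (right singular vectors) are the top principal directions.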
_idx = np.argsort(S)[-num_pcs:]
return V[_idx, :].T
def main(args):
# Make data
demo_data = get_random_data(
args.num_samples,
args.data_dimension,
latent_dim=args.data_latent_dimension
)
# Build/train network
hah_network = MultilayerHahNetwork(
args.data_dimension,
[args.num_pc],
has_bias=False,
act_type='linear',
)
hah_network.train(demo_data, num_epochs=1000)
# Build/train network
real_princomps = get_top_princomps(demo_data, args.num_pc)
hebb_princomps = np.squeeze(hah_network.layers[0].input_weights)
hebb_princomps /= np.linalg.norm(hebb_princomps, axis=0, keepdims=True)
# Show the inner product of top two PCs with learned input weights
    inner_prod_mat = real_princomps.T @ hebb_princomps
prod_as_string = np.array_str(
inner_prod_mat,
suppress_small=True,
precision=4
)
print(np.array_str(inner_prod_mat, precision=4))
if __name__ == "__main__":
args = _argparse()
main(args)
|
[
"numpy.set_printoptions",
"demo_utils.get_random_data",
"argparse.ArgumentParser",
"hebbnets.networks.MultilayerHahNetwork",
"numpy.array_str",
"numpy.argsort",
"numpy.linalg.norm",
"numpy.array",
"numpy.squeeze"
] |
[((186, 220), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (205, 220), True, 'import numpy as np\n'), ((254, 410), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""Testing HebbNet principal components"""', 'description': '"""Testing HebbNet principal components by decomposing random data"""'}), "(prog='Testing HebbNet principal components',\n description=\n 'Testing HebbNet principal components by decomposing random data')\n", (277, 410), False, 'import argparse\n'), ((1348, 1446), 'demo_utils.get_random_data', 'get_random_data', (['args.num_samples', 'args.data_dimension'], {'latent_dim': 'args.data_latent_dimension'}), '(args.num_samples, args.data_dimension, latent_dim=args.\n data_latent_dimension)\n', (1363, 1446), False, 'from demo_utils import get_random_data\n'), ((1517, 1612), 'hebbnets.networks.MultilayerHahNetwork', 'MultilayerHahNetwork', (['args.data_dimension', '[args.num_pc]'], {'has_bias': '(False)', 'act_type': '"""linear"""'}), "(args.data_dimension, [args.num_pc], has_bias=False,\n act_type='linear')\n", (1537, 1612), False, 'from hebbnets.networks import MultilayerHahNetwork\n'), ((1810, 1857), 'numpy.squeeze', 'np.squeeze', (['hah_network.layers[0].input_weights'], {}), '(hah_network.layers[0].input_weights)\n', (1820, 1857), True, 'import numpy as np\n'), ((1880, 1933), 'numpy.linalg.norm', 'np.linalg.norm', (['hebb_princomps'], {'axis': '(0)', 'keepdims': '(True)'}), '(hebb_princomps, axis=0, keepdims=True)\n', (1894, 1933), True, 'import numpy as np\n'), ((2089, 2151), 'numpy.array_str', 'np.array_str', (['inner_prod_mat'], {'suppress_small': '(True)', 'precision': '(4)'}), '(inner_prod_mat, suppress_small=True, precision=4)\n', (2101, 2151), True, 'import numpy as np\n'), ((1215, 1235), 'numpy.array', 'np.array', (['data_array'], {}), '(data_array)\n', (1223, 1235), True, 'import numpy as np\n'), ((1248, 1261), 'numpy.argsort', 'np.argsort', (['S'], {}), '(S)\n', (1258, 1261), True, 'import numpy as np\n'), ((2193, 2234), 'numpy.array_str', 'np.array_str', (['inner_prod_mat'], {'precision': '(4)'}), '(inner_prod_mat, precision=4)\n', (2205, 2234), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
## Author: <NAME>
## Copyright: Copyright 2018-2019, Packt Publishing Limited
## Version: 0.0.1
## Maintainer: <NAME>
## Email: <EMAIL>
## Linkedin: https://www.linkedin.com/in/linus1/
## Contributor : {if you debug, append your name here}
## Contributor Email : {if you debug, append your email here}
## Status: active
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
np.random.seed(0)
def true_fun(X):
"""
    Given X, return its mapping to Y using the function np.cos(1.5 * np.pi * X).
:param X:
:return:
"""
return np.cos(1.5 * np.pi * X)
if __name__ == '__main__':
n_samples = 30
degrees = [1, 3, 9, 15]
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
"""
Evaluating and plotting for each degree of freedom
"""
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using cross-validation
scores = cross_val_score(pipeline, X[:, np.newaxis], y,
scoring="neg_mean_squared_error", cv=10)
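        # neg_mean_squared_error returns the negated MSE, hence the sign flip when
        # reporting TEST MSE in the plot title below.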
X_test = np.linspace(0, 1, 100)
# predicting on test data
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
# plotting the True and predicted function
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, edgecolor='b', s=20, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\n TEST MSE = {:.2e}".format(
degrees[i], -scores.mean()))
plt.show()
|
[
"matplotlib.pyplot.xlim",
"sklearn.pipeline.Pipeline",
"numpy.random.seed",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"numpy.random.randn",
"sklearn.model_selection.cross_val_score",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.setp",
"sklearn.linear_model.LinearRegression",
"sklearn.preprocessing.PolynomialFeatures",
"matplotlib.pyplot.figure",
"numpy.cos",
"numpy.linspace",
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((618, 635), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (632, 635), True, 'import numpy as np\n'), ((804, 827), 'numpy.cos', 'np.cos', (['(1.5 * np.pi * X)'], {}), '(1.5 * np.pi * X)\n', (810, 827), True, 'import numpy as np\n'), ((1018, 1045), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 5)'}), '(figsize=(14, 5))\n', (1028, 1045), True, 'import matplotlib.pyplot as plt\n'), ((2428, 2438), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2436, 2438), True, 'import matplotlib.pyplot as plt\n'), ((928, 953), 'numpy.random.rand', 'np.random.rand', (['n_samples'], {}), '(n_samples)\n', (942, 953), True, 'import numpy as np\n'), ((1226, 1260), 'matplotlib.pyplot.setp', 'plt.setp', (['ax'], {'xticks': '()', 'yticks': '()'}), '(ax, xticks=(), yticks=())\n', (1234, 1260), True, 'import matplotlib.pyplot as plt\n'), ((1294, 1351), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', ([], {'degree': 'degrees[i]', 'include_bias': '(False)'}), '(degree=degrees[i], include_bias=False)\n', (1312, 1351), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((1431, 1449), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1447, 1449), False, 'from sklearn.linear_model import LinearRegression\n'), ((1470, 1573), 'sklearn.pipeline.Pipeline', 'Pipeline', (["[('polynomial_features', polynomial_features), ('linear_regression',\n linear_regression)]"], {}), "([('polynomial_features', polynomial_features), (\n 'linear_regression', linear_regression)])\n", (1478, 1573), False, 'from sklearn.pipeline import Pipeline\n'), ((1716, 1808), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['pipeline', 'X[:, np.newaxis]', 'y'], {'scoring': '"""neg_mean_squared_error"""', 'cv': '(10)'}), "(pipeline, X[:, np.newaxis], y, scoring=\n 'neg_mean_squared_error', cv=10)\n", (1731, 1808), False, 'from sklearn.model_selection import cross_val_score\n'), ((1858, 1880), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (1869, 1880), True, 'import numpy as np\n'), ((2131, 2186), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X', 'y'], {'edgecolor': '"""b"""', 's': '(20)', 'label': '"""Samples"""'}), "(X, y, edgecolor='b', s=20, label='Samples')\n", (2142, 2186), True, 'import matplotlib.pyplot as plt\n'), ((2196, 2211), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (2206, 2211), True, 'import matplotlib.pyplot as plt\n'), ((2221, 2236), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (2231, 2236), True, 'import matplotlib.pyplot as plt\n'), ((2246, 2262), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0, 1)'], {}), '((0, 1))\n', (2254, 2262), True, 'import matplotlib.pyplot as plt\n'), ((2272, 2289), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-2, 2)'], {}), '((-2, 2))\n', (2280, 2289), True, 'import matplotlib.pyplot as plt\n'), ((2299, 2321), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (2309, 2321), True, 'import matplotlib.pyplot as plt\n'), ((978, 1004), 'numpy.random.randn', 'np.random.randn', (['n_samples'], {}), '(n_samples)\n', (993, 1004), True, 'import numpy as np\n')]
|
import numpy as np
import argparse
from utils import Audio
def sample_wav_audio(path):
audio = Audio()
mel = audio.audio_to_mel(path)
samples = audio.mel_sample(mel, width=128, k=5)
return samples
def save_embeddings(name, samples):
audio = Audio()
avg_embed = np.zeros(256, dtype=np.float32)
for mel in samples:
embed = audio.mel_to_embed(mel)
avg_embed += embed
avg_embed = avg_embed / 5
np.save(f'./embeddings/{name}.npy', avg_embed)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--path', action='store', type=str, required=True)
parser.add_argument('--name', action='store', type=str, required=True)
args = parser.parse_args()
samples = sample_wav_audio(args.path)
save_embeddings(args.name, samples)
|
[
"utils.Audio",
"numpy.save",
"numpy.zeros",
"argparse.ArgumentParser"
] |
[((100, 107), 'utils.Audio', 'Audio', ([], {}), '()\n', (105, 107), False, 'from utils import Audio\n'), ((263, 270), 'utils.Audio', 'Audio', ([], {}), '()\n', (268, 270), False, 'from utils import Audio\n'), ((287, 318), 'numpy.zeros', 'np.zeros', (['(256)'], {'dtype': 'np.float32'}), '(256, dtype=np.float32)\n', (295, 318), True, 'import numpy as np\n'), ((448, 494), 'numpy.save', 'np.save', (['f"""./embeddings/{name}.npy"""', 'avg_embed'], {}), "(f'./embeddings/{name}.npy', avg_embed)\n", (455, 494), True, 'import numpy as np\n'), ((536, 561), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (559, 561), False, 'import argparse\n')]
|
import argparse
import logging
import os
import pickle
import random
import ujson
import sys
import math
from ctypes import c_ulong
from multiprocessing import Array, Queue
from multiprocessing.sharedctypes import RawArray
from queue import Empty
from time import time
import numpy as np
import resource
from scipy.sparse import csr_matrix
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from sklearn.metrics.pairwise import cosine_similarity
from data.bug_report_database import BugReportDatabase
from data.preprocessing import concatenateSummaryAndDescription
from experiments.sparse_vector import TokenizerStemmer
from nltk import TreebankWordTokenizer, SnowballStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
def loadData(filePath):
f = open(filePath, 'r')
bugIds = set()
duplicateByBugId = {}
pairs = []
for l in f:
l = l.strip()
if len(l) == 0:
break
bug1Id, bug2Id, label = l.split(',')
label = int(label)
pairs.append((bug1Id, bug2Id, label))
bugIds.add(bug1Id)
bugIds.add(bug2Id)
if label == 1:
duplicateBug1List = duplicateByBugId.get(bug1Id, set())
if len(duplicateBug1List) == 0:
duplicateByBugId[bug1Id] = duplicateBug1List
duplicateBug1List.add(bug2Id)
duplicateBug2List = duplicateByBugId.get(bug2Id, set())
if len(duplicateBug2List) == 0:
duplicateByBugId[bug2Id] = duplicateBug2List
duplicateBug2List.add(bug1Id)
return bugIds, duplicateByBugId, pairs, ujson.loads(f.readline())['validations']
class Obj(object):
def __init__(self, dict):
for k, v in dict.items():
setattr(self, k, v)
def predictDeepLearningModel(bugEmbeddingsById, validationPairs):
batchSize = 1024
predictions = []
nBatches = math.ceil(float(len(validationPairs)) / batchSize)
firstBugPairs = []
secondBugPairs = []
for bug1, bug2 in validationPairs:
firstBugPairs.append(bugEmbeddingsById[bug1])
secondBugPairs.append(bugEmbeddingsById[bug2])
for batchIdx in range(nBatches):
batchStart = batchIdx * batchSize
bug1s = getVariable(torch.stack(firstBugPairs[batchStart: batchStart + batchSize]), args.cuda)
bug2s = getVariable(torch.stack(secondBugPairs[batchStart: batchStart + batchSize]), args.cuda)
if arguments.model == 'retrieval':
predictionInput = [bug1s, bug2s]
elif arguments.model == 'classification':
predictionInput = model[1](bug1s, bug2s)
output = predictionFunction(predictionInput).data.cpu().numpy()
for pr in output:
if isinstance(pr, (np.float32, np.uint8)):
predictions.append(pr)
else:
predictions.append(pr[-1])
return predictions
def parallel(start, duplicateBugs, q):
logger = logging.getLogger()
c = time()
logger.info(
"Process %s started to compute the similarity for %d duplicate bugs. Start idx: %d" % (os.getpid(), len(duplicateBugs), start))
for i, db in enumerate(duplicateBugs):
q.put([start + i, calculateSimiliratyScoreTFIDF(str(db), vectorByBug, bugIds)])
if i % 20 == 0 and i != 0:
logger.info("TF-IDF: Process %s processed %d Duplicate bug of %d in %f" % (
os.getpid(), i, len(duplicateBugs), time() - c))
c = time()
q.put([-1, None])
def calculateSimiliratyScoreTFIDF(duplicateBug, vectorByBug, bugIds):
batchSize = 1024
nPairs = len(bugIds)
nBatches = math.ceil(float(nPairs) / batchSize)
bugEmbedding1 = vectorByBug[duplicateBug]
similarityScores = []
nbDim = bugEmbedding1.shape[1]
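    # For each batch, the duplicate bug's sparse vector is repeated into one CSR matrix and the
    # candidate bugs form another; the diagonal of their pairwise cosine-similarity matrix gives
    # the score of each aligned (duplicate, candidate) pair.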
for batchIdx in range(nBatches):
batchStart = batchIdx * batchSize
data1 = []
indices1 = []
ptrs1 = [0]
data2 = []
indices2 = []
ptrs2 = [0]
for otherBug in bugIds[batchStart: batchStart + batchSize]:
data1.extend(bugEmbedding1.data)
indices1.extend(bugEmbedding1.indices)
ptrs1.append(len(indices1))
bugEmbedding2 = vectorByBug[otherBug]
data2.extend(bugEmbedding2.data)
indices2.extend(bugEmbedding2.indices)
ptrs2.append(len(indices2))
matrix1 = csr_matrix((data1, indices1, ptrs1), shape=(len(ptrs1) - 1, nbDim))
matrix2 = csr_matrix((data2, indices2, ptrs2), shape=(len(ptrs2) - 1, nbDim))
score = cosine_similarity(matrix1, matrix2)
for i in range(score.shape[0]):
similarityScores.append(score[i][i])
return similarityScores
def predictTFIDF(pairs):
batchSize = 8192
nPairs = len(pairs)
nBatches = math.ceil(float(nPairs) / batchSize)
similarityScores = []
for batchIdx in range(nBatches):
batchStart = batchIdx * batchSize
data1 = []
indices1 = []
ptrs1 = [0]
data2 = []
indices2 = []
ptrs2 = [0]
for bug1, bug2 in pairs[batchStart: batchStart + batchSize]:
bugEmbedding1 = vectorByBug[bug1]
data1.extend(bugEmbedding1.data)
indices1.extend(bugEmbedding1.indices)
ptrs1.append(len(indices1))
bugEmbedding2 = vectorByBug[bug2]
data2.extend(bugEmbedding2.data)
indices2.extend(bugEmbedding2.indices)
ptrs2.append(len(indices2))
nbDim = vectorByBug[bug1].shape[1]
pairBug1 = csr_matrix((data1, indices1, ptrs1), shape=(len(ptrs1) - 1, nbDim))
pairBug2 = csr_matrix((data2, indices2, ptrs2), shape=(len(ptrs2) - 1, nbDim))
score = cosine_similarity(pairBug1, pairBug2)
for i in range(score.shape[0]):
similarityScores.append(score[i][i])
return (np.asarray(similarityScores) > args.retrieval_threshold).astype(int)
def chunks(l, n):
chunkSize = int(len(l) / n)
remaining = len(l) % n
chunks = []
begin = 0
for i in range(n):
if remaining != 0:
additional = 1
remaining -= 1
else:
additional = 0
end = begin + chunkSize + additional
chunks.append(l[begin:end])
begin = end
return chunks
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--recall_ratio_k', nargs='+', required=True,
help="list of the values of k to be used in the recall ratio. If k is empty list so recall rate "
"is not calculated")
parser.add_argument('--model', help="model")
parser.add_argument('--model_type', help="model type")
parser.add_argument('--bug_dataset', help="")
parser.add_argument('--input', required=True)
parser.add_argument('--retrieval_threshold', type=float, default=None, help="")
parser.add_argument('--nb_processes', type=int, default=8, help="")
parser.add_argument('--cuda', action="store_true", help="enable cuda.")
logging.basicConfig(format='%(asctime)s %(levelname)-4s %(message)s', level=logging.DEBUG,
datefmt='%Y-%m-%d %H:%M:%S', )
logger = logging.getLogger()
args = parser.parse_args()
print(args)
global bugIds
args.recall_ratio_k = [int(k) for k in args.recall_ratio_k]
bugIds, duplicateByBugId, pairs, validations = loadData(args.input)
biggestValidation = validations[-1]
bugReportDataset = BugReportDatabase.fromJson(args.bug_dataset)
bugIds = list(bugIds)
similarityListByDuplicate = []
if args.model_type == 'tfidf':
# Load Model
global vectorByBug
vectorByBug = {}
tfIdfVectorizer = pickle.load(open(args.model, 'rb'))
# Generate bag of words representation for each bug
texts = [concatenateSummaryAndDescription(bugReportDataset.getBug(bugId)) for bugId in bugIds]
vectors = tfIdfVectorizer.transform(texts)
for idx, bugId in enumerate(bugIds):
vectorByBug[bugId] = vectors[idx]
else:
# We can't import torch without allocating a GPU in Cedar cluster.
from experiments.duplicate_bug_detection_deep_learning import generateBugEmbeddings, \
calculateSimilarityScoresDL, \
CosinePrediction, getDataHandlerLexiconEmb, getModel
import torch
import torch.nn.functional as F
from util.torch_util import softmaxPrediction, getVariable
from data.dataset import BugDataExtractor
# Load Model and DataHandlers
arguments = Obj({
'load': args.model,
'cuda': args.cuda,
'summary_bidirectional': False,
'classifier_hidden_size': 300,
'classifier_mul_dif': True
})
dataHandlers, lexicons, embeddings, arguments = getDataHandlerLexiconEmb(arguments)
encoderContainer, model = getModel(dataHandlers, lexicons, embeddings, arguments)
encoderContainer.eval()
model.eval()
# Set the similarity and prediction functions
if arguments.model == 'classification':
similarityFunction = model[1]
if args.cuda:
similarityFunction.cuda()
predictionFunction = softmaxPrediction
elif arguments.model == 'retrieval':
similarityFunction = F.cosine_similarity
predictionFunction = CosinePrediction(args.retrieval_threshold, args.cuda)
if args.cuda:
model.cuda()
encoderContainer.cuda()
# Generate the embedding for each bug
logger.info("Generating Embeddings")
dataExtractor = BugDataExtractor(bugReportDataset, dataHandlers)
bugEmbeddingsById = generateBugEmbeddings(bugIds, dataExtractor, encoderContainer)
# Start to calculate all duplicate pairs recommend list
c = time()
logger.info("Calculating similarity scores")
dupDictItems = duplicateByBugId.items()
if args.model_type == 'tfidf':
        # Calculating the score for tf-idf. We had to parallelize this step because the sequential version was too slow.
import multiprocessing
logger.info("Calculating cosine similarity of tf-idf model using %d processes" % (args.nb_processes))
funcArgs = []
duplicateBugs = [duplicateBug for duplicateBug, listOfDuplicates in dupDictItems]
q = Queue()
processes = []
similarityScoresList = [0] * len(duplicateBugs)
startToWrite = 0
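        # Split the duplicate bugs into one chunk per process; each worker puts (index, score list)
        # pairs on the queue and signals completion with (-1, None), which the loop below counts.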
for idx, chunk in enumerate(chunks(duplicateBugs, args.nb_processes)):
arr = RawArray(c_ulong, [int(bugId) for bugId in chunk])
processes.append(multiprocessing.Process(target=parallel, args=(startToWrite, arr, q)))
startToWrite += len(chunk)
for p in processes:
p.start()
count = 0
while True:
try:
id, scoreList = q.get()
if id == -1:
                    # Each process sends a tuple (-1, None) when it finishes its work.
                    count += 1
                    # Break the loop once all processes have terminated
if count == len(processes):
break
else:
similarityScoresList[id] = scoreList
except Empty as e:
pass
logger.info(
"Total time to calculate cosine similarity of %d duplicate bugs: %s " % (len(dupDictItems), time() - c))
c = time()
for i, (duplicateBug, listOfDuplicates) in enumerate(dupDictItems):
# Calculate the similarity score of duplicate bug with each bug
if args.model_type == 'tfidf':
similarityScores = similarityScoresList.pop(0)
else:
similarityScores = calculateSimilarityScoresDL(duplicateBug, similarityFunction, bugEmbeddingsById, bugIds,
args.cuda)
# Remove pair (duplicateBug, duplicateBug) and create tuples with bug id and its similarity score.
bugScores = [(bugId, score) for bugId, score in zip(bugIds, similarityScores) if bugId != duplicateBug]
# Sort in descending order the bugs by probability of being duplicate
similarityList = sorted(bugScores, key=lambda x: x[1], reverse=True)
similarityListByDuplicate.append((duplicateBug, [t[0] for t in similarityList]))
if i % 200 == 0 and i != 0:
logger.info("Processed %d Duplicate bug of %d in %f" % (i, len(duplicateByBugId), time() - c))
c = time()
    # For each validation proportion, we calculate the recall rate as well as precision, recall, and accuracy
recallKs = sorted([int(k) for k in args.recall_ratio_k])
biggestKValue = recallKs[-1]
total = len(duplicateByBugId)
for validation in validations:
logger.info("Calculating metrics to a validation with proportion: %d" % validation['k'])
        validationBugIds = {}
# Prepare data to prediction
validationPairs = []
targets = []
bugIdsOfValidation = set()
for pairIndex in validation['indexes']:
bug1, bug2, label = pairs[pairIndex]
validationPairs.append((bug1, bug2))
            validationBugIds[bug1] = True
            validationBugIds[bug2] = True
bugIdsOfValidation.add(bug1)
bugIdsOfValidation.add(bug2)
targets.append(max(0, label))
logger.debug("Amount of duplicate pairs: %d\tAmount of pairs: %d" % (
np.count_nonzero(np.asarray(targets)), len(targets)))
logger.debug("Amount of bugs: %d" % (len(bugIdsOfValidation)))
logger.info("Predicting pair labels: %d" % validation['k'])
if args.model_type == 'tfidf':
predictions = predictTFIDF(validationPairs)
else:
predictions = predictDeepLearningModel(bugEmbeddingsById, validationPairs)
# Calculate Recall Rate
hitsPerRateK = [0] * len(recallKs)
logger.info("Calculating Recall Rate")
for duplicateBug, similarityList in similarityListByDuplicate:
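            # pos starts one beyond the largest k, so a duplicate that is not found within
            # the top biggestKValue candidates counts as a miss for every k.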
pos = biggestKValue + 1
cur = 0
listOfDuplicates = duplicateByBugId[duplicateBug]
for bugId in similarityList:
if bugId not in bugIdsOfValidation:
continue
if bugId in listOfDuplicates:
pos = cur + 1
break
cur += 1
if cur >= biggestKValue:
break
for idx, k in enumerate(recallKs):
if k < pos:
continue
hitsPerRateK[idx] += 1
logger.info("Recall Rate Results:")
for k, hit in zip(recallKs, hitsPerRateK):
rate = float(hit) / total
logger.info("\t\t k=%d: %.3f (%d/%d) " % (k, rate, hit, total))
# Calculate Acc, precision, recall and f1
accum = accuracy_score(targets, predictions, normalize=False)
acc = accum / len(targets)
prec, recall, f1, _ = precision_recall_fscore_support(targets, predictions)
logger.info("Accuracy: %.3f (%d/%d)" % (acc * 100, accum, len(targets)))
logger.info("Precision: {}\tRecall: {}\tF1:{}".format(list(np.around(prec * 100, decimals=3)),
list(np.around(recall * 100, decimals=3)),
list(np.around(f1 * 100, decimals=3))))
logger.info("")
|
[
"argparse.ArgumentParser",
"experiments.duplicate_bug_detection_deep_learning.getDataHandlerLexiconEmb",
"sklearn.metrics.accuracy_score",
"numpy.around",
"multiprocessing.Queue",
"data.bug_report_database.BugReportDatabase.fromJson",
"experiments.duplicate_bug_detection_deep_learning.getModel",
"sklearn.metrics.precision_recall_fscore_support",
"sklearn.metrics.pairwise.cosine_similarity",
"experiments.duplicate_bug_detection_deep_learning.calculateSimilarityScoresDL",
"data.dataset.BugDataExtractor",
"numpy.asarray",
"experiments.duplicate_bug_detection_deep_learning.generateBugEmbeddings",
"experiments.duplicate_bug_detection_deep_learning.CosinePrediction",
"os.getpid",
"torch.stack",
"logging.basicConfig",
"time.time",
"multiprocessing.Process",
"logging.getLogger"
] |
[((2982, 3001), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2999, 3001), False, 'import logging\n'), ((3010, 3016), 'time.time', 'time', ([], {}), '()\n', (3014, 3016), False, 'from time import time\n'), ((6412, 6437), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6435, 6437), False, 'import argparse\n'), ((7125, 7249), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(levelname)-4s %(message)s"""', 'level': 'logging.DEBUG', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "(format='%(asctime)s %(levelname)-4s %(message)s', level\n =logging.DEBUG, datefmt='%Y-%m-%d %H:%M:%S')\n", (7144, 7249), False, 'import logging\n'), ((7285, 7304), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (7302, 7304), False, 'import logging\n'), ((7572, 7616), 'data.bug_report_database.BugReportDatabase.fromJson', 'BugReportDatabase.fromJson', (['args.bug_dataset'], {}), '(args.bug_dataset)\n', (7598, 7616), False, 'from data.bug_report_database import BugReportDatabase\n'), ((9985, 9991), 'time.time', 'time', ([], {}), '()\n', (9989, 9991), False, 'from time import time\n'), ((11632, 11638), 'time.time', 'time', ([], {}), '()\n', (11636, 11638), False, 'from time import time\n'), ((4602, 4637), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['matrix1', 'matrix2'], {}), '(matrix1, matrix2)\n', (4619, 4637), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((5784, 5821), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['pairBug1', 'pairBug2'], {}), '(pairBug1, pairBug2)\n', (5801, 5821), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((8945, 8980), 'experiments.duplicate_bug_detection_deep_learning.getDataHandlerLexiconEmb', 'getDataHandlerLexiconEmb', (['arguments'], {}), '(arguments)\n', (8969, 8980), False, 'from experiments.duplicate_bug_detection_deep_learning import generateBugEmbeddings, calculateSimilarityScoresDL, CosinePrediction, getDataHandlerLexiconEmb, getModel\n'), ((9015, 9070), 'experiments.duplicate_bug_detection_deep_learning.getModel', 'getModel', (['dataHandlers', 'lexicons', 'embeddings', 'arguments'], {}), '(dataHandlers, lexicons, embeddings, arguments)\n', (9023, 9070), False, 'from experiments.duplicate_bug_detection_deep_learning import generateBugEmbeddings, calculateSimilarityScoresDL, CosinePrediction, getDataHandlerLexiconEmb, getModel\n'), ((9776, 9824), 'data.dataset.BugDataExtractor', 'BugDataExtractor', (['bugReportDataset', 'dataHandlers'], {}), '(bugReportDataset, dataHandlers)\n', (9792, 9824), False, 'from data.dataset import BugDataExtractor\n'), ((9853, 9915), 'experiments.duplicate_bug_detection_deep_learning.generateBugEmbeddings', 'generateBugEmbeddings', (['bugIds', 'dataExtractor', 'encoderContainer'], {}), '(bugIds, dataExtractor, encoderContainer)\n', (9874, 9915), False, 'from experiments.duplicate_bug_detection_deep_learning import generateBugEmbeddings, calculateSimilarityScoresDL, CosinePrediction, getDataHandlerLexiconEmb, getModel\n'), ((10507, 10514), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (10512, 10514), False, 'from multiprocessing import Array, Queue\n'), ((15127, 15180), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['targets', 'predictions'], {'normalize': '(False)'}), '(targets, predictions, normalize=False)\n', (15141, 15180), False, 'from sklearn.metrics import accuracy_score, precision_recall_fscore_support\n'), ((15246, 15299), 
'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['targets', 'predictions'], {}), '(targets, predictions)\n', (15277, 15299), False, 'from sklearn.metrics import accuracy_score, precision_recall_fscore_support\n'), ((2278, 2339), 'torch.stack', 'torch.stack', (['firstBugPairs[batchStart:batchStart + batchSize]'], {}), '(firstBugPairs[batchStart:batchStart + batchSize])\n', (2289, 2339), False, 'import torch\n'), ((2381, 2443), 'torch.stack', 'torch.stack', (['secondBugPairs[batchStart:batchStart + batchSize]'], {}), '(secondBugPairs[batchStart:batchStart + batchSize])\n', (2392, 2443), False, 'import torch\n'), ((3507, 3513), 'time.time', 'time', ([], {}), '()\n', (3511, 3513), False, 'from time import time\n'), ((11926, 12029), 'experiments.duplicate_bug_detection_deep_learning.calculateSimilarityScoresDL', 'calculateSimilarityScoresDL', (['duplicateBug', 'similarityFunction', 'bugEmbeddingsById', 'bugIds', 'args.cuda'], {}), '(duplicateBug, similarityFunction,\n bugEmbeddingsById, bugIds, args.cuda)\n', (11953, 12029), False, 'from experiments.duplicate_bug_detection_deep_learning import generateBugEmbeddings, calculateSimilarityScoresDL, CosinePrediction, getDataHandlerLexiconEmb, getModel\n'), ((12710, 12716), 'time.time', 'time', ([], {}), '()\n', (12714, 12716), False, 'from time import time\n'), ((3129, 3140), 'os.getpid', 'os.getpid', ([], {}), '()\n', (3138, 3140), False, 'import os\n'), ((5925, 5953), 'numpy.asarray', 'np.asarray', (['similarityScores'], {}), '(similarityScores)\n', (5935, 5953), True, 'import numpy as np\n'), ((9522, 9575), 'experiments.duplicate_bug_detection_deep_learning.CosinePrediction', 'CosinePrediction', (['args.retrieval_threshold', 'args.cuda'], {}), '(args.retrieval_threshold, args.cuda)\n', (9538, 9575), False, 'from experiments.duplicate_bug_detection_deep_learning import generateBugEmbeddings, calculateSimilarityScoresDL, CosinePrediction, getDataHandlerLexiconEmb, getModel\n'), ((10797, 10866), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'parallel', 'args': '(startToWrite, arr, q)'}), '(target=parallel, args=(startToWrite, arr, q))\n', (10820, 10866), False, 'import multiprocessing\n'), ((15449, 15482), 'numpy.around', 'np.around', (['(prec * 100)'], {'decimals': '(3)'}), '(prec * 100, decimals=3)\n', (15458, 15482), True, 'import numpy as np\n'), ((15552, 15587), 'numpy.around', 'np.around', (['(recall * 100)'], {'decimals': '(3)'}), '(recall * 100, decimals=3)\n', (15561, 15587), True, 'import numpy as np\n'), ((15657, 15688), 'numpy.around', 'np.around', (['(f1 * 100)'], {'decimals': '(3)'}), '(f1 * 100, decimals=3)\n', (15666, 15688), True, 'import numpy as np\n'), ((3442, 3453), 'os.getpid', 'os.getpid', ([], {}), '()\n', (3451, 3453), False, 'import os\n'), ((11610, 11616), 'time.time', 'time', ([], {}), '()\n', (11614, 11616), False, 'from time import time\n'), ((13695, 13714), 'numpy.asarray', 'np.asarray', (['targets'], {}), '(targets)\n', (13705, 13714), True, 'import numpy as np\n'), ((3478, 3484), 'time.time', 'time', ([], {}), '()\n', (3482, 3484), False, 'from time import time\n'), ((12681, 12687), 'time.time', 'time', ([], {}), '()\n', (12685, 12687), False, 'from time import time\n')]
|
import glob
import numpy as np
from matplotlib import pyplot as plt
for filename in glob.glob("*.dat"):
print(filename)
name = filename.split(".")[0]
data = np.loadtxt(filename, delimiter=",")
size = int(np.sqrt(len(data)))
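    # The flat file is assumed to hold a square grid, so reshape it to size x size.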
data = data.reshape((size, size))
fig, ax = plt.subplots(figsize=(5.12, 5.12))
ax.imshow(data)
plt.tick_params(
bottom=False, left=False, right=False, top=False,
labelbottom=False, labelleft=False, labelright=False, labeltop=False
)
plt.tight_layout()
plt.savefig(name + ".png")
plt.close()
|
[
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots",
"numpy.loadtxt",
"glob.glob",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.savefig"
] |
[((85, 103), 'glob.glob', 'glob.glob', (['"""*.dat"""'], {}), "('*.dat')\n", (94, 103), False, 'import glob\n'), ((170, 205), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'delimiter': '""","""'}), "(filename, delimiter=',')\n", (180, 205), True, 'import numpy as np\n'), ((293, 327), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(5.12, 5.12)'}), '(figsize=(5.12, 5.12))\n', (305, 327), True, 'from matplotlib import pyplot as plt\n'), ((352, 491), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'bottom': '(False)', 'left': '(False)', 'right': '(False)', 'top': '(False)', 'labelbottom': '(False)', 'labelleft': '(False)', 'labelright': '(False)', 'labeltop': '(False)'}), '(bottom=False, left=False, right=False, top=False,\n labelbottom=False, labelleft=False, labelright=False, labeltop=False)\n', (367, 491), True, 'from matplotlib import pyplot as plt\n'), ((514, 532), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (530, 532), True, 'from matplotlib import pyplot as plt\n'), ((537, 563), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(name + '.png')"], {}), "(name + '.png')\n", (548, 563), True, 'from matplotlib import pyplot as plt\n'), ((568, 579), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (577, 579), True, 'from matplotlib import pyplot as plt\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 6 00:25:27 2017
@author: Wayne
"""
import pandas as pd
import xgboost as xgb
import numpy as np
from sklearn.model_selection import train_test_split
import pickle
#%%
mydf1= mydf[outliers.outliers==False]
z = np.log(data.trip_duration+1)
X = mydf1
Xtest = testdf
data_test = xgb.DMatrix(Xtest)
#%%
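# rmse on the log(trip_duration + 1) targets corresponds to the RMSLE of the raw durations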
rmse = lambda z,zp:np.sqrt(np.mean((z-zp)**2))
#%%
parms = {'max_depth':14, #maximum depth of a tree
'objective':'reg:linear',
'eta' :0.025,
'subsample':0.8,#SGD will use this percentage of data
         'lambda':4, #L2 regularization term; >1 is more conservative
         'colsample_bytree':0.9,
'colsample_bylevel':1,
'min_child_weight': 10,
'nthread' :3} #number of cpu core to use
#%% split training set to validation set
Xtrain, Xval, Ztrain, Zval = train_test_split(X, z, test_size=0.2, random_state=1)
#Xcv,Xv,Zcv,Zv = train_test_split(Xval, Zval, test_size=0.5, random_state=1)
data_tr = xgb.DMatrix(Xtrain, label=Ztrain)
data_val = xgb.DMatrix(Xval , label=Zval)
evallist = [(data_tr, 'train'), (data_val, 'valid')]
model = xgb.train(parms, data_tr, num_boost_round=881, evals = evallist,
early_stopping_rounds=30, maximize=False,
verbose_eval=100)
print('score = %1.5f, n_boost_round =%d.'%(model.best_score,model.best_iteration))
#%% training all the data
data_train = xgb.DMatrix(X, label=z)
evallist = [(data_train, 'train')]
model = xgb.train(parms, data_train, num_boost_round=880, evals = evallist,
maximize=False,
verbose_eval=100)
#%%
#%%
ztest = model.predict(data_test)
#%%
ytest = np.exp(ztest)-1
submission = pd.DataFrame({'id': test.id, 'trip_duration': ytest})
submission.to_csv('submission_1.csv', index=False)
#%%
with open('filename.pickle', 'rb') as handle:
b = pickle.load(handle)
#%%
for d in (mydf,testdf):
print(d.Temp.mean())
#%%
print('Id is unique.') if train.id.nunique() == train.shape[0] else print('oops')
print('Train and test sets are distinct.') if len(np.intersect1d(train.id.values, test.id.values))== 0 else print('oops')
print('We do not need to worry about missing values.') if train.count().min() == train.shape[0] and test.count().min() == test.shape[0] else print('oops')
print('The store_and_fwd_flag has only two values {}.'.format(str(set(train.store_and_fwd_flag.unique()) | set(test.store_and_fwd_flag.unique()))))
#%% Kmeans
from sklearn.cluster import MiniBatchKMeans
coords = np.vstack((mydf[['pickup_latitude', 'pickup_longitude']].values,
mydf[['dropoff_latitude', 'dropoff_longitude']].values,
testdf[['pickup_latitude', 'pickup_longitude']].values,
testdf[['dropoff_latitude', 'dropoff_longitude']].values))
sample_ind = np.random.permutation(len(coords))[:500000]
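# Fit k-means on a 500k-point subsample of all pickup/dropoff coordinates, then label each
# trip's pickup and dropoff location with its nearest cluster id.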
kmeans = MiniBatchKMeans(n_clusters=20, batch_size=10000).fit(coords[sample_ind])
for df in (mydf,testdf):
df.loc[:, 'pickup_loc'] = kmeans.predict(df[['pickup_latitude', 'pickup_longitude']])
df.loc[:, 'dropoff_loc'] = kmeans.predict(df[['dropoff_latitude', 'dropoff_longitude']])
#%%
train_loc = [None]*2;test_loc=[None]*2
for i,loc in enumerate(['pickup_loc','dropoff_loc']):
train_loc[i]= pd.get_dummies(mydf[loc], prefix=loc, prefix_sep='_')
test_loc[i] = pd.get_dummies(testdf[loc], prefix=loc, prefix_sep='_')
train_loc = pd.concat(train_loc,axis=1)
test_loc = pd.concat(test_loc,axis=1)
#%%
mydf1 = pd.concat([mydf,train_loc],axis = 1)
testdf1 = pd.concat([testdf,test_loc],axis = 1)
#%%
mydf1 = mydf1[mydf1['outliers']==False]
mydf1 = mydf1.drop(['id','outliers'],axis=1)
z = mydf1.log_trip_duration
X = mydf1.drop(['log_trip_duration'],axis=1)
Xtest = testdf1.drop('id',axis=1)
#%%
X = X.drop(['pickup_loc','dropoff_loc'],axis=1)
#%%
Xtest=Xtest.drop(['pickup_loc','dropoff_loc'],axis=1)
|
[
"pandas.DataFrame",
"sklearn.cluster.MiniBatchKMeans",
"numpy.log",
"xgboost.train",
"sklearn.model_selection.train_test_split",
"pandas.get_dummies",
"pickle.load",
"numpy.mean",
"numpy.exp",
"numpy.intersect1d",
"pandas.concat",
"xgboost.DMatrix",
"numpy.vstack"
] |
[((273, 303), 'numpy.log', 'np.log', (['(data.trip_duration + 1)'], {}), '(data.trip_duration + 1)\n', (279, 303), True, 'import numpy as np\n'), ((342, 360), 'xgboost.DMatrix', 'xgb.DMatrix', (['Xtest'], {}), '(Xtest)\n', (353, 360), True, 'import xgboost as xgb\n'), ((899, 952), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'z'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(X, z, test_size=0.2, random_state=1)\n', (915, 952), False, 'from sklearn.model_selection import train_test_split\n'), ((1043, 1076), 'xgboost.DMatrix', 'xgb.DMatrix', (['Xtrain'], {'label': 'Ztrain'}), '(Xtrain, label=Ztrain)\n', (1054, 1076), True, 'import xgboost as xgb\n'), ((1090, 1119), 'xgboost.DMatrix', 'xgb.DMatrix', (['Xval'], {'label': 'Zval'}), '(Xval, label=Zval)\n', (1101, 1119), True, 'import xgboost as xgb\n'), ((1188, 1314), 'xgboost.train', 'xgb.train', (['parms', 'data_tr'], {'num_boost_round': '(881)', 'evals': 'evallist', 'early_stopping_rounds': '(30)', 'maximize': '(False)', 'verbose_eval': '(100)'}), '(parms, data_tr, num_boost_round=881, evals=evallist,\n early_stopping_rounds=30, maximize=False, verbose_eval=100)\n', (1197, 1314), True, 'import xgboost as xgb\n'), ((1480, 1503), 'xgboost.DMatrix', 'xgb.DMatrix', (['X'], {'label': 'z'}), '(X, label=z)\n', (1491, 1503), True, 'import xgboost as xgb\n'), ((1551, 1655), 'xgboost.train', 'xgb.train', (['parms', 'data_train'], {'num_boost_round': '(880)', 'evals': 'evallist', 'maximize': '(False)', 'verbose_eval': '(100)'}), '(parms, data_train, num_boost_round=880, evals=evallist, maximize=\n False, verbose_eval=100)\n', (1560, 1655), True, 'import xgboost as xgb\n'), ((1784, 1837), 'pandas.DataFrame', 'pd.DataFrame', (["{'id': test.id, 'trip_duration': ytest}"], {}), "({'id': test.id, 'trip_duration': ytest})\n", (1796, 1837), True, 'import pandas as pd\n'), ((2610, 2860), 'numpy.vstack', 'np.vstack', (["(mydf[['pickup_latitude', 'pickup_longitude']].values, mydf[[\n 'dropoff_latitude', 'dropoff_longitude']].values, testdf[[\n 'pickup_latitude', 'pickup_longitude']].values, testdf[[\n 'dropoff_latitude', 'dropoff_longitude']].values)"], {}), "((mydf[['pickup_latitude', 'pickup_longitude']].values, mydf[[\n 'dropoff_latitude', 'dropoff_longitude']].values, testdf[[\n 'pickup_latitude', 'pickup_longitude']].values, testdf[[\n 'dropoff_latitude', 'dropoff_longitude']].values))\n", (2619, 2860), True, 'import numpy as np\n'), ((3526, 3554), 'pandas.concat', 'pd.concat', (['train_loc'], {'axis': '(1)'}), '(train_loc, axis=1)\n', (3535, 3554), True, 'import pandas as pd\n'), ((3567, 3594), 'pandas.concat', 'pd.concat', (['test_loc'], {'axis': '(1)'}), '(test_loc, axis=1)\n', (3576, 3594), True, 'import pandas as pd\n'), ((3610, 3646), 'pandas.concat', 'pd.concat', (['[mydf, train_loc]'], {'axis': '(1)'}), '([mydf, train_loc], axis=1)\n', (3619, 3646), True, 'import pandas as pd\n'), ((3658, 3695), 'pandas.concat', 'pd.concat', (['[testdf, test_loc]'], {'axis': '(1)'}), '([testdf, test_loc], axis=1)\n', (3667, 3695), True, 'import pandas as pd\n'), ((1754, 1767), 'numpy.exp', 'np.exp', (['ztest'], {}), '(ztest)\n', (1760, 1767), True, 'import numpy as np\n'), ((1951, 1970), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (1962, 1970), False, 'import pickle\n'), ((3380, 3433), 'pandas.get_dummies', 'pd.get_dummies', (['mydf[loc]'], {'prefix': 'loc', 'prefix_sep': '"""_"""'}), "(mydf[loc], prefix=loc, prefix_sep='_')\n", (3394, 3433), True, 'import pandas as pd\n'), ((3457, 3512), 'pandas.get_dummies', 
'pd.get_dummies', (['testdf[loc]'], {'prefix': 'loc', 'prefix_sep': '"""_"""'}), "(testdf[loc], prefix=loc, prefix_sep='_')\n", (3471, 3512), True, 'import pandas as pd\n'), ((394, 416), 'numpy.mean', 'np.mean', (['((z - zp) ** 2)'], {}), '((z - zp) ** 2)\n', (401, 416), True, 'import numpy as np\n'), ((2977, 3025), 'sklearn.cluster.MiniBatchKMeans', 'MiniBatchKMeans', ([], {'n_clusters': '(20)', 'batch_size': '(10000)'}), '(n_clusters=20, batch_size=10000)\n', (2992, 3025), False, 'from sklearn.cluster import MiniBatchKMeans\n'), ((2166, 2213), 'numpy.intersect1d', 'np.intersect1d', (['train.id.values', 'test.id.values'], {}), '(train.id.values, test.id.values)\n', (2180, 2213), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
import cPickle
import ipdb
class Detector():
def __init__(self,weight_file_path,n_labels):
self.image_mean=[103.939,116.779,123.68]
self.n_labels=n_labels
        with open(weight_file_path, 'rb') as f:
self.pretrained_weights=cPickle.load(f)
def get_weight(self,layer_name):
layer=self.pretrained_weights[layer_name]
return layer[0]
def get_bias(self,layer_name):
layer=self.pretrained_weights[layer_name]
return layer[1]
def get_conv_weight(self,name):
f=self.get_weight(name)
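        # The pickled weights appear to be stored as (out_ch, in_ch, h, w); reorder them
        # to TensorFlow's (h, w, in_ch, out_ch) layout.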
return f.transpose((2,3,1,0))
def conv_layer(self,bottom,name):
        with tf.variable_scope(name) as scope:
w=self.get_conv_weight(name)
b=self.get_bias(name)
conv_weights=tf.get_variable("W",shape=w.shape,initializer=tf.constant_initializer(w))
conv_biases=tf.get_variable("b",shape=b.shape,initializer=tf.constant_initializer(b))
conv=tf.nn.conv2d(bottom,conv_weights,[1,1,1,1],padding='SAME')
bias=tf.nn.bias_add(conv,conv_biases)
relu=tf.nn.relu(bias,name=name)
return relu
def new_conv_layer(self,bottom,filter_shape,name):
        with tf.variable_scope(name) as scope:
w=tf.get_variable("W",shape=filter_shape,initializer=tf.random_normal_initializer(0.,0.01))
b=tf.get_variable("b",shape=filter_shape[-1],initializer=tf.constant_initializer(0.))
conv=tf.nn.conv2d(bottom,w,[1,1,1,1],padding='SAME')
bias=tf.nn.bias_add(conv,b)
return bias
def fc_layer(self,bottom,name,create=False):
shape=bottom.get_shape().as_list()
dim=np.prod(shape[1:])
x=tf.reshape(bottom,[-1,dim])
cw=self.get_weight(name)
b=self.get_bias(name)
if name=="fc6":
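            # fc6 weights are stored for a (C, H, W) flattening; reshape to (4096, 512, 7, 7),
            # move channels last and flatten again so they match TF's (H, W, C) ordering
            # of the pooled feature map.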
cw=cw.reshape((4096,512,7,7))
cw=cw.transpose((2,3,1,0))
cw=cw.reshape((25088,4096))
else:
cw=cw.transpose((1,0))
        with tf.variable_scope(name) as scope:
cw=tf.get_variable("W",shape=cw.shape,initializer=tf.constant_initializer(cw))
b=tf.get_variable("b",shape=b.shape,initializer=tf.constant_initializer(b))
fc=tf.nn.bias_add(tf.matmul(x,cw),b,name=scope)
return fc
def new_fc_layer(self,bottom,input_size,output_size,name):
shape=bottom.get_shape().to_list()
dim=np.prod(shape[1:])
x=tf.reshape(bottom,[-1,dim])
        with tf.variable_scope(name) as scope:
w=tf.get_variable("W",shape=[input_size,output_size],initializer=tf.random_normal_initializer(0.,0.01))
b=tf.get_variable("b",shape=[output_size],initializer=tf.constant_initializer(0.))
fc=tf.nn.bias_add(tf.matmul(x,w),b,name=scope)
return fc
def inference(self,rgb,train=False):
rgb*=255.
r,g,b=tf.split(rgb,3,3)
bgr=tf.concat([b-self.image_mean[0],g-self.image_mean[1],r-self.image_mean[2]],3)
relu1_1=self.conv_layer(bgr,"conv1_1")
relu1_2=self.conv_layer(relu1_1,"conv1_2")
pool1=tf.nn.max_pool(relu1_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool1')
relu2_1=self.conv_layer(pool1,"conv2_1")
relu2_2=self.conv_layer(relu2_1,"conv2_2")
pool2=tf.nn.max_pool(relu2_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool2')
relu3_1=self.conv_layer(pool2,"conv3_1")
relu3_2=self.conv_layer(relu3_1,"conv3_2")
relu3_3=self.conv_layer(relu3_2,"conv3_3")
pool3=tf.nn.max_pool(relu3_3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool3')
relu4_1=self.conv_layer(pool3,"conv4_1")
relu4_2=self.conv_layer(relu4_1,"conv4_2")
relu4_3=self.conv_layer(relu4_2,"conv4_3")
pool4=tf.nn.max_pool(relu4_3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool4')
relu5_1=self.conv_layer(pool4,"conv5_1")
relu5_2=self.conv_layer(relu5_1,"conv5_2")
relu5_3=self.conv_layer(relu5_2,"conv5_3")
conv6=self.new_conv_layer(relu5_3,[3,3,512,1024],"conv6")
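        # Global Average Pooling: average conv6 over its spatial dimensions (height and width).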
gap=tf.reduce_mean(conv6,[1,2])
with tf.variable_scope("GAP"):
gap_w=tf.get_variable("W",shape=[1024,self.n_labels],initializer=tf.random_normal_initializer(0.,0.01))
output=tf.matmul(gap,gap_w)
return pool1,pool2,pool3,pool4,relu5_3,conv6,gap,output
def get_classmap(self,label,conv6):
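        # Class Activation Map: upsample conv6 to 224x224 and weight its 1024 channels by the
        # GAP classifier weights of the requested label.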
conv6_resized=tf.image.resize_bilinear(conv6,[224,224])
with tf.variable_scope("GAP",reuse=True):
label_w=tf.gather(tf.transpose(tf.get_variable("W")),label)
label_w=tf.reshape(label_w,[-1,1024,1])
conv6_resized=tf.reshape(conv6_resized,[-1,224*224,1024])
classmap=tf.matmul(conv6_resized,label_w)
classmap=tf.reshape(classmap,[-1,224,224])
return classmap
|
[
"tensorflow.nn.relu",
"tensorflow.constant_initializer",
"tensorflow.reshape",
"tensorflow.reduce_mean",
"tensorflow.concat",
"cPickle.load",
"tensorflow.variable_scope",
"tensorflow.nn.bias_add",
"tensorflow.nn.max_pool",
"tensorflow.matmul",
"tensorflow.get_variable",
"tensorflow.random_normal_initializer",
"tensorflow.nn.conv2d",
"tensorflow.split",
"numpy.prod",
"tensorflow.image.resize_bilinear"
] |
[((1521, 1539), 'numpy.prod', 'np.prod', (['shape[1:]'], {}), '(shape[1:])\n', (1528, 1539), True, 'import numpy as np\n'), ((1544, 1573), 'tensorflow.reshape', 'tf.reshape', (['bottom', '[-1, dim]'], {}), '(bottom, [-1, dim])\n', (1554, 1573), True, 'import tensorflow as tf\n'), ((2136, 2154), 'numpy.prod', 'np.prod', (['shape[1:]'], {}), '(shape[1:])\n', (2143, 2154), True, 'import numpy as np\n'), ((2159, 2188), 'tensorflow.reshape', 'tf.reshape', (['bottom', '[-1, dim]'], {}), '(bottom, [-1, dim])\n', (2169, 2188), True, 'import tensorflow as tf\n'), ((2540, 2559), 'tensorflow.split', 'tf.split', (['rgb', '(3)', '(3)'], {}), '(rgb, 3, 3)\n', (2548, 2559), True, 'import tensorflow as tf\n'), ((2564, 2655), 'tensorflow.concat', 'tf.concat', (['[b - self.image_mean[0], g - self.image_mean[1], r - self.image_mean[2]]', '(3)'], {}), '([b - self.image_mean[0], g - self.image_mean[1], r - self.\n image_mean[2]], 3)\n', (2573, 2655), True, 'import tensorflow as tf\n'), ((2736, 2836), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['relu1_2'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""', 'name': '"""pool1"""'}), "(relu1_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\n 'SAME', name='pool1')\n", (2750, 2836), True, 'import tensorflow as tf\n'), ((2918, 3018), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['relu2_2'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""', 'name': '"""pool2"""'}), "(relu2_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\n 'SAME', name='pool2')\n", (2932, 3018), True, 'import tensorflow as tf\n'), ((3145, 3245), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['relu3_3'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""', 'name': '"""pool3"""'}), "(relu3_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\n 'SAME', name='pool3')\n", (3159, 3245), True, 'import tensorflow as tf\n'), ((3372, 3472), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['relu4_3'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""', 'name': '"""pool4"""'}), "(relu4_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\n 'SAME', name='pool4')\n", (3386, 3472), True, 'import tensorflow as tf\n'), ((3657, 3686), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['conv6', '[1, 2]'], {}), '(conv6, [1, 2])\n', (3671, 3686), True, 'import tensorflow as tf\n'), ((3834, 3855), 'tensorflow.matmul', 'tf.matmul', (['gap', 'gap_w'], {}), '(gap, gap_w)\n', (3843, 3855), True, 'import tensorflow as tf\n'), ((3966, 4009), 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['conv6', '[224, 224]'], {}), '(conv6, [224, 224])\n', (3990, 4009), True, 'import tensorflow as tf\n'), ((4174, 4222), 'tensorflow.reshape', 'tf.reshape', (['conv6_resized', '[-1, 224 * 224, 1024]'], {}), '(conv6_resized, [-1, 224 * 224, 1024])\n', (4184, 4222), True, 'import tensorflow as tf\n'), ((4229, 4262), 'tensorflow.matmul', 'tf.matmul', (['conv6_resized', 'label_w'], {}), '(conv6_resized, label_w)\n', (4238, 4262), True, 'import tensorflow as tf\n'), ((4273, 4309), 'tensorflow.reshape', 'tf.reshape', (['classmap', '[-1, 224, 224]'], {}), '(classmap, [-1, 224, 224])\n', (4283, 4309), True, 'import tensorflow as tf\n'), ((289, 304), 'cPickle.load', 'cPickle.load', (['f'], {}), '(f)\n', (301, 304), False, 'import cPickle\n'), ((628, 651), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (645, 651), True, 'import tensorflow as tf\n'), ((905, 969), 'tensorflow.nn.conv2d', 
'tf.nn.conv2d', (['bottom', 'conv_weights', '[1, 1, 1, 1]'], {'padding': '"""SAME"""'}), "(bottom, conv_weights, [1, 1, 1, 1], padding='SAME')\n", (917, 969), True, 'import tensorflow as tf\n'), ((972, 1005), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['conv', 'conv_biases'], {}), '(conv, conv_biases)\n', (986, 1005), True, 'import tensorflow as tf\n'), ((1013, 1040), 'tensorflow.nn.relu', 'tf.nn.relu', (['bias'], {'name': 'name'}), '(bias, name=name)\n', (1023, 1040), True, 'import tensorflow as tf\n'), ((1113, 1136), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (1130, 1136), True, 'import tensorflow as tf\n'), ((1338, 1391), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['bottom', 'w', '[1, 1, 1, 1]'], {'padding': '"""SAME"""'}), "(bottom, w, [1, 1, 1, 1], padding='SAME')\n", (1350, 1391), True, 'import tensorflow as tf\n'), ((1394, 1417), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['conv', 'b'], {}), '(conv, b)\n', (1408, 1417), True, 'import tensorflow as tf\n'), ((1776, 1799), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (1793, 1799), True, 'import tensorflow as tf\n'), ((2194, 2217), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (2211, 2217), True, 'import tensorflow as tf\n'), ((3692, 3716), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""GAP"""'], {}), "('GAP')\n", (3709, 3716), True, 'import tensorflow as tf\n'), ((4015, 4051), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""GAP"""'], {'reuse': '(True)'}), "('GAP', reuse=True)\n", (4032, 4051), True, 'import tensorflow as tf\n'), ((4126, 4160), 'tensorflow.reshape', 'tf.reshape', (['label_w', '[-1, 1024, 1]'], {}), '(label_w, [-1, 1024, 1])\n', (4136, 4160), True, 'import tensorflow as tf\n'), ((1991, 2007), 'tensorflow.matmul', 'tf.matmul', (['x', 'cw'], {}), '(x, cw)\n', (2000, 2007), True, 'import tensorflow as tf\n'), ((2441, 2456), 'tensorflow.matmul', 'tf.matmul', (['x', 'w'], {}), '(x, w)\n', (2450, 2456), True, 'import tensorflow as tf\n'), ((780, 806), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['w'], {}), '(w)\n', (803, 806), True, 'import tensorflow as tf\n'), ((869, 895), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['b'], {}), '(b)\n', (892, 895), True, 'import tensorflow as tf\n'), ((1202, 1241), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', (['(0.0)', '(0.01)'], {}), '(0.0, 0.01)\n', (1230, 1241), True, 'import tensorflow as tf\n'), ((1301, 1329), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (1324, 1329), True, 'import tensorflow as tf\n'), ((1862, 1889), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['cw'], {}), '(cw)\n', (1885, 1889), True, 'import tensorflow as tf\n'), ((1942, 1968), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['b'], {}), '(b)\n', (1965, 1968), True, 'import tensorflow as tf\n'), ((2295, 2334), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', (['(0.0)', '(0.01)'], {}), '(0.0, 0.01)\n', (2323, 2334), True, 'import tensorflow as tf\n'), ((2391, 2419), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (2414, 2419), True, 'import tensorflow as tf\n'), ((3786, 3825), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', (['(0.0)', '(0.01)'], {}), '(0.0, 0.01)\n', (3814, 3825), True, 'import tensorflow as tf\n'), ((4086, 4106), 'tensorflow.get_variable', 
'tf.get_variable', (['"""W"""'], {}), "('W')\n", (4101, 4106), True, 'import tensorflow as tf\n')]
|
import numpy as np
import os
import torch
import torch.nn as nn
import pytorch_lightning as pl
from data.VOCdevkit.vocdata import VOCDataset
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, Resize, ToTensor, Normalize, GaussianBlur
from torchvision.transforms.functional import InterpolationMode
from skimage.measure import label
class EvaluateAttnMaps(pl.callbacks.Callback):
def __init__(self,
voc_root: str,
train_input_height: int,
attn_batch_size: int,
num_workers: int,
threshold: float = 0.6):
        # Set up transforms and dataloader for Pascal VOC
image_transforms = Compose([Resize((train_input_height, train_input_height)),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
target_transforms = Compose([Resize((train_input_height, train_input_height),
interpolation=InterpolationMode.NEAREST),
ToTensor()])
self.dataset = VOCDataset(root=os.path.join(voc_root, "VOCSegmentation"), image_set="val",
transform=image_transforms, target_transform=target_transforms)
self.loader = DataLoader(self.dataset, batch_size=attn_batch_size, shuffle=False, num_workers=num_workers,
drop_last=True, pin_memory=True)
self.threshold = threshold
def on_validation_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
# Evaluate attention maps.
if pl_module.global_rank == 0 and pl_module.local_rank == 0:
print("\n" + "#" * 20 + "Evaluating attention maps on VOC2012 with threshold: " +
str(self.threshold) + "#" * 20)
jacs_merged_attn = 0
jacs_all_heads = 0
# If teacher is present use teacher attention as it is also used during training
if hasattr(pl_module, 'teacher'):
patch_size = pl_module.teacher.patch_size
model = pl_module.teacher
else:
patch_size = pl_module.model.patch_size
model = pl_module.model
model.eval()
for i, (imgs, maps) in enumerate(self.loader):
w_featmap = imgs.shape[-2] // patch_size
h_featmap = imgs.shape[-1] // patch_size
with torch.no_grad():
attentions = model.get_last_selfattention(imgs.to(pl_module.device))
bs = attentions.shape[0]
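                # keep only the CLS token's attention to the patch tokens
                # (query index 0, key indices 1:), one map per head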
attentions = attentions[..., 0, 1:]
# Evaluate two different protocols: merged attention and best head
jacs_merged_attn += self.evaluate_merged_attentions(attentions, bs, w_featmap, h_featmap, patch_size,
maps)
jacs_all_heads += self.evaluate_best_head(attentions, bs, w_featmap, h_featmap, patch_size, maps)
jacs_merged_attn /= len(self.dataset)
jacs_all_heads /= len(self.dataset)
print(f"Merged Jaccard on VOC12: {jacs_merged_attn.item()}")
print(f"All heads Jaccard on VOC12: {jacs_all_heads.item()}")
pl_module.logger.experiment.log_metric('attn_jacs_voc', jacs_merged_attn.item())
pl_module.logger.experiment.log_metric('all_heads_jacs_voc', jacs_all_heads.item())
def evaluate_best_head(self, attentions: torch.Tensor, bs: int, w_featmap: int, h_featmap: int, patch_size: int,
maps: torch.Tensor) -> torch.Tensor:
jacs = 0
nh = attentions.shape[1] # number of heads
# we keep only a certain percentage of the mass
val, idx = torch.sort(attentions)
val /= torch.sum(val, dim=-1, keepdim=True)
cumval = torch.cumsum(val, dim=-1)
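        # keep the smallest set of patches that together hold the top self.threshold fraction of the attention mass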
th_attn = cumval > (1 - self.threshold)
idx2 = torch.argsort(idx)
for head in range(nh):
th_attn[:, head] = torch.gather(th_attn[:, head], dim=1, index=idx2[:, head])
th_attn = th_attn.reshape(bs, nh, w_featmap, h_featmap).float()
# interpolate
th_attn = nn.functional.interpolate(th_attn, scale_factor=patch_size, mode="nearest").cpu().numpy()
# Calculate IoU for each image
for k, map in enumerate(maps):
jac = 0
objects = np.unique(map)
objects = np.delete(objects, [0, -1])
for o in objects:
masko = map == o
intersection = masko * th_attn[k]
intersection = torch.sum(torch.sum(intersection, dim=-1), dim=-1)
union = (masko + th_attn[k]) > 0
union = torch.sum(torch.sum(union, dim=-1), dim=-1)
jaco = intersection / union
jac += max(jaco)
if len(objects) != 0:
jac /= len(objects)
jacs += jac
return jacs
def evaluate_merged_attentions(self, attentions: torch.Tensor, bs: int, w_featmap: int, h_featmap: int,
patch_size: int, maps: torch.Tensor) -> torch.Tensor:
jacs = 0
# Average attentions
attentions = sum(attentions[:, i] * 1 / attentions.size(1) for i in range(attentions.size(1)))
nh = 1 # number of heads is one as we merged all heads
# Gaussian blurring
attentions = GaussianBlur(7, sigma=(.6))(attentions.reshape(bs * nh, 1, w_featmap, h_featmap))\
.reshape(bs, nh, -1)
# we keep only a certain percentage of the mass
val, idx = torch.sort(attentions)
val /= torch.sum(val, dim=-1, keepdim=True)
cumval = torch.cumsum(val, dim=-1)
th_attn = cumval > (1 - self.threshold)
idx2 = torch.argsort(idx)
th_attn[:, 0] = torch.gather(th_attn[:, 0], dim=1, index=idx2[:, 0])
th_attn = th_attn.reshape(bs, nh, w_featmap, h_featmap).float()
        # remove connected components smaller than 3 pixels
for j, th_att in enumerate(th_attn):
labelled = label(th_att.cpu().numpy())
for k in range(1, np.max(labelled) + 1):
mask = labelled == k
if np.sum(mask) <= 2:
th_attn[j, 0][mask] = 0
# interpolate
th_attn = nn.functional.interpolate(th_attn, scale_factor=patch_size, mode="nearest").cpu().numpy()
# Calculate IoU for each image
for k, map in enumerate(maps):
gt_fg_mask = (map != 0.).float()
intersection = gt_fg_mask * th_attn[k]
intersection = torch.sum(torch.sum(intersection, dim=-1), dim=-1)
union = (gt_fg_mask + th_attn[k]) > 0
union = torch.sum(torch.sum(union, dim=-1), dim=-1)
jacs += intersection / union
return jacs
|
[
"numpy.delete",
"torch.gather",
"torch.utils.data.DataLoader",
"os.path.join",
"numpy.sum",
"torch.argsort",
"torchvision.transforms.ToTensor",
"torch.cumsum",
"numpy.max",
"torch.nn.functional.interpolate",
"torchvision.transforms.GaussianBlur",
"torchvision.transforms.Normalize",
"torch.no_grad",
"torch.sum",
"torch.sort",
"numpy.unique",
"torchvision.transforms.Resize"
] |
[((1354, 1483), 'torch.utils.data.DataLoader', 'DataLoader', (['self.dataset'], {'batch_size': 'attn_batch_size', 'shuffle': '(False)', 'num_workers': 'num_workers', 'drop_last': '(True)', 'pin_memory': '(True)'}), '(self.dataset, batch_size=attn_batch_size, shuffle=False,\n num_workers=num_workers, drop_last=True, pin_memory=True)\n', (1364, 1483), False, 'from torch.utils.data import DataLoader\n'), ((3871, 3893), 'torch.sort', 'torch.sort', (['attentions'], {}), '(attentions)\n', (3881, 3893), False, 'import torch\n'), ((3909, 3945), 'torch.sum', 'torch.sum', (['val'], {'dim': '(-1)', 'keepdim': '(True)'}), '(val, dim=-1, keepdim=True)\n', (3918, 3945), False, 'import torch\n'), ((3963, 3988), 'torch.cumsum', 'torch.cumsum', (['val'], {'dim': '(-1)'}), '(val, dim=-1)\n', (3975, 3988), False, 'import torch\n'), ((4052, 4070), 'torch.argsort', 'torch.argsort', (['idx'], {}), '(idx)\n', (4065, 4070), False, 'import torch\n'), ((5736, 5758), 'torch.sort', 'torch.sort', (['attentions'], {}), '(attentions)\n', (5746, 5758), False, 'import torch\n'), ((5774, 5810), 'torch.sum', 'torch.sum', (['val'], {'dim': '(-1)', 'keepdim': '(True)'}), '(val, dim=-1, keepdim=True)\n', (5783, 5810), False, 'import torch\n'), ((5828, 5853), 'torch.cumsum', 'torch.cumsum', (['val'], {'dim': '(-1)'}), '(val, dim=-1)\n', (5840, 5853), False, 'import torch\n'), ((5917, 5935), 'torch.argsort', 'torch.argsort', (['idx'], {}), '(idx)\n', (5930, 5935), False, 'import torch\n'), ((5960, 6012), 'torch.gather', 'torch.gather', (['th_attn[:, 0]'], {'dim': '(1)', 'index': 'idx2[:, 0]'}), '(th_attn[:, 0], dim=1, index=idx2[:, 0])\n', (5972, 6012), False, 'import torch\n'), ((4133, 4191), 'torch.gather', 'torch.gather', (['th_attn[:, head]'], {'dim': '(1)', 'index': 'idx2[:, head]'}), '(th_attn[:, head], dim=1, index=idx2[:, head])\n', (4145, 4191), False, 'import torch\n'), ((4515, 4529), 'numpy.unique', 'np.unique', (['map'], {}), '(map)\n', (4524, 4529), True, 'import numpy as np\n'), ((4552, 4579), 'numpy.delete', 'np.delete', (['objects', '[0, -1]'], {}), '(objects, [0, -1])\n', (4561, 4579), True, 'import numpy as np\n'), ((712, 760), 'torchvision.transforms.Resize', 'Resize', (['(train_input_height, train_input_height)'], {}), '((train_input_height, train_input_height))\n', (718, 760), False, 'from torchvision.transforms import Compose, Resize, ToTensor, Normalize, GaussianBlur\n'), ((798, 808), 'torchvision.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (806, 808), False, 'from torchvision.transforms import Compose, Resize, ToTensor, Normalize, GaussianBlur\n'), ((846, 910), 'torchvision.transforms.Normalize', 'Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (855, 910), False, 'from torchvision.transforms import Compose, Resize, ToTensor, Normalize, GaussianBlur\n'), ((950, 1044), 'torchvision.transforms.Resize', 'Resize', (['(train_input_height, train_input_height)'], {'interpolation': 'InterpolationMode.NEAREST'}), '((train_input_height, train_input_height), interpolation=\n InterpolationMode.NEAREST)\n', (956, 1044), False, 'from torchvision.transforms import Compose, Resize, ToTensor, Normalize, GaussianBlur\n'), ((1122, 1132), 'torchvision.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (1130, 1132), False, 'from torchvision.transforms import Compose, Resize, ToTensor, Normalize, GaussianBlur\n'), ((1174, 1215), 'os.path.join', 'os.path.join', (['voc_root', '"""VOCSegmentation"""'], {}), "(voc_root, 
'VOCSegmentation')\n", (1186, 1215), False, 'import os\n'), ((6753, 6784), 'torch.sum', 'torch.sum', (['intersection'], {'dim': '(-1)'}), '(intersection, dim=-1)\n', (6762, 6784), False, 'import torch\n'), ((6874, 6898), 'torch.sum', 'torch.sum', (['union'], {'dim': '(-1)'}), '(union, dim=-1)\n', (6883, 6898), False, 'import torch\n'), ((2522, 2537), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2535, 2537), False, 'import torch\n'), ((4734, 4765), 'torch.sum', 'torch.sum', (['intersection'], {'dim': '(-1)'}), '(intersection, dim=-1)\n', (4743, 4765), False, 'import torch\n'), ((4858, 4882), 'torch.sum', 'torch.sum', (['union'], {'dim': '(-1)'}), '(union, dim=-1)\n', (4867, 4882), False, 'import torch\n'), ((5544, 5570), 'torchvision.transforms.GaussianBlur', 'GaussianBlur', (['(7)'], {'sigma': '(0.6)'}), '(7, sigma=0.6)\n', (5556, 5570), False, 'from torchvision.transforms import Compose, Resize, ToTensor, Normalize, GaussianBlur\n'), ((6268, 6284), 'numpy.max', 'np.max', (['labelled'], {}), '(labelled)\n', (6274, 6284), True, 'import numpy as np\n'), ((6347, 6359), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (6353, 6359), True, 'import numpy as np\n'), ((4304, 4379), 'torch.nn.functional.interpolate', 'nn.functional.interpolate', (['th_attn'], {'scale_factor': 'patch_size', 'mode': '"""nearest"""'}), "(th_attn, scale_factor=patch_size, mode='nearest')\n", (4329, 4379), True, 'import torch.nn as nn\n'), ((6451, 6526), 'torch.nn.functional.interpolate', 'nn.functional.interpolate', (['th_attn'], {'scale_factor': 'patch_size', 'mode': '"""nearest"""'}), "(th_attn, scale_factor=patch_size, mode='nearest')\n", (6476, 6526), True, 'import torch.nn as nn\n')]
|
# Autoencoder using convolutional layers
# Dataset : MNIST
# Requires : PIL, matplotlib
# Inspired by https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
# To compress data : net.encode(data)
# To decompress data : net.decode(data)
# To mutate data : net(data)
import os
import numpy as np
import matplotlib.pyplot as plt
import torch as T
from torch import nn
from torch import cuda
import torch.nn.functional as F
from torchvision import transforms
import torchvision
from torchvision.datasets import MNIST
from torch.nn import ReLU, Linear, Sigmoid, Conv2d, ConvTranspose2d, MaxPool2d
import PIL.Image as im
from utils import dataset_dir, models_dir
# Displays an image (1 dim tensor)
# t has values in [0, 1]
def imshow(t):
transforms.ToPILImage()(t).show()
# Show in matplotlib
def gridshow(img):
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.show()
class Net(nn.Module):
def __init__(self, hidden_size, latent_size):
super().__init__()
self.latent_size = latent_size
self.encodeConv1 = Conv2d(1, 16, 4)
self.encodeConv2 = Conv2d(16, 32, 2)
self.encodeFC1 = Linear(800, hidden_size)
self.encodeFC2 = Linear(hidden_size, self.latent_size)
self.decodeFC1 = Linear(self.latent_size, 13 * 13)
self.decodeConv1 = ConvTranspose2d(1, 1, 2)
self.decodeFC2 = Linear(14 * 14, 28 * 28)
def encode(self, x):
x = MaxPool2d(2)(F.relu(self.encodeConv1(x)))
x = MaxPool2d(2)(F.relu(self.encodeConv2(x)))
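        # 28x28 -> conv(4) -> 25x25 -> pool -> 12x12 -> conv(2) -> 11x11 -> pool -> 5x5,
        # so the flattened feature size is 32 * 5 * 5 = 800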
x = x.view(-1, 800)
x = F.relu(self.encodeFC1(x))
x = T.sigmoid(self.encodeFC2(x))
return x
def decode(self, x):
x = F.relu(self.decodeFC1(x))
x = x.view(-1, 1, 13, 13)
x = F.relu(self.decodeConv1(x))
x = x.view(-1, 14 * 14)
x = T.sigmoid(self.decodeFC2(x))
x = x.view(-1, 1, 28, 28)
return x
def forward(self, x):
return self.decode(self.encode(x))
# Hyper params
latent_size = 10
hidden_size = 256
epochs = 3
batch_size = 10
learning_rate = .0002
train_or_test = 'test'
path = models_dir + '/deep_autoencoder'
# Training device
device = T.device('cuda:0' if cuda.is_available() else 'cpu')
# Dataset
trans = transforms.ToTensor()
dataset = MNIST(root=dataset_dir, train=True, download=True, transform=trans)
loader = T.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=0)
# Model
net = Net(hidden_size, latent_size)
net.to(device)
if train_or_test == 'train':
# Load
if os.path.exists(path):
net.load_state_dict(T.load(path))
print('Model loaded')
# Train
optim = T.optim.Adam(net.parameters(), lr=learning_rate, betas=(.9, .999))
criterion = nn.MSELoss()
for e in range(epochs):
avg_loss = 0
for i, data in enumerate(loader, 0):
# Only inputs (no labels)
inputs, _ = data
# Zero the parameter gradients
optim.zero_grad()
# Predictions
x = inputs.to(device)
y = net(x)
# Back prop
loss = criterion(y, x)
loss.backward()
optim.step()
avg_loss += loss.item()
# Stats
print_freq = 100
if i % print_freq == print_freq - 1:
print(f'Epoch {e + 1:2d}, Batch {i + 1:5d}, Loss {avg_loss / print_freq:.3f}')
avg_loss = 0.0
# Save
T.save(net.state_dict(), path)
print('Model trained and saved')
else:
# Load
net.load_state_dict(T.load(path))
# Test
dataiter = iter(loader)
    images, _ = next(dataiter)
# Show ground truth
gridshow(torchvision.utils.make_grid(images))
# Show predictions
with T.no_grad():
preds = T.cat([net(images[i].view(1, 1, 28, 28).to(device)).view(1, 1, 28, 28).cpu() for i in range(batch_size)])
preds = T.tensor(preds)
gridshow(torchvision.utils.make_grid(preds))
|
[
"torch.nn.MSELoss",
"matplotlib.pyplot.show",
"torch.nn.ConvTranspose2d",
"torch.utils.data.DataLoader",
"torch.load",
"torch.nn.Conv2d",
"os.path.exists",
"numpy.transpose",
"torchvision.transforms.ToPILImage",
"torchvision.utils.make_grid",
"torch.cuda.is_available",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.no_grad",
"torchvision.datasets.MNIST",
"torch.tensor",
"torchvision.transforms.ToTensor"
] |
[((2274, 2295), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2293, 2295), False, 'from torchvision import transforms\n'), ((2306, 2373), 'torchvision.datasets.MNIST', 'MNIST', ([], {'root': 'dataset_dir', 'train': '(True)', 'download': '(True)', 'transform': 'trans'}), '(root=dataset_dir, train=True, download=True, transform=trans)\n', (2311, 2373), False, 'from torchvision.datasets import MNIST\n'), ((2383, 2471), 'torch.utils.data.DataLoader', 'T.utils.data.DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(0)'}), '(dataset, batch_size=batch_size, shuffle=True,\n num_workers=0)\n', (2406, 2471), True, 'import torch as T\n'), ((901, 911), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (909, 911), True, 'import matplotlib.pyplot as plt\n'), ((2576, 2596), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2590, 2596), False, 'import os\n'), ((2778, 2790), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2788, 2790), False, 'from torch import nn\n'), ((3978, 3993), 'torch.tensor', 'T.tensor', (['preds'], {}), '(preds)\n', (3986, 3993), True, 'import torch as T\n'), ((865, 895), 'numpy.transpose', 'np.transpose', (['npimg', '(1, 2, 0)'], {}), '(npimg, (1, 2, 0))\n', (877, 895), True, 'import numpy as np\n'), ((1081, 1097), 'torch.nn.Conv2d', 'Conv2d', (['(1)', '(16)', '(4)'], {}), '(1, 16, 4)\n', (1087, 1097), False, 'from torch.nn import ReLU, Linear, Sigmoid, Conv2d, ConvTranspose2d, MaxPool2d\n'), ((1125, 1142), 'torch.nn.Conv2d', 'Conv2d', (['(16)', '(32)', '(2)'], {}), '(16, 32, 2)\n', (1131, 1142), False, 'from torch.nn import ReLU, Linear, Sigmoid, Conv2d, ConvTranspose2d, MaxPool2d\n'), ((1168, 1192), 'torch.nn.Linear', 'Linear', (['(800)', 'hidden_size'], {}), '(800, hidden_size)\n', (1174, 1192), False, 'from torch.nn import ReLU, Linear, Sigmoid, Conv2d, ConvTranspose2d, MaxPool2d\n'), ((1218, 1255), 'torch.nn.Linear', 'Linear', (['hidden_size', 'self.latent_size'], {}), '(hidden_size, self.latent_size)\n', (1224, 1255), False, 'from torch.nn import ReLU, Linear, Sigmoid, Conv2d, ConvTranspose2d, MaxPool2d\n'), ((1282, 1315), 'torch.nn.Linear', 'Linear', (['self.latent_size', '(13 * 13)'], {}), '(self.latent_size, 13 * 13)\n', (1288, 1315), False, 'from torch.nn import ReLU, Linear, Sigmoid, Conv2d, ConvTranspose2d, MaxPool2d\n'), ((1343, 1367), 'torch.nn.ConvTranspose2d', 'ConvTranspose2d', (['(1)', '(1)', '(2)'], {}), '(1, 1, 2)\n', (1358, 1367), False, 'from torch.nn import ReLU, Linear, Sigmoid, Conv2d, ConvTranspose2d, MaxPool2d\n'), ((1393, 1417), 'torch.nn.Linear', 'Linear', (['(14 * 14)', '(28 * 28)'], {}), '(14 * 14, 28 * 28)\n', (1399, 1417), False, 'from torch.nn import ReLU, Linear, Sigmoid, Conv2d, ConvTranspose2d, MaxPool2d\n'), ((2223, 2242), 'torch.cuda.is_available', 'cuda.is_available', ([], {}), '()\n', (2240, 2242), False, 'from torch import cuda\n'), ((3637, 3649), 'torch.load', 'T.load', (['path'], {}), '(path)\n', (3643, 3649), True, 'import torch as T\n'), ((3761, 3796), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['images'], {}), '(images)\n', (3788, 3796), False, 'import torchvision\n'), ((3831, 3842), 'torch.no_grad', 'T.no_grad', ([], {}), '()\n', (3840, 3842), True, 'import torch as T\n'), ((4007, 4041), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['preds'], {}), '(preds)\n', (4034, 4041), False, 'import torchvision\n'), ((1456, 1468), 'torch.nn.MaxPool2d', 'MaxPool2d', (['(2)'], {}), '(2)\n', (1465, 1468), False, 
'from torch.nn import ReLU, Linear, Sigmoid, Conv2d, ConvTranspose2d, MaxPool2d\n'), ((1510, 1522), 'torch.nn.MaxPool2d', 'MaxPool2d', (['(2)'], {}), '(2)\n', (1519, 1522), False, 'from torch.nn import ReLU, Linear, Sigmoid, Conv2d, ConvTranspose2d, MaxPool2d\n'), ((2626, 2638), 'torch.load', 'T.load', (['path'], {}), '(path)\n', (2632, 2638), True, 'import torch as T\n'), ((750, 773), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (771, 773), False, 'from torchvision import transforms\n')]
|
# Copyright (c) 2018 <NAME>
# Copyright (c) 2018 <NAME>
#
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
from phylanx import Phylanx
import numpy as np
@Phylanx
def foo():
local_a = np.array((2, 1))
local_a[0] += 55
return local_a
assert (np.array((57, 1)) == foo()).all()
|
[
"numpy.array"
] |
[((297, 313), 'numpy.array', 'np.array', (['(2, 1)'], {}), '((2, 1))\n', (305, 313), True, 'import numpy as np\n'), ((364, 381), 'numpy.array', 'np.array', (['(57, 1)'], {}), '((57, 1))\n', (372, 381), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import datetime
from dateutil.tz import tzutc
def plot_water_levels(station, dates, levels):
"""Task 2E: Plots water level against time"""
#Assign variables
range_high = [station.typical_range[1]]*len(dates)
range_low = [station.typical_range[0]]*len(dates)
# Plot
plt.plot(dates, levels, label="Water Level")
plt.plot(dates, range_high, label="Typical High")
plt.plot(dates, range_low, label="Typical Low")
# Add axis labels, add legend, rotate date labels and add plot title
plt.xlabel('Date')
plt.ylabel('Water Level (m)')
plt.legend()
plt.xticks(rotation=45)
plt.title(station.name)
# Display plot
plt.tight_layout() # This makes sure plot does not cut off date labels
return plt.show()
def plot_water_level_with_fit(station, dates, levels, p):
"""Task 2F: Plots the water level data and the best-fit polynomial"""
# Convert dates to floats
dates_float = matplotlib.dates.date2num(dates)
# Create a shifted time list
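    # (shifting so the first date maps to x = 0 keeps the polynomial fit numerically well-conditioned)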
dates_shifted = []
for i in range(len(dates_float)):
dates_shifted.append(dates_float[i] - dates_float[0])
# Find coefficients of best-fit polynomial f(x) of degree p
p_coeff = np.polyfit(dates_shifted, levels, p)
# Convert coefficient into a polynomial that can be evaluated,
# e.g. poly(0.3)
poly = np.poly1d(p_coeff)
# Plot original data points
plt.plot(dates_shifted, levels, '.', label='Data Points')
# Plot polynomial fit and typical range low/high at 30 points along interval
# (note that polynomial is evaluated using the date shift)
x = np.linspace(dates_shifted[0], dates_shifted[-1], 30)
range_high = [station.typical_range[1]]*len(x)
range_low = [station.typical_range[0]]*len(x)
plt.plot(x, poly(x - x[0]), label="Polynomial Fit")
plt.plot(x, range_high, label="Typical High")
plt.plot(x, range_low, label="Typical Low")
# Add axis labels, add legend, rotate date labels and add plot title
plt.xlabel('Dates from {}'.format(dates[-1]))
plt.ylabel('Water Level (m)')
plt.legend()
plt.xticks(rotation=45)
plt.title(station.name)
# Display plot
plt.tight_layout() # This makes sure plot does not cut off date labels
return plt.show()
|
[
"matplotlib.pyplot.title",
"numpy.poly1d",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.polyfit",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xticks",
"numpy.linspace",
"matplotlib.dates.date2num",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tight_layout"
] |
[((363, 407), 'matplotlib.pyplot.plot', 'plt.plot', (['dates', 'levels'], {'label': '"""Water Level"""'}), "(dates, levels, label='Water Level')\n", (371, 407), True, 'import matplotlib.pyplot as plt\n'), ((412, 461), 'matplotlib.pyplot.plot', 'plt.plot', (['dates', 'range_high'], {'label': '"""Typical High"""'}), "(dates, range_high, label='Typical High')\n", (420, 461), True, 'import matplotlib.pyplot as plt\n'), ((466, 513), 'matplotlib.pyplot.plot', 'plt.plot', (['dates', 'range_low'], {'label': '"""Typical Low"""'}), "(dates, range_low, label='Typical Low')\n", (474, 513), True, 'import matplotlib.pyplot as plt\n'), ((592, 610), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {}), "('Date')\n", (602, 610), True, 'import matplotlib.pyplot as plt\n'), ((615, 644), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Water Level (m)"""'], {}), "('Water Level (m)')\n", (625, 644), True, 'import matplotlib.pyplot as plt\n'), ((649, 661), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (659, 661), True, 'import matplotlib.pyplot as plt\n'), ((666, 689), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (676, 689), True, 'import matplotlib.pyplot as plt\n'), ((694, 717), 'matplotlib.pyplot.title', 'plt.title', (['station.name'], {}), '(station.name)\n', (703, 717), True, 'import matplotlib.pyplot as plt\n'), ((742, 760), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (758, 760), True, 'import matplotlib.pyplot as plt\n'), ((826, 836), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (834, 836), True, 'import matplotlib.pyplot as plt\n'), ((1019, 1051), 'matplotlib.dates.date2num', 'matplotlib.dates.date2num', (['dates'], {}), '(dates)\n', (1044, 1051), False, 'import matplotlib\n'), ((1288, 1324), 'numpy.polyfit', 'np.polyfit', (['dates_shifted', 'levels', 'p'], {}), '(dates_shifted, levels, p)\n', (1298, 1324), True, 'import numpy as np\n'), ((1425, 1443), 'numpy.poly1d', 'np.poly1d', (['p_coeff'], {}), '(p_coeff)\n', (1434, 1443), True, 'import numpy as np\n'), ((1481, 1538), 'matplotlib.pyplot.plot', 'plt.plot', (['dates_shifted', 'levels', '"""."""'], {'label': '"""Data Points"""'}), "(dates_shifted, levels, '.', label='Data Points')\n", (1489, 1538), True, 'import matplotlib.pyplot as plt\n'), ((1692, 1744), 'numpy.linspace', 'np.linspace', (['dates_shifted[0]', 'dates_shifted[-1]', '(30)'], {}), '(dates_shifted[0], dates_shifted[-1], 30)\n', (1703, 1744), True, 'import numpy as np\n'), ((1911, 1956), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'range_high'], {'label': '"""Typical High"""'}), "(x, range_high, label='Typical High')\n", (1919, 1956), True, 'import matplotlib.pyplot as plt\n'), ((1961, 2004), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'range_low'], {'label': '"""Typical Low"""'}), "(x, range_low, label='Typical Low')\n", (1969, 2004), True, 'import matplotlib.pyplot as plt\n'), ((2133, 2162), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Water Level (m)"""'], {}), "('Water Level (m)')\n", (2143, 2162), True, 'import matplotlib.pyplot as plt\n'), ((2167, 2179), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2177, 2179), True, 'import matplotlib.pyplot as plt\n'), ((2184, 2207), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (2194, 2207), True, 'import matplotlib.pyplot as plt\n'), ((2212, 2235), 'matplotlib.pyplot.title', 'plt.title', (['station.name'], {}), '(station.name)\n', (2221, 2235), True, 'import matplotlib.pyplot as 
plt\n'), ((2260, 2278), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2276, 2278), True, 'import matplotlib.pyplot as plt\n'), ((2344, 2354), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2352, 2354), True, 'import matplotlib.pyplot as plt\n')]
|
import numpy as np
from scipy.integrate import quad
import matplotlib.pyplot as plt
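# Broken power-law rate density: R(z) follows (1+z)**n1 below the break redshift z1 and
# (1+z)**n2 above it, matched continuously at z1 and normalised so that R(0) = n0.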
def Redshift(n0, n1, n2, z1=3.6, z=np.linspace(0,10,num=1001)):
Rlow = np.power((1.0 + z), n1)
Rhigh = np.power((1.0 + z), n2)
rbrk = np.power((1.0 + z1), n1 - n2)
R = Rlow * (z <= z1) + rbrk * Rhigh * (z > z1)
R *= n0 / R[0]
return z, R
z, R = Redshift(0.84, 2.07, -0.7)
plt.plot(z,R,'-k')
plt.xlabel(r'$z$')
plt.ylabel(r'$\mathcal{R}(z)$')
plt.grid()
#plt.gca().set_yscale('log')
plt.show()
#### This computes E(z) and int_0^z dz'/E(z') and saves to file
def Efunc(z):
Omega_m = 0.274
Omega_lambda = 0.726
E = np.sqrt(Omega_m * np.power((1 + z), 3) + Omega_lambda)
return E
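# E(z) = H(z)/H0 for a flat Lambda-CDM cosmology with the Omega_m and Omega_lambda values above;
# Efuncinv is the integrand of the comoving-distance integral int_0^z dz'/E(z').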
def Efuncinv(z):
return 1.0 / Efunc(z)
z = np.linspace(0,10,num=1001)
dz = z[1] - z[0]
E = Efunc(z)
Eics = np.zeros(E.shape)
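# Eics[i] = (int_0^{z_i} dz'/E(z'))**2, evaluated by direct quadrature at every grid point
# (a vectorised cumulative-sum version is kept, commented out, below).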
for i in range(len(Eics)):
Eics[i] = (quad(Efuncinv, 0, z[i])[0])**2.0
#Eics = np.square(np.cumsum(1.0 / E) * dz)
#Eics[1:] = Eics[:-1]
#Eics[0] = 0
Eall = Eics / E
z = z.reshape(z.shape[0],1)
E = E.reshape(E.shape[0],1)
Eics = Eics.reshape(Eics.shape[0],1)
Eall = Eall.reshape(Eall.shape[0],1)
d = np.concatenate((z,E,Eics,Eall),axis=1)
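# Columns written out: z, E(z), the squared integral Eics, and Eall = Eics/E
# (presumably tabulated here so they can be spline-interpolated elsewhere, per the file name).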
np.savetxt('support_data/splines_Ez.txt',d,fmt='%0.9lf')
z2, R = Redshift(0.84, 2.07, -0.7, z=z)
Rp = R / (1+z) * Eall
plt.plot(z,Rp,'-k')
plt.plot(z,R/(1+z),'--b')
plt.plot(z,Eall,'-.r')
#plt.plot(z,np.cumsum(Eall),'-g')
plt.xlabel(r'$z$')
plt.grid()
plt.show()
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"scipy.integrate.quad",
"numpy.power",
"numpy.savetxt",
"numpy.zeros",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"numpy.concatenate"
] |
[((365, 385), 'matplotlib.pyplot.plot', 'plt.plot', (['z', 'R', '"""-k"""'], {}), "(z, R, '-k')\n", (373, 385), True, 'import matplotlib.pyplot as plt\n'), ((384, 401), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$z$"""'], {}), "('$z$')\n", (394, 401), True, 'import matplotlib.pyplot as plt\n'), ((403, 434), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\mathcal{R}(z)$"""'], {}), "('$\\\\mathcal{R}(z)$')\n", (413, 434), True, 'import matplotlib.pyplot as plt\n'), ((435, 445), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (443, 445), True, 'import matplotlib.pyplot as plt\n'), ((475, 485), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (483, 485), True, 'import matplotlib.pyplot as plt\n'), ((723, 751), 'numpy.linspace', 'np.linspace', (['(0)', '(10)'], {'num': '(1001)'}), '(0, 10, num=1001)\n', (734, 751), True, 'import numpy as np\n'), ((789, 806), 'numpy.zeros', 'np.zeros', (['E.shape'], {}), '(E.shape)\n', (797, 806), True, 'import numpy as np\n'), ((1112, 1154), 'numpy.concatenate', 'np.concatenate', (['(z, E, Eics, Eall)'], {'axis': '(1)'}), '((z, E, Eics, Eall), axis=1)\n', (1126, 1154), True, 'import numpy as np\n'), ((1152, 1210), 'numpy.savetxt', 'np.savetxt', (['"""support_data/splines_Ez.txt"""', 'd'], {'fmt': '"""%0.9lf"""'}), "('support_data/splines_Ez.txt', d, fmt='%0.9lf')\n", (1162, 1210), True, 'import numpy as np\n'), ((1274, 1295), 'matplotlib.pyplot.plot', 'plt.plot', (['z', 'Rp', '"""-k"""'], {}), "(z, Rp, '-k')\n", (1282, 1295), True, 'import matplotlib.pyplot as plt\n'), ((1294, 1325), 'matplotlib.pyplot.plot', 'plt.plot', (['z', '(R / (1 + z))', '"""--b"""'], {}), "(z, R / (1 + z), '--b')\n", (1302, 1325), True, 'import matplotlib.pyplot as plt\n'), ((1320, 1344), 'matplotlib.pyplot.plot', 'plt.plot', (['z', 'Eall', '"""-.r"""'], {}), "(z, Eall, '-.r')\n", (1328, 1344), True, 'import matplotlib.pyplot as plt\n'), ((1377, 1394), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$z$"""'], {}), "('$z$')\n", (1387, 1394), True, 'import matplotlib.pyplot as plt\n'), ((1396, 1406), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1404, 1406), True, 'import matplotlib.pyplot as plt\n'), ((1407, 1417), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1415, 1417), True, 'import matplotlib.pyplot as plt\n'), ((120, 148), 'numpy.linspace', 'np.linspace', (['(0)', '(10)'], {'num': '(1001)'}), '(0, 10, num=1001)\n', (131, 148), True, 'import numpy as np\n'), ((157, 178), 'numpy.power', 'np.power', (['(1.0 + z)', 'n1'], {}), '(1.0 + z, n1)\n', (165, 178), True, 'import numpy as np\n'), ((190, 211), 'numpy.power', 'np.power', (['(1.0 + z)', 'n2'], {}), '(1.0 + z, n2)\n', (198, 211), True, 'import numpy as np\n'), ((222, 249), 'numpy.power', 'np.power', (['(1.0 + z1)', '(n1 - n2)'], {}), '(1.0 + z1, n1 - n2)\n', (230, 249), True, 'import numpy as np\n'), ((846, 869), 'scipy.integrate.quad', 'quad', (['Efuncinv', '(0)', 'z[i]'], {}), '(Efuncinv, 0, z[i])\n', (850, 869), False, 'from scipy.integrate import quad\n'), ((630, 648), 'numpy.power', 'np.power', (['(1 + z)', '(3)'], {}), '(1 + z, 3)\n', (638, 648), True, 'import numpy as np\n')]
|
import csv
import os
import logging
import argparse
import random
import collections
import operator
from tqdm import tqdm, trange
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.optimization import BertAdam
from tensorboardX import SummaryWriter
import pdb
import matplotlib.pyplot as plt
import seaborn
seaborn.set_context(context="talk")
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
###############################################################################
# Data Preprocessing
###############################################################################
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None, prev_label=None):
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label # Target slots in this training task
self.prev_label = prev_label # trained slots in previous tasks
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_len, label_id, prev_label_id):
self.input_ids = input_ids
self.input_len = input_len
self.label_id = label_id
self.prev_label_id = prev_label_id # trained slots in previous tasks
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding='utf-8') as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if len(line) > 0 and line[0][0] == '#': # ignore comments (starting with '#')
continue
lines.append(line)
return lines
class Processor(DataProcessor):
"""Processor for the belief tracking dataset (GLUE version)."""
def __init__(self, config):
super(Processor, self).__init__()
import json
if config.data_dir == "data/woz" or config.data_dir=="data/woz-turn":
fp_ontology = open(os.path.join(config.data_dir, "ontology_dstc2_en.json"), "r")
ontology = json.load(fp_ontology)
ontology = ontology["informable"]
del ontology["request"]
for slot in ontology.keys():
ontology[slot].append("do not care")
ontology[slot].append("none")
fp_ontology.close()
elif config.data_dir == "data/multiwoz":
fp_ontology = open(os.path.join(config.data_dir, "ontology.json"), "r")
ontology = json.load(fp_ontology)
for slot in ontology.keys():
ontology[slot].append("none")
fp_ontology.close()
if not config.target_slot == 'all':
slot_idx = {'attraction':'0:1:2', 'bus':'3:4:5:6', 'hospital':'7', 'hotel':'8:9:10:11:12:13:14:15:16:17',\
'restaurant':'18:19:20:21:22:23:24', 'taxi':'25:26:27:28', 'train':'29:30:31:32:33:34'}
target_slot =[]
prev_slot = []
for key, value in slot_idx.items():
if key == config.target_slot:
target_slot.append(value)
else:
prev_slot.append(value)
config.target_slot = ':'.join(target_slot)
config.prev_slot = ':'.join(prev_slot)
else:
raise NotImplementedError()
# sorting the ontology according to the alphabetic order of the slots
self.ontology = collections.OrderedDict(sorted(ontology.items()))
# select slots to train
self.target_slot = []
self.prev_slot = []
self.target_slot_idx = sorted([ int(x) for x in config.target_slot.split(':')])
self.prev_slot_idx = sorted([ int(x) for x in config.prev_slot.split(':')])
ontology_items = list(self.ontology.items())
for idx, domain in enumerate(ontology_items):
slot, value = domain
if slot == "pricerange":
slot = "price range"
if idx in self.target_slot_idx:
self.target_slot.append(slot)
elif idx in self.prev_slot_idx:
self.prev_slot.append(slot)
self.all_slot = self.prev_slot + self.target_slot
logger.info('Processor: previous slots: ' + ', '.join(self.prev_slot))
logger.info('Processor: target slots: '+ ', '.join(self.target_slot))
def get_train_examples(self, data_dir, accumulation=False):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train", accumulation)
def get_dev_examples(self, data_dir, accumulation=False):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev", accumulation)
def get_test_examples(self, data_dir, accumulation=False):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test", accumulation)
def get_labels(self):
"""See base class."""
return [ self.ontology[slot] for slot in self.target_slot]
def get_prev_labels(self):
"""See base class."""
return [ self.ontology[slot] for slot in self.prev_slot]
def _create_examples(self, lines, set_type, accumulation=False):
"""Creates examples for the training and dev sets."""
prev_dialogue_index = None
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s-%s" % (set_type, line[0], line[1]) # line[0]: dialogue index, line[1]: turn index
if accumulation:
if prev_dialogue_index is None or prev_dialogue_index != line[0]:
text_a = line[2]
text_b = line[3]
prev_dialogue_index = line[0]
else:
# The symbol '#' will be replaced with '[SEP]' after tokenization.
text_a = line[2] + " # " + text_a
text_b = line[3] + " # " + text_b
else:
text_a = line[2] # line[2]: user utterance
text_b = line[3] # line[3]: system response
label = [ line[4+idx] for idx in self.target_slot_idx]
prev_label = [ line[4+idx] for idx in self.prev_slot_idx]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, prev_label=prev_label))
return examples
def convert_examples_to_features(examples, label_list, prev_label_list, max_seq_length, tokenizer, max_turn_length):
"""Loads a data file into a list of `InputBatch`s."""
slot_dim = len(label_list)
prev_slot_dim = len(prev_label_list)
def _hard_coding_label(label):
return 'do not care' if label=='dontcare' else label
def _get_label(label, label_list):
label_id = []
label_info = ''
label_map = [{_label: i for i, _label in enumerate(labels)} for labels in label_list]
for i, label in enumerate(label):
label = _hard_coding_label(label)
label_id.append(label_map[i][label])
label_info += '%s (id = %d) ' % (label, label_map[i][label])
return label_id, label_info
features = []
prev_dialogue_idx = None
all_padding = [0] * max_seq_length
all_padding_len = [0, 0]
max_turn = 0
for (ex_index, example) in enumerate(examples):
if max_turn < int(example.guid.split('-')[2]):
max_turn = int(example.guid.split('-')[2])
max_turn_length = min(max_turn+1, max_turn_length)
logger.info("max_turn_length = %d" % max_turn)
for (ex_index, example) in enumerate(examples):
tokens_a = [x if x != '#' else '[SEP]' for x in tokenizer.tokenize(example.text_a)]
tokens_b = None
if example.text_b:
tokens_b = [x if x != '#' else '[SEP]' for x in tokenizer.tokenize(example.text_b)]
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
input_len = [len(tokens), 0]
if tokens_b:
tokens += tokens_b + ["[SEP]"]
input_len[1] = len(tokens_b) + 1
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# Zero-pad up to the sequence length.
input_ids += [0] * (max_seq_length - len(input_ids)) # Note: padding idx = 0
assert len(input_ids) == max_seq_length
label_id, label_info = _get_label(example.label, label_list)
prev_label_id, prev_label_info = _get_label(example.prev_label, prev_label_list)
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % example.guid)
logger.info("tokens: %s" % " ".join([str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_len: %s" % " ".join([str(x) for x in input_len]))
logger.info("label: " + label_info)
logger.info("previous label: " + prev_label_info)
curr_dialogue_idx = example.guid.split('-')[1]
curr_turn_idx = int(example.guid.split('-')[2])
if (prev_dialogue_idx is not None) and (prev_dialogue_idx != curr_dialogue_idx):
if prev_turn_idx < max_turn_length:
features += [InputFeatures(input_ids=all_padding,
input_len=all_padding_len,
label_id=[-1]*slot_dim,
prev_label_id=[-1] * prev_slot_dim)] * (max_turn_length - prev_turn_idx - 1)
assert len(features) % max_turn_length == 0
if prev_dialogue_idx is None or prev_turn_idx < max_turn_length:
features.append(InputFeatures(input_ids=input_ids,
input_len=input_len,
label_id=label_id,
prev_label_id=prev_label_id,
))
prev_dialogue_idx = curr_dialogue_idx
prev_turn_idx = curr_turn_idx
if prev_turn_idx < max_turn_length:
features += [InputFeatures(input_ids=all_padding,
input_len=all_padding_len,
label_id=[-1]*slot_dim,
prev_label_id=[-1]*prev_slot_dim)] * (max_turn_length - prev_turn_idx - 1)
assert len(features) % max_turn_length == 0
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_len= torch.tensor([f.input_len for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
all_prev_label_ids = torch.tensor([f.prev_label_id for f in features], dtype=torch.long)
# reshape tensors to [batch, turn, word]
all_input_ids = all_input_ids.view(-1, max_turn_length, max_seq_length)
all_input_len = all_input_len.view(-1, max_turn_length, 2)
all_label_ids = all_label_ids.view(-1, max_turn_length, slot_dim)
all_prev_label_ids = all_prev_label_ids.view(-1, max_turn_length, prev_slot_dim)
return all_input_ids, all_input_len, all_label_ids, all_prev_label_ids
def get_label_embedding(labels, max_seq_length, tokenizer, device):
features = []
for label in labels:
label_tokens = ["[CLS]"] + tokenizer.tokenize(label) + ["[SEP]"]
label_token_ids = tokenizer.convert_tokens_to_ids(label_tokens)
label_len = len(label_token_ids)
label_padding = [0] * (max_seq_length - len(label_token_ids))
label_token_ids += label_padding
assert len(label_token_ids) == max_seq_length
features.append((label_token_ids, label_len))
all_label_token_ids = torch.tensor([f[0] for f in features], dtype=torch.long).to(device)
all_label_len = torch.tensor([f[1] for f in features], dtype=torch.long).to(device)
return all_label_token_ids, all_label_len
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
###############################################################################
# Miscellaneous functions
###############################################################################
def accuracy(out, labels):
outputs = np.argmax(out, axis=1)
return np.sum(outputs == labels)
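# Note: accuracy() returns the raw count of correct predictions (not a fraction); any caller
# would still need to divide by the number of examples.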
def warmup_linear(x, warmup=0.002):
if x < warmup:
return x / warmup
return 1.0 - x
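# warmup_linear mirrors the schedule used by older pytorch_pretrained_bert releases: the learning-rate
# multiplier ramps linearly from 0 to 1 over the first `warmup` fraction of training, then decays as 1 - x.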
###############################################################################
# Main
###############################################################################
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument('--data_dir', type=str, required=True,
help='location of the data corpus')
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
"bert-base-multilingual-cased, bert-base-chinese.")
parser.add_argument("--bert_dir", default='/gfs/nlp/.pytorch_pretrained_bert',
type=str, required=False,
help="The directory of the pretrained BERT model")
parser.add_argument("--task_name", default=None, type=str, required=True,
help="The name of the task to train: bert, bert-gru, bert-lstm, "
"bert-label-embedding, bert-gru-label-embedding, bert-lstm-label-embedding")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument('--load_path', type=str, default='',
help='pretrained model directory name')
parser.add_argument("--target_slot", default='', type=str, required=True,
help="Target slot idx to train model. ex. '0:1:2 or an excluding slot name 'attraction'" )
parser.add_argument("--prev_slot", default='', type=str, required=True,
help="Previous trained slots. ex. '0:1:2 or an excluding slot name 'attraction'" )
parser.add_argument("--tf_dir", default='tensorboard', type=str, required=False,
help="Tensorboard directory")
parser.add_argument("--nbt", default='rnn', type=str, required=True,
help="nbt type: rnn or transformer or turn" )
parser.add_argument("--fix_utterance_encoder",
action='store_true',
help="Do not train BERT utterance encoder")
## Other parameters
parser.add_argument("--max_seq_length", default=64, type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--max_label_length", default=32, type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--max_turn_length", default=22, type=int,
help="The maximum total input turn length. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument('--hidden_dim',
type=int,
default=100,
help="hidden dimension used in belief tracker")
parser.add_argument('--num_rnn_layers',
type=int,
default=1,
help="number of RNN layers")
parser.add_argument('--zero_init_rnn',
action='store_true',
help="set initial hidden of rnns zero")
parser.add_argument('--skip_connect',
type=str,
default=False,
help="skip-connection")
parser.add_argument('--attn_head',
type=int,
default=4,
help="the number of heads in multi-headed attention")
parser.add_argument("--do_train",
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
action='store_true',
help="Whether to run eval on the test set.")
parser.add_argument("--do_analyze",
action='store_true',
help="Whether to run analysis on the test set.")
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--set_label_encoder_trainable",
action='store_true',
help="Set this flag if you want to set the label encoder trainable. \n"
"This option is valid only when using label embeddings. \n")
parser.add_argument("--distance_metric",
type=str,
default="cosine",
help="The metric for distance between label embeddings: cosine, euclidean.")
parser.add_argument("--train_batch_size",
default=4,
type=int,
help="Total batch size for training.")
parser.add_argument("--dev_batch_size",
default=1,
type=int,
help="Total batch size for validation.")
parser.add_argument("--eval_batch_size",
default=16,
type=int,
help="Total batch size for eval.")
parser.add_argument("--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--patience",
default=10.0,
type=float,
help="The number of epochs to allow no further improvement.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--lambda_ewc",
default=0.1,
type=float,
help="Hyper-parameter for EWC")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale',
type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
parser.add_argument("--do_not_use_tensorboard",
action='store_true',
help="Whether to run eval on the test set.")
args = parser.parse_args()
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
os.makedirs(args.output_dir, exist_ok=True)
task_name = args.task_name.lower()
tb_file_name = args.output_dir.split('/')[1]
# Tensorboard logging
if not args.do_not_use_tensorboard:
summary_writer = SummaryWriter("./%s/%s" % (args.tf_dir, tb_file_name))
else:
summary_writer = None
fileHandler = logging.FileHandler(os.path.join(args.output_dir, "%s.txt"%(tb_file_name)))
logger.addHandler(fileHandler)
logger.info(args)
# CUDA setting
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
# Set the random seed manually for reproducibility.
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train and not args.do_eval and not args.do_analyze:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
###############################################################################
# Load data
###############################################################################
# Get Processor
processor = Processor(args)
prev_label_list = processor.get_prev_labels() # Slot value labels of Previous task
target_label_list = processor.get_labels() # Slot value labels of Present task
label_list = prev_label_list + target_label_list # All slot value labels
num_labels = [len(labels) for labels in label_list] # Number of labels of all slots
#prev_slot_id = processor.prev_slot_idx
#target_slot_id = processor.target_slot_idx
    # Note: the ontology indices above are not usable here; the model expects positional indices
    # into processor.all_slot (previous slots first, then target slots), as built below.
prev_slot_id = list(range(0, len(processor.prev_slot))) # List of slots in previous task
target_slot_id = list(range(len(processor.prev_slot), len(processor.all_slot))) # list of slots in present task
# tokenizer
vocab_dir = os.path.join(args.bert_dir, '%s-vocab.txt' % args.bert_model)
if not os.path.exists(vocab_dir):
raise ValueError("Can't find %s " % vocab_dir)
tokenizer = BertTokenizer.from_pretrained(vocab_dir, do_lower_case=args.do_lower_case)
num_train_steps = None
accumulation = False
if args.do_train:
train_examples = processor.get_train_examples(args.data_dir, accumulation=accumulation)
dev_examples = processor.get_dev_examples(args.data_dir, accumulation=accumulation)
num_train_steps = int(len(train_examples) / args.train_batch_size * args.num_train_epochs)
num_dev_steps = int(len(dev_examples) / args.dev_batch_size * args.num_train_epochs)
## utterances
all_input_ids, all_input_len, all_label_ids, all_prev_label_ids = convert_examples_to_features(
train_examples, target_label_list, prev_label_list, args.max_seq_length, tokenizer, args.max_turn_length)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_steps)
all_input_ids, all_input_len, all_label_ids, all_prev_label_ids \
= all_input_ids.to(device), all_input_len.to(device), all_label_ids.to(device), all_prev_label_ids.to(device)
train_data = TensorDataset(all_input_ids, all_input_len, all_label_ids, all_prev_label_ids)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
## Dev
## utterances
all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev = convert_examples_to_features(
dev_examples, target_label_list, prev_label_list, args.max_seq_length, tokenizer, args.max_turn_length)
logger.info("***** Running validation *****")
logger.info(" Num examples = %d", len(dev_examples))
logger.info(" Batch size = %d", args.dev_batch_size)
logger.info(" Num steps = %d", num_dev_steps)
all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev = \
all_input_ids_dev.to(device), all_input_len_dev.to(device), all_label_ids_dev.to(device), all_prev_label_ids_dev.to(device)
dev_data = TensorDataset(all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev)
dev_sampler = SequentialSampler(dev_data)
dev_dataloader = DataLoader(dev_data, sampler=dev_sampler, batch_size=args.dev_batch_size)
logger.info("Loaded data!")
###############################################################################
# Build the models
###############################################################################
# Prepare model
if args.nbt =='rnn':
from BeliefTrackerSlotQueryMultiSlot import BeliefTracker
if args.task_name.find("gru") == -1 and args.task_name.find("lstm") == -1:
raise ValueError("Task name should include at least \"gru\" or \"lstm\"")
elif args.nbt =='turn':
from BeliefTrackerSlotQueryMultiSlotTurn import BeliefTracker
elif args.nbt == 'transformer':
from BeliefTrackerSlotQueryMultiSlotTransformer import BeliefTracker
from BeliefTrackerSlotQueryMultiSlotEWC import EWC
else:
raise ValueError('nbt type should be either rnn or transformer')
from BeliefTrackerSlotQueryMultiSlotEWC import EWC
model = BeliefTracker(args, num_labels, device)
if args.fp16:
model.half()
# Load pretrained model
# in the case that slot and values are different between the training and evaluation
ptr_model = torch.load(args.load_path, map_location=device)
del_list = []
rename_list = []
for key in ptr_model.keys():
if ('slot_lookup' in key) or ('value_lookup' in key): # remove slot_lookup and value_lookup
del_list.append(key)
if ('rnn.' in key): # rename rnn -> nbt,
rename_list.append(key)
for key in del_list:
del ptr_model[key]
for key in rename_list:
new_key = key.replace('rnn.', 'nbt.')
ptr_model[new_key] = ptr_model[key]
del ptr_model[key]
state = model.state_dict()
state.update(ptr_model)
model.load_state_dict(state)
model.to(device)
## Get slot-value embeddings
label_token_ids, label_len = [], []
for labels in label_list:
token_ids, lens = get_label_embedding(labels, args.max_label_length, tokenizer, device)
label_token_ids.append(token_ids)
label_len.append(lens)
## Get slot-type embeddings
## Note: slot embeddings are ordered as [previous slots + present target slots]
slot_token_ids, slot_len = \
get_label_embedding(processor.all_slot, args.max_label_length, tokenizer, device)
model.initialize_slot_value_lookup(label_token_ids, label_len, slot_token_ids, slot_len)
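    # The slot/value lookup tables that were stripped from the checkpoint above are rebuilt here from the
    # current ontology, so a model trained with a different slot/value set can still be loaded and fine-tuned.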
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model = DDP(model)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare optimizer
if args.do_train:
def get_optimizer_grouped_parameters(model):
param_optimizer = [(n, p) for n, p in model.named_parameters() if p.requires_grad]
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01,
'lr': args.learning_rate},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0,
'lr': args.learning_rate},
]
return optimizer_grouped_parameters
if n_gpu == 1:
optimizer_grouped_parameters = get_optimizer_grouped_parameters(model)
else:
optimizer_grouped_parameters = get_optimizer_grouped_parameters(model.module)
t_total = num_train_steps
if args.local_rank != -1:
t_total = t_total // torch.distributed.get_world_size()
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
else:
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=t_total)
logger.info(optimizer)
###############################################################################
# Training code
###############################################################################
if args.do_train:
logger.info("Training...")
global_step = 0
last_update = None
best_loss = None
#### EWC: calculate Fisher
ewc = EWC(model, dev_dataloader, oldtask=prev_slot_id, num_labels=num_labels, device=device, n_gpu=n_gpu)
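        #### The EWC term is a Fisher-weighted quadratic penalty on drifting away from the parameters
        #### learned for the previous slots; it enters the objective below as loss = loss_ + args.lambda_ewc * loss_ewc.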
for epoch in trange(int(args.num_train_epochs), desc="Epoch"):
# for epoch in trange(1):
#### TRAIN
model.train()
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
batch = tuple(t.to(device) for t in batch)
input_ids, input_len, label_ids, _ = batch
if n_gpu == 1:
loss_, loss_slot, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id)
loss_ewc = ewc.penalty(model)
loss = loss_ + args.lambda_ewc * loss_ewc
else:
loss_, _, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu,
target_slot=target_slot_id)
loss_ = loss_.mean()
acc = acc.mean()
acc_slot = acc_slot.mean(0)
loss_ewc = ewc.penalty(model)
loss = loss_ + args.lambda_ewc * loss_ewc
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
if summary_writer is not None:
summary_writer.add_scalar("Epoch", epoch, global_step)
summary_writer.add_scalar("Train/Loss", loss_, global_step)
summary_writer.add_scalar("Train/Loss_EWC", loss_ewc, global_step)
summary_writer.add_scalar("Train/Loss_Total", loss, global_step)
summary_writer.add_scalar("Train/JointAcc", acc, global_step)
if n_gpu == 1:
for i, slot in enumerate(processor.target_slot):
summary_writer.add_scalar("Train/Loss_%s" % slot.replace(' ','_'), loss_slot[i], global_step)
summary_writer.add_scalar("Train/Acc_%s" % slot.replace(' ','_'), acc_slot[i], global_step)
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if (step + 1) % args.gradient_accumulation_steps == 0:
# modify learning rate with special warm up BERT uses
lr_this_step = args.learning_rate * warmup_linear(global_step / t_total, args.warmup_proportion)
if summary_writer is not None:
summary_writer.add_scalar("Train/LearningRate", lr_this_step, global_step)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
optimizer.step()
optimizer.zero_grad()
global_step += 1
# Perform evaluation on validation dataset
model.eval()
dev_loss = 0
dev_acc = 0
dev_loss_slot, dev_acc_slot = None, None
nb_dev_examples, nb_dev_steps = 0, 0
prev_dev_loss = 0
prev_dev_acc = 0
prev_dev_loss_slot, prev_dev_acc_slot = None, None
prev_nb_dev_examples = 0
for step, batch in enumerate(tqdm(dev_dataloader, desc="Validation")):
batch = tuple(t.to(device) for t in batch)
input_ids, input_len, label_ids, prev_label_ids = batch
if input_ids.dim() == 2:
input_ids = input_ids.unsqueeze(0)
input_len = input_len.unsqueeze(0)
                    label_ids = label_ids.unsqueeze(0)
                    prev_label_ids = prev_label_ids.unsqueeze(0)
with torch.no_grad():
if n_gpu == 1:
loss_, loss_slot, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu,
target_slot=target_slot_id)
loss = loss_ + args.lambda_ewc * ewc.penalty(model)
prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, _ = model(input_ids, input_len,
prev_label_ids, n_gpu,
target_slot=prev_slot_id)
else:
loss_, _, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id)
loss_ = loss_.mean()
acc = acc.mean()
acc_slot = acc_slot.mean(0)
loss_ewc = ewc.penalty(model)
loss = loss_ + args.lambda_ewc * loss_ewc
prev_loss, _, prev_acc, prev_acc_slot, _ = model(input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id)
prev_loss = prev_loss.mean()
prev_acc = prev_acc.mean()
prev_acc_slot = prev_acc_slot.mean(0)
num_valid_turn = torch.sum(label_ids[:,:,0].view(-1) > -1, 0).item()
dev_loss += loss.item() * num_valid_turn
dev_acc += acc.item() * num_valid_turn
prev_num_valid_turn = torch.sum(prev_label_ids[:,:,0].view(-1) > -1, 0).item()
prev_dev_loss += prev_loss.item() * prev_num_valid_turn
prev_dev_acc += prev_acc.item() * prev_num_valid_turn
if n_gpu == 1:
if dev_loss_slot is None:
dev_loss_slot = [ l * num_valid_turn for l in loss_slot]
dev_acc_slot = acc_slot * num_valid_turn
prev_dev_loss_slot = [ l * prev_num_valid_turn for l in prev_loss_slot]
prev_dev_acc_slot = prev_acc_slot * prev_num_valid_turn
else:
for i, l in enumerate(loss_slot):
dev_loss_slot[i] = dev_loss_slot[i] + l * num_valid_turn
dev_acc_slot += acc_slot * num_valid_turn
for i, l in enumerate(prev_loss_slot):
prev_dev_loss_slot[i] = prev_dev_loss_slot[i] + l * prev_num_valid_turn
prev_dev_acc_slot += prev_acc_slot * prev_num_valid_turn
nb_dev_examples += num_valid_turn
prev_nb_dev_examples += prev_num_valid_turn
dev_loss = dev_loss / nb_dev_examples
dev_acc = dev_acc / nb_dev_examples
prev_dev_loss = prev_dev_loss / prev_nb_dev_examples
prev_dev_acc = prev_dev_acc / prev_nb_dev_examples
if n_gpu == 1:
dev_acc_slot = dev_acc_slot / nb_dev_examples
prev_dev_acc_slot = prev_dev_acc_slot / prev_nb_dev_examples
if summary_writer is not None:
summary_writer.add_scalar("Validate/Loss", dev_loss, global_step)
summary_writer.add_scalar("Validate/Acc", dev_acc, global_step)
summary_writer.add_scalar("Validate/Prev_Loss", prev_dev_loss, global_step)
summary_writer.add_scalar("Validate/Prev_Acc", prev_dev_acc, global_step)
if n_gpu == 1:
for i, slot in enumerate(processor.target_slot):
summary_writer.add_scalar("Validate/Loss_%s" % slot.replace(' ','_'), dev_loss_slot[i]/nb_dev_examples, global_step)
summary_writer.add_scalar("Validate/Acc_%s" % slot.replace(' ','_'), dev_acc_slot[i], global_step)
for i, slot in enumerate(processor.prev_slot):
summary_writer.add_scalar("Validate/Prev_Loss_%s" % slot.replace(' ','_'), prev_dev_loss_slot[i]/prev_nb_dev_examples, global_step)
summary_writer.add_scalar("Validate/Prev_Acc_%s" % slot.replace(' ','_'), prev_dev_acc_slot[i], global_step)
logger.info("*** Model Updated: Epoch=%d, Valid loss=%.6f, Valid acc=%.6f, Valid prev loss=%.6f, Valid prev acc=%.6f ***" \
% (epoch, dev_loss, dev_acc, prev_dev_loss, prev_dev_acc))
dev_loss = round(dev_loss, 6)
if last_update is None or dev_loss < best_loss:
# Save a trained model
output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
if args.do_train:
if n_gpu == 1:
torch.save(model.state_dict(), output_model_file)
else:
torch.save(model.module.state_dict(), output_model_file)
last_update = epoch
best_loss = dev_loss
best_acc = dev_acc
logger.info("*** Model Updated: Epoch=%d, Validation Loss=%.6f, Validation Acc=%.6f ***" % (last_update, best_loss, best_acc))
else:
logger.info("*** Model NOT Updated: Epoch=%d, Validation Loss=%.6f, Validation Acc=%.6f ***" % (epoch, dev_loss, dev_acc))
#if epoch > 100 and last_update + args.patience <= epoch:
if last_update + args.patience <= epoch:
break
###############################################################################
# Evaluation
###############################################################################
# Test
output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
# Load a trained model that you have fine-tuned
ptr_model = torch.load(output_model_file, map_location=device)
del_list = []
for key in ptr_model.keys():
if ('slot' in key) or ('value' in key):
del_list.append(key)
for key in del_list:
del ptr_model[key]
if n_gpu > 1:
model = model.module
state = model.state_dict()
state.update(ptr_model)
model.load_state_dict(state)
model.to(device)
## Get slot-value embeddings
label_token_ids, label_len = [], []
for labels in label_list:
token_ids, lens = get_label_embedding(labels, args.max_label_length, tokenizer, device)
label_token_ids.append(token_ids)
label_len.append(lens)
## Get slot-type embeddings
## Note: slot embeddings are ordered as [previous slots + present target slots]
slot_token_ids, slot_len = \
get_label_embedding(processor.all_slot, args.max_label_length, tokenizer, device)
model.initialize_slot_value_lookup(label_token_ids, label_len, slot_token_ids, slot_len)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
eval_examples = processor.get_test_examples(args.data_dir, accumulation=accumulation)
all_input_ids, all_input_len, all_label_ids, all_prev_label_ids = convert_examples_to_features(
eval_examples, target_label_list, prev_label_list, args.max_seq_length, tokenizer, args.max_turn_length)
all_input_ids, all_input_len, all_label_ids, all_prev_label_ids \
= all_input_ids.to(device), all_input_len.to(device), all_label_ids.to(device), all_prev_label_ids.to(device)
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_data = TensorDataset(all_input_ids, all_input_len, all_label_ids, all_prev_label_ids)
# Run prediction for full data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
model.eval()
eval_loss, eval_accuracy = 0, 0
eval_loss_slot, eval_acc_slot = None, None
nb_eval_steps, nb_eval_examples = 0, 0
prev_eval_loss, prev_eval_accuracy = 0, 0
prev_eval_loss_slot, prev_eval_acc_slot = None, None
nb_eval_examples_prev = 0
for input_ids, input_len, label_ids, prev_label_ids in tqdm(eval_dataloader, desc="Evaluating"):
if input_ids.dim() == 2:
input_ids = input_ids.unsqueeze(0)
input_len = input_len.unsqueeze(0)
                label_ids = label_ids.unsqueeze(0)
                prev_label_ids = prev_label_ids.unsqueeze(0)
with torch.no_grad():
if n_gpu == 1:
loss, loss_slot, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id)
prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, _ = model(input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id)
else:
loss, loss_slot, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id)
loss = loss.mean()
acc = acc.mean()
acc_slot = acc_slot.mean(0)
prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, _ = model(input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id)
prev_loss = prev_loss.mean()
prev_acc = prev_acc.mean()
prev_acc_slot = prev_acc_slot.mean(0)
nb_eval_ex_prev = (prev_label_ids[:,:,0].view(-1) != -1).sum().item()
nb_eval_examples_prev += nb_eval_ex_prev
nb_eval_ex = (label_ids[:,:,0].view(-1) != -1).sum().item()
nb_eval_examples += nb_eval_ex
nb_eval_steps += 1
def _post_process(eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot, loss, loss_slot, acc, acc_slot, nb_eval_ex):
eval_loss += loss.item() * nb_eval_ex
eval_accuracy += acc.item() * nb_eval_ex
if loss_slot is not None:
if eval_loss_slot is None:
eval_loss_slot = [ l * nb_eval_ex for l in loss_slot]
else:
for i, l in enumerate(loss_slot):
eval_loss_slot[i] = eval_loss_slot[i] + l * nb_eval_ex
if eval_acc_slot is None:
eval_acc_slot = acc_slot * nb_eval_ex
else:
eval_acc_slot += acc_slot * nb_eval_ex
return eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot
eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot = \
_post_process(eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot, loss, loss_slot, acc, acc_slot, nb_eval_ex)
prev_eval_loss, prev_eval_loss_slot, prev_eval_accuracy, prev_eval_acc_slot = \
_post_process(prev_eval_loss, prev_eval_loss_slot, prev_eval_accuracy, prev_eval_acc_slot, \
prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, nb_eval_ex_prev)
eval_loss /= nb_eval_examples
if eval_loss_slot is None: # for multi-gpu
eval_loss_slot = [0]
prev_eval_loss_slot = [0]
eval_accuracy = eval_accuracy / nb_eval_examples
prev_eval_loss = prev_eval_loss / nb_eval_examples_prev
prev_eval_accuracy = prev_eval_accuracy / nb_eval_examples_prev
eval_acc_slot = eval_acc_slot / nb_eval_examples
prev_eval_acc_slot = prev_eval_acc_slot / nb_eval_examples_prev
total_acc_slot = {}
for val, idx in zip(torch.cat([eval_acc_slot, prev_eval_acc_slot]), (target_slot_id+prev_slot_id)):
total_acc_slot[idx] = val
total_acc_slot = sorted(total_acc_slot.items(), key=operator.itemgetter(0))
loss = tr_loss / nb_tr_steps if args.do_train else None
result = {'eval_loss': eval_loss,
'eval_accuracy': eval_accuracy,
'loss': loss,
'eval_loss_slot':'\t'.join([ str(val/ nb_eval_examples) for val in eval_loss_slot]),
'eval_acc_slot':'\t'.join([ str((val).item()) for val in eval_acc_slot]),
'prev_eval_loss': prev_eval_loss,
'prev_eval_accuracy': prev_eval_accuracy,
'prev_eval_loss_slot': '\t'.join([str(val / nb_eval_examples_prev) for val in prev_eval_loss_slot]),
'prev_eval_acc_slot': '\t'.join([str((val).item()) for val in prev_eval_acc_slot]),
'total_acc_slot': '\t'.join([str(val[1].item()) for val in total_acc_slot])
}
out_file_name = 'eval_results'
if args.target_slot=='all':
out_file_name += '_all'
output_eval_file = os.path.join(args.output_dir, "%s.txt" % out_file_name)
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
###############################################################################
# Analyze: TODO
###############################################################################
if args.do_analyze and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
pdb.set_trace()
def draw(data, x, y, ax):
seaborn.heatmap(data,
xticklabels=x, square=True, yticklabels=y, vmin=0.0, vmax=1.0,
cbar=False, ax=ax)
class_correct = [[0 for x in range(num_labels[i])] for i in range(len(num_labels))]
class_count = [[0 for x in range(num_labels[i])] for i in range(len(num_labels))]
eval_examples = processor.get_test_examples(args.data_dir, accumulation=accumulation)
        # Note: this analysis path appears to predate the prev/target slot split; the call below is
        # aligned with the current convert_examples_to_features signature, discarding the previous-task labels.
        all_input_ids, all_input_len, all_label_ids, _ = convert_examples_to_features(
            eval_examples, target_label_list, prev_label_list, args.max_seq_length, tokenizer, args.max_turn_length)
all_input_ids, all_input_len, all_label_ids = all_input_ids.to(device), all_input_len.to(
device), all_label_ids.to(device)
logger.info("***** Running analysis *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", 1)
eval_data = TensorDataset(all_input_ids, all_input_len, all_label_ids)
# Run prediction for full data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=1)
model.eval()
none_value_id = [ len(val)-1 for val in label_list]
incorrect_dialogs = []
attention_draw = 5
for input_ids, input_len, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
if input_ids.dim() == 2:
input_ids = input_ids.unsqueeze(0)
input_len = input_len.unsqueeze(0)
                label_ids = label_ids.unsqueeze(0)
with torch.no_grad():
_, _, acc, _, pred_slot = model(input_ids, input_len, label_ids, 1)
nturn = (label_ids[:,:,0].view(-1) != -1).sum().item()
nslot = label_ids.size(2)
for slot in range(nslot):
for turn in range(nturn):
class_count[slot][label_ids[0][turn][slot]]+=1
if label_ids[0][turn][slot] == pred_slot[0][turn][slot]:
class_correct[slot][label_ids[0][turn][slot]] +=1
drawfig = False
print('hotel')
print(label_ids[0, 0:10, 8:18].cpu() == torch.Tensor(none_value_id[8:18]).long().repeat(10, 1))
print(pred_slot[0, 0:10, 8:18].cpu() == torch.Tensor(none_value_id[8:18]).long().repeat(10, 1))
print(label_ids[0, 0:10, 0:8].cpu() == torch.Tensor(none_value_id[0:8]).long().repeat(10, 1))
print(label_ids[0, 0:10, 18:].cpu() == torch.Tensor(none_value_id[18:]).long().repeat(10, 1))
pdb.set_trace()
if drawfig == True:
#if (len(incorrect_dialogs) < attention_draw):
max_len = input_ids.size(2)
attn_scores = model.attn.get_scores().transpose(1, 2).contiguous().view(label_ids.size(1)*nslot, -1, max_len)
for slot in range(0, nslot):
fig, axs = plt.subplots(nturn, 1, figsize=(50, 10*nturn))
print("Slot", slot)
for turn in range(nturn):
draw(attn_scores[slot*label_ids.size(1)+turn,:,:].cpu(),
tokenizer.convert_ids_to_tokens(input_ids[0][turn].cpu().numpy()),
[*range(0, args.attn_head)], ax=axs[turn])
axs[turn].set_title("turn %d slot: %s label: %s pred: %s"
% (turn, processor.target_slot[slot], str(label_list[slot][label_ids[0][turn][slot].item()]),
str(label_list[slot][pred_slot[0][turn][slot].item()]) ))
plt.show()
plt.savefig(os.path.join(args.output_dir, "attention-d%d-slot%s.png"%(len(incorrect_dialogs), slot)))
plt.close()
if not acc == 1:
dialog = []
for input, label, pred in zip(input_ids[0], label_ids[0], pred_slot[0]):
if label[0] == -1:
break
text = {}
text['input'] = ' '.join(tokenizer.convert_ids_to_tokens(input.cpu().numpy())).replace(' [PAD]', '')
text['label'] = [str(label_list[idx][x]) for idx, x in enumerate(label.cpu().numpy())]
text['pred'] = [str(label_list[idx][x]) for idx, x in enumerate(pred.cpu().numpy())]
dialog.append(text)
incorrect_dialogs.append(dialog)
output_eval_incorr_file = os.path.join(args.output_dir, "incorrect_dialog.txt")
with open(output_eval_incorr_file, "w") as writer:
for dialog in incorrect_dialogs:
for turn in dialog:
text = turn['input'] + '\t'
for label, pred in zip(turn['label'], turn['pred']):
text += '%s\t%s\t'%(label, pred)
writer.write("%s\n" % text)
writer.write("---------- \n")
logger.info("Done analysis: %s" % output_eval_incorr_file)
output_eval_incorr_file = os.path.join(args.output_dir, "per_class_accuracy.txt")
with open(output_eval_incorr_file, "w") as writer:
total_class_acc = 0
total_slot_class_acc = []
nlabels = 0
for sid, slot in enumerate(class_count):
slot_class_acc = 0
for vid, value in enumerate(slot):
if not value == 0:
class_acc = class_correct[sid][vid]/value
writer.write("%s\t%d\t%d\t%.3f\n"%(label_list[sid][vid], class_correct[sid][vid], value, class_acc) )
slot_class_acc += class_acc
nlabels += 1
else:
writer.write("%s\t%d\t%d\t%.3f\n"%(label_list[sid][vid], class_correct[sid][vid], value, -1) )
total_slot_class_acc.append(slot_class_acc/(vid+1))
total_class_acc+=slot_class_acc
total_class_acc /= nlabels
for sid, slot_acc in enumerate(total_slot_class_acc):
writer.write("%d\t%.3f\n" % (sid, slot_acc))
writer.write("total class accuracy \t%.3f\n" % total_class_acc)
logger.info("Done analysis: %s" % output_eval_incorr_file)
print(class_correct)
print(class_count)
if __name__ == "__main__":
main()
|
[
"numpy.sum",
"argparse.ArgumentParser",
"numpy.random.seed",
"numpy.argmax",
"csv.reader",
"torch.utils.data.RandomSampler",
"pytorch_pretrained_bert.optimization.BertAdam",
"pytorch_pretrained_bert.tokenization.BertTokenizer.from_pretrained",
"seaborn.heatmap",
"torch.cat",
"torch.cuda.device_count",
"torch.utils.data.TensorDataset",
"torch.distributed.get_world_size",
"torch.device",
"torch.no_grad",
"os.path.join",
"torch.utils.data.DataLoader",
"torch.distributed.get_rank",
"matplotlib.pyplot.close",
"torch.load",
"os.path.exists",
"apex.optimizers.FusedAdam",
"apex.optimizers.FP16_Optimizer",
"torch.utils.data.distributed.DistributedSampler",
"torch.Tensor",
"random.seed",
"torch.utils.data.SequentialSampler",
"torch.cuda.set_device",
"matplotlib.pyplot.subplots",
"seaborn.set_context",
"BeliefTrackerSlotQueryMultiSlotTransformer.BeliefTracker",
"tqdm.tqdm",
"matplotlib.pyplot.show",
"torch.manual_seed",
"BeliefTrackerSlotQueryMultiSlotEWC.EWC",
"torch.cuda.is_available",
"apex.parallel.DistributedDataParallel",
"os.listdir",
"tensorboardX.SummaryWriter",
"json.load",
"torch.distributed.init_process_group",
"os.makedirs",
"logging.basicConfig",
"torch.nn.DataParallel",
"torch.cuda.manual_seed_all",
"pdb.set_trace",
"torch.tensor",
"operator.itemgetter",
"logging.getLogger"
] |
[((535, 570), 'seaborn.set_context', 'seaborn.set_context', ([], {'context': '"""talk"""'}), "(context='talk')\n", (554, 570), False, 'import seaborn\n'), ((572, 715), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt=\n '%m/%d/%Y %H:%M:%S', level=logging.INFO)\n", (591, 715), False, 'import logging\n'), ((755, 782), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (772, 782), False, 'import logging\n'), ((11829, 11892), 'torch.tensor', 'torch.tensor', (['[f.input_ids for f in features]'], {'dtype': 'torch.long'}), '([f.input_ids for f in features], dtype=torch.long)\n', (11841, 11892), False, 'import torch\n'), ((11912, 11975), 'torch.tensor', 'torch.tensor', (['[f.input_len for f in features]'], {'dtype': 'torch.long'}), '([f.input_len for f in features], dtype=torch.long)\n', (11924, 11975), False, 'import torch\n'), ((11996, 12058), 'torch.tensor', 'torch.tensor', (['[f.label_id for f in features]'], {'dtype': 'torch.long'}), '([f.label_id for f in features], dtype=torch.long)\n', (12008, 12058), False, 'import torch\n'), ((12084, 12151), 'torch.tensor', 'torch.tensor', (['[f.prev_label_id for f in features]'], {'dtype': 'torch.long'}), '([f.prev_label_id for f in features], dtype=torch.long)\n', (12096, 12151), False, 'import torch\n'), ((13909, 13931), 'numpy.argmax', 'np.argmax', (['out'], {'axis': '(1)'}), '(out, axis=1)\n', (13918, 13931), True, 'import numpy as np\n'), ((13943, 13968), 'numpy.sum', 'np.sum', (['(outputs == labels)'], {}), '(outputs == labels)\n', (13949, 13968), True, 'import numpy as np\n'), ((14266, 14291), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (14289, 14291), False, 'import argparse\n'), ((22629, 22672), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {'exist_ok': '(True)'}), '(args.output_dir, exist_ok=True)\n', (22640, 22672), False, 'import os\n'), ((24097, 24119), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (24108, 24119), False, 'import random\n'), ((24124, 24149), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (24138, 24149), True, 'import numpy as np\n'), ((24154, 24182), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (24171, 24182), False, 'import torch\n'), ((25330, 25391), 'os.path.join', 'os.path.join', (['args.bert_dir', "('%s-vocab.txt' % args.bert_model)"], {}), "(args.bert_dir, '%s-vocab.txt' % args.bert_model)\n", (25342, 25391), False, 'import os\n'), ((25501, 25575), 'pytorch_pretrained_bert.tokenization.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['vocab_dir'], {'do_lower_case': 'args.do_lower_case'}), '(vocab_dir, do_lower_case=args.do_lower_case)\n', (25530, 25575), False, 'from pytorch_pretrained_bert.tokenization import BertTokenizer\n'), ((29016, 29055), 'BeliefTrackerSlotQueryMultiSlotTransformer.BeliefTracker', 'BeliefTracker', (['args', 'num_labels', 'device'], {}), '(args, num_labels, device)\n', (29029, 29055), False, 'from BeliefTrackerSlotQueryMultiSlotTransformer import BeliefTracker\n'), ((29229, 29276), 'torch.load', 'torch.load', (['args.load_path'], {'map_location': 'device'}), '(args.load_path, map_location=device)\n', (29239, 29276), False, 'import torch\n'), ((43141, 43191), 'os.path.join', 'os.path.join', (['args.output_dir', 
'"""pytorch_model.bin"""'], {}), "(args.output_dir, 'pytorch_model.bin')\n", (43153, 43191), False, 'import os\n'), ((43261, 43311), 'torch.load', 'torch.load', (['output_model_file'], {'map_location': 'device'}), '(output_model_file, map_location=device)\n', (43271, 43311), False, 'import torch\n'), ((22435, 22466), 'os.path.exists', 'os.path.exists', (['args.output_dir'], {}), '(args.output_dir)\n', (22449, 22466), False, 'import os\n'), ((22471, 22498), 'os.listdir', 'os.listdir', (['args.output_dir'], {}), '(args.output_dir)\n', (22481, 22498), False, 'import os\n'), ((22854, 22908), 'tensorboardX.SummaryWriter', 'SummaryWriter', (["('./%s/%s' % (args.tf_dir, tb_file_name))"], {}), "('./%s/%s' % (args.tf_dir, tb_file_name))\n", (22867, 22908), False, 'from tensorboardX import SummaryWriter\n'), ((22988, 23042), 'os.path.join', 'os.path.join', (['args.output_dir', "('%s.txt' % tb_file_name)"], {}), "(args.output_dir, '%s.txt' % tb_file_name)\n", (23000, 23042), False, 'import os\n'), ((23282, 23307), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (23305, 23307), False, 'import torch\n'), ((23326, 23364), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.local_rank'], {}), '(args.local_rank)\n', (23347, 23364), False, 'import torch\n'), ((23382, 23419), 'torch.device', 'torch.device', (['"""cuda"""', 'args.local_rank'], {}), "('cuda', args.local_rank)\n", (23394, 23419), False, 'import torch\n'), ((23540, 23592), 'torch.distributed.init_process_group', 'torch.distributed.init_process_group', ([], {'backend': '"""nccl"""'}), "(backend='nccl')\n", (23576, 23592), False, 'import torch\n'), ((24209, 24246), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (24235, 24246), False, 'import torch\n'), ((25403, 25428), 'os.path.exists', 'os.path.exists', (['vocab_dir'], {}), '(vocab_dir)\n', (25417, 25428), False, 'import os\n'), ((26735, 26813), 'torch.utils.data.TensorDataset', 'TensorDataset', (['all_input_ids', 'all_input_len', 'all_label_ids', 'all_prev_label_ids'], {}), '(all_input_ids, all_input_len, all_label_ids, all_prev_label_ids)\n', (26748, 26813), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((27003, 27082), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'sampler': 'train_sampler', 'batch_size': 'args.train_batch_size'}), '(train_data, sampler=train_sampler, batch_size=args.train_batch_size)\n', (27013, 27082), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((27840, 27938), 'torch.utils.data.TensorDataset', 'TensorDataset', (['all_input_ids_dev', 'all_input_len_dev', 'all_label_ids_dev', 'all_prev_label_ids_dev'], {}), '(all_input_ids_dev, all_input_len_dev, all_label_ids_dev,\n all_prev_label_ids_dev)\n', (27853, 27938), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((27957, 27984), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['dev_data'], {}), '(dev_data)\n', (27974, 27984), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((28010, 28083), 'torch.utils.data.DataLoader', 'DataLoader', (['dev_data'], {'sampler': 'dev_sampler', 'batch_size': 'args.dev_batch_size'}), '(dev_data, sampler=dev_sampler, batch_size=args.dev_batch_size)\n', (28020, 28083), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), 
((30792, 30802), 'apex.parallel.DistributedDataParallel', 'DDP', (['model'], {}), '(model)\n', (30795, 30802), True, 'from apex.parallel import DistributedDataParallel as DDP\n'), ((33363, 33466), 'BeliefTrackerSlotQueryMultiSlotEWC.EWC', 'EWC', (['model', 'dev_dataloader'], {'oldtask': 'prev_slot_id', 'num_labels': 'num_labels', 'device': 'device', 'n_gpu': 'n_gpu'}), '(model, dev_dataloader, oldtask=prev_slot_id, num_labels=num_labels,\n device=device, n_gpu=n_gpu)\n', (33366, 33466), False, 'from BeliefTrackerSlotQueryMultiSlotEWC import EWC\n'), ((44301, 44329), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (44322, 44329), False, 'import torch\n'), ((45131, 45209), 'torch.utils.data.TensorDataset', 'TensorDataset', (['all_input_ids', 'all_input_len', 'all_label_ids', 'all_prev_label_ids'], {}), '(all_input_ids, all_input_len, all_label_ids, all_prev_label_ids)\n', (45144, 45209), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((45273, 45301), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['eval_data'], {}), '(eval_data)\n', (45290, 45301), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((45328, 45404), 'torch.utils.data.DataLoader', 'DataLoader', (['eval_data'], {'sampler': 'eval_sampler', 'batch_size': 'args.eval_batch_size'}), '(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)\n', (45338, 45404), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((45775, 45815), 'tqdm.tqdm', 'tqdm', (['eval_dataloader'], {'desc': '"""Evaluating"""'}), "(eval_dataloader, desc='Evaluating')\n", (45779, 45815), False, 'from tqdm import tqdm, trange\n'), ((50344, 50399), 'os.path.join', 'os.path.join', (['args.output_dir', "('%s.txt' % out_file_name)"], {}), "(args.output_dir, '%s.txt' % out_file_name)\n", (50356, 50399), False, 'import os\n'), ((50971, 50986), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (50984, 50986), False, 'import pdb\n'), ((51972, 52030), 'torch.utils.data.TensorDataset', 'TensorDataset', (['all_input_ids', 'all_input_len', 'all_label_ids'], {}), '(all_input_ids, all_input_len, all_label_ids)\n', (51985, 52030), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((52094, 52122), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['eval_data'], {}), '(eval_data)\n', (52111, 52122), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((52149, 52206), 'torch.utils.data.DataLoader', 'DataLoader', (['eval_data'], {'sampler': 'eval_sampler', 'batch_size': '(1)'}), '(eval_data, sampler=eval_sampler, batch_size=1)\n', (52159, 52206), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((52396, 52436), 'tqdm.tqdm', 'tqdm', (['eval_dataloader'], {'desc': '"""Evaluating"""'}), "(eval_dataloader, desc='Evaluating')\n", (52400, 52436), False, 'from tqdm import tqdm, trange\n'), ((55601, 55654), 'os.path.join', 'os.path.join', (['args.output_dir', '"""incorrect_dialog.txt"""'], {}), "(args.output_dir, 'incorrect_dialog.txt')\n", (55613, 55654), False, 'import os\n'), ((56170, 56225), 'os.path.join', 'os.path.join', (['args.output_dir', '"""per_class_accuracy.txt"""'], {}), "(args.output_dir, 'per_class_accuracy.txt')\n", (56182, 56225), False, 'import os\n'), ((2433, 2483), 'csv.reader', 'csv.reader', (['f'], 
{'delimiter': '"""\t"""', 'quotechar': 'quotechar'}), "(f, delimiter='\\t', quotechar=quotechar)\n", (2443, 2483), False, 'import csv\n'), ((3119, 3141), 'json.load', 'json.load', (['fp_ontology'], {}), '(fp_ontology)\n', (3128, 3141), False, 'import json\n'), ((13115, 13171), 'torch.tensor', 'torch.tensor', (['[f[0] for f in features]'], {'dtype': 'torch.long'}), '([f[0] for f in features], dtype=torch.long)\n', (13127, 13171), False, 'import torch\n'), ((13203, 13259), 'torch.tensor', 'torch.tensor', (['[f[1] for f in features]'], {'dtype': 'torch.long'}), '([f[1] for f in features], dtype=torch.long)\n', (13215, 13259), False, 'import torch\n'), ((26876, 26901), 'torch.utils.data.RandomSampler', 'RandomSampler', (['train_data'], {}), '(train_data)\n', (26889, 26901), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((26944, 26974), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['train_data'], {}), '(train_data)\n', (26962, 26974), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((30839, 30867), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (30860, 30867), False, 'import torch\n'), ((32280, 32388), 'apex.optimizers.FusedAdam', 'FusedAdam', (['optimizer_grouped_parameters'], {'lr': 'args.learning_rate', 'bias_correction': '(False)', 'max_grad_norm': '(1.0)'}), '(optimizer_grouped_parameters, lr=args.learning_rate,\n bias_correction=False, max_grad_norm=1.0)\n', (32289, 32388), False, 'from apex.optimizers import FusedAdam\n'), ((32749, 32863), 'pytorch_pretrained_bert.optimization.BertAdam', 'BertAdam', (['optimizer_grouped_parameters'], {'lr': 'args.learning_rate', 'warmup': 'args.warmup_proportion', 't_total': 't_total'}), '(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.\n warmup_proportion, t_total=t_total)\n', (32757, 32863), False, 'from pytorch_pretrained_bert.optimization import BertAdam\n'), ((49171, 49217), 'torch.cat', 'torch.cat', (['[eval_acc_slot, prev_eval_acc_slot]'], {}), '([eval_acc_slot, prev_eval_acc_slot])\n', (49180, 49217), False, 'import torch\n'), ((51033, 51140), 'seaborn.heatmap', 'seaborn.heatmap', (['data'], {'xticklabels': 'x', 'square': '(True)', 'yticklabels': 'y', 'vmin': '(0.0)', 'vmax': '(1.0)', 'cbar': '(False)', 'ax': 'ax'}), '(data, xticklabels=x, square=True, yticklabels=y, vmin=0.0,\n vmax=1.0, cbar=False, ax=ax)\n', (51048, 51140), False, 'import seaborn\n'), ((53650, 53665), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (53663, 53665), False, 'import pdb\n'), ((3034, 3089), 'os.path.join', 'os.path.join', (['config.data_dir', '"""ontology_dstc2_en.json"""'], {}), "(config.data_dir, 'ontology_dstc2_en.json')\n", (3046, 3089), False, 'import os\n'), ((3554, 3576), 'json.load', 'json.load', (['fp_ontology'], {}), '(fp_ontology)\n', (3563, 3576), False, 'import json\n'), ((5630, 5665), 'os.path.join', 'os.path.join', (['data_dir', '"""train.tsv"""'], {}), "(data_dir, 'train.tsv')\n", (5642, 5665), False, 'import os\n'), ((5849, 5882), 'os.path.join', 'os.path.join', (['data_dir', '"""dev.tsv"""'], {}), "(data_dir, 'dev.tsv')\n", (5861, 5882), False, 'import os\n'), ((6065, 6099), 'os.path.join', 'os.path.join', (['data_dir', '"""test.tsv"""'], {}), "(data_dir, 'test.tsv')\n", (6077, 6099), False, 'import os\n'), ((31880, 31914), 'torch.distributed.get_world_size', 'torch.distributed.get_world_size', ([], {}), '()\n', (31912, 31914), False, 'import torch\n'), ((32552, 32602), 
'apex.optimizers.FP16_Optimizer', 'FP16_Optimizer', (['optimizer'], {'dynamic_loss_scale': '(True)'}), '(optimizer, dynamic_loss_scale=True)\n', (32566, 32602), False, 'from apex.optimizers import FP16_Optimizer\n'), ((32649, 32709), 'apex.optimizers.FP16_Optimizer', 'FP16_Optimizer', (['optimizer'], {'static_loss_scale': 'args.loss_scale'}), '(optimizer, static_loss_scale=args.loss_scale)\n', (32663, 32709), False, 'from apex.optimizers import FP16_Optimizer\n'), ((33731, 33771), 'tqdm.tqdm', 'tqdm', (['train_dataloader'], {'desc': '"""Iteration"""'}), "(train_dataloader, desc='Iteration')\n", (33735, 33771), False, 'from tqdm import tqdm, trange\n'), ((36887, 36926), 'tqdm.tqdm', 'tqdm', (['dev_dataloader'], {'desc': '"""Validation"""'}), "(dev_dataloader, desc='Validation')\n", (36891, 36926), False, 'from tqdm import tqdm, trange\n'), ((42061, 42111), 'os.path.join', 'os.path.join', (['args.output_dir', '"""pytorch_model.bin"""'], {}), "(args.output_dir, 'pytorch_model.bin')\n", (42073, 42111), False, 'import os\n'), ((44381, 44409), 'torch.distributed.get_rank', 'torch.distributed.get_rank', ([], {}), '()\n', (44407, 44409), False, 'import torch\n'), ((46086, 46101), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (46099, 46101), False, 'import torch\n'), ((49350, 49372), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (49369, 49372), False, 'import operator\n'), ((50926, 50954), 'torch.distributed.get_rank', 'torch.distributed.get_rank', ([], {}), '()\n', (50952, 50954), False, 'import torch\n'), ((52646, 52661), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (52659, 52661), False, 'import torch\n'), ((3478, 3524), 'os.path.join', 'os.path.join', (['config.data_dir', '"""ontology.json"""'], {}), "(config.data_dir, 'ontology.json')\n", (3490, 3524), False, 'import os\n'), ((23207, 23232), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (23230, 23232), False, 'import torch\n'), ((37353, 37368), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (37366, 37368), False, 'import torch\n'), ((54005, 54053), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nturn', '(1)'], {'figsize': '(50, 10 * nturn)'}), '(nturn, 1, figsize=(50, 10 * nturn))\n', (54017, 54053), True, 'import matplotlib.pyplot as plt\n'), ((54732, 54742), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (54740, 54742), True, 'import matplotlib.pyplot as plt\n'), ((54885, 54896), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (54894, 54896), True, 'import matplotlib.pyplot as plt\n'), ((53261, 53294), 'torch.Tensor', 'torch.Tensor', (['none_value_id[8:18]'], {}), '(none_value_id[8:18])\n', (53273, 53294), False, 'import torch\n'), ((53369, 53402), 'torch.Tensor', 'torch.Tensor', (['none_value_id[8:18]'], {}), '(none_value_id[8:18])\n', (53381, 53402), False, 'import torch\n'), ((53476, 53508), 'torch.Tensor', 'torch.Tensor', (['none_value_id[0:8]'], {}), '(none_value_id[0:8])\n', (53488, 53508), False, 'import torch\n'), ((53582, 53614), 'torch.Tensor', 'torch.Tensor', (['none_value_id[18:]'], {}), '(none_value_id[18:])\n', (53594, 53614), False, 'import torch\n')]
|
import numpy as np
import os
import re
import cPickle
class read_cifar10(object):
def __init__(self, data_path=None, is_training=True):
self.data_path = data_path
self.is_training = is_training
def load_data(self):
files = os.listdir(self.data_path)
if self.is_training is True:
pattern = re.compile('(data_batch_).')
to_read = [m.group(0) for i in files for m in [pattern.search(i)] if m]
data = []
labels = []
for t in to_read:
with open(self.data_path+'/'+t, 'rb') as f:
d = cPickle.load(f)
data.append(d['data'])
labels.append(d['labels'])
data = np.vstack(data)
labels = np.hstack(labels)
else:
            with open(self.data_path+'/test_batch', 'rb') as f:
d = cPickle.load(f)
data = d['data']
labels = d['labels']
return data, labels
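# Minimal usage sketch (the directory below is the standard folder name produced by
# extracting cifar-10-python.tar.gz; adjust the path for your own setup):
if __name__ == '__main__':
    reader = read_cifar10(data_path='./cifar-10-batches-py', is_training=True)
    train_data, train_labels = reader.load_data()
    print(train_data.shape, train_labels.shape)  # 50000 x 3072 pixel vectors, 50000 labels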
|
[
"cPickle.load",
"numpy.hstack",
"os.listdir",
"numpy.vstack",
"re.compile"
] |
[((243, 269), 'os.listdir', 'os.listdir', (['self.data_path'], {}), '(self.data_path)\n', (253, 269), False, 'import os\n'), ((320, 348), 're.compile', 're.compile', (['"""(data_batch_)."""'], {}), "('(data_batch_).')\n", (330, 348), False, 'import re\n'), ((655, 670), 'numpy.vstack', 'np.vstack', (['data'], {}), '(data)\n', (664, 670), True, 'import numpy as np\n'), ((686, 703), 'numpy.hstack', 'np.hstack', (['labels'], {}), '(labels)\n', (695, 703), True, 'import numpy as np\n'), ((779, 794), 'cPickle.load', 'cPickle.load', (['f'], {}), '(f)\n', (791, 794), False, 'import cPickle\n'), ((555, 570), 'cPickle.load', 'cPickle.load', (['f'], {}), '(f)\n', (567, 570), False, 'import cPickle\n')]
|
import ast
from collections import OrderedDict
from .codegen import to_source
from .function_compiler_ast import timeshift, StandardizeDatesSimple
from dolo.compiler.recipes import recipes
from numba import njit
class NumericModel:
calibration = None
calibration_dict = None
covariances = None
markov_chain = None
def __init__(self, symbolic_model, options=None, infos=None):
self.symbolic = symbolic_model
self.symbols = symbolic_model.symbols
self.variables = sum( [tuple(e) for k,e in self.symbols.items() if k not in ('parameters','shocks','values')], ())
self.options = options if options is not None else {}
self.infos = infos if infos is not None else {}
self.infos['data_layout'] = 'columns'
self.name = self.infos['name']
self.model_type = self.infos['type']
# self.model_spec
self.__update_from_symbolic__()
self.__compile_functions__()
def __update_from_symbolic__(self):
import numpy
# updates calibration according to the symbolic definitions
system = self.symbolic.calibration_dict
from dolo.compiler.triangular_solver import solve_triangular_system
self.calibration_dict = solve_triangular_system( system )
from dolo.compiler.misc import CalibrationDict, calibration_to_vector
calib = calibration_to_vector(self.symbols, self.calibration_dict)
self.calibration = CalibrationDict(self.symbols, calib)
from .symbolic_eval import NumericEval
evaluator = NumericEval(self.calibration_dict)
# read symbolic structure
self.options = evaluator.eval(self.symbolic.options)
distribution = evaluator.eval(self.symbolic.distribution)
discrete_transition = evaluator.eval(self.symbolic.discrete_transition)
covariances = distribution
if distribution is None:
self.covariances = None
else:
self.covariances = numpy.atleast_2d(numpy.array(covariances, dtype=float))
markov_chain = discrete_transition
if markov_chain is None:
self.markov_chain = None
else:
self.markov_chain = [numpy.atleast_2d(numpy.array(tab, dtype=float)) for tab in markov_chain]
def get_calibration(self, pname, *args):
if isinstance(pname, list):
return [ self.get_calibration(p) for p in pname ]
elif isinstance(pname, tuple):
return tuple( [ self.get_calibration(p) for p in pname ] )
elif len(args)>0:
pnames = (pname,) + args
return self.get_calibration(pnames)
group = [g for g in self.symbols.keys() if pname in self.symbols[g]]
try:
group = group[0]
except:
raise Exception('Unknown symbol {}.'.format(pname))
i = self.symbols[group].index(pname)
v = self.calibration[group][i]
return v
def set_calibration(self, *args, **kwargs):
# raise exception if unknown symbol ?
if len(args)==2:
pname, pvalue = args
if isinstance(pname, str):
self.set_calibration(**{pname:pvalue})
else:
# else ignore pname and pvalue
calib = self.symbolic.calibration_dict
calib.update(kwargs)
self.__update_from_symbolic__()
def __str__(self):
from dolo.misc.termcolor import colored
s = u'''
Model object:
------------
- name: "{name}"
- type: "{type}"
- file: "{filename}\n'''.format(**self.infos)
ss = '\n- residuals:\n\n'
res = self.residuals()
# for eqgroup, eqlist in self.symbolic.equations.items():
for eqgroup in res.keys():
eqlist = self.symbolic.equations[eqgroup]
ss += u" {}\n".format(eqgroup)
for i, eq in enumerate(eqlist):
val = res[eqgroup][i]
if abs(val) < 1e-8:
val = 0
vals = '{:.4f}'.format(val)
if abs(val) > 1e-8:
vals = colored(vals, 'red')
# eq = eq.replace('|', u"\u27C2")
ss += u" {eqn:3} : {vals} : {eqs}\n".format(eqn=str(i+1), vals=vals, eqs=eq)
ss += "\n"
s += ss
# import pprint
# s += '- residuals:\n'
# s += pprint.pformat(compute_residuals(self),indent=2, depth=1)
return s
def __repr__(self):
return self.__str__()
@property
def x_bounds(self):
if 'controls_ub' in self.functions:
fun_lb = self.functions['controls_lb']
fun_ub = self.functions['controls_ub']
return [fun_lb, fun_ub]
else:
return None
def residuals(self, calib=None):
if self.model_type == 'dtcscc':
from dolo.algos.dtcscc.steady_state import residuals
return residuals(self, calib)
elif self.model_type == 'dtmscc':
from dolo.algos.dtmscc.steady_state import residuals
return residuals(self, calib)
def eval_formula(self, expr, dataframe=None, calib=None):
from dolo.compiler.eval_formula import eval_formula
if calib is None:
calib = self.calibration
return eval_formula(expr, dataframe=dataframe, context=calib)
def __compile_functions__(self):
from dolo.compiler.function_compiler_ast import compile_function_ast
from dolo.compiler.function_compiler import standard_function
defs = self.symbolic.definitions
# works for fg models only
model_type = self.model_type
if 'auxiliaries' not in self.symbols:
model_type += '_'
else:
# prepare auxiliaries
auxeqs = self.symbolic.equations['auxiliary']
auxdefs = {}
for time in [-1,0,1]:
dd = OrderedDict()
for eq in auxeqs:
lhs, rhs = eq.split('=')
lhs = ast.parse( str.strip(lhs) ).body[0].value
rhs = ast.parse( str.strip(rhs) ).body[0].value
tmp = timeshift(rhs, self.variables, time)
k = timeshift(lhs, self.variables, time)
k = StandardizeDatesSimple(self.variables).visit(k)
v = StandardizeDatesSimple(self.variables).visit(tmp)
dd[to_source(k)] = to_source(v)
auxdefs[time] = dd
recipe = recipes[model_type]
symbols = self.symbols # should match self.symbols
comps = []
functions = {}
original_functions = {}
original_gufunctions = {}
for funname in recipe['specs'].keys():
spec = recipe['specs'][funname]
if funname not in self.symbolic.equations:
if not spec.get('optional'):
raise Exception("The model doesn't contain equations of type '{}'.".format(funname))
else:
continue
if spec.get('target'):
# keep only right-hand side
# TODO: restore recursive definitions
eqs = self.symbolic.equations[funname]
eqs = [eq.split('=')[1] for eq in eqs]
eqs = [str.strip(eq) for eq in eqs]
target_spec = spec.get('target')
n_output = len(self.symbols[target_spec[0]])
# target_short_name = spec.get('target')[2]
if spec.get('recursive') is False:
target_spec = None
else:
target_spec[2] = 'out'
else:
target_spec = None
if spec.get('complementarities'):
# TODO: Rewrite and simplify
comp_spec = spec.get('complementarities')
comp_order = comp_spec['middle']
comp_args = comp_spec['left-right']
comps = []
eqs = []
for i,eq in enumerate(self.symbolic.equations[funname]):
if '|' in eq:
control = self.symbols[comp_order[0]][i]
eq, comp = str.split(eq,'|')
lhs, rhs = decode_complementarity(comp, control)
comps.append([lhs, rhs])
else:
comps.append(['-inf', 'inf'])
eqs.append(eq)
comp_lhs, comp_rhs = zip(*comps)
# fb_names = ['{}_lb'.format(funname), '{}_ub'.format(funname)]
fb_names = ['controls_lb'.format(funname), 'controls_ub'.format(funname)]
ddefs = OrderedDict()
for ag in comp_args:
if ag[0] == 'auxiliaries':
t = ag[1]
ddefs.update(auxdefs[t])
ddefs.update(defs)
lower_bound, gu_lower_bound = compile_function_ast(comp_lhs, symbols, comp_args, funname=fb_names[0],definitions=defs)
upper_bound, gu_upper_bound = compile_function_ast(comp_rhs, symbols, comp_args, funname=fb_names[1],definitions=defs)
n_output = len(comp_lhs)
functions[fb_names[0]] = standard_function(gu_lower_bound, n_output )
functions[fb_names[1]] = standard_function(gu_upper_bound, n_output )
original_functions[fb_names[0]] = lower_bound
original_functions[fb_names[1]] = upper_bound
original_gufunctions[fb_names[0]] = gu_lower_bound
original_gufunctions[fb_names[1]] = gu_upper_bound
# rewrite all equations as rhs - lhs
def filter_equal(eq):
if '=' in eq:
lhs, rhs = str.split(eq,'=')
eq = '{} - ( {} )'.format(rhs, lhs)
eq = str.strip(eq)
return eq
else:
return eq
eqs = [filter_equal(eq) for eq in eqs]
arg_names = recipe['specs'][funname]['eqs']
ddefs = OrderedDict()
for ag in arg_names:
if ag[0] == 'auxiliaries':
t = ag[1]
ddefs.update(auxdefs[t])
ddefs.update(defs)
fun, gufun = compile_function_ast(eqs, symbols, arg_names,
output_names=target_spec, funname=funname, definitions=ddefs,
)
# print("So far so good !")c
n_output = len(eqs)
original_functions[funname] = fun
functions[funname] = standard_function(gufun, n_output )
original_functions[funname] = fun
original_gufunctions[funname] = gufun
self.__original_functions__ = original_functions
self.__original_gufunctions__ = original_gufunctions
self.functions = functions
import re
regex = re.compile("(.*)<=(.*)<=(.*)")
def decode_complementarity(comp, control):
'''
# comp can be either:
- None
- "a<=expr" where a is a controls
- "expr<=a" where a is a control
- "expr1<=a<=expr2"
'''
try:
res = regex.match(comp).groups()
except:
raise Exception("Unable to parse complementarity condition '{}'".format(comp))
res = [r.strip() for r in res]
if res[1] != control:
msg = "Complementarity condition '{}' incorrect. Expected {} instead of {}.".format(comp, control, res[1])
raise Exception(msg)
return [res[0], res[2]]
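# Illustrative checks of decode_complementarity (the symbol name 'i' and the bounds are
# made-up examples, not taken from any model file):
#   decode_complementarity('0 <= i <= inf', 'i')  ->  ['0', 'inf']
#   decode_complementarity('0 <= x <= 1', 'i')    ->  raises Exception (control mismatch)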
|
[
"dolo.compiler.function_compiler_ast.compile_function_ast",
"dolo.algos.dtmscc.steady_state.residuals",
"dolo.misc.termcolor.colored",
"dolo.compiler.eval_formula.eval_formula",
"dolo.compiler.misc.calibration_to_vector",
"dolo.compiler.triangular_solver.solve_triangular_system",
"numpy.array",
"dolo.compiler.function_compiler.standard_function",
"collections.OrderedDict",
"dolo.compiler.misc.CalibrationDict",
"re.compile"
] |
[((11083, 11113), 're.compile', 're.compile', (['"""(.*)<=(.*)<=(.*)"""'], {}), "('(.*)<=(.*)<=(.*)')\n", (11093, 11113), False, 'import re\n'), ((1256, 1287), 'dolo.compiler.triangular_solver.solve_triangular_system', 'solve_triangular_system', (['system'], {}), '(system)\n', (1279, 1287), False, 'from dolo.compiler.triangular_solver import solve_triangular_system\n'), ((1384, 1442), 'dolo.compiler.misc.calibration_to_vector', 'calibration_to_vector', (['self.symbols', 'self.calibration_dict'], {}), '(self.symbols, self.calibration_dict)\n', (1405, 1442), False, 'from dolo.compiler.misc import CalibrationDict, calibration_to_vector\n'), ((1470, 1506), 'dolo.compiler.misc.CalibrationDict', 'CalibrationDict', (['self.symbols', 'calib'], {}), '(self.symbols, calib)\n', (1485, 1506), False, 'from dolo.compiler.misc import CalibrationDict, calibration_to_vector\n'), ((5332, 5386), 'dolo.compiler.eval_formula.eval_formula', 'eval_formula', (['expr'], {'dataframe': 'dataframe', 'context': 'calib'}), '(expr, dataframe=dataframe, context=calib)\n', (5344, 5386), False, 'from dolo.compiler.eval_formula import eval_formula\n'), ((4958, 4980), 'dolo.algos.dtmscc.steady_state.residuals', 'residuals', (['self', 'calib'], {}), '(self, calib)\n', (4967, 4980), False, 'from dolo.algos.dtmscc.steady_state import residuals\n'), ((10220, 10233), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10231, 10233), False, 'from collections import OrderedDict\n'), ((10442, 10553), 'dolo.compiler.function_compiler_ast.compile_function_ast', 'compile_function_ast', (['eqs', 'symbols', 'arg_names'], {'output_names': 'target_spec', 'funname': 'funname', 'definitions': 'ddefs'}), '(eqs, symbols, arg_names, output_names=target_spec,\n funname=funname, definitions=ddefs)\n', (10462, 10553), False, 'from dolo.compiler.function_compiler_ast import compile_function_ast\n'), ((10778, 10812), 'dolo.compiler.function_compiler.standard_function', 'standard_function', (['gufun', 'n_output'], {}), '(gufun, n_output)\n', (10795, 10812), False, 'from dolo.compiler.function_compiler import standard_function\n'), ((2020, 2057), 'numpy.array', 'numpy.array', (['covariances'], {'dtype': 'float'}), '(covariances, dtype=float)\n', (2031, 2057), False, 'import numpy\n'), ((5107, 5129), 'dolo.algos.dtmscc.steady_state.residuals', 'residuals', (['self', 'calib'], {}), '(self, calib)\n', (5116, 5129), False, 'from dolo.algos.dtmscc.steady_state import residuals\n'), ((5952, 5965), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5963, 5965), False, 'from collections import OrderedDict\n'), ((8787, 8800), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8798, 8800), False, 'from collections import OrderedDict\n'), ((9050, 9143), 'dolo.compiler.function_compiler_ast.compile_function_ast', 'compile_function_ast', (['comp_lhs', 'symbols', 'comp_args'], {'funname': 'fb_names[0]', 'definitions': 'defs'}), '(comp_lhs, symbols, comp_args, funname=fb_names[0],\n definitions=defs)\n', (9070, 9143), False, 'from dolo.compiler.function_compiler_ast import compile_function_ast\n'), ((9185, 9278), 'dolo.compiler.function_compiler_ast.compile_function_ast', 'compile_function_ast', (['comp_rhs', 'symbols', 'comp_args'], {'funname': 'fb_names[1]', 'definitions': 'defs'}), '(comp_rhs, symbols, comp_args, funname=fb_names[1],\n definitions=defs)\n', (9205, 9278), False, 'from dolo.compiler.function_compiler_ast import compile_function_ast\n'), ((9358, 9401), 'dolo.compiler.function_compiler.standard_function', 
'standard_function', (['gu_lower_bound', 'n_output'], {}), '(gu_lower_bound, n_output)\n', (9375, 9401), False, 'from dolo.compiler.function_compiler import standard_function\n'), ((9444, 9487), 'dolo.compiler.function_compiler.standard_function', 'standard_function', (['gu_upper_bound', 'n_output'], {}), '(gu_upper_bound, n_output)\n', (9461, 9487), False, 'from dolo.compiler.function_compiler import standard_function\n'), ((2237, 2266), 'numpy.array', 'numpy.array', (['tab'], {'dtype': 'float'}), '(tab, dtype=float)\n', (2248, 2266), False, 'import numpy\n'), ((4119, 4139), 'dolo.misc.termcolor.colored', 'colored', (['vals', '"""red"""'], {}), "(vals, 'red')\n", (4126, 4139), False, 'from dolo.misc.termcolor import colored\n')]
|
# load in data
import helper
import numpy as np
import torch
import torch.nn as nn
from string import punctuation
from collections import Counter
from torch.utils.data import TensorDataset, DataLoader
data_dir = './data/Seinfeld_Scripts.txt'
text = helper.load_data(data_dir)
# Check for a GPU
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
print('No GPU found. Please use a GPU to train your neural network.')
def create_lookup_tables(text):
"""
Create lookup tables for vocabulary
:param text: The text of tv scripts split into words
:return: A tuple of dicts (vocab_to_int, int_to_vocab)
"""
word_counts = Counter(text)
sorted_vocab = sorted(word_counts, key=word_counts.get, reverse=True)
int_to_vocab = {ii: word for ii, word in enumerate(sorted_vocab)}
vocab_to_int = {word: ii for ii, word in int_to_vocab.items()}
return vocab_to_int, int_to_vocab
def token_lookup():
"""
Generate a dict to turn punctuation into a token.
:return: Tokenized dictionary where the key is the punctuation and the value is the token
"""
return {
'.': '||PERIOD||',
',': '||COMMA||',
'"': '||QUOTATION_MARK||',
';': '||SEMICOLON||',
'!': '||EXCLAMATION_MARK||',
'?': '||QUESTION_MARK||',
        '(': '||LEFT_PAREN||',
')': '||RIGHT_PAREN||',
'-': '||DASH||',
'\n': '||RETURN||',
}
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
def batch_data(words, sequence_length, batch_size):
"""
Batch the neural network data using DataLoader
:param words: The word ids of the TV scripts
:param sequence_length: The sequence length of each batch
:param batch_size: The size of each batch; the number of sequences in a batch
:return: DataLoader with batched data
"""
n_batches = len(words)//batch_size
words = words[:n_batches*batch_size]
features = []
targets = []
total = len(words)-sequence_length
for idx in range(0, total):
x = words[idx:idx+sequence_length]
features.append(x)
y = words[idx+sequence_length]
targets.append(y)
train_x = np.array(features)
train_y = np.array(targets)
train_data = TensorDataset(torch.from_numpy(train_x), torch.from_numpy(train_y))
train_loader = DataLoader(train_data, shuffle=False, batch_size=batch_size)
# return a dataloader
return train_loader
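# Quick sanity check of batch_data on a toy sequence (illustrative only; the numbers
# below are arbitrary and independent of the Seinfeld corpus):
_toy_loader = batch_data(list(range(50)), sequence_length=5, batch_size=10)
_sample_x, _sample_y = next(iter(_toy_loader))
print(_sample_x.shape)  # torch.Size([10, 5])
print(_sample_y.shape)  # torch.Size([10])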
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
print(token_dict)
print(int_text[:10])
print(list(vocab_to_int.values())[:10])
print(list(int_to_vocab.values())[:10])
class RNN(nn.Module):
def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5):
"""
Initialize the PyTorch RNN Module
:param vocab_size: The number of input dimensions of the neural network (the size of the vocabulary)
:param output_size: The number of output dimensions of the neural network
:param embedding_dim: The size of embeddings, should you choose to use them
:param hidden_dim: The size of the hidden layer outputs
:param dropout: dropout to add in between LSTM/GRU layers
"""
super(RNN, self).__init__()
# set class variables
self.output_size = output_size
self.n_layers = n_layers
self.hidden_dim = hidden_dim
# define model layers
self.embedding = nn.Embedding(vocab_size, embedding_dim)
self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers,
dropout=dropout, batch_first=True)
self.fc = nn.Linear(hidden_dim, output_size)
self.dropout = nn.Dropout(dropout)
def forward(self, nn_input, hidden):
"""
Forward propagation of the neural network
:param nn_input: The input to the neural network
:param hidden: The hidden state
:return: Two Tensors, the output of the neural network and the latest hidden state
"""
batch_size = nn_input.size(0)
x = self.embedding(nn_input)
x,h = self.lstm(x, hidden)
x = x.contiguous().view(-1, self.hidden_dim)
# x = self.dropout(x)
x = self.fc(x)
x = x.view(batch_size, -1, self.output_size)
x = x[:, -1]
# return one batch of output word scores and the hidden state
return x, h
def init_hidden(self, batch_size):
'''
Initialize the hidden state of an LSTM/GRU
:param batch_size: The batch_size of the hidden state
:return: hidden state of dims (n_layers, batch_size, hidden_dim)
'''
# Implement function
weight = next(self.parameters()).data
if (train_on_gpu):
hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda(),
weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda())
else:
hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_(),
weight.new(self.n_layers, batch_size, self.hidden_dim).zero_())
return hidden
def forward_back_prop(rnn, optimizer, criterion, inp, target, hidden):
"""
Forward and backward propagation on the neural network
    :param rnn: The PyTorch Module that holds the neural network
    :param optimizer: The PyTorch optimizer for the neural network
:param criterion: The PyTorch loss function
:param inp: A batch of input to the neural network
:param target: The target output for the batch of input
:return: The loss and the latest hidden state Tensor
"""
# move data to GPU, if available
if train_on_gpu:
inp, target = inp.cuda(), target.cuda()
# perform backpropagation and optimization
h = tuple([each.data for each in hidden])
rnn.zero_grad()
output, h = rnn(inp, h)
loss = criterion(output, target)
loss.backward()
nn.utils.clip_grad_norm_(rnn.parameters(), 5)
optimizer.step()
# return the loss over a batch and the hidden state produced by our model
return loss.item(), h
def train_rnn(rnn, batch_size, optimizer, criterion, n_epochs, show_every_n_batches=100):
batch_losses = []
rnn.train()
print("Training for %d epoch(s)..." % n_epochs)
for epoch_i in range(1, n_epochs + 1):
# initialize hidden state
hidden = rnn.init_hidden(batch_size)
for batch_i, (inputs, labels) in enumerate(train_loader, 1):
# make sure you iterate over completely full batches, only
n_batches = len(train_loader.dataset)//batch_size
if(batch_i > n_batches):
break
# forward, back prop
loss, hidden = forward_back_prop(rnn, optimizer, criterion, inputs, labels, hidden)
# record loss
batch_losses.append(loss)
# printing loss stats
if batch_i % show_every_n_batches == 0:
print('Epoch: {:>4}/{:<4} Loss: {}\n'.format(
epoch_i, n_epochs, np.average(batch_losses)))
batch_losses = []
# returns a trained rnn
return rnn
# Data params
# Sequence Length
sequence_length = 8 # of words in a sequence
# Batch Size
batch_size = 100
# data loader - do not change
train_loader = batch_data(int_text, sequence_length, batch_size)
# Training parameters
# Number of Epochs
num_epochs = 5
# Learning Rate
learning_rate = 0.001
# Model parameters
# Vocab size
vocab_size = len(vocab_to_int)
# Output size
output_size = vocab_size
# Embedding Dimension
embedding_dim = 128
# Hidden Dimension
hidden_dim = 512
# Number of RNN Layers
n_layers = 2
# Show stats for every n number of batches
show_every_n_batches = 500
# create model and move to gpu if available
rnn = RNN(vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5)
if train_on_gpu:
rnn.cuda()
# defining loss and optimization functions for training
optimizer = torch.optim.Adam(rnn.parameters(), lr=learning_rate)
criterion = nn.CrossEntropyLoss()
# training the model
trained_rnn = train_rnn(rnn, batch_size, optimizer, criterion, num_epochs, show_every_n_batches)
# saving the trained model
helper.save_model('./trained_tv_script', trained_rnn)
print('Model Trained and Saved')
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
trained_rnn = helper.load_model('./trained_tv_script')
import torch.nn.functional as F
def generate(rnn, prime_id, int_to_vocab, token_dict, pad_value, predict_len=100):
"""
Generate text using the neural network
    :param rnn: The PyTorch Module that holds the trained neural network
:param prime_id: The word id to start the first prediction
:param int_to_vocab: Dict of word id keys to word values
:param token_dict: Dict of puncuation tokens keys to puncuation values
:param pad_value: The value used to pad a sequence
:param predict_len: The length of text to generate
:return: The generated text
"""
rnn.eval()
# create a sequence (batch_size=1) with the prime_id
current_seq = np.full((1, sequence_length), pad_value)
current_seq[-1][-1] = prime_id
predicted = [int_to_vocab[prime_id]]
for _ in range(predict_len):
if train_on_gpu:
current_seq = torch.LongTensor(current_seq).cuda()
else:
current_seq = torch.LongTensor(current_seq)
# initialize the hidden state
hidden = rnn.init_hidden(current_seq.size(0))
# get the output of the rnn
output, _ = rnn(current_seq, hidden)
# get the next word probabilities
p = F.softmax(output, dim=1).data
if(train_on_gpu):
p = p.cpu() # move to cpu
# use top_k sampling to get the index of the next word
top_k = 5
p, top_i = p.topk(top_k)
top_i = top_i.numpy().squeeze()
# select the likely next word index with some element of randomness
p = p.numpy().squeeze()
word_i = np.random.choice(top_i, p=p/p.sum())
# retrieve that word from the dictionary
word = int_to_vocab[word_i]
predicted.append(word)
# the generated word becomes the next "current sequence" and the cycle can continue
        current_seq = np.roll(current_seq.cpu(), -1, 1)
current_seq[-1][-1] = word_i
gen_sentences = ' '.join(predicted)
# Replace punctuation tokens
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
gen_sentences = gen_sentences.replace(' ' + token.lower(), key)
gen_sentences = gen_sentences.replace('\n ', '\n')
gen_sentences = gen_sentences.replace('( ', '(')
# return all the sentences
return gen_sentences
# run the cell multiple times to get different results!
gen_length = 400 # modify the length to your preference
prime_word = 'jerry' # name for starting the script
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
pad_word = helper.SPECIAL_WORDS['PADDING']
generated_script = generate(trained_rnn, vocab_to_int[prime_word + ':'], int_to_vocab, token_dict, vocab_to_int[pad_word], gen_length)
print(generated_script)
|
[
"torch.nn.Dropout",
"torch.nn.Embedding",
"numpy.full",
"torch.utils.data.DataLoader",
"helper.save_model",
"helper.load_preprocess",
"torch.nn.Linear",
"collections.Counter",
"torch.nn.LSTM",
"numpy.average",
"numpy.roll",
"torch.cuda.is_available",
"torch.from_numpy",
"helper.load_data",
"torch.LongTensor",
"torch.nn.CrossEntropyLoss",
"torch.nn.functional.softmax",
"numpy.array",
"helper.preprocess_and_save_data",
"helper.load_model"
] |
[((251, 277), 'helper.load_data', 'helper.load_data', (['data_dir'], {}), '(data_dir)\n', (267, 277), False, 'import helper\n'), ((312, 337), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (335, 337), False, 'import torch\n'), ((1432, 1509), 'helper.preprocess_and_save_data', 'helper.preprocess_and_save_data', (['data_dir', 'token_lookup', 'create_lookup_tables'], {}), '(data_dir, token_lookup, create_lookup_tables)\n', (1463, 1509), False, 'import helper\n'), ((2518, 2542), 'helper.load_preprocess', 'helper.load_preprocess', ([], {}), '()\n', (2540, 2542), False, 'import helper\n'), ((8225, 8246), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (8244, 8246), True, 'import torch.nn as nn\n'), ((8394, 8447), 'helper.save_model', 'helper.save_model', (['"""./trained_tv_script"""', 'trained_rnn'], {}), "('./trained_tv_script', trained_rnn)\n", (8411, 8447), False, 'import helper\n'), ((8527, 8551), 'helper.load_preprocess', 'helper.load_preprocess', ([], {}), '()\n', (8549, 8551), False, 'import helper\n'), ((8566, 8606), 'helper.load_model', 'helper.load_model', (['"""./trained_tv_script"""'], {}), "('./trained_tv_script')\n", (8583, 8606), False, 'import helper\n'), ((656, 669), 'collections.Counter', 'Counter', (['text'], {}), '(text)\n', (663, 669), False, 'from collections import Counter\n'), ((2200, 2218), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (2208, 2218), True, 'import numpy as np\n'), ((2233, 2250), 'numpy.array', 'np.array', (['targets'], {}), '(targets)\n', (2241, 2250), True, 'import numpy as np\n'), ((2355, 2415), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'shuffle': '(False)', 'batch_size': 'batch_size'}), '(train_data, shuffle=False, batch_size=batch_size)\n', (2365, 2415), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((9292, 9332), 'numpy.full', 'np.full', (['(1, sequence_length)', 'pad_value'], {}), '((1, sequence_length), pad_value)\n', (9299, 9332), True, 'import numpy as np\n'), ((2282, 2307), 'torch.from_numpy', 'torch.from_numpy', (['train_x'], {}), '(train_x)\n', (2298, 2307), False, 'import torch\n'), ((2309, 2334), 'torch.from_numpy', 'torch.from_numpy', (['train_y'], {}), '(train_y)\n', (2325, 2334), False, 'import torch\n'), ((3504, 3543), 'torch.nn.Embedding', 'nn.Embedding', (['vocab_size', 'embedding_dim'], {}), '(vocab_size, embedding_dim)\n', (3516, 3543), True, 'import torch.nn as nn\n'), ((3564, 3643), 'torch.nn.LSTM', 'nn.LSTM', (['embedding_dim', 'hidden_dim', 'n_layers'], {'dropout': 'dropout', 'batch_first': '(True)'}), '(embedding_dim, hidden_dim, n_layers, dropout=dropout, batch_first=True)\n', (3571, 3643), True, 'import torch.nn as nn\n'), ((3691, 3725), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'output_size'], {}), '(hidden_dim, output_size)\n', (3700, 3725), True, 'import torch.nn as nn\n'), ((3749, 3768), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (3759, 3768), True, 'import torch.nn as nn\n'), ((10475, 10502), 'numpy.roll', 'np.roll', (['current_seq', '(-1)', '(1)'], {}), '(current_seq, -1, 1)\n', (10482, 10502), True, 'import numpy as np\n'), ((9571, 9600), 'torch.LongTensor', 'torch.LongTensor', (['current_seq'], {}), '(current_seq)\n', (9587, 9600), False, 'import torch\n'), ((9831, 9855), 'torch.nn.functional.softmax', 'F.softmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (9840, 9855), True, 'import torch.nn.functional as F\n'), ((9494, 9523), 'torch.LongTensor', 'torch.LongTensor', 
(['current_seq'], {}), '(current_seq)\n', (9510, 9523), False, 'import torch\n'), ((7238, 7262), 'numpy.average', 'np.average', (['batch_losses'], {}), '(batch_losses)\n', (7248, 7262), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""Next-Word Prediction using Universal Sentence Encoder.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1r2ma5P7w2LE30L1o5mAyNPLE7Qi3JxoL
# **Google drive for local storage**
_NB: Comments are included throughout to make the notebook easier to evaluate._
Uncomment the lines under **PREVIEW OUTPUT** to inspect intermediate results.
"""
# Commented out IPython magic to ensure Python compatibility.
# This cell will prompt an external url to accept permissions for Colab to access Google Drive
from google.colab import drive
drive.mount("/gdrive")
# %ls
"""# **Import ***"""
# Getting all required libraries
import os
import re
import gdown
import numpy
import string
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
from absl import logging
import tensorflow_hub as hub
from tensorflow import keras
import matplotlib.pyplot as plt
from keras.models import Sequential
import tensorflow.keras.backend as K
from keras.layers.recurrent import LSTM
from keras.layers import Dense, Activation
from keras.callbacks import LambdaCallback
from keras.utils.data_utils import get_file
from keras.layers.embeddings import Embedding
from sklearn.model_selection import train_test_split
"""## **Data preparation - _Generating Corpus_**"""
# Download data from Google drive
'''
ORIGINAL DATASET URL:
https://raw.githubusercontent.com/maxim5/stanford-tensorflow-tutorials/master/data/arxiv_abstracts.txt
'''
url = 'https://drive.google.com/uc?id=1YTBR7FiXssaKXHhOZbUbwoWw6jzQxxKW'
output = 'corpus.txt'
gdown.download(url, output, quiet=False)
# sentence_length = 40
# Read local file from directory
with open('corpus.txt') as subject:
cache = subject.readlines()
translator = str.maketrans('', '', string.punctuation) # Remove punctuation
lines = [doc.lower().translate(translator) for doc in cache] # Switch to lower case
# PREVIEW OUTPUT ::
# print(lines[0][:100])
# len(lines)
# Generate an list of single/independent words
vocabulary = list(set(' '.join(lines).replace('\n','').split(' ')))
primary_store = {}
for index, word in enumerate(vocabulary):
  primary_store[word] = index
# PREVIEW OUTPUT ::
# print(vocabulary[:50])
# len(vocabulary)
# Splitting data into Train sets and test sets
X = []
y = []
for c in lines:
xxxx = c.replace('\n','').split(' ')
X.append(' '.join(xxxx[:-1])) # X from the corpus
yyyy = [0 for i in range(len(vocabulary))] # Generate Y from the Vocabulary
  yyyy[primary_store[xxxx[-1]]] = 1
y.append(yyyy)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
y_test = numpy.array(y_test)
y_train = numpy.array(y_train)
# PREVIEW OUTPUT ::
# print(X_train[:10])
# print(y_train[:10])
# print(X_test[:10])
# print(y_test[:10])
"""## **Embeddings!**"""
# Import the Universal Sentence Encoder's TF Hub module (Here we're making use of version 4)
# This will take a while but won't be long :)
module_url = "https://tfhub.dev/google/universal-sentence-encoder/4"
appreciate = hub.load(module_url)
# Making it easier - Function for embedding
def embed(goodness):
return appreciate(goodness)
# REVIEW OUTPUT ::
# appreciate.variables
# Wrapping up with the U-S-E
X_train = embed(X_train)
X_test = embed(X_test)
X_train = X_train.numpy()
X_test = X_test.numpy()
# PREVIEW OUTPUT ::
# print(X_train[:10])
# print(y_train[:10])
# print(X_test[:10])
# print(y_test[:10])
# print(X_train.shape, X_test.shape, y_test.shape, y_train.shape)
"""# **Building the model**"""
model = Sequential()
# model.add(Embedding(input_dim=len(vocabulary), output_dim=100))
model = Sequential()
# model.add(LSTM(units=100, input_shape=[512]))
model.add(Dense(512, input_shape=[512], activation = 'relu'))
model.add(Dense(units=len(vocabulary), activation = 'softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
model.summary()
# Training the model.
model.fit(X_train, y_train, batch_size=512, shuffle=True, epochs=20, validation_data=(X_test, y_test), callbacks=[LambdaCallback()])
"""#**Unto the tests!**"""
# Create function to predict and show detailed output
def next_word(collection=[], extent=1):
for item in collection:
text = item
for i in range(extent):
prediction = model.predict(x=embed([item]).numpy())
idx = np.argmax(prediction[-1])
item += ' ' + vocabulary[idx]
print(text + ' --> ' + item + '\nNEXT WORD: ' + item.split(' ')[-1] + '\n')
# Tests - please feel free to explore
single_text = ['and some other essential']
next_word(single_text)
# Testing on a collection of words
text_collection = ['deep convolutional', 'simple and effective', 'a nonconvex', 'a']
next_word(text_collection)
"""## **For the record**
The dataset is based on a Stanford TensorFlow tutorial, so the predicted words will be drawn from _common terms_ in deep learning and machine learning.
"""
# Storing data
vocabulary = numpy.array(vocabulary)
numpy.save('./vocabulary.npy', vocabulary)
model.save('./NWP-USE')
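# Reloading the stored artifacts in a later session (illustrative sketch; the
# paths simply mirror the save calls above) ::
# reloaded_vocab = numpy.load('./vocabulary.npy')
# reloaded_model = keras.models.load_model('./NWP-USE')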
## END OF NOTEBOOK
|
[
"tensorflow_hub.load",
"numpy.save",
"numpy.argmax",
"gdown.download",
"sklearn.model_selection.train_test_split",
"keras.callbacks.LambdaCallback",
"keras.layers.Dense",
"numpy.array",
"google.colab.drive.mount",
"keras.models.Sequential"
] |
[((690, 712), 'google.colab.drive.mount', 'drive.mount', (['"""/gdrive"""'], {}), "('/gdrive')\n", (701, 712), False, 'from google.colab import drive\n'), ((1704, 1744), 'gdown.download', 'gdown.download', (['url', 'output'], {'quiet': '(False)'}), '(url, output, quiet=False)\n', (1718, 1744), False, 'import gdown\n'), ((2745, 2800), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.25)', 'random_state': '(42)'}), '(X, y, test_size=0.25, random_state=42)\n', (2761, 2800), False, 'from sklearn.model_selection import train_test_split\n'), ((2810, 2829), 'numpy.array', 'numpy.array', (['y_test'], {}), '(y_test)\n', (2821, 2829), False, 'import numpy\n'), ((2840, 2860), 'numpy.array', 'numpy.array', (['y_train'], {}), '(y_train)\n', (2851, 2860), False, 'import numpy\n'), ((3220, 3240), 'tensorflow_hub.load', 'hub.load', (['module_url'], {}), '(module_url)\n', (3228, 3240), True, 'import tensorflow_hub as hub\n'), ((3725, 3737), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3735, 3737), False, 'from keras.models import Sequential\n'), ((3812, 3824), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3822, 3824), False, 'from keras.models import Sequential\n'), ((5145, 5168), 'numpy.array', 'numpy.array', (['vocabulary'], {}), '(vocabulary)\n', (5156, 5168), False, 'import numpy\n'), ((5169, 5211), 'numpy.save', 'numpy.save', (['"""./vocabulary.npy"""', 'vocabulary'], {}), "('./vocabulary.npy', vocabulary)\n", (5179, 5211), False, 'import numpy\n'), ((3883, 3931), 'keras.layers.Dense', 'Dense', (['(512)'], {'input_shape': '[512]', 'activation': '"""relu"""'}), "(512, input_shape=[512], activation='relu')\n", (3888, 3931), False, 'from keras.layers import Dense, Activation\n'), ((4236, 4252), 'keras.callbacks.LambdaCallback', 'LambdaCallback', ([], {}), '()\n', (4250, 4252), False, 'from keras.callbacks import LambdaCallback\n'), ((4520, 4545), 'numpy.argmax', 'np.argmax', (['prediction[-1]'], {}), '(prediction[-1])\n', (4529, 4545), True, 'import numpy as np\n')]
|
import argparse
import os
from scipy.special import erf
from scipy.stats import truncnorm
import numpy as np
import data
def build_vector_cache(glove_filename, vec_cache_filename, vocab):
print("Building vector cache...")
with open(glove_filename) as f, open(vec_cache_filename, "w") as f2:
for line in f:
tok, vec = line.split(" ", 1)
if tok in vocab:
vocab.remove(tok)
f2.write("{} {}".format(tok, vec))
def discrete_tnorm(a, b, tgt_loc, sigma=1, n_steps=100):
def phi(zeta):
return 1 / (np.sqrt(2 * np.pi)) * np.exp(-0.5 * zeta**2)
def Phi(x):
return 0.5 * (1 + erf(x / np.sqrt(2)))
def tgt_loc_update(x):
y1 = phi((a - x) / sigma)
y2 = phi((b - x) / sigma)
x1 = Phi((b - x) / sigma)
x2 = Phi((a - x) / sigma)
denom = x1 - x2 + 1E-4
return y1 / denom - y2 / denom
x = tgt_loc
direction = np.sign(tgt_loc - (b - a))
for _ in range(n_steps):
x = tgt_loc - sigma * tgt_loc_update(x)
tn = truncnorm((a - x) / sigma, (b - x) / sigma, loc=x, scale=sigma)
rrange = np.arange(a, b + 1)
pmf = tn.pdf(rrange)
pmf /= np.sum(pmf)
return pmf
def discrete_lerp(a, b, ground_truth):
pmf = np.zeros(b - a + 1)
c = int(np.ceil(ground_truth + 1E-8))
f = int(np.floor(ground_truth))
pmf[min(c - a, b - a)] = ground_truth - f
pmf[f - a] = c - ground_truth
return pmf
def smoothed_labels(truth, n_labels):
return discrete_lerp(1, n_labels, truth)
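# Worked example of the label smoothing above (3.4 is an arbitrary similarity score):
#   smoothed_labels(3.4, 5)  ->  approximately [0, 0, 0.6, 0.4, 0]
# i.e. the probability mass is split between the neighbouring integer scores 3 and 4
# in proportion to how close the ground truth lies to each of them.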
def preprocess(filename, output_name="sim_sparse.txt"):
print("Preprocessing {}...".format(filename))
with open(filename) as f:
values = [float(l.strip()) for l in f.readlines()]
values = [" ".join([str(l) for l in smoothed_labels(v, 5)]) for v in values]
with open(os.path.join(os.path.dirname(filename), output_name), "w") as f:
f.write("\n".join(values))
def add_vocab(tok_filename, vocab):
with open(tok_filename) as f:
for line in f:
vocab.update(line.strip().split())
def main():
base_conf = data.Configs.base_config()
sick_conf = data.Configs.sick_config()
sick_folder = sick_conf.sick_data
vocab = set()
for name in ("train", "dev", "test"):
preprocess(os.path.join(sick_folder, name, "sim.txt"))
add_vocab(os.path.join(sick_folder, name, "a.toks"), vocab)
add_vocab(os.path.join(sick_folder, name, "b.toks"), vocab)
build_vector_cache(base_conf.wordvecs_file, sick_conf.sick_cache, vocab)
if __name__ == "__main__":
main()
|
[
"numpy.sum",
"data.Configs.base_config",
"numpy.ceil",
"scipy.stats.truncnorm",
"numpy.floor",
"numpy.zeros",
"data.Configs.sick_config",
"os.path.dirname",
"numpy.arange",
"numpy.exp",
"numpy.sign",
"os.path.join",
"numpy.sqrt"
] |
[((952, 978), 'numpy.sign', 'np.sign', (['(tgt_loc - (b - a))'], {}), '(tgt_loc - (b - a))\n', (959, 978), True, 'import numpy as np\n'), ((1065, 1128), 'scipy.stats.truncnorm', 'truncnorm', (['((a - x) / sigma)', '((b - x) / sigma)'], {'loc': 'x', 'scale': 'sigma'}), '((a - x) / sigma, (b - x) / sigma, loc=x, scale=sigma)\n', (1074, 1128), False, 'from scipy.stats import truncnorm\n'), ((1142, 1161), 'numpy.arange', 'np.arange', (['a', '(b + 1)'], {}), '(a, b + 1)\n', (1151, 1161), True, 'import numpy as np\n'), ((1198, 1209), 'numpy.sum', 'np.sum', (['pmf'], {}), '(pmf)\n', (1204, 1209), True, 'import numpy as np\n'), ((1275, 1294), 'numpy.zeros', 'np.zeros', (['(b - a + 1)'], {}), '(b - a + 1)\n', (1283, 1294), True, 'import numpy as np\n'), ((2113, 2139), 'data.Configs.base_config', 'data.Configs.base_config', ([], {}), '()\n', (2137, 2139), False, 'import data\n'), ((2156, 2182), 'data.Configs.sick_config', 'data.Configs.sick_config', ([], {}), '()\n', (2180, 2182), False, 'import data\n'), ((1307, 1336), 'numpy.ceil', 'np.ceil', (['(ground_truth + 1e-08)'], {}), '(ground_truth + 1e-08)\n', (1314, 1336), True, 'import numpy as np\n'), ((1349, 1371), 'numpy.floor', 'np.floor', (['ground_truth'], {}), '(ground_truth)\n', (1357, 1371), True, 'import numpy as np\n'), ((600, 624), 'numpy.exp', 'np.exp', (['(-0.5 * zeta ** 2)'], {}), '(-0.5 * zeta ** 2)\n', (606, 624), True, 'import numpy as np\n'), ((2300, 2342), 'os.path.join', 'os.path.join', (['sick_folder', 'name', '"""sim.txt"""'], {}), "(sick_folder, name, 'sim.txt')\n", (2312, 2342), False, 'import os\n'), ((2362, 2403), 'os.path.join', 'os.path.join', (['sick_folder', 'name', '"""a.toks"""'], {}), "(sick_folder, name, 'a.toks')\n", (2374, 2403), False, 'import os\n'), ((2430, 2471), 'os.path.join', 'os.path.join', (['sick_folder', 'name', '"""b.toks"""'], {}), "(sick_folder, name, 'b.toks')\n", (2442, 2471), False, 'import os\n'), ((578, 596), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (585, 596), True, 'import numpy as np\n'), ((1856, 1881), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (1871, 1881), False, 'import os\n'), ((673, 683), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (680, 683), True, 'import numpy as np\n')]
|
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
import numpy as np
pipelines = pd.read_csv('OntoGasGrid/pipeline_owl_generator/pipeline_split.csv').to_numpy()
offtakes = pd.read_csv('OntoGasGrid/grid_component_owl_generator/grid_component_data.csv').to_numpy()
n_offt = len(offtakes[:,0])
n_cons = len(pipelines[:,0])
closest_connection = np.zeros((n_offt,2),dtype=object)
def connection_name_get(i):
grid_line = pipelines[i,3]
connect_num = pipelines[i,8]
return grid_line + ' ' + str(connect_num) + ' Connection'
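# For each offtake with valid coordinates, the loop below picks the nearest pipeline
# connection by straight-line distance in raw lat/long degrees (a rough proximity
# proxy that ignores the Earth's curvature and the differing scale of the two axes).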
for i in tqdm(range(n_offt)):
if offtakes[i,2] != '#VALUE!':
dist_store = []
max_dist = 1000
off_lat = float(offtakes[i,2])
off_lng = float(offtakes[i,1])
for ii in range(n_cons):
con_lat = float(pipelines[ii,0])
con_lng = float(pipelines[ii,1])
dist = np.sqrt((off_lat-con_lat)**2+(off_lng-con_lng)**2)
if dist < max_dist:
closest_connection[i,0] = connection_name_get(ii)
closest_connection[i,1] = pipelines[ii,2]
max_dist = dist
closest_connection = pd.DataFrame(closest_connection).to_csv('OntoGasGrid/grid_component_owl_generator/closest connection.csv',index=False,header=False)
|
[
"pandas.read_csv",
"numpy.zeros",
"pandas.DataFrame",
"numpy.sqrt"
] |
[((373, 408), 'numpy.zeros', 'np.zeros', (['(n_offt, 2)'], {'dtype': 'object'}), '((n_offt, 2), dtype=object)\n', (381, 408), True, 'import numpy as np\n'), ((110, 178), 'pandas.read_csv', 'pd.read_csv', (['"""OntoGasGrid/pipeline_owl_generator/pipeline_split.csv"""'], {}), "('OntoGasGrid/pipeline_owl_generator/pipeline_split.csv')\n", (121, 178), True, 'import pandas as pd\n'), ((201, 280), 'pandas.read_csv', 'pd.read_csv', (['"""OntoGasGrid/grid_component_owl_generator/grid_component_data.csv"""'], {}), "('OntoGasGrid/grid_component_owl_generator/grid_component_data.csv')\n", (212, 280), True, 'import pandas as pd\n'), ((1182, 1214), 'pandas.DataFrame', 'pd.DataFrame', (['closest_connection'], {}), '(closest_connection)\n', (1194, 1214), True, 'import pandas as pd\n'), ((904, 964), 'numpy.sqrt', 'np.sqrt', (['((off_lat - con_lat) ** 2 + (off_lng - con_lng) ** 2)'], {}), '((off_lat - con_lat) ** 2 + (off_lng - con_lng) ** 2)\n', (911, 964), True, 'import numpy as np\n')]
|
from random import shuffle
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.datasets import load_iris
import numpy as np
iris = load_iris()
print(type(iris), len(iris.data))
def test1():
XY = np.array(zip(iris.data, iris.target))
np.random.shuffle(XY)
X, Y = XY[:, :1][:100], XY[:, 1:][:100]
X_test, Y_test = XY[:, :1][100:], XY[:, 1:][100:]
X.shape, Y.shape = -1, -1
X_test.shape, Y_test.shape = -1, -1
X = [list(i) for i in X]
X_test = [list(i) for i in X_test]
print('X:', X)
print('Y:', Y)
# Train model
rf = RandomForestRegressor()
rf.fit(X, Y)
# Predict new sample
Y_pre = rf.predict(X_test)
print('Y_test:', Y_test)
print('Y_pre:', Y_pre)
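# test2 ranks the four iris features by how well each one alone predicts the target:
# each feature is scored with cross-validated R^2 for a RandomForestRegressor fitted
# on that single column, so higher scores mean individually more informative features.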
def test2():
from sklearn.cross_validation import cross_val_score, ShuffleSplit
X, Y, names = iris.data, iris.target, iris['feature_names']
rf = RandomForestRegressor()
scores = []
for i in range(X.shape[1]):
score = cross_val_score(rf, X[:, i:i + 1], Y,
scoring='r2',
cv=ShuffleSplit(len(X), 3, .3))
scores.append((round(np.mean(score), 3), names[i]))
print(sorted(scores, reverse=True))
if __name__ == '__main__':
test1()
test2()
|
[
"sklearn.datasets.load_iris",
"numpy.mean",
"numpy.random.shuffle",
"sklearn.ensemble.RandomForestRegressor"
] |
[((192, 203), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (201, 203), False, 'from sklearn.datasets import load_iris\n'), ((304, 325), 'numpy.random.shuffle', 'np.random.shuffle', (['XY'], {}), '(XY)\n', (321, 325), True, 'import numpy as np\n'), ((628, 651), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (649, 651), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((942, 965), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (963, 965), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((1207, 1221), 'numpy.mean', 'np.mean', (['score'], {}), '(score)\n', (1214, 1221), True, 'import numpy as np\n')]
|
import os
import argparse
import datetime
import numpy as np
from glob import glob
from typing import List, Set, Tuple
"""
Author: <NAME> (<EMAIL>)
Computes character-level Cohen's kappa and percentage
agreement for a set of brat annotated files from two
annotators for a sequence labeling task (e.g. NER).
"""
class BratANN(object):
"""
A brat annotation.
>>> ann = "T1\tent 1 4\tcat"
>>> b1 = BratANN("T3", "ent", 1, 4, "cat")
>>> b2 = BratANN.from_string(ann)
>>> b1 == b2
True
>>> b3 = BratANN("T3", "ent", 1, 5, "cat ")
>>> b1 == b3
False
"""
def __init__(self, num: str, label: str, start: int, end: int, text: str):
self.num = num
self.label = label
self.start = int(start)
self.end = int(end)
self.text = text
@classmethod
def from_string(cls, string: str):
(n, l, s, e, t) = string.split(maxsplit=4)
return cls(n, l, int(s), int(e), t)
def __str__(self) -> str:
return f"{self.num}\t{self.label} {self.start} {self.end}\t{self.text}" # noqa
def __repr__(self) -> str:
return f"<ira.BratANN '{self.num}, {self.label}, {self.start}, {self.end}, {self.text}'>" # noqa
def __eq__(self, other) -> bool:
"""
Overrides the default implementation
Two BratANNs are considering equal iff they have the same label,
offset, and text.
Equality does not consider the annotation number, e.g. T1
"""
if isinstance(other, BratANN):
return all([self.label == other.label,
self.start == other.start,
self.end == other.end,
self.text == other.text])
else:
return False
def parse_args():
def usage():
return """ira.py
[--help, Show this help message and exit]
[--test, Test the ira function]
[--docdir, Directory containing the documents that were annotated.
If not specified, looks in indir1.]
               --indir1, Directory containing the first annotator's annotations
               --indir2, Directory containing the second annotator's annotations
--annotation_conf, The brat annotation.conf that was used
for this annotation task
--disagreements, Whether to suppress, print, or log files
in which annotators disagree. Possible values
are "suppress", "print", "log". Default is
"suppress". If "log", writes file names to
"disagreements.log" in the current working
directory.
"""
desc = """Computes Cohen's kappa at the token
level for a sequence labeling task."""
parser = argparse.ArgumentParser(description=desc, usage=usage())
parser.add_argument("--test", action="store_true", default=False,
help="""Test the ira function.""")
args, remainder = parser.parse_known_args()
if args.test is True:
return args
parser = argparse.ArgumentParser(usage=usage())
parser.add_argument("--indir1", type=str, required=True)
parser.add_argument("--indir2", type=str, required=True)
parser.add_argument("--annotation_conf", type=str, required=True)
parser.add_argument("--docdir", type=str, required=False, default=None)
parser.add_argument("--disagreements", type=str,
required=False,
default="suppress",
choices=["suppress", "print", "log"])
args = parser.parse_args(remainder)
args.test = False
return args
def main(indir1: str, indir2: str, ann_conf: str,
docdir: str = None, disagreements: str = "suppress"):
"""
param indir{1,2}: Input directories containing the first and second
                      annotators' .ann files, respectively.
param ann_conf: Path to the annotation.conf file.
param docdir: Directory containing the .txt files which were annotated.
If None, uses indir1.
param disagreements: How disagreements are logged. Possible values are
"suppress", "print" and "log". If "suppress",
do nothing. If "print", prints files that disagree
to the console. If "log", files that disagree
will be written to "disagreements.log" in the current
working directory.
"""
# Read in the documents.
if docdir is not None:
doc_fnames = glob(f"{docdir}/*.txt")
else:
doc_fnames = glob(f"{indir1}/*.txt")
docs = read_docs(doc_fnames)
# Read in the annotations.
basenames = [os.path.splitext(os.path.basename(fn))[0]
for fn in doc_fnames]
ann_fnames1 = [os.path.join(indir1, f"{bn}.ann") for bn in basenames]
ann_fnames2 = [os.path.join(indir2, f"{bn}.ann") for bn in basenames]
anns1 = read_anns(ann_fnames1)
anns2 = read_anns(ann_fnames2)
if not len(docs) == len(anns1) == len(anns2):
raise ValueError("Different numbers of documents and annotations.")
# Read the entity labels.
labels = read_labels(ann_conf)
# Compute inter rater agreement.
kappa, agreement, disagree_idxs = ira(docs, anns1, anns2, labels)
summary(kappa, "Cohen's Kappa")
summary(agreement, "Percentage Agreement")
# Do something with disagreements.
if disagreements == "print":
print("=== Disagreements ===")
for (idx, p_o) in disagree_idxs:
bn = os.path.basename(doc_fnames[idx])
print(f"{bn}: Agreement={p_o:.3f}")
if disagreements == "log":
with open("disagreements.log", 'w') as outF:
            outF.write(str(datetime.datetime.now()) + '\n')
for (idx, p_o) in disagree_idxs:
bn = os.path.basename(doc_fnames[idx])
outF.write(f"{bn}: Agreement={p_o:.3f}\n")
def read_docs(fnames: List[str]) -> List[str]:
"""
Reads in the documents.
param fnames: List of paths to .txt files to read.
returns: List of input documents.
"""
all_docs = []
for docfile in fnames:
doc = open(docfile, 'r').read()
all_docs.append(doc)
return all_docs
def read_anns(fnames: List[str]) -> List[List[BratANN]]:
"""
Reads all .ann files and converts their
annotations to BratANN objects.
param fnames: List of paths to .ann files to read.
returns: List of annotations.
"""
all_anns = []
for annfile in fnames:
anns = [BratANN.from_string(a.strip()) for a in open(annfile, 'r')]
all_anns.append(anns)
return all_anns
def read_labels(ann_conf: str) -> Set[str]:
"""
Reads the entity labels from annotation.conf.
param ann_conf: Path to annotation.conf
returns: set of entity labels.
"""
labels = set()
with open(ann_conf, 'r') as infile:
copy = False
for line in infile:
# Skip blank lines and comments.
if not line.strip() or line.strip().startswith('#'):
continue
if line.strip() == "[entities]":
copy = True
elif line.strip() == "[relations]":
copy = False
elif copy is True:
labels.add(line.strip())
return labels
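# A minimal sketch of the annotation.conf layout that read_labels() above assumes
# (hypothetical entity names; only the lines between [entities] and [relations]
# are collected, blank lines and '#' comments are skipped):
#
#     [entities]
#     Medication
#     Dosage
#
#     [relations]
#     ...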
def ira(docs: List[str],
anns1: List[List[BratANN]],
anns2: List[List[BratANN]],
labels: Set[str]) -> Tuple[np.array, np.array, List[Tuple[int, float]]]: # noqa
"""
Computes Cohen's kappa and percentage agreement between two annotators.
param docs: List of documents, output of read_docs().
    param anns1: List of the first annotator's annotations, output of read_anns().
    param anns2: List of the second annotator's annotations, output of read_anns().
param labels: Set of labels annotated, output of read_labels().
returns: Kappa and percentage agreement for each document.
"""
n_docs = len(docs)
p_os = np.zeros(n_docs)
kappas = np.zeros(n_docs)
disagree_idxs_po = []
for i in range(n_docs):
denom = len(docs[i])
v1 = label_vector(docs[i], anns1[i], labels)
v2 = label_vector(docs[i], anns2[i], labels)
# Observed agreement: How often the two annotators actually agreed.
# Equivalent to accuracy.
p_o = np.sum(v1 == v2) / denom
if p_o != 1.0:
disagree_idxs_po.append((i, p_o))
# Expected agreement: How often the two annotators are expected to
# agree. For number of items N, labels k, and the number of times
# rater j predicted label k, n_j_k:
# p_e = (1/N^2) * sum_k (n_1_k * n_2_k)
p_e = (1/denom**2) * np.sum([np.sum(v1 == k) * np.sum(v2 == k)
for k in range(len(labels)+1)])
if p_e == 1:
k = 0.0
else:
k = (p_o - p_e) / (1 - p_e)
p_os[i] = p_o
kappas[i] = k
return (kappas, p_os, disagree_idxs_po)
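# Worked example (hypothetical vectors) of the quantities computed in ira() above:
# with labels = {"ent"}, v1 = [0, 1, 1, 0], v2 = [0, 1, 0, 0] and N = 4:
#   p_o   = 3/4 = 0.75                      (positions 0, 1 and 3 agree)
#   p_e   = (1/4**2) * (2*3 + 2*1) = 0.5    (n_1_0=2, n_2_0=3, n_1_1=2, n_2_1=1)
#   kappa = (0.75 - 0.5) / (1 - 0.5) = 0.5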
def label_vector(doc: List[str],
anns: List[List[BratANN]],
labels: Set[str]) -> np.array:
"""
Converts the document into an integer vector. The value
of each element corresponds to the entity type of the
annotation at that character position, with 0 indicating
no annotation. So an annotation task with 3 annotation types
would have a vector of 0s, 1s, 2s, and 3s.
param doc: Document that was annotated.
param anns: Annotations for each document.
param labels: Set of entity labels for this task.
returns: Vector of character level annotations.
"""
v = np.zeros(len(doc)) # For each character
for (i, lab) in enumerate(labels):
i += 1 # 0 is reserved for no label
idxs = [np.arange(a.start, a.end) for a in anns if a.label == lab]
idxs = [j for mask in idxs for j in mask]
v[idxs] = i
return v
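# e.g. (hypothetical) doc = "cats", labels = {"ent"} and a single annotation
# "T1\tent 0 4\tcats" produce v = [1., 1., 1., 1.] from label_vector() above.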
def summary(results: np.array, varname: str = None):
"""
Prints summary statistics for the supplied results.
param results: Numeric array of results (e.g. kappas).
param varname: (Optional) Name of the variable being summarized.
"""
if varname is not None:
print(varname)
if len(results) == 1:
print(f"{results[0]:.3f}")
else:
rmean = np.mean(results)
rmax = np.max(results)
rmin = np.min(results)
rstd = np.std(results)
print(f"""Mean: {rmean:.3f} +/-{rstd:.3f}\nRange: ({rmin:.3f}, {rmax:.3f})""") # noqa
def test():
"""
A small example to test ira().
"""
docs = ["The cats sat on the mat"]
ann_strs1 = ["T1\tent 4 8\tcats",
"T2\tent 9 12\tsat",
"T3\tent 20 23\tmat"]
anns1 = [[BratANN.from_string(s) for s in ann_strs1]]
ann_strs2 = ["T1\tent 4 7\tcat", "T2\tent 20 23 mat"]
anns2 = [[BratANN.from_string(s) for s in ann_strs2]]
labels = ["ent"]
kappas, agreements, disagreements = ira(docs, anns1, anns2, labels)
assert(np.isclose(kappas[0], 0.629, atol=1e-03))
assert(np.isclose(agreements[0], 0.826, atol=1e-03))
print("All tests passed.")
if __name__ == "__main__":
args = parse_args()
if args.test is True:
import doctest
doctest.testmod()
test()
else:
main(args.indir1, args.indir2, args.annotation_conf,
docdir=args.docdir, disagreements=args.disagreements)
|
[
"numpy.sum",
"os.path.basename",
"numpy.std",
"numpy.zeros",
"datetime.datetime.now",
"numpy.isclose",
"numpy.mean",
"numpy.max",
"numpy.min",
"numpy.arange",
"glob.glob",
"os.path.join",
"doctest.testmod"
] |
[((8150, 8166), 'numpy.zeros', 'np.zeros', (['n_docs'], {}), '(n_docs)\n', (8158, 8166), True, 'import numpy as np\n'), ((8180, 8196), 'numpy.zeros', 'np.zeros', (['n_docs'], {}), '(n_docs)\n', (8188, 8196), True, 'import numpy as np\n'), ((11195, 11235), 'numpy.isclose', 'np.isclose', (['kappas[0]', '(0.629)'], {'atol': '(0.001)'}), '(kappas[0], 0.629, atol=0.001)\n', (11205, 11235), True, 'import numpy as np\n'), ((11248, 11292), 'numpy.isclose', 'np.isclose', (['agreements[0]', '(0.826)'], {'atol': '(0.001)'}), '(agreements[0], 0.826, atol=0.001)\n', (11258, 11292), True, 'import numpy as np\n'), ((4683, 4706), 'glob.glob', 'glob', (['f"""{docdir}/*.txt"""'], {}), "(f'{docdir}/*.txt')\n", (4687, 4706), False, 'from glob import glob\n'), ((4738, 4761), 'glob.glob', 'glob', (['f"""{indir1}/*.txt"""'], {}), "(f'{indir1}/*.txt')\n", (4742, 4761), False, 'from glob import glob\n'), ((4943, 4976), 'os.path.join', 'os.path.join', (['indir1', 'f"""{bn}.ann"""'], {}), "(indir1, f'{bn}.ann')\n", (4955, 4976), False, 'import os\n'), ((5017, 5050), 'os.path.join', 'os.path.join', (['indir2', 'f"""{bn}.ann"""'], {}), "(indir2, f'{bn}.ann')\n", (5029, 5050), False, 'import os\n'), ((10493, 10509), 'numpy.mean', 'np.mean', (['results'], {}), '(results)\n', (10500, 10509), True, 'import numpy as np\n'), ((10525, 10540), 'numpy.max', 'np.max', (['results'], {}), '(results)\n', (10531, 10540), True, 'import numpy as np\n'), ((10556, 10571), 'numpy.min', 'np.min', (['results'], {}), '(results)\n', (10562, 10571), True, 'import numpy as np\n'), ((10587, 10602), 'numpy.std', 'np.std', (['results'], {}), '(results)\n', (10593, 10602), True, 'import numpy as np\n'), ((11435, 11452), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (11450, 11452), False, 'import doctest\n'), ((5692, 5725), 'os.path.basename', 'os.path.basename', (['doc_fnames[idx]'], {}), '(doc_fnames[idx])\n', (5708, 5725), False, 'import os\n'), ((8512, 8528), 'numpy.sum', 'np.sum', (['(v1 == v2)'], {}), '(v1 == v2)\n', (8518, 8528), True, 'import numpy as np\n'), ((9957, 9982), 'numpy.arange', 'np.arange', (['a.start', 'a.end'], {}), '(a.start, a.end)\n', (9966, 9982), True, 'import numpy as np\n'), ((4860, 4880), 'os.path.basename', 'os.path.basename', (['fn'], {}), '(fn)\n', (4876, 4880), False, 'import os\n'), ((5984, 6017), 'os.path.basename', 'os.path.basename', (['doc_fnames[idx]'], {}), '(doc_fnames[idx])\n', (6000, 6017), False, 'import os\n'), ((5885, 5908), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5906, 5908), False, 'import datetime\n'), ((8890, 8905), 'numpy.sum', 'np.sum', (['(v1 == k)'], {}), '(v1 == k)\n', (8896, 8905), True, 'import numpy as np\n'), ((8908, 8923), 'numpy.sum', 'np.sum', (['(v2 == k)'], {}), '(v2 == k)\n', (8914, 8923), True, 'import numpy as np\n')]
|
import datetime
import os
import keras
import numpy as np
import pandas as pd
from base_model import BaseModel
from multivariate_container import MultivariateContainer
from typing import Union
class MultivariateLSTM(BaseModel):
def __init__(
self,
container: MultivariateContainer,
            config: dict=None,
create_empty: bool=False) -> None:
"""
Initialization method.
"""
_, self.time_steps, self.num_fea = container.train_X.shape
print(f"MultivariateLSTM Initialized: \
\n\tTime Step: {self.time_steps}\
\n\tFeature: {self.num_fea}")
self.config = config
self.container = container
self.hist = None
if create_empty:
self.core = None
else:
self.core = self._construct_lstm_model(self.config)
self._gen_file_name()
print(
f"\tMultivariateLSTM: Current model will be save to ./saved_models/f{self.file_name}/")
def _construct_lstm_model(
self,
config: dict,
verbose: bool=True
) -> keras.Model:
"""
Construct the Stacked lstm model,
Note: Modify this method to change model configurations.
        # TODO: Add arbitrary layer support.
"""
print("MultivariateLSTM: Generating LSTM model using Model API.")
input_sequence = keras.layers.Input(
shape=(self.time_steps, self.num_fea),
dtype="float32",
name="input_sequence")
normalization = keras.layers.BatchNormalization()(input_sequence)
lstm = keras.layers.LSTM(
units=config["nn.lstm1"],
return_sequences=False
)(normalization)
dense1 = keras.layers.Dense(
units=config["nn.dense1"],
name="Dense1"
)(lstm)
predictions = keras.layers.Dense(
1,
name="Prediction"
)(dense1)
model = keras.Model(inputs=input_sequence, outputs=predictions)
model.compile(loss="mse", optimizer="adam")
if verbose:
print("\tMultivariateLSTM: LSTM model constructed with configuration: ")
keras.utils.print_summary(model)
return model
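    # A hypothetical configuration dict (example values only) covering the keys
    # referenced by _construct_lstm_model, _construct_lstm_sequential and fit_model:
    #     config = {"nn.lstm1": 64, "nn.lstm2": 32, "nn.dense1": 16,
    #               "batch_size": 32, "validation_split": 0.1}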
def _construct_lstm_sequential(
self,
config: dict,
verbose: bool=True
) -> keras.Sequential:
"""
Construct the Stacked lstm model,
Note: Modify this method to change model configurations.
        # TODO: Add arbitrary layer support.
"""
print("MultivariateLSTM: Generating LSTM model with Keras Sequential API")
model = keras.Sequential()
model.add(keras.layers.LSTM(
units=config["nn.lstm1"],
input_shape=(self.time_steps, self.num_fea),
return_sequences=True,
name="LSTM1"
))
model.add(
keras.layers.LSTM(
units=config["nn.lstm2"],
name="LSTM2"
))
model.add(
keras.layers.Dense(
units=config["nn.dense1"],
name="Dense1"
))
model.add(
keras.layers.Dense(
units=1,
name="Dense_output"
))
model.compile(loss="mse", optimizer="adam")
if verbose:
print("\tMultivariateLSTM: LSTM model constructed with configuration: ")
keras.utils.print_summary(model)
return model
def update_config(
self,
new_config: dict
) -> None:
"""
Update the neural network configuration, and re-construct, re-compile the core.
"""
# TODO: add check configuration method here.
print("MultivariateLSTM: Updating neural network configuration...")
self.prev_config = self.config
self.config = new_config
self.core = self._construct_lstm_model(self.config, verbose=False)
print("\tDone.")
def fit_model(
self,
epochs: int=10
) -> None:
start_time = datetime.datetime.now()
print("MultivariateLSTM: Start fitting.")
self.hist = self.core.fit(
self.container.train_X,
self.container.train_y,
epochs=epochs,
batch_size=32 if self.config is None else self.config["batch_size"],
validation_split=0.1 if self.config is None else self.config["validation_split"]
)
finish_time = datetime.datetime.now()
time_taken = finish_time - start_time
print(f"\tFitting finished, {epochs} epochs for {str(time_taken)}")
def predict(
self,
X_feed: np.ndarray
) -> np.ndarray:
y_hat = self.core.predict(X_feed, verbose=1)
# y_hat = self.container.scaler_y.inverse_transform(y_hat)
# y_hat returned used to compare with self.container.*_X directly.
return y_hat
def save_model(
self,
file_dir: str=None
) -> None:
if file_dir is None:
# If no file directory specified, use the default one.
file_dir = self.file_name
# Try to create record folder.
try:
folder = f"./saved_models/{file_dir}/"
os.system(f"mkdir {folder}")
print(f"Experiment record directory created: {folder}")
except:
print("Current directory: ")
_ = os.system("pwd")
raise FileNotFoundError(
"Failed to create directory, please create directory ./saved_models/")
# Save model structure to JSON
print("Saving model structure...")
model_json = self.core.to_json()
with open(f"{folder}model_structure.json", "w") as json_file:
json_file.write(model_json)
print("Done.")
# Save model weight to h5
print("Saving model weights...")
self.core.save_weights(f"{folder}model_weights.h5")
print("Done")
# Save model illustration to png file.
print("Saving model visualization...")
try:
keras.utils.plot_model(
self.core,
to_file=f"{folder}model.png",
show_shapes=True,
show_layer_names=True)
except:
print("Model illustration cannot be saved.")
# Save training history (if any)
if self.hist is not None:
hist_loss = np.squeeze(np.array(self.hist.history["loss"]))
hist_val_loss = np.squeeze(np.array(self.hist.history["val_loss"]))
combined = np.stack([hist_loss, hist_val_loss])
combined = np.transpose(combined)
df = pd.DataFrame(combined, dtype=np.float32)
df.columns = ["loss", "val_loss"]
df.to_csv(f"{folder}hist.csv", sep=",")
print(f"Training history is saved to {folder}hist.csv...")
else:
print("No training history found.")
print("Done.")
def load_model(
self,
folder_dir: str
) -> None:
"""
#TODO: doc
"""
if not folder_dir.endswith("/"):
            # Assert the correct format: folder_dir should end with "/"
folder_dir += "/"
print(f"Load model from folder {folder_dir}")
# construct model from json
print("Reconstruct model from Json file...")
try:
json_file = open(f"{folder_dir}model_structure.json", "r")
except FileNotFoundError:
raise Warning(
f"Json file not found. Expected: {folder_dir}model_structure.json"
)
model_file = json_file.read()
json_file.close()
self.core = keras.models.model_from_json(model_file)
print("Done.")
# load weights from h5
print("Loading model weights...")
try:
self.core.load_weights(
f"{folder_dir}model_weights.h5", by_name=True)
except FileNotFoundError:
raise Warning(
f"h5 file not found. Expected: {folder_dir}model_weights.h5"
)
print("Done.")
self.core.compile(loss="mse", optimizer="adam")
def summarize_training(self):
"""
Summarize training result to string file.
- Loss
- Epochs
- Time taken
"""
raise NotImplementedError
def visualize_training(self):
"""
Visualize the training result:
- Plot training set loss and validation set loss.
"""
# TODO: move visualize training to general methods.
raise NotImplementedError
|
[
"numpy.stack",
"pandas.DataFrame",
"keras.Model",
"keras.Sequential",
"keras.layers.LSTM",
"numpy.transpose",
"os.system",
"keras.utils.plot_model",
"keras.models.model_from_json",
"keras.layers.Dense",
"numpy.array",
"keras.layers.Input",
"keras.utils.print_summary",
"datetime.datetime.now",
"keras.layers.BatchNormalization"
] |
[((1417, 1518), 'keras.layers.Input', 'keras.layers.Input', ([], {'shape': '(self.time_steps, self.num_fea)', 'dtype': '"""float32"""', 'name': '"""input_sequence"""'}), "(shape=(self.time_steps, self.num_fea), dtype='float32',\n name='input_sequence')\n", (1435, 1518), False, 'import keras\n'), ((2002, 2057), 'keras.Model', 'keras.Model', ([], {'inputs': 'input_sequence', 'outputs': 'predictions'}), '(inputs=input_sequence, outputs=predictions)\n', (2013, 2057), False, 'import keras\n'), ((2698, 2716), 'keras.Sequential', 'keras.Sequential', ([], {}), '()\n', (2714, 2716), False, 'import keras\n'), ((4146, 4169), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4167, 4169), False, 'import datetime\n'), ((4560, 4583), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4581, 4583), False, 'import datetime\n'), ((7834, 7874), 'keras.models.model_from_json', 'keras.models.model_from_json', (['model_file'], {}), '(model_file)\n', (7862, 7874), False, 'import keras\n'), ((1577, 1610), 'keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (1608, 1610), False, 'import keras\n'), ((1643, 1710), 'keras.layers.LSTM', 'keras.layers.LSTM', ([], {'units': "config['nn.lstm1']", 'return_sequences': '(False)'}), "(units=config['nn.lstm1'], return_sequences=False)\n", (1660, 1710), False, 'import keras\n'), ((1778, 1838), 'keras.layers.Dense', 'keras.layers.Dense', ([], {'units': "config['nn.dense1']", 'name': '"""Dense1"""'}), "(units=config['nn.dense1'], name='Dense1')\n", (1796, 1838), False, 'import keras\n'), ((1902, 1942), 'keras.layers.Dense', 'keras.layers.Dense', (['(1)'], {'name': '"""Prediction"""'}), "(1, name='Prediction')\n", (1920, 1942), False, 'import keras\n'), ((2229, 2261), 'keras.utils.print_summary', 'keras.utils.print_summary', (['model'], {}), '(model)\n', (2254, 2261), False, 'import keras\n'), ((2735, 2864), 'keras.layers.LSTM', 'keras.layers.LSTM', ([], {'units': "config['nn.lstm1']", 'input_shape': '(self.time_steps, self.num_fea)', 'return_sequences': '(True)', 'name': '"""LSTM1"""'}), "(units=config['nn.lstm1'], input_shape=(self.time_steps,\n self.num_fea), return_sequences=True, name='LSTM1')\n", (2752, 2864), False, 'import keras\n'), ((2951, 3008), 'keras.layers.LSTM', 'keras.layers.LSTM', ([], {'units': "config['nn.lstm2']", 'name': '"""LSTM2"""'}), "(units=config['nn.lstm2'], name='LSTM2')\n", (2968, 3008), False, 'import keras\n'), ((3087, 3147), 'keras.layers.Dense', 'keras.layers.Dense', ([], {'units': "config['nn.dense1']", 'name': '"""Dense1"""'}), "(units=config['nn.dense1'], name='Dense1')\n", (3105, 3147), False, 'import keras\n'), ((3226, 3274), 'keras.layers.Dense', 'keras.layers.Dense', ([], {'units': '(1)', 'name': '"""Dense_output"""'}), "(units=1, name='Dense_output')\n", (3244, 3274), False, 'import keras\n'), ((3492, 3524), 'keras.utils.print_summary', 'keras.utils.print_summary', (['model'], {}), '(model)\n', (3517, 3524), False, 'import keras\n'), ((5346, 5374), 'os.system', 'os.system', (['f"""mkdir {folder}"""'], {}), "(f'mkdir {folder}')\n", (5355, 5374), False, 'import os\n'), ((6192, 6301), 'keras.utils.plot_model', 'keras.utils.plot_model', (['self.core'], {'to_file': 'f"""{folder}model.png"""', 'show_shapes': '(True)', 'show_layer_names': '(True)'}), "(self.core, to_file=f'{folder}model.png', show_shapes\n =True, show_layer_names=True)\n", (6214, 6301), False, 'import keras\n'), ((6694, 6730), 'numpy.stack', 'np.stack', (['[hist_loss, hist_val_loss]'], {}), '([hist_loss, 
hist_val_loss])\n', (6702, 6730), True, 'import numpy as np\n'), ((6754, 6776), 'numpy.transpose', 'np.transpose', (['combined'], {}), '(combined)\n', (6766, 6776), True, 'import numpy as np\n'), ((6794, 6834), 'pandas.DataFrame', 'pd.DataFrame', (['combined'], {'dtype': 'np.float32'}), '(combined, dtype=np.float32)\n', (6806, 6834), True, 'import pandas as pd\n'), ((5516, 5532), 'os.system', 'os.system', (['"""pwd"""'], {}), "('pwd')\n", (5525, 5532), False, 'import os\n'), ((6554, 6589), 'numpy.array', 'np.array', (["self.hist.history['loss']"], {}), "(self.hist.history['loss'])\n", (6562, 6589), True, 'import numpy as np\n'), ((6630, 6669), 'numpy.array', 'np.array', (["self.hist.history['val_loss']"], {}), "(self.hist.history['val_loss'])\n", (6638, 6669), True, 'import numpy as np\n')]
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
df = pd.read_csv('medals_data.csv')
df[['Gold','Silver','Bronze']].plot(kind='bar',stacked=True)
plt.title('India Olympics Medal')
plt.xlabel('Years')
plt.ylabel('Medals')
n = len(df['Games'])
labels = df.Games.str.slice(0,4)
plt.xticks(np.arange(n),labels,rotation='horizontal')
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"pandas.read_csv",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((78, 108), 'pandas.read_csv', 'pd.read_csv', (['"""medals_data.csv"""'], {}), "('medals_data.csv')\n", (89, 108), True, 'import pandas as pd\n'), ((171, 204), 'matplotlib.pyplot.title', 'plt.title', (['"""India Olympics Medal"""'], {}), "('India Olympics Medal')\n", (180, 204), True, 'import matplotlib.pyplot as plt\n'), ((205, 224), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Years"""'], {}), "('Years')\n", (215, 224), True, 'import matplotlib.pyplot as plt\n'), ((225, 245), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Medals"""'], {}), "('Medals')\n", (235, 245), True, 'import matplotlib.pyplot as plt\n'), ((354, 364), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (362, 364), True, 'import matplotlib.pyplot as plt\n'), ((311, 323), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (320, 323), True, 'import numpy as np\n')]
|
'''
This code was written by following the following tutorial:
Link: https://medium.com/@martinpella/how-to-use-pre-trained-word-embeddings-in-pytorch-71ca59249f76
This script processes and generates GloVe embeddings
'''
# coding: utf-8
import pickle
from preprocess import Vocabulary
import numpy as np
import json
from scipy import misc
import bcolz
words = []
idx = 0
word2idx = {}
vectors = bcolz.carray(np.zeros(1), rootdir='glove.6B/6B.300.dat', mode='w')
with open('glove.6B/glove.6B.300d.txt', 'rb') as f:
for l in f:
line = l.decode().split()
word = line[0]
words.append(word)
word2idx[word] = idx
idx += 1
        vect = np.array(line[1:]).astype(float)  # the np.float alias was removed from NumPy
vectors.append(vect)
vectors = bcolz.carray(vectors[1:].reshape((400000, 300)), rootdir='glove.6B/6B.300.dat', mode='w')
vectors.flush()
pickle.dump(words, open('glove.6B/6B.300_words.pkl', 'wb'))
pickle.dump(word2idx, open('glove.6B/6B.300_idx.pkl', 'wb'))
with open('data/vocab.pkl', 'rb') as f:
vocab = pickle.load(f)
print('Loading vocab...')
vectors = bcolz.open('glove.6B/6B.300.dat')[:]
words = pickle.load(open('glove.6B/6B.300_words.pkl', 'rb'))
word2idx = pickle.load(open('glove.6B/6B.300_idx.pkl', 'rb'))
print('glove is loaded...')
glove = {w: vectors[word2idx[w]] for w in words}
matrix_len = len(vocab)
weights_matrix = np.zeros((matrix_len, 300))
words_found = 0
for i, word in enumerate(vocab.idx2word):
try:
weights_matrix[i] = glove[word]
words_found += 1
except KeyError:
weights_matrix[i] = np.random.normal(scale=0.6, size=(300, ))
pickle.dump(weights_matrix, open('glove.6B/glove_words.pkl', 'wb'), protocol=2)
print('weights_matrix is created')
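# Hypothetical downstream usage (a sketch only, not executed here): the pickled
# weights matrix can seed a PyTorch embedding layer, e.g.
#     import torch
#     weights = pickle.load(open('glove.6B/glove_words.pkl', 'rb'))
#     embedding = torch.nn.Embedding.from_pretrained(torch.FloatTensor(weights), freeze=False)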
|
[
"numpy.zeros",
"pickle.load",
"numpy.array",
"numpy.random.normal",
"bcolz.open"
] |
[((1372, 1399), 'numpy.zeros', 'np.zeros', (['(matrix_len, 300)'], {}), '((matrix_len, 300))\n', (1380, 1399), True, 'import numpy as np\n'), ((411, 422), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (419, 422), True, 'import numpy as np\n'), ((1039, 1053), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1050, 1053), False, 'import pickle\n'), ((1092, 1125), 'bcolz.open', 'bcolz.open', (['"""glove.6B/6B.300.dat"""'], {}), "('glove.6B/6B.300.dat')\n", (1102, 1125), False, 'import bcolz\n'), ((1583, 1623), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.6)', 'size': '(300,)'}), '(scale=0.6, size=(300,))\n', (1599, 1623), True, 'import numpy as np\n'), ((679, 697), 'numpy.array', 'np.array', (['line[1:]'], {}), '(line[1:])\n', (687, 697), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3.5
import os
import dlib
import numpy as np
import cv2
import time
import darknet
from ctypes import *
import math
import random
class YOLO_NN:
def __init__(self, yoloDataFolder):
self.configPath = yoloDataFolder + "/cfg/yolov3-tiny.cfg"
self.weightPath = yoloDataFolder + "/yolov3-tiny.weights"
self.metaPath = yoloDataFolder + "/cfg/coco.data"
print("self.configPath: " + self.configPath)
print("self.weightPath: " + self.weightPath)
print("self.metaPath: " + self.metaPath)
self.netMain = None
self.metaMain = None
self.altNames = None
if not os.path.exists(self.configPath):
raise ValueError("Invalid config path `" +
os.path.abspath(self.configPath)+"`")
if not os.path.exists(self.weightPath):
raise ValueError("Invalid weight path `" +
os.path.abspath(self.weightPath)+"`")
if not os.path.exists(self.metaPath):
raise ValueError("Invalid data file path `" +
os.path.abspath(self.metaPath)+"`")
if self.netMain is None:
self.netMain = darknet.load_net_custom(self.configPath.encode(
"ascii"), self.weightPath.encode("ascii"), 0, 1) # batch size = 1
if self.metaMain is None:
self.metaMain = darknet.load_meta(self.metaPath.encode("ascii"))
if self.altNames is None:
try:
with open(self.metaPath) as metaFH:
metaContents = metaFH.read()
import re
match = re.search("names *= *(.*)$", metaContents,
re.IGNORECASE | re.MULTILINE)
if match:
result = match.group(1)
else:
result = None
try:
if os.path.exists(result):
with open(result) as namesFH:
namesList = namesFH.read().strip().split("\n")
self.altNames = [x.strip() for x in namesList]
except TypeError:
pass
except Exception:
pass
# Create an image we reuse for each detect
self.darknet_image = darknet.make_image(darknet.network_width(self.netMain),
darknet.network_height(self.netMain),3)
self.data_dir = os.path.expanduser(yoloDataFolder+'/face_data')
self.faces_folder_path = self.data_dir + '/users/'
self.face_detector = dlib.get_frontal_face_detector()
self.shape_predictor = dlib.shape_predictor(self.data_dir + '/dlib/shape_predictor_68_face_landmarks.dat')
self.face_recognition_model = dlib.face_recognition_model_v1(self.data_dir + '/dlib/dlib_face_recognition_resnet_model_v1.dat')
def convertBack(self, x, y, w, h):
xmin = int(round(x - (w / 2)))
xmax = int(round(x + (w / 2)))
ymin = int(round(y - (h / 2)))
ymax = int(round(y + (h / 2)))
return xmin, ymin, xmax, ymax
def cvDrawBoxes(self, detections, img):
for detection in detections:
x, y, w, h = detection[2][0],\
detection[2][1],\
detection[2][2],\
detection[2][3]
xmin, ymin, xmax, ymax = self.convertBack(
float(x), float(y), float(w), float(h))
pt1 = (xmin, ymin)
pt2 = (xmax, ymax)
cv2.rectangle(img, pt1, pt2, (0, 255, 0), 1)
cv2.putText(img,
detection[0].decode() +
" [" + str(round(detection[1] * 100, 2)) + "]",
(pt1[0], pt1[1] - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
[0, 255, 0], 2)
return img
def get_face_encodings(self, face):
bounds = self.face_detector(face, 1)
faces_landmarks = [self.shape_predictor(face, face_bounds) for face_bounds in bounds]
try:
h = [np.array(self.face_recognition_model.compute_face_descriptor(face, face_pose, 1)) for face_pose in faces_landmarks]
except:
return []
return h
def get_face_matches(self, known_faces, face):
return np.linalg.norm(known_faces - face, axis=1)
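    # Note: find_match() below interprets the smallest of these distances with three
    # cut-offs: < 0.55 confident match, < 0.58 likely match, < 0.65 tentative match
    # (name suffixed with "?"); anything larger is reported as 'Not Found'.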
def find_match(self, known_faces, person_names, face):
        matches = self.get_face_matches(known_faces, face) # Euclidean distances to each known face encoding
min_index = matches.argmin()
min_value = matches[min_index]
if min_value < 0.55:
return person_names[min_index]+"! ({0:.2f})".format(min_value)
if min_value < 0.58:
return person_names[min_index]+" ({0:.2f})".format(min_value)
if min_value < 0.65:
return person_names[min_index]+"?"+" ({0:.2f})".format(min_value)
return 'Not Found'
def load_face_encodings(self):
image_filenames = filter(lambda x: x.endswith('.jpg'), os.listdir(self.faces_folder_path))
image_filenames = sorted(image_filenames)
person_names = [x[:-4] for x in image_filenames]
full_paths_to_images = [self.faces_folder_path + x for x in image_filenames]
face_encodings = []
win = dlib.image_window()
for path_to_image in full_paths_to_images:
print("Loading user: " + path_to_image)
#face = io.imread(path_to_image)
face = cv2.imread(path_to_image)
face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
faces_bounds = self.face_detector(face, 1)
if len(faces_bounds) != 1:
print("Expected one and only one face per image: " + path_to_image + " - it has " + str(len(faces_bounds)))
exit()
face_bounds = faces_bounds[0]
face_landmarks = self.shape_predictor(face, face_bounds)
face_encoding = np.array(self.face_recognition_model.compute_face_descriptor(face, face_landmarks, 1))
win.clear_overlay()
win.set_image(face)
win.add_overlay(face_bounds)
win.add_overlay(face_landmarks)
face_encodings.append(face_encoding)
#print(face_encoding)
#dlib.hit_enter_to_continue()
return face_encodings, person_names
def detect(self, frame_read):
prev_time = time.time()
frame_resized = cv2.resize(frame_read,
(darknet.network_width(rn.netMain),
darknet.network_height(rn.netMain)),
interpolation=cv2.INTER_LINEAR)
frame_rgb = cv2.cvtColor(frame_resized, cv2.COLOR_BGR2RGB)
darknet.copy_image_from_bytes(self.darknet_image, frame_rgb.tobytes())
detections = darknet.detect_image(self.netMain, self.metaMain, self.darknet_image, thresh=0.25)
#print(1/(time.time()-prev_time))
return detections
# function to get the output layer names
# in the architecture
def get_output_layers(self,net):
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
return output_layers
# function to draw bounding box on the detected object with class name
def draw_bounding_box(self,img, class_id, confidence, x, y, x_plus_w, y_plus_h):
cv2.rectangle(img, (x,y), (x_plus_w,y_plus_h), (0, 0, 255), 2)
#cv2.putText(img, label, (x-10,y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
if __name__ == "__main__":
# Start Yolo Setup
rn = YOLO_NN('.')
# initialize video input
cap = cv2.VideoCapture(1)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 360)
face_encodings, person_names = rn.load_face_encodings()
faceClassifier = cv2.CascadeClassifier(rn.data_dir + '/dlib/haarcascade_frontalface_default.xml')
#rn.recognize_faces_in_video(face_encodings, person_names)
while True:
ret, frame_read = cap.read()
draw_frame = frame_read.copy()
gray = cv2.cvtColor(frame_read, cv2.COLOR_BGR2GRAY)
overlay = frame_read.copy()
cv2.rectangle(overlay, (0, 0), (640, 35), (0, 0, 0), -1)
alpha = 0.8
draw_frame = cv2.addWeighted(overlay, alpha, draw_frame, 1 - alpha, 0)
# Yolo Detection
detections = rn.detect(frame_read.copy())
filter_detections = []
n_users = 0
n_persons = 0
for detection in detections:
if detection[0] == b'person': # It is a person
filter_detections.append(detection)
if len(filter_detections) == 0: # Case Yolo didn't detected any person, try with dlib
            face_rects = faceClassifier.detectMultiScale( # Detect faces with the OpenCV Haar cascade
gray,
scaleFactor = 1.1,
minNeighbors = 5,
minSize = (50, 50),
flags = cv2.CASCADE_SCALE_IMAGE)
n_persons = len(face_rects)
if len(face_rects) > 0: # Case find any face
for (x, y, w, h) in face_rects:
face = draw_frame[y:y + h, x:x + w]
face_encodings_in_image = rn.get_face_encodings(face)
if (face_encodings_in_image):
match = rn.find_match(face_encodings, person_names, face_encodings_in_image[0])
if match == "Not Found":
cv2.putText(draw_frame, "Unknow", (x+5, y-15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.rectangle(draw_frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
else:
cv2.putText(draw_frame, match, (x+5, y-15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
cv2.rectangle(draw_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
n_users += 1
else:
cv2.putText(draw_frame, "Unknow", (x+5, y-15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.rectangle(draw_frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
else:
for detection in filter_detections:
x1, y1, w1, h1 = detection[2][0],\
detection[2][1],\
detection[2][2],\
detection[2][3]
xmin, ymin, xmax, ymax = rn.convertBack(
float(x1), float(y1), float(w1), float(h1))
sx = 640.0/416.0
sy = 360.0/416.0
xmin = int(xmin*sx)
ymin = int(ymin*sy)
xmax = int(xmax*sx)
ymax = int(ymax*sy)
pt1 = (xmin, ymin)
pt2 = (xmax, ymax)
cropped = gray[ymin:ymax, xmin:xmax]
                face_rects = faceClassifier.detectMultiScale( # Detect faces with the OpenCV Haar cascade
gray,
scaleFactor = 1.1,
minNeighbors = 5,
minSize = (50, 50),
flags = cv2.CASCADE_SCALE_IMAGE)
n_persons += 1
if len(face_rects) > 0:
for (x, y, w, h) in face_rects:
face = cropped[y:y + h, x:x + w]
face_encodings_in_image = rn.get_face_encodings(face)
#x += xmin
#y += ymin
if (face_encodings_in_image):
match = rn.find_match(face_encodings, person_names, face_encodings_in_image[0])
if match == "Not Found":
cv2.putText(draw_frame, "Unknow", (x+5, y-15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.rectangle(draw_frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
else:
cv2.putText(draw_frame, match, (x+5, y-15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
cv2.rectangle(draw_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
n_users += 1
else:
cv2.putText(draw_frame, "Unknow", (x+5, y-15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.rectangle(draw_frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
else:
cv2.rectangle(draw_frame, pt1, pt2, (0, 0, 255), 2)
cv2.putText(draw_frame, "Unknow", (pt1[0], pt1[1] - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.putText(draw_frame, "InteliCam Users: " + str(n_users) + " | "+ \
"Persons: " + str(n_persons),
(5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
[255, 255, 255], 1)
cv2.imshow("Frame", draw_frame)
key = cv2.waitKey(3) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
cv2.destroyAllWindows()
|
[
"numpy.linalg.norm",
"cv2.rectangle",
"darknet.network_height",
"dlib.shape_predictor",
"cv2.imshow",
"os.path.abspath",
"cv2.cvtColor",
"os.path.exists",
"cv2.destroyAllWindows",
"re.search",
"cv2.waitKey",
"cv2.addWeighted",
"dlib.face_recognition_model_v1",
"dlib.get_frontal_face_detector",
"darknet.detect_image",
"os.listdir",
"dlib.image_window",
"cv2.putText",
"time.time",
"cv2.VideoCapture",
"cv2.imread",
"cv2.CascadeClassifier",
"os.path.expanduser",
"darknet.network_width"
] |
[((7846, 7865), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(1)'], {}), '(1)\n', (7862, 7865), False, 'import cv2\n'), ((8035, 8120), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (["(rn.data_dir + '/dlib/haarcascade_frontalface_default.xml')"], {}), "(rn.data_dir + '/dlib/haarcascade_frontalface_default.xml'\n )\n", (8056, 8120), False, 'import cv2\n'), ((13538, 13561), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (13559, 13561), False, 'import cv2\n'), ((2570, 2619), 'os.path.expanduser', 'os.path.expanduser', (["(yoloDataFolder + '/face_data')"], {}), "(yoloDataFolder + '/face_data')\n", (2588, 2619), False, 'import os\n'), ((2707, 2739), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (2737, 2739), False, 'import dlib\n'), ((2771, 2858), 'dlib.shape_predictor', 'dlib.shape_predictor', (["(self.data_dir + '/dlib/shape_predictor_68_face_landmarks.dat')"], {}), "(self.data_dir +\n '/dlib/shape_predictor_68_face_landmarks.dat')\n", (2791, 2858), False, 'import dlib\n'), ((2893, 2994), 'dlib.face_recognition_model_v1', 'dlib.face_recognition_model_v1', (["(self.data_dir + '/dlib/dlib_face_recognition_resnet_model_v1.dat')"], {}), "(self.data_dir +\n '/dlib/dlib_face_recognition_resnet_model_v1.dat')\n", (2923, 2994), False, 'import dlib\n'), ((4428, 4470), 'numpy.linalg.norm', 'np.linalg.norm', (['(known_faces - face)'], {'axis': '(1)'}), '(known_faces - face, axis=1)\n', (4442, 4470), True, 'import numpy as np\n'), ((5407, 5426), 'dlib.image_window', 'dlib.image_window', ([], {}), '()\n', (5424, 5426), False, 'import dlib\n'), ((6524, 6535), 'time.time', 'time.time', ([], {}), '()\n', (6533, 6535), False, 'import time\n'), ((6814, 6860), 'cv2.cvtColor', 'cv2.cvtColor', (['frame_resized', 'cv2.COLOR_BGR2RGB'], {}), '(frame_resized, cv2.COLOR_BGR2RGB)\n', (6826, 6860), False, 'import cv2\n'), ((6962, 7048), 'darknet.detect_image', 'darknet.detect_image', (['self.netMain', 'self.metaMain', 'self.darknet_image'], {'thresh': '(0.25)'}), '(self.netMain, self.metaMain, self.darknet_image,\n thresh=0.25)\n', (6982, 7048), False, 'import darknet\n'), ((7580, 7644), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x_plus_w, y_plus_h)', '(0, 0, 255)', '(2)'], {}), '(img, (x, y), (x_plus_w, y_plus_h), (0, 0, 255), 2)\n', (7593, 7644), False, 'import cv2\n'), ((8296, 8340), 'cv2.cvtColor', 'cv2.cvtColor', (['frame_read', 'cv2.COLOR_BGR2GRAY'], {}), '(frame_read, cv2.COLOR_BGR2GRAY)\n', (8308, 8340), False, 'import cv2\n'), ((8385, 8441), 'cv2.rectangle', 'cv2.rectangle', (['overlay', '(0, 0)', '(640, 35)', '(0, 0, 0)', '(-1)'], {}), '(overlay, (0, 0), (640, 35), (0, 0, 0), -1)\n', (8398, 8441), False, 'import cv2\n'), ((8483, 8540), 'cv2.addWeighted', 'cv2.addWeighted', (['overlay', 'alpha', 'draw_frame', '(1 - alpha)', '(0)'], {}), '(overlay, alpha, draw_frame, 1 - alpha, 0)\n', (8498, 8540), False, 'import cv2\n'), ((13346, 13377), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'draw_frame'], {}), "('Frame', draw_frame)\n", (13356, 13377), False, 'import cv2\n'), ((659, 690), 'os.path.exists', 'os.path.exists', (['self.configPath'], {}), '(self.configPath)\n', (673, 690), False, 'import os\n'), ((829, 860), 'os.path.exists', 'os.path.exists', (['self.weightPath'], {}), '(self.weightPath)\n', (843, 860), False, 'import os\n'), ((999, 1028), 'os.path.exists', 'os.path.exists', (['self.metaPath'], {}), '(self.metaPath)\n', (1013, 1028), False, 'import os\n'), ((2428, 2463), 'darknet.network_width', 'darknet.network_width', 
(['self.netMain'], {}), '(self.netMain)\n', (2449, 2463), False, 'import darknet\n'), ((2505, 2541), 'darknet.network_height', 'darknet.network_height', (['self.netMain'], {}), '(self.netMain)\n', (2527, 2541), False, 'import darknet\n'), ((3636, 3680), 'cv2.rectangle', 'cv2.rectangle', (['img', 'pt1', 'pt2', '(0, 255, 0)', '(1)'], {}), '(img, pt1, pt2, (0, 255, 0), 1)\n', (3649, 3680), False, 'import cv2\n'), ((5135, 5169), 'os.listdir', 'os.listdir', (['self.faces_folder_path'], {}), '(self.faces_folder_path)\n', (5145, 5169), False, 'import os\n'), ((5595, 5620), 'cv2.imread', 'cv2.imread', (['path_to_image'], {}), '(path_to_image)\n', (5605, 5620), False, 'import cv2\n'), ((5640, 5677), 'cv2.cvtColor', 'cv2.cvtColor', (['face', 'cv2.COLOR_BGR2RGB'], {}), '(face, cv2.COLOR_BGR2RGB)\n', (5652, 5677), False, 'import cv2\n'), ((13392, 13406), 'cv2.waitKey', 'cv2.waitKey', (['(3)'], {}), '(3)\n', (13403, 13406), False, 'import cv2\n'), ((6619, 6652), 'darknet.network_width', 'darknet.network_width', (['rn.netMain'], {}), '(rn.netMain)\n', (6640, 6652), False, 'import darknet\n'), ((6690, 6724), 'darknet.network_height', 'darknet.network_height', (['rn.netMain'], {}), '(rn.netMain)\n', (6712, 6724), False, 'import darknet\n'), ((1665, 1737), 're.search', 're.search', (['"""names *= *(.*)$"""', 'metaContents', '(re.IGNORECASE | re.MULTILINE)'], {}), "('names *= *(.*)$', metaContents, re.IGNORECASE | re.MULTILINE)\n", (1674, 1737), False, 'import re\n'), ((12851, 12902), 'cv2.rectangle', 'cv2.rectangle', (['draw_frame', 'pt1', 'pt2', '(0, 0, 255)', '(2)'], {}), '(draw_frame, pt1, pt2, (0, 0, 255), 2)\n', (12864, 12902), False, 'import cv2\n'), ((12923, 13030), 'cv2.putText', 'cv2.putText', (['draw_frame', '"""Unknow"""', '(pt1[0], pt1[1] - 5)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(0, 0, 255)', '(2)'], {}), "(draw_frame, 'Unknow', (pt1[0], pt1[1] - 5), cv2.\n FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)\n", (12934, 13030), False, 'import cv2\n'), ((776, 808), 'os.path.abspath', 'os.path.abspath', (['self.configPath'], {}), '(self.configPath)\n', (791, 808), False, 'import os\n'), ((946, 978), 'os.path.abspath', 'os.path.abspath', (['self.weightPath'], {}), '(self.weightPath)\n', (961, 978), False, 'import os\n'), ((1117, 1147), 'os.path.abspath', 'os.path.abspath', (['self.metaPath'], {}), '(self.metaPath)\n', (1132, 1147), False, 'import os\n'), ((1970, 1992), 'os.path.exists', 'os.path.exists', (['result'], {}), '(result)\n', (1984, 1992), False, 'import os\n'), ((10306, 10407), 'cv2.putText', 'cv2.putText', (['draw_frame', '"""Unknow"""', '(x + 5, y - 15)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(0, 0, 255)', '(2)'], {}), "(draw_frame, 'Unknow', (x + 5, y - 15), cv2.FONT_HERSHEY_SIMPLEX,\n 0.5, (0, 0, 255), 2)\n", (10317, 10407), False, 'import cv2\n'), ((10424, 10489), 'cv2.rectangle', 'cv2.rectangle', (['draw_frame', '(x, y)', '(x + w, y + h)', '(0, 0, 255)', '(2)'], {}), '(draw_frame, (x, y), (x + w, y + h), (0, 0, 255), 2)\n', (10437, 10489), False, 'import cv2\n'), ((9784, 9885), 'cv2.putText', 'cv2.putText', (['draw_frame', '"""Unknow"""', '(x + 5, y - 15)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(0, 0, 255)', '(2)'], {}), "(draw_frame, 'Unknow', (x + 5, y - 15), cv2.FONT_HERSHEY_SIMPLEX,\n 0.5, (0, 0, 255), 2)\n", (9795, 9885), False, 'import cv2\n'), ((9906, 9971), 'cv2.rectangle', 'cv2.rectangle', (['draw_frame', '(x, y)', '(x + w, y + h)', '(0, 0, 255)', '(2)'], {}), '(draw_frame, (x, y), (x + w, y + h), (0, 0, 255), 2)\n', (9919, 9971), False, 'import cv2\n'), ((10030, 10129), 
'cv2.putText', 'cv2.putText', (['draw_frame', 'match', '(x + 5, y - 15)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(0, 255, 0)', '(2)'], {}), '(draw_frame, match, (x + 5, y - 15), cv2.FONT_HERSHEY_SIMPLEX, \n 0.5, (0, 255, 0), 2)\n', (10041, 10129), False, 'import cv2\n'), ((10149, 10214), 'cv2.rectangle', 'cv2.rectangle', (['draw_frame', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(draw_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n', (10162, 10214), False, 'import cv2\n'), ((12621, 12722), 'cv2.putText', 'cv2.putText', (['draw_frame', '"""Unknow"""', '(x + 5, y - 15)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(0, 0, 255)', '(2)'], {}), "(draw_frame, 'Unknow', (x + 5, y - 15), cv2.FONT_HERSHEY_SIMPLEX,\n 0.5, (0, 0, 255), 2)\n", (12632, 12722), False, 'import cv2\n'), ((12743, 12808), 'cv2.rectangle', 'cv2.rectangle', (['draw_frame', '(x, y)', '(x + w, y + h)', '(0, 0, 255)', '(2)'], {}), '(draw_frame, (x, y), (x + w, y + h), (0, 0, 255), 2)\n', (12756, 12808), False, 'import cv2\n'), ((12071, 12172), 'cv2.putText', 'cv2.putText', (['draw_frame', '"""Unknow"""', '(x + 5, y - 15)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(0, 0, 255)', '(2)'], {}), "(draw_frame, 'Unknow', (x + 5, y - 15), cv2.FONT_HERSHEY_SIMPLEX,\n 0.5, (0, 0, 255), 2)\n", (12082, 12172), False, 'import cv2\n'), ((12197, 12262), 'cv2.rectangle', 'cv2.rectangle', (['draw_frame', '(x, y)', '(x + w, y + h)', '(0, 0, 255)', '(2)'], {}), '(draw_frame, (x, y), (x + w, y + h), (0, 0, 255), 2)\n', (12210, 12262), False, 'import cv2\n'), ((12329, 12428), 'cv2.putText', 'cv2.putText', (['draw_frame', 'match', '(x + 5, y - 15)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(0, 255, 0)', '(2)'], {}), '(draw_frame, match, (x + 5, y - 15), cv2.FONT_HERSHEY_SIMPLEX, \n 0.5, (0, 255, 0), 2)\n', (12340, 12428), False, 'import cv2\n'), ((12452, 12517), 'cv2.rectangle', 'cv2.rectangle', (['draw_frame', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(draw_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n', (12465, 12517), False, 'import cv2\n')]
|
import numpy as np
image_dimensions = (25, 6)
def load(image_dims, path: str = "input/08.txt"):
with open(path) as file:
return np.array([c for c in file.read()]).reshape((-1, image_dims[0] * image_dims[1]))
def number_of_values_in_layer(layer, value):
return np.count_nonzero(layer == value)
def stack_layers(image_layers):
final_layer = list()
for i in range(len(image_layers[0])):
for j in range(len(image_layers)):
if image_layers[j][i] != "2":
final_layer.append(image_layers[j][i])
break
return np.array(final_layer)
# Prep
layers = load(image_dimensions)
# First
wanted_layer = None
minimum = None
for l in layers:
n = number_of_values_in_layer(l, "0")
if minimum is None or wanted_layer is None or n < minimum:
minimum = n
wanted_layer = l
wanted_1 = number_of_values_in_layer(wanted_layer, "1") * number_of_values_in_layer(wanted_layer, "2")
print(f"[1]\t{wanted_1}")
# Second
stacked_layer = stack_layers(layers).reshape(image_dimensions[::-1])
final_image = list()
for row in stacked_layer:
r = ""
for element in row:
r += "##" if element == "1" else " " if element == "0" else " "
final_image.append(r)
print(f"[2]")
for r in final_image:
print(r)
|
[
"numpy.array",
"numpy.count_nonzero"
] |
[((281, 313), 'numpy.count_nonzero', 'np.count_nonzero', (['(layer == value)'], {}), '(layer == value)\n', (297, 313), True, 'import numpy as np\n'), ((590, 611), 'numpy.array', 'np.array', (['final_layer'], {}), '(final_layer)\n', (598, 611), True, 'import numpy as np\n')]
|
import argparse
import numpy as np
from benchmark_statistics import Statistics
from benchmark_containers import BenchmarkResultsContainer
##############################################################################
def createBenchmarkResults(benchmark_samples, operation):
benchmark_results = BenchmarkResultsContainer()
benchmark_results.operation = operation
# Filter outliers
lower_fence, upper_fence = Statistics.getTukeyFences(benchmark_samples)
lower_outliers_samples = benchmark_samples[benchmark_samples < lower_fence]
benchmark_no_outliers_samples = benchmark_samples[(benchmark_samples >= lower_fence) & (benchmark_samples <= upper_fence)]
upper_outliers_samples = benchmark_samples[benchmark_samples > upper_fence]
benchmark_results.sorted_lower_outliers_samples = np.sort(lower_outliers_samples).tolist()
benchmark_results.sorted_no_outliers_samples = np.sort(benchmark_no_outliers_samples).tolist()
benchmark_results.sorted_upper_outliers_samples = np.sort(upper_outliers_samples).tolist()
# Create statistics info from benchmark samples
for key in benchmark_results.statistics:
without_outliers = key == "Without outliers"
benchmark_samples_to_process = benchmark_no_outliers_samples if without_outliers else benchmark_samples
benchmark_stats = benchmark_results.statistics[key]
benchmark_stats.num_analyzed_samples = Statistics.getNumAnalyzedSamples(benchmark_samples_to_process)
benchmark_stats.minimum = Statistics.getMin(benchmark_samples_to_process)
        benchmark_stats.lower_fence = benchmark_results.sorted_no_outliers_samples[0] # Plotly uses the first non-outlier point; for the exact Tukey fence assign lower_fence instead
benchmark_stats.q1 = Statistics.getPercentile(benchmark_samples_to_process, 25)
benchmark_stats.mean = Statistics.getMean(benchmark_samples_to_process)
benchmark_stats.median = Statistics.getPercentile(benchmark_samples_to_process, 50)
benchmark_stats.q3 = Statistics.getPercentile(benchmark_samples_to_process, 75)
        benchmark_stats.upper_fence = benchmark_results.sorted_no_outliers_samples[-1] # Plotly uses the last non-outlier point; for the exact Tukey fence assign upper_fence instead
benchmark_stats.maximum = Statistics.getMax(benchmark_samples_to_process)
benchmark_stats.iqr = Statistics.getIQR(benchmark_samples_to_process)
benchmark_stats.std_dev = Statistics.getStdDev(benchmark_samples_to_process)
benchmark_stats.std_err = Statistics.getStdErr(benchmark_samples_to_process)
benchmark_stats.std_err_percentage = benchmark_stats.std_err / benchmark_stats.mean * 100.0 if benchmark_stats.std_err > 0.0 else 0.0
benchmark_stats.margin = Statistics.getMargin(benchmark_samples_to_process)
benchmark_stats.margin_percentage = benchmark_stats.margin / benchmark_stats.mean * 100.0 if benchmark_stats.margin > 0.0 else 0.0
benchmark_stats.confidence_interval = Statistics.getConfidenceInterval(benchmark_samples_to_process)
benchmark_stats.skewness = Statistics.getSkewness(benchmark_samples_to_process)
benchmark_stats.kurtosis = Statistics.getKurtosis(benchmark_samples_to_process)
return benchmark_results
##############################################################################
def printBenchmarkResults(benchmark_samples, benchmark_results):
print("Samples:")
print(benchmark_samples, "\n")
print("Sorted Samples:")
print(benchmark_results.sorted_lower_outliers_samples, benchmark_results.sorted_no_outliers_samples, benchmark_results.sorted_upper_outliers_samples, "\n")
for key in benchmark_results.statistics:
without_outliers = key == "Without outliers"
statistics_results = benchmark_results.getFormatedStatisticsResultsWithoutOutliers() if without_outliers else benchmark_results.getFormatedStatisticsResultsWithOutliers()
text_alignment_offset = len(max(statistics_results, key=len)) + 3
print(key + ":")
for stat_key in statistics_results:
print(stat_key + "= ".rjust(text_alignment_offset - len(stat_key)) + statistics_results[stat_key])
print("\n")
##############################################################################
def runAnalyzer(kwargs=None):
# Parse args
parser = argparse.ArgumentParser(description="Benchmark Analyzer")
parser.add_argument("-in",
"--benchmark_samples_file",
type=str,
required=True,
help="File path containing the benchmark observations as comma separated numbers.")
parser.add_argument("-out",
"--json_output_path",
type=str,
required=True,
help="JSON output path for file containing the statistical information of the analyzed benchmark.")
parser.add_argument("-op",
"--operation_name",
type=str,
required=True,
help="Name of the operation related to the benchmark observations.")
parser.add_argument("-out_name",
"--output_file_name",
type=str,
required=False,
help="(Optional) The name of the output file, if this option is not used the file will be called Benchmark_Results_<MONTH>-<DAY>-<YEAR>_<HOUR>h<MINUTE>m<SECOND>s.")
args = parser.parse_args()
# Input Params
benchmark_samples_file = args.benchmark_samples_file
json_output_path = args.json_output_path
operation_name = args.operation_name
output_file_name = args.output_file_name
# Create an array from benchmark samples in file
with open(benchmark_samples_file) as file:
benchmark_samples = np.fromfile(file, dtype=float, sep=",")
# Create benchmark results
benchmark_results = createBenchmarkResults(benchmark_samples, operation_name)
# Print benchmark results
printBenchmarkResults(benchmark_samples, benchmark_results)
# Export benchmark results to a JSON file
benchmark_results.toJSONFile(json_output_path, operation_name, output_file_name)
##############################################################################
#-----------------------------------------------------------------------------
# Main
#-----------------------------------------------------------------------------
if __name__ == '__main__':
runAnalyzer()
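# Example invocation (hypothetical script and file names, using the flags defined above):
#   python benchmark_analyzer.py -in samples.txt -out ./results -op "matrix_multiply"
# Add -out_name my_results to override the default output file name.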
|
[
"benchmark_containers.BenchmarkResultsContainer",
"benchmark_statistics.Statistics.getTukeyFences",
"benchmark_statistics.Statistics.getStdErr",
"argparse.ArgumentParser",
"benchmark_statistics.Statistics.getKurtosis",
"benchmark_statistics.Statistics.getIQR",
"numpy.fromfile",
"benchmark_statistics.Statistics.getConfidenceInterval",
"benchmark_statistics.Statistics.getMean",
"benchmark_statistics.Statistics.getMax",
"numpy.sort",
"benchmark_statistics.Statistics.getStdDev",
"benchmark_statistics.Statistics.getPercentile",
"benchmark_statistics.Statistics.getMin",
"benchmark_statistics.Statistics.getSkewness",
"benchmark_statistics.Statistics.getMargin",
"benchmark_statistics.Statistics.getNumAnalyzedSamples"
] |
[((304, 331), 'benchmark_containers.BenchmarkResultsContainer', 'BenchmarkResultsContainer', ([], {}), '()\n', (329, 331), False, 'from benchmark_containers import BenchmarkResultsContainer\n'), ((439, 483), 'benchmark_statistics.Statistics.getTukeyFences', 'Statistics.getTukeyFences', (['benchmark_samples'], {}), '(benchmark_samples)\n', (464, 483), False, 'from benchmark_statistics import Statistics\n'), ((4643, 4700), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Benchmark Analyzer"""'}), "(description='Benchmark Analyzer')\n", (4666, 4700), False, 'import argparse\n'), ((1476, 1538), 'benchmark_statistics.Statistics.getNumAnalyzedSamples', 'Statistics.getNumAnalyzedSamples', (['benchmark_samples_to_process'], {}), '(benchmark_samples_to_process)\n', (1508, 1538), False, 'from benchmark_statistics import Statistics\n'), ((1586, 1633), 'benchmark_statistics.Statistics.getMin', 'Statistics.getMin', (['benchmark_samples_to_process'], {}), '(benchmark_samples_to_process)\n', (1603, 1633), False, 'from benchmark_statistics import Statistics\n'), ((1858, 1916), 'benchmark_statistics.Statistics.getPercentile', 'Statistics.getPercentile', (['benchmark_samples_to_process', '(25)'], {}), '(benchmark_samples_to_process, 25)\n', (1882, 1916), False, 'from benchmark_statistics import Statistics\n'), ((1964, 2012), 'benchmark_statistics.Statistics.getMean', 'Statistics.getMean', (['benchmark_samples_to_process'], {}), '(benchmark_samples_to_process)\n', (1982, 2012), False, 'from benchmark_statistics import Statistics\n'), ((2060, 2118), 'benchmark_statistics.Statistics.getPercentile', 'Statistics.getPercentile', (['benchmark_samples_to_process', '(50)'], {}), '(benchmark_samples_to_process, 50)\n', (2084, 2118), False, 'from benchmark_statistics import Statistics\n'), ((2166, 2224), 'benchmark_statistics.Statistics.getPercentile', 'Statistics.getPercentile', (['benchmark_samples_to_process', '(75)'], {}), '(benchmark_samples_to_process, 75)\n', (2190, 2224), False, 'from benchmark_statistics import Statistics\n'), ((2449, 2496), 'benchmark_statistics.Statistics.getMax', 'Statistics.getMax', (['benchmark_samples_to_process'], {}), '(benchmark_samples_to_process)\n', (2466, 2496), False, 'from benchmark_statistics import Statistics\n'), ((2544, 2591), 'benchmark_statistics.Statistics.getIQR', 'Statistics.getIQR', (['benchmark_samples_to_process'], {}), '(benchmark_samples_to_process)\n', (2561, 2591), False, 'from benchmark_statistics import Statistics\n'), ((2639, 2689), 'benchmark_statistics.Statistics.getStdDev', 'Statistics.getStdDev', (['benchmark_samples_to_process'], {}), '(benchmark_samples_to_process)\n', (2659, 2689), False, 'from benchmark_statistics import Statistics\n'), ((2737, 2787), 'benchmark_statistics.Statistics.getStdErr', 'Statistics.getStdErr', (['benchmark_samples_to_process'], {}), '(benchmark_samples_to_process)\n', (2757, 2787), False, 'from benchmark_statistics import Statistics\n'), ((2979, 3029), 'benchmark_statistics.Statistics.getMargin', 'Statistics.getMargin', (['benchmark_samples_to_process'], {}), '(benchmark_samples_to_process)\n', (2999, 3029), False, 'from benchmark_statistics import Statistics\n'), ((3219, 3281), 'benchmark_statistics.Statistics.getConfidenceInterval', 'Statistics.getConfidenceInterval', (['benchmark_samples_to_process'], {}), '(benchmark_samples_to_process)\n', (3251, 3281), False, 'from benchmark_statistics import Statistics\n'), ((3329, 3381), 'benchmark_statistics.Statistics.getSkewness', 
'Statistics.getSkewness', (['benchmark_samples_to_process'], {}), '(benchmark_samples_to_process)\n', (3351, 3381), False, 'from benchmark_statistics import Statistics\n'), ((3429, 3481), 'benchmark_statistics.Statistics.getKurtosis', 'Statistics.getKurtosis', (['benchmark_samples_to_process'], {}), '(benchmark_samples_to_process)\n', (3451, 3481), False, 'from benchmark_statistics import Statistics\n'), ((6246, 6285), 'numpy.fromfile', 'np.fromfile', (['file'], {'dtype': 'float', 'sep': '""","""'}), "(file, dtype=float, sep=',')\n", (6257, 6285), True, 'import numpy as np\n'), ((849, 880), 'numpy.sort', 'np.sort', (['lower_outliers_samples'], {}), '(lower_outliers_samples)\n', (856, 880), True, 'import numpy as np\n'), ((944, 982), 'numpy.sort', 'np.sort', (['benchmark_no_outliers_samples'], {}), '(benchmark_no_outliers_samples)\n', (951, 982), True, 'import numpy as np\n'), ((1046, 1077), 'numpy.sort', 'np.sort', (['upper_outliers_samples'], {}), '(upper_outliers_samples)\n', (1053, 1077), True, 'import numpy as np\n')]
|
import torch
import numpy as np
from torch.utils.data import DataLoader
from torchvision import transforms
from data_loader.datasets_custom import TextImageDataset, COCOTextImageDataset
from base import BaseDataLoader
def text_image_collate_fn(data):
collate_data = {}
# Sort a data list by right caption length (descending order).
data.sort(key=lambda x: x['right_caption'].size(0), reverse=True)
collate_data['right_img_id'] = []
collate_data['class_id'] = []
collate_data['right_txt'] = []
class_ids = []
right_captions = []
right_embeds = []
right_images_32 = []
right_images_64 = []
right_images_128 = []
right_images_256 = []
collate_data['wrong_img_id'] = []
collate_data['wrong_txt'] = []
wrong_captions = []
wrong_embeds = []
wrong_images_32 = []
wrong_images_64 = []
wrong_images_128 = []
wrong_images_256 = []
for i in range(len(data)):
        # keep image ids and class ids separate: 'class_id' is overwritten with
        # np.stack(class_ids) below, so the class ids must be what goes into class_ids
        collate_data['right_img_id'].append(data[i]['right_img_id'])
        class_ids.append(data[i]['right_class_id'])
collate_data['right_txt'].append(data[i]['right_txt'])
right_captions.append(data[i]['right_caption'])
right_embeds.append(data[i]['right_embed'])
right_images_32.append(data[i]['right_image_32'])
right_images_64.append(data[i]['right_image_64'])
right_images_128.append(data[i]['right_image_128'])
right_images_256.append(data[i]['right_image_256'])
collate_data['wrong_txt'].append(data[i]['wrong_txt'])
wrong_captions.append(data[i]['wrong_caption'])
wrong_embeds.append(data[i]['wrong_embed'])
wrong_images_32.append(data[i]['wrong_image_32'])
wrong_images_64.append(data[i]['wrong_image_64'])
wrong_images_128.append(data[i]['wrong_image_128'])
wrong_images_256.append(data[i]['wrong_image_256'])
# sort and get captions, lengths, images, embeds, etc.
right_caption_lengths = [len(cap) for cap in right_captions]
collate_data['right_caption_lengths'] = torch.LongTensor(right_caption_lengths)
collate_data['right_captions'] = torch.zeros(len(right_caption_lengths), max(right_caption_lengths)).long()
for i, cap in enumerate(right_captions):
end = right_caption_lengths[i]
collate_data['right_captions'][i, :end] = cap[:end]
# sort and get captions, lengths, images, embeds, etc.
wrong_captions.sort(key=lambda x: len(x), reverse=True)
wrong_caption_lengths = [len(cap) for cap in wrong_captions]
collate_data['wrong_caption_lengths'] = torch.LongTensor(wrong_caption_lengths)
collate_data['wrong_captions'] = torch.zeros(len(wrong_caption_lengths), max(wrong_caption_lengths)).long()
for i, cap in enumerate(wrong_captions):
end = wrong_caption_lengths[i]
collate_data['wrong_captions'][i, :end] = cap[:end]
collate_data['class_id'] = np.stack(class_ids)
collate_data['right_embeds'] = torch.stack(right_embeds, 0)
collate_data['right_images_32'] = torch.stack(right_images_32, 0)
collate_data['right_images_64'] = torch.stack(right_images_64, 0)
collate_data['right_images_128'] = torch.stack(right_images_128, 0)
collate_data['right_images_256'] = torch.stack(right_images_256, 0)
collate_data['wrong_embeds'] = torch.stack(wrong_embeds, 0)
collate_data['wrong_images_32'] = torch.stack(wrong_images_32, 0)
collate_data['wrong_images_64'] = torch.stack(wrong_images_64, 0)
collate_data['wrong_images_128'] = torch.stack(wrong_images_128, 0)
collate_data['wrong_images_256'] = torch.stack(wrong_images_256, 0)
return collate_data
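# Illustrative usage (not part of the original loaders; the dataset name is assumed):
# the collate function above is what the DataLoader subclasses below pass as
# `collate_fn`, e.g.
#
#   loader = DataLoader(some_text_image_dataset, batch_size=4, shuffle=True,
#                       collate_fn=text_image_collate_fn)
#   batch = next(iter(loader))
#   print(batch['right_captions'].shape, batch['right_caption_lengths'])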
class TextImageDataLoader(DataLoader):
def __init__(self, data_dir, dataset_name, which_set, image_size, batch_size, num_workers):
self.data_dir = data_dir
self.which_set = which_set
self.dataset_name = dataset_name
assert self.which_set in {'train', 'valid', 'test'}
self.image_size = (image_size, image_size)
self.batch_size = batch_size
self.num_workers = num_workers
# transforms.ToTensor convert PIL images in range [0, 255] to a torch in range [-1.0, 1.0]
self.transform = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])
self.dataset = TextImageDataset(self.data_dir, self.dataset_name, self.which_set, self.transform, vocab_from_file=False)
self.n_samples = len(self.dataset)
if self.which_set == 'train' or self.which_set == 'valid':
super(TextImageDataLoader, self).__init__(
dataset=self.dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=self.num_workers,
collate_fn=text_image_collate_fn
)
else:
super(TextImageDataLoader, self).__init__(
dataset=self.dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=0,
collate_fn=text_image_collate_fn)
class COCOTextImageDataLoader(BaseDataLoader):
"""
COCO Image Caption Model Data Loader
"""
def __init__(self, data_dir, which_set, image_size, batch_size, validation_split, num_workers):
self.data_dir = data_dir
self.which_set = which_set
self.validation_split = validation_split
assert self.which_set in {'train', 'val', 'test'}
self.image_size = (image_size, image_size)
self.batch_size = batch_size
self.num_workers = num_workers
# transforms.ToTensor convert PIL images in range [0, 255] to a torch in range [-1.0, 1.0]
mean = torch.tensor([0.5, 0.5, 0.5], dtype=torch.float32)
std = torch.tensor([0.5, 0.5, 0.5], dtype=torch.float32)
if which_set == 'val' or which_set == 'test':
self.transform = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std)
])
else:
self.transform = transforms.Compose([
# transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std)
])
self.dataset = COCOTextImageDataset(self.data_dir, self.which_set, self.transform, vocab_from_file=True)
# self.n_samples = len(self.dataset)
if self.which_set == 'train':
super(COCOTextImageDataLoader, self).__init__(
dataset=self.dataset,
batch_size=self.batch_size,
shuffle=True,
validation_split=validation_split,
num_workers=self.num_workers,
collate_fn=text_image_collate_fn
)
else:
super(COCOTextImageDataLoader, self).__init__(
dataset=self.dataset,
batch_size=self.batch_size,
shuffle=False,
validation_split=0,
num_workers=self.num_workers,
collate_fn=text_image_collate_fn)
if __name__ == '__main__':
data_loader = COCOTextImageDataLoader(
data_dir='/Users/leon/Projects/I2T2I/data/coco/',
# dataset_name="birds",
which_set='val',
image_size=256,
batch_size=16,
validation_split=0.05,
num_workers=0)
print(len(data_loader.dataset.vocab))
print(len(data_loader.dataset.vocab.word2idx))
for i, data in enumerate(data_loader):
print(i)
print("right_img_id:", data['right_img_id'])
# print("class_ids:", data["class_id"])
print('right images 32 shape:', data['right_images_32'].shape)
print('right images 64 shape:', data['right_images_64'].shape)
print('right images 128 shape:', data['right_images_128'].shape)
print('right images 256 shape:', data['right_images_256'].shape)
print("right embed shape:", data['right_embeds'].shape)
print("right caption shape:", data['right_captions'].shape)
print("right caption lengths:", data['right_caption_lengths'])
print("right txt:", data["right_txt"])
print("wrong_img_id:", data['wrong_img_id'])
print('wrong images 32 shape:', data['wrong_images_32'].shape)
print('wrong images 64 shape:', data['wrong_images_64'].shape)
print('wrong images 128 shape:', data['wrong_images_128'].shape)
print('wrong images 256 shape:', data['wrong_images_256'].shape)
print("wrong embed shape:", data['wrong_embeds'].shape)
print("wrong caption shape:", data['wrong_captions'].shape)
print("wrong caption lengths:", data['wrong_caption_lengths'])
print("wrong txt:", data["wrong_txt"])
if i == 10:
print("done")
break
|
[
"numpy.stack",
"data_loader.datasets_custom.COCOTextImageDataset",
"torch.stack",
"torchvision.transforms.RandomHorizontalFlip",
"torch.LongTensor",
"data_loader.datasets_custom.TextImageDataset",
"torchvision.transforms.Normalize",
"torch.tensor",
"torchvision.transforms.ToTensor"
] |
[((2042, 2081), 'torch.LongTensor', 'torch.LongTensor', (['right_caption_lengths'], {}), '(right_caption_lengths)\n', (2058, 2081), False, 'import torch\n'), ((2567, 2606), 'torch.LongTensor', 'torch.LongTensor', (['wrong_caption_lengths'], {}), '(wrong_caption_lengths)\n', (2583, 2606), False, 'import torch\n'), ((2895, 2914), 'numpy.stack', 'np.stack', (['class_ids'], {}), '(class_ids)\n', (2903, 2914), True, 'import numpy as np\n'), ((2950, 2978), 'torch.stack', 'torch.stack', (['right_embeds', '(0)'], {}), '(right_embeds, 0)\n', (2961, 2978), False, 'import torch\n'), ((3017, 3048), 'torch.stack', 'torch.stack', (['right_images_32', '(0)'], {}), '(right_images_32, 0)\n', (3028, 3048), False, 'import torch\n'), ((3087, 3118), 'torch.stack', 'torch.stack', (['right_images_64', '(0)'], {}), '(right_images_64, 0)\n', (3098, 3118), False, 'import torch\n'), ((3158, 3190), 'torch.stack', 'torch.stack', (['right_images_128', '(0)'], {}), '(right_images_128, 0)\n', (3169, 3190), False, 'import torch\n'), ((3230, 3262), 'torch.stack', 'torch.stack', (['right_images_256', '(0)'], {}), '(right_images_256, 0)\n', (3241, 3262), False, 'import torch\n'), ((3299, 3327), 'torch.stack', 'torch.stack', (['wrong_embeds', '(0)'], {}), '(wrong_embeds, 0)\n', (3310, 3327), False, 'import torch\n'), ((3366, 3397), 'torch.stack', 'torch.stack', (['wrong_images_32', '(0)'], {}), '(wrong_images_32, 0)\n', (3377, 3397), False, 'import torch\n'), ((3436, 3467), 'torch.stack', 'torch.stack', (['wrong_images_64', '(0)'], {}), '(wrong_images_64, 0)\n', (3447, 3467), False, 'import torch\n'), ((3507, 3539), 'torch.stack', 'torch.stack', (['wrong_images_128', '(0)'], {}), '(wrong_images_128, 0)\n', (3518, 3539), False, 'import torch\n'), ((3579, 3611), 'torch.stack', 'torch.stack', (['wrong_images_256', '(0)'], {}), '(wrong_images_256, 0)\n', (3590, 3611), False, 'import torch\n'), ((4410, 4520), 'data_loader.datasets_custom.TextImageDataset', 'TextImageDataset', (['self.data_dir', 'self.dataset_name', 'self.which_set', 'self.transform'], {'vocab_from_file': '(False)'}), '(self.data_dir, self.dataset_name, self.which_set, self.\n transform, vocab_from_file=False)\n', (4426, 4520), False, 'from data_loader.datasets_custom import TextImageDataset, COCOTextImageDataset\n'), ((5791, 5841), 'torch.tensor', 'torch.tensor', (['[0.5, 0.5, 0.5]'], {'dtype': 'torch.float32'}), '([0.5, 0.5, 0.5], dtype=torch.float32)\n', (5803, 5841), False, 'import torch\n'), ((5856, 5906), 'torch.tensor', 'torch.tensor', (['[0.5, 0.5, 0.5]'], {'dtype': 'torch.float32'}), '([0.5, 0.5, 0.5], dtype=torch.float32)\n', (5868, 5906), False, 'import torch\n'), ((6426, 6519), 'data_loader.datasets_custom.COCOTextImageDataset', 'COCOTextImageDataset', (['self.data_dir', 'self.which_set', 'self.transform'], {'vocab_from_file': '(True)'}), '(self.data_dir, self.which_set, self.transform,\n vocab_from_file=True)\n', (6446, 6519), False, 'from data_loader.datasets_custom import TextImageDataset, COCOTextImageDataset\n'), ((4229, 4262), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (4260, 4262), False, 'from torchvision import transforms\n'), ((4276, 4297), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4295, 4297), False, 'from torchvision import transforms\n'), ((4311, 4374), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.5, 0.5, 0.5]', 'std': '[0.5, 0.5, 0.5]'}), '(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n', (4331, 4374), False, 'from 
torchvision import transforms\n'), ((6028, 6061), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (6059, 6061), False, 'from torchvision import transforms\n'), ((6079, 6100), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6098, 6100), False, 'from torchvision import transforms\n'), ((6118, 6158), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': 'mean', 'std': 'std'}), '(mean=mean, std=std)\n', (6138, 6158), False, 'from torchvision import transforms\n'), ((6307, 6328), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6326, 6328), False, 'from torchvision import transforms\n'), ((6346, 6386), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': 'mean', 'std': 'std'}), '(mean=mean, std=std)\n', (6366, 6386), False, 'from torchvision import transforms\n')]
|
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import tensorflow as tf
from keras.models import model_from_json
import json
from sklearn.metrics import roc_curve, auc, confusion_matrix
import numpy as np
import pandas as pd
from copy import deepcopy
import itertools
from utils import load_data
# import matplotlib
# matplotlib.use('agg')
import matplotlib.pyplot as plt
def load_model_helper(path, model_base_name):
# return load_model(path)
with open(os.path.join(path, f'{model_base_name}.architecture.json'), 'r') as json_file:
loaded_model_json = json_file.read()
m = model_from_json(loaded_model_json)
m.load_weights(os.path.join(path, f'{model_base_name}.weights.h5'))
return m
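# Note on the expected artefacts (an assumption about how the models were saved):
# this loader expects two files per model, <model_base_name>.architecture.json and
# <model_base_name>.weights.h5, i.e. the pair produced by Keras' model.to_json()
# and model.save_weights().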
def thres(v, thr: float = 0.5):
v_ = np.array(deepcopy(v))
v_[v_ >= thr] = 1
v_[v_ < thr] = 0
return v_
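# Tiny worked example of the threshold helper (values are made up):
# thres([0.2, 0.7, 0.5], thr=0.5) -> array([0., 1., 1.]); scores at or above the
# threshold become 1 and everything else 0, which is what feeds the confusion
# matrices computed below.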
if __name__ == '__main__':
tf.keras.backend.clear_session()
# path_base = '/Users/dmitryduev/_caltech/python/deep-asteroids/'
path_base = './'
with open(os.path.join(path_base, 'service/code/config.json')) as f:
config = json.load(f)
# models = config['models']
models = config['models_201901']
model_names = list(models.keys())
path_models = os.path.join(path_base, 'service/models')
c_families = {'rb': '5b96af9c0354c9000b0aea36',
'sl': '5b99b2c6aec3c500103a14de',
'kd': '5be0ae7958830a0018821794',
'os': '5c05bbdc826480000a95c0bf'}
# c_families = {'rb': '5b96af9c0354c9000b0aea36',
# 'sl': '5b99b2c6aec3c500103a14de',
# 'kd': '5be0ae7958830a0018821794'}
# c_families = {'rb': '5b96af9c0354c9000b0aea36'}
path_data = './data'
# mpl colors:
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
# [u'#1f77b4', u'#ff7f0e', u'#2ca02c', u'#d62728', u'#9467bd',
# u'#8c564b', u'#e377c2', u'#7f7f7f', u'#bcbd22', u'#17becf']
# line styles:
line_styles = ['-', '--', ':']
# thresholds
score_thresholds = [0.99, 0.9, 0.5, 0.1, 0.01]
# ROC
fig = plt.figure(figsize=(14, 5))
fig.subplots_adjust(bottom=0.09, left=0.05, right=0.70, top=0.98, wspace=0.2, hspace=0.2)
lw = 1.6
# ROCs
ax = fig.add_subplot(1, 2, 1)
# zoomed ROCs
ax2 = fig.add_subplot(1, 2, 2)
ax.plot([0, 1], [0, 1], color='#333333', lw=lw, linestyle='--')
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.05])
ax.set_xlabel('False Positive Rate (Contamination)')
ax.set_ylabel('True Positive Rate (Sensitivity)')
# ax.legend(loc="lower right")
# ax.legend(loc="best")
ax.grid(True)
ax2.set_xlim([0.0, .2])
ax2.set_ylim([0.8, 1.0])
ax2.set_xlabel('False Positive Rate (Contamination)')
ax2.set_ylabel('True Positive Rate (Sensitivity)')
# ax.legend(loc="lower right")
# ax2.legend(loc="best")
ax2.grid(True)
# Confusion matrices
fig2 = plt.figure()
fig2.subplots_adjust(bottom=0.06, left=0.01, right=1.0, top=0.93, wspace=0.0, hspace=0.12)
cn = 0
for cfi, c_family in enumerate(c_families):
project_id = c_families[c_family]
print(c_family, project_id)
# load data
x_train, y_train, x_test, y_test, classes = load_data(path=path_data,
project_id=project_id,
binary=True,
grayscale=True,
resize=(144, 144),
test_size=0.1,
verbose=True,
random_state=42)
mn = [m_ for m_ in model_names if c_family in m_]
n_mn = len(mn)
for ii, model_name in enumerate(mn):
print(f'loading model {model_name}: {models[model_name]}')
m = load_model_helper(path_models, models[model_name])
y = m.predict(x_test, batch_size=32, verbose=True)
# for thr in (0.5, 0.9):
for thr in (0.5,):
labels_pred = thres(y, thr=thr)
confusion_matr = confusion_matrix(y_test, labels_pred)
confusion_matr_normalized = confusion_matr.astype('float') / confusion_matr.sum(axis=1)[:, np.newaxis]
print(f'Threshold: {thr}')
print('Confusion matrix:')
print(confusion_matr)
print('Normalized confusion matrix:')
print(confusion_matr_normalized)
fpr, tpr, thresholds = roc_curve(y_test, y)
roc_auc = auc(fpr, tpr)
ax.plot(fpr, tpr, line_styles[ii], color=colors[cfi], lw=lw)
ax2.plot(fpr, tpr, line_styles[ii], color=colors[cfi], lw=lw,
label=f'{model_name} curve (area = {roc_auc:.5f})')
# plot thresholds
for it, thr in enumerate(score_thresholds):
x_ = np.interp(thr, thresholds[::-1], fpr)
y_ = np.interp(thr, thresholds[::-1], tpr)
# print(thr, x_, y_)
if cfi == 0 and ii == 0:
ax.plot(x_, y_, '.', markersize=8, color=colors[-(it + 1)], label=f'Threshold: {1-thr:.2f}')
else:
ax.plot(x_, y_, '.', markersize=8, color=colors[-(it + 1)])
ax2.plot(x_, y_, 'o', markersize=8, color=colors[-(it + 1)])
# plot confusion matrices
ax_ = fig2.add_subplot(3, 2 * len(c_families), ii * 8 + cfi * 2 + 1)
ax2_ = fig2.add_subplot(3, 2 * len(c_families), ii * 8 + cfi * 2 + 2)
ax_.imshow(confusion_matr, interpolation='nearest', cmap=plt.cm.Blues)
ax2_.imshow(confusion_matr_normalized, interpolation='nearest', cmap=plt.cm.Blues)
tick_marks = np.arange(2)
# ax_.set_xticks(tick_marks, tick_marks)
# ax_.set_yticks(tick_marks, tick_marks)
# ax2_.set_xticks(tick_marks, tick_marks)
# ax2_.set_yticks(tick_marks, tick_marks)
#
# ax_.xaxis.set_visible(False)
# ax_.yaxis.set_visible(False)
# ax2_.xaxis.set_visible(False)
# ax2_.yaxis.set_visible(False)
ax_.axis('off')
ax2_.axis('off')
thresh = confusion_matr.max() / 2.
thresh_norm = confusion_matr_normalized.max() / 2.
for i, j in itertools.product(range(confusion_matr.shape[0]), range(confusion_matr.shape[1])):
ax_.text(j, i, format(confusion_matr[i, j], 'd'),
horizontalalignment="center",
color="white" if confusion_matr[i, j] > thresh else "black")
ax2_.text(j, i, format(confusion_matr_normalized[i, j], '.2f'),
horizontalalignment="center",
color="white" if confusion_matr_normalized[i, j] > thresh_norm else "black")
# if ii == 0:
# break
ax.legend(loc='lower right')
ax2.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
fig.savefig(f'./roc_rb_sl_kd.png', dpi=300)
fig2.savefig(f'./cm_rb_sl_kd.png', dpi=300)
plt.show()
|
[
"copy.deepcopy",
"json.load",
"matplotlib.pyplot.show",
"utils.load_data",
"sklearn.metrics.roc_curve",
"tensorflow.keras.backend.clear_session",
"sklearn.metrics.auc",
"matplotlib.pyplot.figure",
"keras.models.model_from_json",
"numpy.arange",
"numpy.interp",
"sklearn.metrics.confusion_matrix",
"os.path.join"
] |
[((669, 703), 'keras.models.model_from_json', 'model_from_json', (['loaded_model_json'], {}), '(loaded_model_json)\n', (684, 703), False, 'from keras.models import model_from_json\n'), ((948, 980), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (978, 980), True, 'import tensorflow as tf\n'), ((1304, 1345), 'os.path.join', 'os.path.join', (['path_base', '"""service/models"""'], {}), "(path_base, 'service/models')\n", (1316, 1345), False, 'import os\n'), ((2157, 2184), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 5)'}), '(figsize=(14, 5))\n', (2167, 2184), True, 'import matplotlib.pyplot as plt\n'), ((2999, 3011), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3009, 3011), True, 'import matplotlib.pyplot as plt\n'), ((7445, 7455), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7453, 7455), True, 'import matplotlib.pyplot as plt\n'), ((723, 774), 'os.path.join', 'os.path.join', (['path', 'f"""{model_base_name}.weights.h5"""'], {}), "(path, f'{model_base_name}.weights.h5')\n", (735, 774), False, 'import os\n'), ((842, 853), 'copy.deepcopy', 'deepcopy', (['v'], {}), '(v)\n', (850, 853), False, 'from copy import deepcopy\n'), ((1164, 1176), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1173, 1176), False, 'import json\n'), ((3321, 3468), 'utils.load_data', 'load_data', ([], {'path': 'path_data', 'project_id': 'project_id', 'binary': '(True)', 'grayscale': '(True)', 'resize': '(144, 144)', 'test_size': '(0.1)', 'verbose': '(True)', 'random_state': '(42)'}), '(path=path_data, project_id=project_id, binary=True, grayscale=\n True, resize=(144, 144), test_size=0.1, verbose=True, random_state=42)\n', (3330, 3468), False, 'from utils import load_data\n'), ((537, 595), 'os.path.join', 'os.path.join', (['path', 'f"""{model_base_name}.architecture.json"""'], {}), "(path, f'{model_base_name}.architecture.json')\n", (549, 595), False, 'import os\n'), ((1088, 1139), 'os.path.join', 'os.path.join', (['path_base', '"""service/code/config.json"""'], {}), "(path_base, 'service/code/config.json')\n", (1100, 1139), False, 'import os\n'), ((4801, 4821), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test', 'y'], {}), '(y_test, y)\n', (4810, 4821), False, 'from sklearn.metrics import roc_curve, auc, confusion_matrix\n'), ((4844, 4857), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (4847, 4857), False, 'from sklearn.metrics import roc_curve, auc, confusion_matrix\n'), ((6062, 6074), 'numpy.arange', 'np.arange', (['(2)'], {}), '(2)\n', (6071, 6074), True, 'import numpy as np\n'), ((4379, 4416), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'labels_pred'], {}), '(y_test, labels_pred)\n', (4395, 4416), False, 'from sklearn.metrics import roc_curve, auc, confusion_matrix\n'), ((5187, 5224), 'numpy.interp', 'np.interp', (['thr', 'thresholds[::-1]', 'fpr'], {}), '(thr, thresholds[::-1], fpr)\n', (5196, 5224), True, 'import numpy as np\n'), ((5246, 5283), 'numpy.interp', 'np.interp', (['thr', 'thresholds[::-1]', 'tpr'], {}), '(thr, thresholds[::-1], tpr)\n', (5255, 5283), True, 'import numpy as np\n')]
|
import numpy as np
import nimfa
V = np.random.rand(40, 100)
nmf = nimfa.Nmf(V, seed="nndsvd", rank=10, max_iter=12, update='euclidean',
objective='fro')
nmf_fit = nmf()
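# Typical continuation (assumed nimfa usage, not part of the original snippet):
# the fitted object exposes the factor matrices, so V is approximated by W * H.
W = nmf_fit.basis()
H = nmf_fit.coef()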
|
[
"numpy.random.rand",
"nimfa.Nmf"
] |
[((38, 61), 'numpy.random.rand', 'np.random.rand', (['(40)', '(100)'], {}), '(40, 100)\n', (52, 61), True, 'import numpy as np\n'), ((68, 158), 'nimfa.Nmf', 'nimfa.Nmf', (['V'], {'seed': '"""nndsvd"""', 'rank': '(10)', 'max_iter': '(12)', 'update': '"""euclidean"""', 'objective': '"""fro"""'}), "(V, seed='nndsvd', rank=10, max_iter=12, update='euclidean',\n objective='fro')\n", (77, 158), False, 'import nimfa\n')]
|
import numpy as np
import pandas as pd
from sklearn import model_selection
import tensorflow as tf
from pathlib import Path
"""
<NAME>, WAK2116, ELEN-E6889, Spring 2019
Final Project
This python file trains a neural network that predicts an activity level
based on a jpg image from a traffic camera
This is an initial attempt at doing regression based on image data.
It is loosely based on TF image classification examples and
"Deep Leaning: Image Recognition" on Lynda.com
"""
# view sample image
img_path = "./labeled_data/"
df = pd.read_csv('./labeled_data/labels.txt')
#print(df)
df_train, df_valid = model_selection.train_test_split(df, test_size=.1)
#print(df_train)
#print("---")
#print(df_valid)
def keras_data(data):
# Output arrays
x = np.empty([0, 160, 160, 3], dtype=np.float32)
y = np.empty([data.datetime.count()], dtype=np.float32)
#print(x.shape)
#print(y.shape)
# Read in and preprocess a batch of images
sess = tf.Session()
for i in range(0, data.datetime.count()):
#print(data.people[data.index[i]])
y_value = data.vehicles[data.index[i]] + data.people[data.index[i]]
#print(y_value)
#y = np.append(y, [y_value])
y[i] = y_value
# convert image to a tensor
# img_raw = tf.read_file(sample_img_path)
image_file = img_path + data.datetime[data.index[i]]
img_raw = tf.read_file(image_file)
#print(repr(img_raw)[:200]+"...")
img_tensor = tf.image.decode_image(img_raw)
#print(img_tensor.shape)
cropped_tensor = tf.image.crop_to_bounding_box(img_tensor,80, 80, 160, 220)
#print(cropped_tensor.shape)
#output_image = tf.image.encode_png(cropped_tensor)
#file = tf.write_file("text.png",output_image)
img_tensor = tf.image.resize(cropped_tensor, [160, 160])
#img_tensor = tf.image.resize(img_tensor, [240, 240]) # squish it down a bit
img_tensor /= 255.0 # normalize to [0,1] range
# print(img_tensor)
#print(img_tensor.shape)
# print(img_tensor.dtype)
        # reuse the session opened before the loop instead of creating a new one per image
        with sess.as_default():
            np_array = img_tensor.eval()
#print("np from tensor", np_array.shape)
indexed_array = np.expand_dims(np_array, axis=0)
#print("np from tensor with index",indexed_array.shape)
x = np.append(x, indexed_array, axis=0)
#print("x shape", x.shape)
#print(y.shape)
return x, y
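# Shape check (illustrative, assuming N labelled rows in labels.txt): keras_data
# returns x of shape (N, 160, 160, 3) -- one cropped, resized, [0, 1]-normalized
# image per row -- and y of shape (N,), the vehicles + people count used as the
# regression target below.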
x_train, y_train = keras_data(df_train)
x_test, y_test = keras_data(df_valid)
#y_train = tf.keras.utils.to_categorical(y_train, 16)
#y_test = tf.keras.utils.to_categorical(y_test, 16)
y_train = y_train / 16
y_test = y_test / 16
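# The targets are scaled into roughly [0, 1] by dividing by 16 (an assumed upper
# bound on vehicles + people per frame), so the single linear output unit of the
# network below predicts a normalized activity level rather than a raw count.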
#(x_train, y_train), (x_test,y_test) = tf.keras.datasets.cifar10.load_data()
#x_train = x_train.astype("float32")
#x_test = x_test.astype("float32")
#x_train = x_train / 255
#x_test = x_test / 255
#y_train = tf.keras.utils.to_categorical(y_train, 10)
#y_test = tf.keras.utils.to_categorical(y_test, 10)
model = tf.keras.Sequential()
#model.add(tf.keras.layers.Conv2D(32,(3,3), padding='same', activation='relu', input_shape=(32, 32, 3)))
model.add(tf.keras.layers.Conv2D(32,(3,3), padding='same', activation='relu', input_shape=(160, 160, 3)))
model.add(tf.keras.layers.Conv2D(32,(3,3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(2,2))
model.add(tf.keras.layers.Dropout(0.25))
model.add(tf.keras.layers.Conv2D(64,(3,3), padding='same', activation='relu'))
model.add(tf.keras.layers.Conv2D(64,(3,3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(2,2))
model.add(tf.keras.layers.Dropout(0.25))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(512, activation="relu"))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(100, activation='relu'))
model.add(tf.keras.layers.Dropout(.25))
#model.add(tf.keras.layers.Dense(10, activation="softmax"))
model.add(tf.keras.layers.Dense(10, activation="relu"))
model.add(tf.keras.layers.Dense(1))
model.compile(
#loss='categorical_crossentropy',
loss='mse',
optimizer='adam',
metrics=["accuracy", "mae"]
)
model.summary()
model.fit(
x_train,
y_train,
batch_size=10,
epochs=30,
    validation_data=(x_test, y_test),
shuffle=True #,
#steps_per_epoch=1000
)
# save structure
model_structure = model.to_json()
f = Path("model_structure.json")
f.write_text(model_structure)
# save weights
model.save_weights("model_weights.h5")
|
[
"tensorflow.image.crop_to_bounding_box",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Dense",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.empty",
"tensorflow.Session",
"numpy.expand_dims",
"numpy.append",
"pathlib.Path",
"tensorflow.image.decode_image",
"tensorflow.keras.Sequential",
"tensorflow.read_file",
"tensorflow.image.resize",
"tensorflow.keras.layers.Flatten"
] |
[((568, 608), 'pandas.read_csv', 'pd.read_csv', (['"""./labeled_data/labels.txt"""'], {}), "('./labeled_data/labels.txt')\n", (579, 608), True, 'import pandas as pd\n'), ((642, 693), 'sklearn.model_selection.train_test_split', 'model_selection.train_test_split', (['df'], {'test_size': '(0.1)'}), '(df, test_size=0.1)\n', (674, 693), False, 'from sklearn import model_selection\n'), ((3077, 3098), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {}), '()\n', (3096, 3098), True, 'import tensorflow as tf\n'), ((4436, 4464), 'pathlib.Path', 'Path', (['"""model_structure.json"""'], {}), "('model_structure.json')\n", (4440, 4464), False, 'from pathlib import Path\n'), ((794, 838), 'numpy.empty', 'np.empty', (['[0, 160, 160, 3]'], {'dtype': 'np.float32'}), '([0, 160, 160, 3], dtype=np.float32)\n', (802, 838), True, 'import numpy as np\n'), ((998, 1010), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1008, 1010), True, 'import tensorflow as tf\n'), ((3214, 3314), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""', 'activation': '"""relu"""', 'input_shape': '(160, 160, 3)'}), "(32, (3, 3), padding='same', activation='relu',\n input_shape=(160, 160, 3))\n", (3236, 3314), True, 'import tensorflow as tf\n'), ((3321, 3374), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (3343, 3374), True, 'import tensorflow as tf\n'), ((3384, 3418), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (3412, 3418), True, 'import tensorflow as tf\n'), ((3429, 3458), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.25)'], {}), '(0.25)\n', (3452, 3458), True, 'import tensorflow as tf\n'), ((3471, 3540), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(64, (3, 3), padding='same', activation='relu')\n", (3493, 3540), True, 'import tensorflow as tf\n'), ((3550, 3603), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (3572, 3603), True, 'import tensorflow as tf\n'), ((3613, 3647), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (3641, 3647), True, 'import tensorflow as tf\n'), ((3658, 3687), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.25)'], {}), '(0.25)\n', (3681, 3687), True, 'import tensorflow as tf\n'), ((3700, 3725), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (3723, 3725), True, 'import tensorflow as tf\n'), ((3738, 3783), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(512)'], {'activation': '"""relu"""'}), "(512, activation='relu')\n", (3759, 3783), True, 'import tensorflow as tf\n'), ((3795, 3823), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (3818, 3823), True, 'import tensorflow as tf\n'), ((3836, 3881), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(100)'], {'activation': '"""relu"""'}), "(100, activation='relu')\n", (3857, 3881), True, 'import tensorflow as tf\n'), ((3893, 3922), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.25)'], {}), '(0.25)\n', (3916, 3922), True, 'import tensorflow as tf\n'), ((3994, 4038), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', 
(['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (4015, 4038), True, 'import tensorflow as tf\n'), ((4050, 4074), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {}), '(1)\n', (4071, 4074), True, 'import tensorflow as tf\n'), ((1427, 1451), 'tensorflow.read_file', 'tf.read_file', (['image_file'], {}), '(image_file)\n', (1439, 1451), True, 'import tensorflow as tf\n'), ((1516, 1546), 'tensorflow.image.decode_image', 'tf.image.decode_image', (['img_raw'], {}), '(img_raw)\n', (1537, 1546), True, 'import tensorflow as tf\n'), ((1605, 1664), 'tensorflow.image.crop_to_bounding_box', 'tf.image.crop_to_bounding_box', (['img_tensor', '(80)', '(80)', '(160)', '(220)'], {}), '(img_tensor, 80, 80, 160, 220)\n', (1634, 1664), True, 'import tensorflow as tf\n'), ((1839, 1882), 'tensorflow.image.resize', 'tf.image.resize', (['cropped_tensor', '[160, 160]'], {}), '(cropped_tensor, [160, 160])\n', (1854, 1882), True, 'import tensorflow as tf\n'), ((2136, 2148), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2146, 2148), True, 'import tensorflow as tf\n'), ((2303, 2335), 'numpy.expand_dims', 'np.expand_dims', (['np_array'], {'axis': '(0)'}), '(np_array, axis=0)\n', (2317, 2335), True, 'import numpy as np\n'), ((2420, 2455), 'numpy.append', 'np.append', (['x', 'indexed_array'], {'axis': '(0)'}), '(x, indexed_array, axis=0)\n', (2429, 2455), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 6 18:22:04 2011
@author: -
"""
import os
import numpy
from matplotlib import pyplot
from neuronpy.graphics import spikeplot
from bulbspikes import *
from neuronpy.util import spiketrain
from params import sim_var
homedir = os.path.join(os.path.relpath('..'))
analysis_path = homedir
def format_axes(ax, dt=1, ylim=(0.,4.)):
#ax.set_xticks(numpy.arange(0,num_intervals,(num_intervals-1)/4.))
#ax.set_xticklabels(['$-\pi$','$-\pi/2$','$0$','$\pi/2$','$\pi$'], fontsize=18)
xlim = ax.get_xlim()
timesteps=int((xlim[1]*dt-xlim[0]*dt)/2.)
ax.set_xticks(numpy.linspace(xlim[0],xlim[1],5))
ax.set_xticklabels(numpy.asarray(numpy.linspace(-timesteps,timesteps,5), dtype=int))
ax.set_xlabel('lag (ms)')
ax.set_ylim(ylim)
ax.set_ylabel('Synchronization magnitude')
def draw_cell(cellid, ax, color='black'):
xloc = 10+cellid*20
# Lateral dends
y = numpy.abs(numpy.subtract(range(101), xloc))
yvec = numpy.log(numpy.add(y,1))
ax.plot(range(101), yvec, color=color)
# Soma
ax.fill_between(range(101), numpy.ones(101), yvec, \
where=numpy.ma.masked_where(yvec < 1., yvec).mask, \
color=color, linewidth=0.)
# Glom
ax.plot([xloc], [9], color=color, marker='o', markersize=10, markerfacecolor='white', markeredgecolor=color)
ax.plot([xloc], [9], color=color, marker='o', markersize=9, alpha=0.25)
ax.plot([xloc], [9], color=color, marker='1', markersize=7, markeredgewidth=2)
# Primary dendrite
ax.plot([xloc, xloc], [0,8], color=color, linewidth=2)
format_schematic_axis(ax)
def draw_weights(cellids, ax, color='black',scale=1.):
"""Draw granule cells"""
import synweightsnapshot
sws = synweightsnapshot.SynWeightSnapshot( \
nummit=sim_var['num_mitral'], \
numgran=sim_var['num_granule'])
raw=sws.read_file(sim_var['wt_input_file'],
os.path.join(homedir, sim_var['weight_dir']))
sws.parse_data(raw)
for cellid in cellids:
wts = sws.m2g[cellid,:,0]
wts = wts/numpy.max(wts)
for i in range(len(wts)):
if wts[i] > 0.0001:
cellloc = 10+cellid*20
y = numpy.abs(i - cellloc)
yloc = numpy.log(numpy.add(y,1))
gloc = -3.5+((i%2)*1.5)
ax.plot([i],[yloc], marker='o', markerfacecolor=color, markersize=4.*scale, markeredgecolor=color)
ax.plot([i,i],[yloc, gloc], color=color)
ax.plot([i],[gloc], marker='^', markerfacecolor=color, markersize=6.*scale, markeredgecolor=color)
format_schematic_axis(ax)
def format_schematic_axis(ax):
ax.set_xlim((0,100))
xticks = [10,30,50,70,90]
ax.set_xticks(xticks)
ax.set_xticklabels(numpy.multiply(xticks,10))
ax.set_xlabel('distance in microns')
ax.set_ylim((-5,11))
ax.spines['left'].set_color('none')
ax.spines['right'].set_color('none')
ax.set_yticks([])
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_color('black')
ax.xaxis.set_ticks_position('bottom')
def read_weightevents():
M = numpy.loadtxt(os.path.join(analysis_path, 'stimweightevents.txt'))
data = []
for i in range(5):
data.append([])
for m in M:
data[int(m[0])].append(m[1])
return data
def read_delayevents():
M = numpy.loadtxt(os.path.join(analysis_path, 'stimdelayevents.txt'))
data = []
for i in range(5):
data.append([])
for m in M:
data[int(m[0])].append(m[1])
return data
def raster(pair=[0,4], cluster_width=5, fi=.005, xlim=(1000,2000)):
# pos1 = (10+pair[0]*20, cluster_width, 1, pair)
# pos2 = (10+pair[1]*20, cluster_width, 1, pair)
# stim_odor_mags = numpy.ones(5)*.55
fig = pyplot.figure(figsize=(9.5,5.7))
raster_ax = fig.add_axes([.1,.1,.8,.27])
schematic_ax = fig.add_axes([.1,.85,.8,.1])
syn_ax = fig.add_axes([.1,.45,.8,.225])
draw_cell(pair[0], schematic_ax, color='red')
draw_cell(pair[1], schematic_ax, color='blue')
draw_weights(pair, schematic_ax, color='black')
# Analyze an output file in some_dir
bulb_spikes = BulbSpikes(sim_time=sim_var['tstop'])
bulb_spikes.read_file(os.path.join(homedir,'spikeout.spk'))
breath_events = numpy.loadtxt(os.path.join(homedir, 'breathevents.txt'))
wts = read_weightevents()
delays = read_delayevents()
dt = 1
tstop = xlim[1]
x = numpy.arange(0,tstop,dt)
    # tstop / dt is a float under true division, so cast to int for the array shape
    y0 = numpy.zeros(int(tstop / dt))
    y1 = numpy.zeros(int(tstop / dt))
EXP = numpy.exp(numpy.multiply(x,-1./200.))-numpy.exp( \
numpy.multiply(x,-1./20.))
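    # EXP is a difference-of-exponentials synaptic kernel (fast ~20 ms rise,
    # slow ~200 ms decay); each sniff event adds a copy of it, scaled by the
    # stimulus weight and shifted by the per-cell delay, to the EPSC traces
    # y0 and y1 below.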
idx = 0
for b in breath_events:
if b >= tstop:
break
else:
dtidx = int((b+delays[pair[0]][idx])/dt)
y0[dtidx:] += EXP[:-dtidx]*wts[pair[0]][idx]
dtidx = int((b+delays[pair[1]][idx])/dt)
y1[dtidx:] += EXP[:-dtidx]*wts[pair[1]][idx]
idx += 1
redplt = syn_ax.plot(x,y0, color='red')
blueplt = syn_ax.plot(x,y1, color='blue')
for breath in breath_events:
breathplt = syn_ax.plot([breath, breath], [0,2], linestyle='--', \
color='gray', linewidth=2)
syn_ax.set_xlim(xlim)
syn_ax.set_ylim(0,1.6)
syn_ax.set_yticks([])
syn_ax.set_xticks([])
syn_ax.set_ylabel('EPSC onto tuft')
leg = syn_ax.legend([breathplt, redplt, blueplt], \
['sniff event', 'input onto red', 'input onto blue'], \
bbox_to_anchor=(0, 1.15, 1., .102), loc=1, ncol=3, mode="expand", \
borderaxespad=0., handletextpad=.2)
# Mark sniff interval
for i in range(len(breath_events)):
if breath_events[i] > xlim[0]:
span = syn_ax.annotate('', xy=(breath_events[i], .28), xycoords='data',
xytext=(breath_events[i+1], .28), \
textcoords='data', \
arrowprops=dict(arrowstyle="|-|", linewidth=2)
)
syn_ax.text((breath_events[i]+breath_events[i+1])/2., .53, \
'sniff every\n150 - 250 ms', \
horizontalalignment='center', verticalalignment='top', \
backgroundcolor='white')
break
# Mark amplitude interval
span = syn_ax.annotate('', xy=(1190, 1.28), xycoords='data',
xytext=(1190, 1.12), \
textcoords='data', \
arrowprops=dict(arrowstyle="|-|", linewidth=2)
)
syn_ax.text(1215, 1.21, \
'+/- 5%', \
horizontalalignment='left', verticalalignment='center')
# Mark delay interval
for i in range(len(breath_events)):
if breath_events[i] > 1400:
span = syn_ax.annotate('', xy=(breath_events[i]-2, .5), xycoords='data',
xytext=(breath_events[i]+17, .5), \
textcoords='data', \
arrowprops=dict(arrowstyle="|-|", linewidth=2)
)
syn_ax.text(breath_events[i]+7.5, .28, \
'delay 0-15 ms', \
horizontalalignment='center', verticalalignment='top', \
backgroundcolor='white')
break
spikes = bulb_spikes.get_mitral_spikes()
ref=spikes[pair[0]]
comp=spikes[pair[1]]
gcspikes = bulb_spikes.get_granule_spikes()
mididx = 10+pair[0]*20
gcleft = gcspikes[mididx-int(cluster_width/2.):mididx+int(cluster_width/2.)+1]
mididx = 10+pair[1]*20
gcright = gcspikes[mididx-int(cluster_width/2.):mididx+int(cluster_width/2.)+1]
sp = spikeplot.SpikePlot(fig=fig, savefig=False)
sp.set_markercolor('blue')
sp.set_markeredgewidth(2.)
sp.set_markerscale(4)
sp.plot_spikes([comp], label='comp', cell_offset=cluster_width*2+5, \
draw=False )
sp.set_markercolor('red')
sp.plot_spikes([ref], label='ref', cell_offset=cluster_width*2+2, \
draw=False)
sp.set_markerscale(1.3)
sp.set_markeredgewidth(1.5)
sp.set_markercolor('blue')
sp.plot_spikes(gcright, label='gcright', cell_offset=cluster_width, \
draw=False)
sp.set_markercolor('red')
sp.plot_spikes(gcleft, label='gcleft', cell_offset=0, \
draw=False)
coincidences, mask_a, mask_b, ratio = \
spiketrain.get_sync_traits(ref, comp, window=5)
# idx = 0
# for i in mask_a:
# if i == 1:
# raster_ax.plot([ref[idx]],[cluster_width*2+1.9], marker='o', color='red')
# idx += 1
idx = 0
for i in mask_b:
if i == 1:
if comp[idx] >= xlim[0] and comp[idx] < xlim[1]:
raster_ax.text(comp[idx],cluster_width*2+8.5, '*', \
color='purple', fontweight='bold', \
horizontalalignment='center', verticalalignment='center')
#raster_ax.plot([comp[idx]],[cluster_width*2+7], marker='o', color='blue')
idx += 1
raster_ax.text(2000,cluster_width*2+8.5, '(synchronized)', color='purple', \
horizontalalignment='center', verticalalignment='center',
fontsize=11)
raster_ax.set_yticks([])
ylim = (0.5, cluster_width*2+7.5)
for breath in breath_events:
raster_ax.plot([breath, breath], [ylim[0], ylim[1]], linestyle='--', color='gray', linewidth=2)
sp.update_xlim(xlim)
raster_ax.set_ylim(ylim)
raster_ax.set_xlabel('time (ms)')
raster_ax.set_ylabel('spike output\n granule mitral\n\n', horizontalalignment='center')
pos = schematic_ax.get_position()
schematic_ax.text(.025, pos.ymax+.02, 'A)', transform=fig.transFigure,
verticalalignment='baseline')
pos = syn_ax.get_position()
syn_ax.text(.025, pos.ymax+.07, 'B)', transform=fig.transFigure,
verticalalignment='baseline')
pos = raster_ax.get_position()
raster_ax.text(.025, pos.ymax+.02, 'C)', transform=fig.transFigure,
verticalalignment='baseline')
# fig.savefig(os.path.join(analysis_path, 'raster_w%d_(%d-%d)_%.3f.pdf') %(cluster_width, pair[0], pair[1], fi))
fig.savefig(os.path.join(analysis_path, 'fig1.pdf'))
raster()
|
[
"numpy.multiply",
"numpy.abs",
"numpy.ma.masked_where",
"synweightsnapshot.SynWeightSnapshot",
"neuronpy.graphics.spikeplot.SpikePlot",
"numpy.zeros",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.max",
"numpy.arange",
"os.path.relpath",
"numpy.linspace",
"numpy.add",
"os.path.join",
"neuronpy.util.spiketrain.get_sync_traits"
] |
[((286, 307), 'os.path.relpath', 'os.path.relpath', (['""".."""'], {}), "('..')\n", (301, 307), False, 'import os\n'), ((1748, 1850), 'synweightsnapshot.SynWeightSnapshot', 'synweightsnapshot.SynWeightSnapshot', ([], {'nummit': "sim_var['num_mitral']", 'numgran': "sim_var['num_granule']"}), "(nummit=sim_var['num_mitral'], numgran=\n sim_var['num_granule'])\n", (1783, 1850), False, 'import synweightsnapshot\n'), ((3810, 3843), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(9.5, 5.7)'}), '(figsize=(9.5, 5.7))\n', (3823, 3843), False, 'from matplotlib import pyplot\n'), ((4480, 4506), 'numpy.arange', 'numpy.arange', (['(0)', 'tstop', 'dt'], {}), '(0, tstop, dt)\n', (4492, 4506), False, 'import numpy\n'), ((4514, 4537), 'numpy.zeros', 'numpy.zeros', (['(tstop / dt)'], {}), '(tstop / dt)\n', (4525, 4537), False, 'import numpy\n'), ((4545, 4568), 'numpy.zeros', 'numpy.zeros', (['(tstop / dt)'], {}), '(tstop / dt)\n', (4556, 4568), False, 'import numpy\n'), ((7775, 7818), 'neuronpy.graphics.spikeplot.SpikePlot', 'spikeplot.SpikePlot', ([], {'fig': 'fig', 'savefig': '(False)'}), '(fig=fig, savefig=False)\n', (7794, 7818), False, 'from neuronpy.graphics import spikeplot\n'), ((8493, 8540), 'neuronpy.util.spiketrain.get_sync_traits', 'spiketrain.get_sync_traits', (['ref', 'comp'], {'window': '(5)'}), '(ref, comp, window=5)\n', (8519, 8540), False, 'from neuronpy.util import spiketrain\n'), ((619, 654), 'numpy.linspace', 'numpy.linspace', (['xlim[0]', 'xlim[1]', '(5)'], {}), '(xlim[0], xlim[1], 5)\n', (633, 654), False, 'import numpy\n'), ((1002, 1017), 'numpy.add', 'numpy.add', (['y', '(1)'], {}), '(y, 1)\n', (1011, 1017), False, 'import numpy\n'), ((1104, 1119), 'numpy.ones', 'numpy.ones', (['(101)'], {}), '(101)\n', (1114, 1119), False, 'import numpy\n'), ((1941, 1985), 'os.path.join', 'os.path.join', (['homedir', "sim_var['weight_dir']"], {}), "(homedir, sim_var['weight_dir'])\n", (1953, 1985), False, 'import os\n'), ((2804, 2830), 'numpy.multiply', 'numpy.multiply', (['xticks', '(10)'], {}), '(xticks, 10)\n', (2818, 2830), False, 'import numpy\n'), ((3173, 3224), 'os.path.join', 'os.path.join', (['analysis_path', '"""stimweightevents.txt"""'], {}), "(analysis_path, 'stimweightevents.txt')\n", (3185, 3224), False, 'import os\n'), ((3403, 3453), 'os.path.join', 'os.path.join', (['analysis_path', '"""stimdelayevents.txt"""'], {}), "(analysis_path, 'stimdelayevents.txt')\n", (3415, 3453), False, 'import os\n'), ((4258, 4295), 'os.path.join', 'os.path.join', (['homedir', '"""spikeout.spk"""'], {}), "(homedir, 'spikeout.spk')\n", (4270, 4295), False, 'import os\n'), ((4330, 4371), 'os.path.join', 'os.path.join', (['homedir', '"""breathevents.txt"""'], {}), "(homedir, 'breathevents.txt')\n", (4342, 4371), False, 'import os\n'), ((10317, 10356), 'os.path.join', 'os.path.join', (['analysis_path', '"""fig1.pdf"""'], {}), "(analysis_path, 'fig1.pdf')\n", (10329, 10356), False, 'import os\n'), ((691, 731), 'numpy.linspace', 'numpy.linspace', (['(-timesteps)', 'timesteps', '(5)'], {}), '(-timesteps, timesteps, 5)\n', (705, 731), False, 'import numpy\n'), ((2090, 2104), 'numpy.max', 'numpy.max', (['wts'], {}), '(wts)\n', (2099, 2104), False, 'import numpy\n'), ((4587, 4618), 'numpy.multiply', 'numpy.multiply', (['x', '(-1.0 / 200.0)'], {}), '(x, -1.0 / 200.0)\n', (4601, 4618), False, 'import numpy\n'), ((4640, 4670), 'numpy.multiply', 'numpy.multiply', (['x', '(-1.0 / 20.0)'], {}), '(x, -1.0 / 20.0)\n', (4654, 4670), False, 'import numpy\n'), ((1143, 1182), 'numpy.ma.masked_where', 
'numpy.ma.masked_where', (['(yvec < 1.0)', 'yvec'], {}), '(yvec < 1.0, yvec)\n', (1164, 1182), False, 'import numpy\n'), ((2235, 2257), 'numpy.abs', 'numpy.abs', (['(i - cellloc)'], {}), '(i - cellloc)\n', (2244, 2257), False, 'import numpy\n'), ((2291, 2306), 'numpy.add', 'numpy.add', (['y', '(1)'], {}), '(y, 1)\n', (2300, 2306), False, 'import numpy\n')]
|
import os
from data_loader.data_generator import DataGenerator
from models.invariant_basic import invariant_basic
from trainers.trainer import Trainer
from Utils.config import process_config
from Utils.dirs import create_dirs
from Utils import doc_utils
from Utils.utils import get_args
from data_loader import data_helper as helper
# capture the config path from the run arguments
# then process the json configuration file
config = process_config('/Users/jiahe/PycharmProjects/gnn multiple inputs/configs/parameter_search_config.json')
os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu
import tensorflow.compat.v1 as tf
import numpy as np
tf.set_random_seed(1)
base_summary_folder = config.summary_dir
base_exp_name = config.exp_name
# create the experiments dirs
create_dirs([config.summary_dir, config.checkpoint_dir])
data = DataGenerator(config)
for lr in [0.00008*(2**i) for i in range(2,8)]:
for a1d in [[5],[10]]:
for a3d in [[5], [10],[15]]:
for fully in [[50,50],[20,20]]:
config.learning_rate = lr
config.architecture2d = a1d
config.architecture = a3d
config.fc = fully
config.exp_name = base_exp_name + " lr={0}_a2d={1}_a3d = {2}_fc = {3}".format(lr, a1d,a3d,fully)
curr_dir = os.path.join(base_summary_folder, "lr={0}_a2d={1}_a3d = {2}_fc = {3}".format(lr, a1d, a3d, fully))
config.summary_dir = curr_dir
create_dirs([curr_dir])
# create your data generator
data.config.learning_rate=lr
data.config.architecture2d = a1d
data.config.architecture3d = a3d
data.config.fc = fully
gpuconfig = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
gpuconfig.gpu_options.visible_device_list = config.gpus_list
gpuconfig.gpu_options.allow_growth = True
sess = tf.Session(config=gpuconfig)
# create an instance of the model you want
model = invariant_basic(config, data)
# create trainer and pass all the previous components to it
trainer = Trainer(sess, model, data, config)
# here you train your model
acc, loss, _ = trainer.train()
sess.close()
tf.reset_default_graph()
import pandas as pd
def summary_10fold_results(summary_dir):
df = pd.read_csv(summary_dir+"/per_epoch_stats.csv")
acc = np.array(df["val_accuracy"])
print("Results")
print("Mean Accuracy = {0}".format(np.mean(acc)))
# print("Mean std = {0}".format(np.std(acc)))
return np.mean(acc)
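# Illustrative call (assuming the trainer writes per_epoch_stats.csv into each
# summary directory): a finished configuration can be summarized with
#   mean_val_acc = summary_10fold_results(curr_dir)
# which averages the "val_accuracy" column of that file.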
|
[
"Utils.dirs.create_dirs",
"Utils.config.process_config",
"pandas.read_csv",
"tensorflow.compat.v1.set_random_seed",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.reset_default_graph",
"numpy.mean",
"data_loader.data_generator.DataGenerator",
"numpy.array",
"tensorflow.compat.v1.ConfigProto",
"models.invariant_basic.invariant_basic",
"trainers.trainer.Trainer"
] |
[((434, 547), 'Utils.config.process_config', 'process_config', (['"""/Users/jiahe/PycharmProjects/gnn multiple inputs/configs/parameter_search_config.json"""'], {}), "(\n '/Users/jiahe/PycharmProjects/gnn multiple inputs/configs/parameter_search_config.json'\n )\n", (448, 547), False, 'from Utils.config import process_config\n'), ((640, 661), 'tensorflow.compat.v1.set_random_seed', 'tf.set_random_seed', (['(1)'], {}), '(1)\n', (658, 661), True, 'import tensorflow.compat.v1 as tf\n'), ((765, 821), 'Utils.dirs.create_dirs', 'create_dirs', (['[config.summary_dir, config.checkpoint_dir]'], {}), '([config.summary_dir, config.checkpoint_dir])\n', (776, 821), False, 'from Utils.dirs import create_dirs\n'), ((829, 850), 'data_loader.data_generator.DataGenerator', 'DataGenerator', (['config'], {}), '(config)\n', (842, 850), False, 'from data_loader.data_generator import DataGenerator\n'), ((2389, 2438), 'pandas.read_csv', 'pd.read_csv', (["(summary_dir + '/per_epoch_stats.csv')"], {}), "(summary_dir + '/per_epoch_stats.csv')\n", (2400, 2438), True, 'import pandas as pd\n'), ((2447, 2475), 'numpy.array', 'np.array', (["df['val_accuracy']"], {}), "(df['val_accuracy'])\n", (2455, 2475), True, 'import numpy as np\n'), ((2611, 2623), 'numpy.mean', 'np.mean', (['acc'], {}), '(acc)\n', (2618, 2623), True, 'import numpy as np\n'), ((2536, 2548), 'numpy.mean', 'np.mean', (['acc'], {}), '(acc)\n', (2543, 2548), True, 'import numpy as np\n'), ((1438, 1461), 'Utils.dirs.create_dirs', 'create_dirs', (['[curr_dir]'], {}), '([curr_dir])\n', (1449, 1461), False, 'from Utils.dirs import create_dirs\n'), ((1693, 1762), 'tensorflow.compat.v1.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'log_device_placement': '(False)'}), '(allow_soft_placement=True, log_device_placement=False)\n', (1707, 1762), True, 'import tensorflow.compat.v1 as tf\n'), ((1909, 1937), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {'config': 'gpuconfig'}), '(config=gpuconfig)\n', (1919, 1937), True, 'import tensorflow.compat.v1 as tf\n'), ((2013, 2042), 'models.invariant_basic.invariant_basic', 'invariant_basic', (['config', 'data'], {}), '(config, data)\n', (2028, 2042), False, 'from models.invariant_basic import invariant_basic\n'), ((2137, 2171), 'trainers.trainer.Trainer', 'Trainer', (['sess', 'model', 'data', 'config'], {}), '(sess, model, data, config)\n', (2144, 2171), False, 'from trainers.trainer import Trainer\n'), ((2292, 2316), 'tensorflow.compat.v1.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (2314, 2316), True, 'import tensorflow.compat.v1 as tf\n')]
|
"""Methods used to build ROC."""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import roc_curve, auc
# seaborn settings
sns.set_style("white")
sns.set_context("paper")
color_palette = sns.color_palette("colorblind")
sns.set_palette(color_palette)
def _get_total_undirected_interactions(n):
return n * (n - 1) / 2
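# Worked example: n = 4 labelled entities give 4 * 3 / 2 = 6 possible undirected
# interactions; this total is what the score vectors are zero-padded to further
# down in _get_evaluation_on_given_labels.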
def _check_index(index, labels_set, interaction_symbol='<->'):
e1, e2 = index.split(interaction_symbol)
return (e1 in labels_set and e2 in labels_set)
def _filter_indices_with_labels(indexes, labels, interaction_symbol='<->'):
labels_set = set(labels)
filtering = pd.Series([
_check_index(index, labels_set, interaction_symbol)
for index in indexes
])
return indexes[filtering]
def _is_index_diagonal(index, interaction_indices='<->'):
a_node, another_node = index.split(interaction_indices)
return a_node == another_node
def _get_evaluation_on_given_labels(
labels, true_interactions, predicted_interactions, no_self_loops=True
):
total_interactions = _get_total_undirected_interactions(len(labels))
interaction_indices = list(
set(
_filter_indices_with_labels(predicted_interactions.index, labels) |
_filter_indices_with_labels(true_interactions.index, labels)
)
)
if no_self_loops:
interaction_indices = [
index
for index in interaction_indices
if not _is_index_diagonal(index)
]
predicted_interactions = predicted_interactions.reindex(
interaction_indices
).fillna(0.0)
true_interactions = true_interactions.reindex(
interaction_indices
).fillna(0.0)
zero_interactions = int(total_interactions) - len(interaction_indices)
y = np.append(true_interactions.values, np.zeros((zero_interactions)))
scores = np.append(
predicted_interactions.values, np.zeros((zero_interactions))
)
return y, scores
def get_roc_df(
pathway_name, method_name, true_interactions, predicted_interactions,
number_of_roc_points=100
):
"""Return dataframe that can be used to plot a ROC curve."""
labels = {
gene
for genes in [
true_interactions.e1, predicted_interactions.e1,
true_interactions.e2, predicted_interactions.e2
]
for gene in genes
}
y, scores = _get_evaluation_on_given_labels(
labels, true_interactions.intensity,
predicted_interactions.intensity
)
# print(method_name, y, scores)
reference_xx = np.linspace(0, 1, number_of_roc_points)
if sum(y) > 0:
xx, yy, threshold = roc_curve(y, scores)
print(method_name, y, scores, threshold, xx, yy)
area_under_curve = auc(xx, yy)
yy = np.interp(reference_xx, xx, yy)
else:
yy = reference_xx
area_under_curve = 0.5 # worst
roc_df = pd.DataFrame({
'pathway': number_of_roc_points * [pathway_name],
'method': (
number_of_roc_points * [method_name]
),
'YY': yy,
'XX': reference_xx.tolist()
})
return roc_df, area_under_curve
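# Hedged usage sketch (dataframe names are illustrative, not from this module):
# given per-method dataframes with columns e1, e2 and intensity, one ROC
# dataframe per (pathway, method) pair can be built and then plotted, e.g.
#
#   roc_df, auc_value = get_roc_df('some_pathway', 'INtERAcT',
#                                  true_df, predicted_df)
#   plot_roc_curve_from_df(roc_df, auc_dict_list={'INtERAcT': [auc_value]})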
def plot_roc_curve_from_df(
df, auc_dict_list=None, output_filepath=None, figsize=(6, 6)
):
"""From a df with multiple methods plot a roc curve using sns.tspot."""
xlabel = 'False Discovery Rate'
ylabel = 'True Positive Rate'
title = 'Receiver Operating Characteristic'
# rename method name to include AUC to show it in legend
if auc_dict_list:
for method in auc_dict_list.keys():
mean_auc = np.mean(auc_dict_list[method])
method_indices = df['method'] == method
df['mean_auc'] = mean_auc
df.loc[method_indices, 'method'] = (
'{} '.format(
method.capitalize()
if method != 'INtERAcT'
else method
) +
'AUC=%0.2f' % mean_auc
)
df = df.sort_values(by='method')
df.rename(columns={'method': ''}, inplace=True) # to avoid legend title
plt.figure(figsize=figsize)
sns.set_style("whitegrid", {'axes.grid': False})
sns.tsplot(
data=df, time='XX', value='YY',
condition='', unit='pathway', legend=True
)
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
if output_filepath:
plt.savefig(output_filepath, bbox_inches='tight')
|
[
"matplotlib.pyplot.title",
"seaborn.set_style",
"matplotlib.pyplot.xlim",
"sklearn.metrics.roc_curve",
"matplotlib.pyplot.ylim",
"seaborn.tsplot",
"numpy.zeros",
"sklearn.metrics.auc",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.linspace",
"seaborn.color_palette",
"numpy.interp",
"matplotlib.pyplot.ylabel",
"seaborn.set_palette",
"matplotlib.pyplot.xlabel",
"seaborn.set_context",
"matplotlib.pyplot.savefig"
] |
[((190, 212), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (203, 212), True, 'import seaborn as sns\n'), ((213, 237), 'seaborn.set_context', 'sns.set_context', (['"""paper"""'], {}), "('paper')\n", (228, 237), True, 'import seaborn as sns\n'), ((254, 285), 'seaborn.color_palette', 'sns.color_palette', (['"""colorblind"""'], {}), "('colorblind')\n", (271, 285), True, 'import seaborn as sns\n'), ((286, 316), 'seaborn.set_palette', 'sns.set_palette', (['color_palette'], {}), '(color_palette)\n', (301, 316), True, 'import seaborn as sns\n'), ((2613, 2652), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'number_of_roc_points'], {}), '(0, 1, number_of_roc_points)\n', (2624, 2652), True, 'import numpy as np\n'), ((4156, 4183), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (4166, 4183), True, 'import matplotlib.pyplot as plt\n'), ((4188, 4236), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""', "{'axes.grid': False}"], {}), "('whitegrid', {'axes.grid': False})\n", (4201, 4236), True, 'import seaborn as sns\n'), ((4241, 4330), 'seaborn.tsplot', 'sns.tsplot', ([], {'data': 'df', 'time': '"""XX"""', 'value': '"""YY"""', 'condition': '""""""', 'unit': '"""pathway"""', 'legend': '(True)'}), "(data=df, time='XX', value='YY', condition='', unit='pathway',\n legend=True)\n", (4251, 4330), True, 'import seaborn as sns\n'), ((4353, 4369), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 1]'], {}), '([0, 1])\n', (4361, 4369), True, 'import matplotlib.pyplot as plt\n'), ((4374, 4390), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (4382, 4390), True, 'import matplotlib.pyplot as plt\n'), ((4395, 4413), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (4405, 4413), True, 'import matplotlib.pyplot as plt\n'), ((4418, 4436), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (4428, 4436), True, 'import matplotlib.pyplot as plt\n'), ((4441, 4457), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (4450, 4457), True, 'import matplotlib.pyplot as plt\n'), ((1863, 1890), 'numpy.zeros', 'np.zeros', (['zero_interactions'], {}), '(zero_interactions)\n', (1871, 1890), True, 'import numpy as np\n'), ((1957, 1984), 'numpy.zeros', 'np.zeros', (['zero_interactions'], {}), '(zero_interactions)\n', (1965, 1984), True, 'import numpy as np\n'), ((2700, 2720), 'sklearn.metrics.roc_curve', 'roc_curve', (['y', 'scores'], {}), '(y, scores)\n', (2709, 2720), False, 'from sklearn.metrics import roc_curve, auc\n'), ((2805, 2816), 'sklearn.metrics.auc', 'auc', (['xx', 'yy'], {}), '(xx, yy)\n', (2808, 2816), False, 'from sklearn.metrics import roc_curve, auc\n'), ((2830, 2861), 'numpy.interp', 'np.interp', (['reference_xx', 'xx', 'yy'], {}), '(reference_xx, xx, yy)\n', (2839, 2861), True, 'import numpy as np\n'), ((4491, 4540), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_filepath'], {'bbox_inches': '"""tight"""'}), "(output_filepath, bbox_inches='tight')\n", (4502, 4540), True, 'import matplotlib.pyplot as plt\n'), ((3644, 3674), 'numpy.mean', 'np.mean', (['auc_dict_list[method]'], {}), '(auc_dict_list[method])\n', (3651, 3674), True, 'import numpy as np\n')]
|
import numpy as np
import scipy.sparse
import akg
from akg import tvm
from akg import topi
from tests.common.base import get_rtol_atol
from tests.common.gen_random import random_gaussian
from tests.common.tensorio import compare_tensor
from akg.utils import kernel_exec as utils
from akg.utils.result_analysis import target_profiling
from akg.utils.format_transform import to_tvm_nd_array, get_shape
from akg.utils.dsl_create import get_broadcast_shape
def csr_mul(dense, sparse_data, col_idx, row_idx, shape):
assert len(shape) == 2, "only supports 2-dim sparse tensor"
assert len(dense.shape) <= 2
assert dense.dtype == sparse_data.dtype, "data and weight must have the same dtype"
num_rows = row_idx.shape[0] - 1
dense_shape = get_shape(dense.shape)
sparse_shape = get_shape(shape)
broadcast_shape = get_broadcast_shape(dense_shape, sparse_shape)
need_expand = tvm.const(len(dense_shape) < len(broadcast_shape))
need_broadcast_first_dim = tvm.const(
len(dense_shape) == len(broadcast_shape) and dense_shape[0] < broadcast_shape[0])
need_broadcast_last_dim = tvm.const(
len(dense_shape) == len(broadcast_shape) and dense_shape[1] < broadcast_shape[1])
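    # The three broadcasting cases handled in gen_ir below:
    #   need_expand              - dense is 1-D and indexed by column only
    #   need_broadcast_first_dim - dense has a single row reused for every sparse row
    #   need_broadcast_last_dim  - dense has a single column reused for every column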
def gen_ir(dense, sparse_data, col_idx, row_idx, output):
ib = tvm.ir_builder.create()
with ib.for_range(0, num_rows, name='i') as i:
start = ib.load(row_idx, i)
end = ib.load(row_idx, i + 1)
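            # row_idx is the CSR indptr array: columns/values in [start, end) belong to row i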
with ib.for_range(0, end - start, name='j') as j:
pos = start + j
with ib.if_scope(pos < end):
val = ib.load(sparse_data, pos)
col = ib.load(col_idx, pos)
with ib.if_scope(need_expand):
ib.store(output, pos, val * ib.load(dense, [col]))
with ib.else_scope():
with ib.if_scope(need_broadcast_first_dim):
ib.store(output, pos, val * ib.load(dense, [0, col]))
with ib.else_scope():
with ib.if_scope(need_broadcast_last_dim):
ib.store(output, pos, val * ib.load(dense, [i, 0]))
with ib.else_scope():
ib.store(output, pos, val * ib.load(dense, [i, col]))
return ib.get()
output_name = "T_csr_mul_" + dense.op.name + "_" + sparse_data.op.name
out_buf = tvm.decl_buffer(sparse_data.shape, sparse_data.dtype, output_name)
return tvm.extern([shape],
[dense, sparse_data, col_idx, row_idx],
lambda ins, outs: gen_ir(ins[0], ins[1], ins[2], ins[3], outs[0]),
dtype=sparse_data.dtype, out_buffers=[out_buf], name=output_name)
def gen_data(shape1, shape2, dtype1, dtype2):
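    # build a random dense operand and a random CSR sparse operand, then compute the
    # expected elementwise product with SciPy as the reference result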
dense = random_gaussian(shape1).astype(dtype1)
sparse_data = scipy.sparse.rand(shape2[0], shape2[1], density=0.2, format='csr', dtype=dtype1)
expect = sparse_data.multiply(np.broadcast_to(dense, shape2))
return dense, sparse_data.data, sparse_data.indices.astype(dtype2), sparse_data.indptr.astype(dtype2), expect.data
def test_csr_mul(shape1, shape2, dtype1, dtype2, poly_sch=False, attrs=None):
if not attrs:
attrs = {"target": "cuda"}
# gen data
op_attrs = [shape2]
dense, sparse_data, col_idx, row_idx, expect = gen_data(shape1, shape2, dtype1, dtype2)
output_shape = expect.shape
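    # scheduling hint: rough average number of non-zeros per row
    # (sparse_data is the CSR values array here, so shape[0] is the nnz count)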
attrs["csr_avg_row"] = sparse_data.shape[0] // shape1[0]
mod = utils.op_build_test(csr_mul, [shape1, sparse_data.shape, col_idx.shape, row_idx.shape],
[dtype1, dtype1, dtype2, dtype2], op_attrs=op_attrs, polyhedral=poly_sch,
attrs=attrs, kernel_name="csr_mul")
if len(expect.shape) == 0:
output_shape = (1, )
output = np.zeros(output_shape, expect.dtype)
output = utils.mod_launch(mod, (dense, sparse_data, col_idx, row_idx, output), expect=expect)
atol, rtol = get_rtol_atol("csr_mul", dtype1)
res = compare_tensor(output, expect, rtol=rtol, atol=atol)
print("Test {}".format("Pass" if res else "Failed"))
target_name = attrs["target"].split()[0]
if not res:
mod_source = mod
if target_name != "llvm":
mod_source = mod.imported_modules[0]
print("Error {}:========================".format(target_name))
print(mod_source.get_source())
raise AssertionError("Test fail")
if attrs["profiling"]:
args_list = to_tvm_nd_array(
[dense, sparse_data, col_idx, row_idx, output, expect], akg.tvm.context(target_name, 0))
target_profiling(mod, *args_list, target=target_name, repeat_time=attrs["repeat_time"])
|
[
"tests.common.tensorio.compare_tensor",
"akg.tvm.context",
"tests.common.gen_random.random_gaussian",
"tests.common.base.get_rtol_atol",
"akg.tvm.ir_builder.create",
"akg.utils.dsl_create.get_broadcast_shape",
"numpy.zeros",
"akg.utils.format_transform.get_shape",
"akg.tvm.decl_buffer",
"akg.utils.kernel_exec.op_build_test",
"akg.utils.result_analysis.target_profiling",
"numpy.broadcast_to",
"akg.utils.kernel_exec.mod_launch"
] |
[((753, 775), 'akg.utils.format_transform.get_shape', 'get_shape', (['dense.shape'], {}), '(dense.shape)\n', (762, 775), False, 'from akg.utils.format_transform import to_tvm_nd_array, get_shape\n'), ((795, 811), 'akg.utils.format_transform.get_shape', 'get_shape', (['shape'], {}), '(shape)\n', (804, 811), False, 'from akg.utils.format_transform import to_tvm_nd_array, get_shape\n'), ((834, 880), 'akg.utils.dsl_create.get_broadcast_shape', 'get_broadcast_shape', (['dense_shape', 'sparse_shape'], {}), '(dense_shape, sparse_shape)\n', (853, 880), False, 'from akg.utils.dsl_create import get_broadcast_shape\n'), ((2458, 2524), 'akg.tvm.decl_buffer', 'tvm.decl_buffer', (['sparse_data.shape', 'sparse_data.dtype', 'output_name'], {}), '(sparse_data.shape, sparse_data.dtype, output_name)\n', (2473, 2524), False, 'from akg import tvm\n'), ((3548, 3753), 'akg.utils.kernel_exec.op_build_test', 'utils.op_build_test', (['csr_mul', '[shape1, sparse_data.shape, col_idx.shape, row_idx.shape]', '[dtype1, dtype1, dtype2, dtype2]'], {'op_attrs': 'op_attrs', 'polyhedral': 'poly_sch', 'attrs': 'attrs', 'kernel_name': '"""csr_mul"""'}), "(csr_mul, [shape1, sparse_data.shape, col_idx.shape,\n row_idx.shape], [dtype1, dtype1, dtype2, dtype2], op_attrs=op_attrs,\n polyhedral=poly_sch, attrs=attrs, kernel_name='csr_mul')\n", (3567, 3753), True, 'from akg.utils import kernel_exec as utils\n'), ((3881, 3917), 'numpy.zeros', 'np.zeros', (['output_shape', 'expect.dtype'], {}), '(output_shape, expect.dtype)\n', (3889, 3917), True, 'import numpy as np\n'), ((3931, 4019), 'akg.utils.kernel_exec.mod_launch', 'utils.mod_launch', (['mod', '(dense, sparse_data, col_idx, row_idx, output)'], {'expect': 'expect'}), '(mod, (dense, sparse_data, col_idx, row_idx, output),\n expect=expect)\n', (3947, 4019), True, 'from akg.utils import kernel_exec as utils\n'), ((4033, 4065), 'tests.common.base.get_rtol_atol', 'get_rtol_atol', (['"""csr_mul"""', 'dtype1'], {}), "('csr_mul', dtype1)\n", (4046, 4065), False, 'from tests.common.base import get_rtol_atol\n'), ((4076, 4128), 'tests.common.tensorio.compare_tensor', 'compare_tensor', (['output', 'expect'], {'rtol': 'rtol', 'atol': 'atol'}), '(output, expect, rtol=rtol, atol=atol)\n', (4090, 4128), False, 'from tests.common.tensorio import compare_tensor\n'), ((1289, 1312), 'akg.tvm.ir_builder.create', 'tvm.ir_builder.create', ([], {}), '()\n', (1310, 1312), False, 'from akg import tvm\n'), ((3030, 3060), 'numpy.broadcast_to', 'np.broadcast_to', (['dense', 'shape2'], {}), '(dense, shape2)\n', (3045, 3060), True, 'import numpy as np\n'), ((4680, 4772), 'akg.utils.result_analysis.target_profiling', 'target_profiling', (['mod', '*args_list'], {'target': 'target_name', 'repeat_time': "attrs['repeat_time']"}), "(mod, *args_list, target=target_name, repeat_time=attrs[\n 'repeat_time'])\n", (4696, 4772), False, 'from akg.utils.result_analysis import target_profiling\n'), ((2858, 2881), 'tests.common.gen_random.random_gaussian', 'random_gaussian', (['shape1'], {}), '(shape1)\n', (2873, 2881), False, 'from tests.common.gen_random import random_gaussian\n'), ((4639, 4670), 'akg.tvm.context', 'akg.tvm.context', (['target_name', '(0)'], {}), '(target_name, 0)\n', (4654, 4670), False, 'import akg\n')]
|
# encoding: utf-8
"""
Input/output package.
"""
from __future__ import absolute_import, division, print_function
import io as _io
import contextlib
import numpy as np
from .audio import load_audio_file
from .midi import load_midi, write_midi
from ..utils import suppress_warnings, string_types
ENCODING = 'utf8'
# dtype for numpy structured arrays that contain labelled segments
# 'label' needs to be castable to str
SEGMENT_DTYPE = [('start', float), ('end', float), ('label', object)]
# context manager that transparently accepts either a file name or an open file handle
@contextlib.contextmanager
def open_file(filename, mode='r'):
"""
Context manager which yields an open file or handle with the given mode
and closes it if needed afterwards.
Parameters
----------
filename : str or file handle
File (handle) to open.
mode: {'r', 'w'}
Specifies the mode in which the file is opened.
Yields
------
Open file (handle).
"""
# check if we need to open the file
if isinstance(filename, string_types):
f = fid = _io.open(filename, mode)
else:
f = filename
fid = None
# yield an open file handle
yield f
# close the file if needed
if fid:
fid.close()
@suppress_warnings
def load_events(filename):
"""
    Load events from a text file, one floating point number per line.
Parameters
----------
filename : str or file handle
File to load the events from.
Returns
-------
numpy array
Events.
Notes
-----
Comments (lines starting with '#') and additional columns are ignored,
i.e. only the first column is returned.
"""
# read in the events, one per line
events = np.loadtxt(filename, ndmin=2)
# 1st column is the event's time, the rest is ignored
return events[:, 0]
def write_events(events, filename, fmt='%.3f', delimiter='\t', header=None):
"""
Write the events to a file, one event per line.
Parameters
----------
events : numpy array
Events to be written to file.
filename : str or file handle
File to write the events to.
fmt : str or sequence of strs, optional
A single format (e.g. '%.3f'), a sequence of formats, or a multi-format
string (e.g. '%.3f %.3f'), in which case `delimiter` is ignored.
delimiter : str, optional
String or character separating columns.
header : str, optional
String that will be written at the beginning of the file as comment.
"""
events = np.array(events)
# reformat fmt to be a single string if needed
if isinstance(fmt, (list, tuple)):
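        # e.g. fmt=['%.3f', '%d'] joined with the default tab delimiter gives '%.3f\t%d'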
fmt = delimiter.join(fmt)
# write output
with open_file(filename, 'wb') as f:
# write header
if header is not None:
f.write(bytes(('# ' + header + '\n').encode(ENCODING)))
# write events
for e in events:
try:
string = fmt % tuple(e.tolist())
except AttributeError:
string = e
except TypeError:
string = fmt % e
f.write(bytes((string + '\n').encode(ENCODING)))
f.flush()
load_onsets = load_events
write_onsets = write_events
@suppress_warnings
def load_beats(filename, downbeats=False):
"""
Load the beats from the given file, one beat per line of format
'beat_time' ['beat_number'].
Parameters
----------
filename : str or file handle
File to load the beats from.
downbeats : bool, optional
Load only downbeats instead of beats.
Returns
-------
numpy array
Beats.
"""
values = np.loadtxt(filename, ndmin=1)
if values.ndim > 1:
if downbeats:
# rows with a "1" in the 2nd column are downbeats
return values[values[:, 1] == 1][:, 0]
else:
# 1st column is the beat time, the rest is ignored
return values[:, 0]
return values
def write_beats(beats, filename, fmt=None, delimiter='\t', header=None):
"""
Write the beats to a file.
Parameters
----------
beats : numpy array
Beats to be written to file.
filename : str or file handle
File to write the beats to.
fmt : str or sequence of strs, optional
A single format (e.g. '%.3f'), a sequence of formats (e.g.
['%.3f', '%d']), or a multi-format string (e.g. '%.3f %d'), in which
case `delimiter` is ignored.
delimiter : str, optional
String or character separating columns.
header : str, optional
String that will be written at the beginning of the file as comment.
"""
if fmt is None and beats.ndim == 2:
fmt = ['%.3f', '%d']
elif fmt is None:
fmt = '%.3f'
write_events(beats, filename, fmt, delimiter, header)
def load_downbeats(filename):
"""
Load the downbeats from the given file.
Parameters
----------
filename : str or file handle
File to load the downbeats from.
Returns
-------
numpy array
Downbeats.
"""
return load_beats(filename, downbeats=True)
def write_downbeats(beats, filename, fmt=None, delimiter='\t', header=None):
"""
Write the downbeats to a file.
Parameters
----------
beats : numpy array
Beats or downbeats to be written to file.
filename : str or file handle
File to write the beats to.
fmt : str or sequence of strs, optional
A single format (e.g. '%.3f'), a sequence of formats (e.g.
['%.3f', '%d']), or a multi-format string (e.g. '%.3f %d'), in which
case `delimiter` is ignored.
delimiter : str, optional
String or character separating columns.
header : str, optional
String that will be written at the beginning of the file as comment.
Notes
-----
If `beats` contains both time and number of the beats, they are filtered
to contain only the downbeats (i.e. only the times of those beats with a
beat number of 1).
"""
if beats.ndim == 2:
beats = beats[beats[:, 1] == 1][:, 0]
if fmt is None:
fmt = '%.3f'
write_events(beats, filename, fmt, delimiter, header)
@suppress_warnings
def load_notes(filename):
"""
Load the notes from the given file, one note per line of format
'onset_time' 'note_number' ['duration' ['velocity']].
Parameters
----------
filename: str or file handle
File to load the notes from.
Returns
-------
numpy array
Notes.
"""
return np.loadtxt(filename, ndmin=2)
def write_notes(notes, filename, fmt=None, delimiter='\t', header=None):
"""
Write the notes to a file.
Parameters
----------
notes : numpy array, shape (num_notes, 2)
Notes, row format 'onset_time' 'note_number' ['duration' ['velocity']].
filename : str or file handle
File to write the notes to.
fmt : str or sequence of strs, optional
A sequence of formats (e.g. ['%.3f', '%d', '%.3f', '%d']), or a
multi-format string, e.g. '%.3f %d %.3f %d', in which case `delimiter`
is ignored.
delimiter : str, optional
String or character separating columns.
header : str, optional
String that will be written at the beginning of the file as comment.
Returns
-------
numpy array
Notes.
"""
# set default format
if fmt is None:
fmt = ['%.3f', '%d', '%.3f', '%d']
if not notes.ndim == 2:
raise ValueError('unknown format for `notes`')
    # truncate format to the number of columns given
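    # e.g. notes with only onset and pitch columns (shape[1] == 2) end up with fmt '%.3f\t%d'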
fmt = delimiter.join(fmt[:notes.shape[1]])
# write the notes
write_events(notes, filename, fmt=fmt, delimiter=delimiter, header=header)
def load_segments(filename):
"""
Load labelled segments from file, one segment per line. Each segment is of
form <start> <end> <label>, where <start> and <end> are floating point
numbers, and <label> is a string.
Parameters
----------
filename : str or file handle
File to read the labelled segments from.
Returns
-------
segments : numpy structured array
Structured array with columns 'start', 'end', and 'label',
containing the beginning, end, and label of segments.
"""
start, end, label = [], [], []
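    # collect one (start, end, label) triple per line before building the structured array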
with open_file(filename) as f:
for line in f:
s, e, l = line.split()
start.append(float(s))
end.append(float(e))
label.append(l)
segments = np.zeros(len(start), dtype=SEGMENT_DTYPE)
segments['start'] = start
segments['end'] = end
segments['label'] = label
return segments
def write_segments(segments, filename, fmt=None, delimiter='\t', header=None):
"""
Write labelled segments to a file.
Parameters
----------
segments : numpy structured array
Labelled segments, one per row (column definition see SEGMENT_DTYPE).
filename : str or file handle
Output filename or handle.
fmt : str or sequence of strs, optional
A sequence of formats (e.g. ['%.3f', '%.3f', '%s']), or a multi-format
string (e.g. '%.3f %.3f %s'), in which case `delimiter` is ignored.
delimiter : str, optional
String or character separating columns.
header : str, optional
String that will be written at the beginning of the file as comment.
Returns
-------
numpy structured array
Labelled segments
Notes
-----
Labelled segments are represented as numpy structured array with three
named columns: 'start' contains the start position (e.g. seconds),
'end' the end position, and 'label' the segment label.
"""
if fmt is None:
fmt = ['%.3f', '%.3f', '%s']
write_events(segments, filename, fmt=fmt, delimiter=delimiter,
header=header)
load_chords = load_segments
write_chords = write_segments
def load_key(filename):
"""
Load the key from the given file.
Parameters
----------
filename : str or file handle
File to read key information from.
Returns
-------
str
Key.
"""
with open_file(filename) as f:
return f.read().strip()
def write_key(key, filename, header=None):
"""
Write key string to a file.
Parameters
----------
key : str
Key name.
filename : str or file handle
Output file.
header : str, optional
String that will be written at the beginning of the file as comment.
Returns
-------
key : str
Key name.
"""
write_events([key], filename, fmt='%s', header=header)
def load_tempo(filename, split_value=1., sort=None, norm_strengths=None,
max_len=None):
"""
Load tempo information from the given file.
Tempo information must have the following format:
'main tempo' ['secondary tempo' ['relative_strength']]
Parameters
----------
filename : str or file handle
File to load the tempo from.
split_value : float, optional
Value to distinguish between tempi and strengths.
`values` > `split_value` are interpreted as tempi [bpm],
`values` <= `split_value` are interpreted as strengths.
sort : bool, deprecated
Sort the tempi by their strength.
norm_strengths : bool, deprecated
Normalize the strengths to sum 1.
max_len : int, deprecated
Return at most `max_len` tempi.
Returns
-------
tempi : numpy array, shape (num_tempi[, 2])
Array with tempi. If no strength is parsed, a 1-dimensional array of
length 'num_tempi' is returned. If strengths are given, a 2D array
with tempi (first column) and their relative strengths (second column)
is returned.
"""
# try to load the data from file
values = np.loadtxt(filename, ndmin=1)
# split the filename according to their filename into tempi and strengths
# TODO: this is kind of hack-ish, find a better solution
tempi = values[values > split_value]
strengths = values[values <= split_value]
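    # e.g. values [120., 60., 0.7] with the default split_value=1. yield
    # tempi [120., 60.] and strengths [0.7]; the missing strength is filled in below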
# make the strengths behave properly
strength_sum = np.sum(strengths)
# relative strengths are given (one less than tempi)
if len(tempi) - len(strengths) == 1:
strengths = np.append(strengths, 1. - strength_sum)
if np.any(strengths < 0):
raise AssertionError('strengths must be positive')
# no strength is given, assume an evenly distributed one
if strength_sum == 0:
strengths = np.ones_like(tempi) / float(len(tempi))
# normalize the strengths
if norm_strengths is not None:
import warnings
warnings.warn('`norm_strengths` is deprecated as of version 0.16 and '
'will be removed in 0.18. Please normalize strengths '
'separately.')
strengths /= float(strength_sum)
# tempi and strengths must have same length
if len(tempi) != len(strengths):
raise AssertionError('tempi and strengths must have same length')
# order the tempi according to their strengths
if sort:
import warnings
warnings.warn('`sort` is deprecated as of version 0.16 and will be '
'removed in 0.18. Please sort the returned array '
'separately.')
# Note: use 'mergesort', because we want a stable sorting algorithm
# which keeps the order of the keys in case of duplicate keys
# but we need to apply this '(-strengths)' trick because we want
# tempi with uniformly distributed strengths to keep their order
sort_idx = (-strengths).argsort(kind='mergesort')
tempi = tempi[sort_idx]
strengths = strengths[sort_idx]
# return at most 'max_len' tempi and their relative strength
if max_len is not None:
import warnings
warnings.warn('`max_len` is deprecated as of version 0.16 and will be '
'removed in 0.18. Please truncate the returned array '
'separately.')
return np.vstack((tempi[:max_len], strengths[:max_len])).T
def write_tempo(tempi, filename, delimiter='\t', header=None, mirex=None):
"""
Write the most dominant tempi and the relative strength to a file.
Parameters
----------
tempi : numpy array
Array with the detected tempi (first column) and their strengths
(second column).
filename : str or file handle
Output file.
delimiter : str, optional
String or character separating columns.
header : str, optional
String that will be written at the beginning of the file as comment.
mirex : bool, deprecated
Report the lower tempo first (as required by MIREX).
Returns
-------
tempo_1 : float
The most dominant tempo.
tempo_2 : float
The second most dominant tempo.
strength : float
Their relative strength.
"""
# make the given tempi a 2d array
tempi = np.array(tempi, ndmin=2)
# default values
t1 = t2 = strength = np.nan
# only one tempo was detected
if len(tempi) == 1:
t1 = tempi[0][0]
strength = 1.
# consider only the two strongest tempi and strengths
elif len(tempi) > 1:
t1, t2 = tempi[:2, 0]
strength = tempi[0, 1] / sum(tempi[:2, 1])
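    # e.g. tempi [[120., 0.7], [60., 0.3]] yield t1=120, t2=60 and strength 0.7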
# for MIREX, the lower tempo must be given first
if mirex is not None:
import warnings
warnings.warn('`mirex` argument is deprecated as of version 0.16 '
'and will be removed in version 0.17. Please sort the '
'tempi manually')
if t1 > t2:
t1, t2, strength = t2, t1, 1. - strength
# format as a numpy array and write to output
out = np.array([t1, t2, strength], ndmin=2)
write_events(out, filename, fmt=['%.2f', '%.2f', '%.2f'],
delimiter=delimiter, header=header)
|
[
"numpy.sum",
"numpy.ones_like",
"numpy.any",
"numpy.append",
"numpy.array",
"numpy.loadtxt",
"io.open",
"warnings.warn",
"numpy.vstack"
] |
[((1773, 1802), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'ndmin': '(2)'}), '(filename, ndmin=2)\n', (1783, 1802), True, 'import numpy as np\n'), ((2590, 2606), 'numpy.array', 'np.array', (['events'], {}), '(events)\n', (2598, 2606), True, 'import numpy as np\n'), ((3721, 3750), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'ndmin': '(1)'}), '(filename, ndmin=1)\n', (3731, 3750), True, 'import numpy as np\n'), ((6644, 6673), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'ndmin': '(2)'}), '(filename, ndmin=2)\n', (6654, 6673), True, 'import numpy as np\n'), ((11974, 12003), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'ndmin': '(1)'}), '(filename, ndmin=1)\n', (11984, 12003), True, 'import numpy as np\n'), ((12290, 12307), 'numpy.sum', 'np.sum', (['strengths'], {}), '(strengths)\n', (12296, 12307), True, 'import numpy as np\n'), ((15168, 15192), 'numpy.array', 'np.array', (['tempi'], {'ndmin': '(2)'}), '(tempi, ndmin=2)\n', (15176, 15192), True, 'import numpy as np\n'), ((15944, 15981), 'numpy.array', 'np.array', (['[t1, t2, strength]'], {'ndmin': '(2)'}), '([t1, t2, strength], ndmin=2)\n', (15952, 15981), True, 'import numpy as np\n'), ((1102, 1126), 'io.open', '_io.open', (['filename', 'mode'], {}), '(filename, mode)\n', (1110, 1126), True, 'import io as _io\n'), ((12426, 12466), 'numpy.append', 'np.append', (['strengths', '(1.0 - strength_sum)'], {}), '(strengths, 1.0 - strength_sum)\n', (12435, 12466), True, 'import numpy as np\n'), ((12477, 12498), 'numpy.any', 'np.any', (['(strengths < 0)'], {}), '(strengths < 0)\n', (12483, 12498), True, 'import numpy as np\n'), ((12807, 12951), 'warnings.warn', 'warnings.warn', (['"""`norm_strengths` is deprecated as of version 0.16 and will be removed in 0.18. Please normalize strengths separately."""'], {}), "(\n '`norm_strengths` is deprecated as of version 0.16 and will be removed in 0.18. Please normalize strengths separately.'\n )\n", (12820, 12951), False, 'import warnings\n'), ((13288, 13426), 'warnings.warn', 'warnings.warn', (['"""`sort` is deprecated as of version 0.16 and will be removed in 0.18. Please sort the returned array separately."""'], {}), "(\n '`sort` is deprecated as of version 0.16 and will be removed in 0.18. Please sort the returned array separately.'\n )\n", (13301, 13426), False, 'import warnings\n'), ((14032, 14177), 'warnings.warn', 'warnings.warn', (['"""`max_len` is deprecated as of version 0.16 and will be removed in 0.18. Please truncate the returned array separately."""'], {}), "(\n '`max_len` is deprecated as of version 0.16 and will be removed in 0.18. Please truncate the returned array separately.'\n )\n", (14045, 14177), False, 'import warnings\n'), ((14229, 14278), 'numpy.vstack', 'np.vstack', (['(tempi[:max_len], strengths[:max_len])'], {}), '((tempi[:max_len], strengths[:max_len]))\n', (14238, 14278), True, 'import numpy as np\n'), ((15626, 15770), 'warnings.warn', 'warnings.warn', (['"""`mirex` argument is deprecated as of version 0.16 and will be removed in version 0.17. Please sort the tempi manually"""'], {}), "(\n '`mirex` argument is deprecated as of version 0.16 and will be removed in version 0.17. Please sort the tempi manually'\n )\n", (15639, 15770), False, 'import warnings\n'), ((12670, 12689), 'numpy.ones_like', 'np.ones_like', (['tempi'], {}), '(tempi)\n', (12682, 12689), True, 'import numpy as np\n')]
|
import os
from vibration_compensation import read_gcode, Data
import pytest
from numpy.testing import *
import numpy as np
import scipy as sp
import vibration_compensation.bokeh_imports as plt
@pytest.fixture(scope="module")
def figures():
path, filename = os.path.split(os.path.realpath(__file__))
path = os.path.join(path, "output")
os.makedirs(path, exist_ok=True)
plt.output_file(os.path.join(path, os.path.splitext(filename)[0] + ".html"))
ret = []
yield ret
plt.save(ret)
def generate_curves(gcode, maximum_error):
data = read_gcode(gcode, maximum_error)
return data
@pytest.fixture(scope="function")
def plotter(figures, request):
def plot(data: Data):
p = plt.Figure(
plot_width=1000,
plot_height=1000,
x_range=(-250, 250),
y_range=(-250, 250),
match_aspect=True,
lod_threshold=None,
title=request.node.name
)
p.segment(
x0=data.start_xy[:, 0],
x1=data.end_xy[:, 0],
y0=data.start_xy[:, 1],
y1=data.end_xy[:, 1],
line_width=1,
line_color="red",
line_dash="dotted"
)
ts = data.smoothed_toolpath.fixed_curvature_speeds(0, data.smoothed_toolpath.start_xy.shape[0], 0.1)
points = data.smoothed_toolpath(ts)
p.line(
points[:,0],
points[:,1],
line_width=2,
line_color="blue",
line_dash="solid"
)
p.circle(
points[:,0],
points[:,1],
size=4,
fill_color="white"
)
figures.append(p)
return plot
def point_on_line(linea, lineb, point):
return np.linalg.norm(linea - point) + np.linalg.norm(lineb - point)\
- np.linalg.norm(linea - lineb)
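# The sum of distances to the endpoints equals the segment length exactly when the
# point lies on the segment, so this returns (approximately) zero for on-line points.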
def point_on_middle_of_line(linea, lineb, point):
mid = (lineb - linea) * 0.5 + linea
return np.linalg.norm(point - mid)
class SegmentChecker(object):
def __init__(self,data, l, s, start, end, corner):
self.data = data
self.s = s
self.start = start
self.end = end
self.start_point = data.start_xy[l]
self.end_point = data.end_xy[l]
if l != data.start_xy.shape[0] - 1:
self.next_start_point = data.start_xy[l+1]
self.next_end_point = data.end_xy[l+1]
self.spline = data.smoothed_toolpath
if corner:
self.spline_start = data.smoothed_toolpath.segment_start[s]
self.spline_mid = l + 1.0
self.spline_end = data.smoothed_toolpath.segment_end[s]
else:
self.spline_start = data.smoothed_toolpath.segment_start[s]
self.spline_end = data.smoothed_toolpath.segment_end[s]
self.spline_mid = (self.spline_start + self.spline_end) / 2.0
xy_lengths = np.linalg.norm(data.end_xy - data.start_xy, axis=1)
self.start_line_dist = np.sum(xy_lengths[:l])
self.line_length = xy_lengths[l]
if l < data.start_xy.shape[0] - 1:
self.start_next_line_dist = self.start_line_dist + self.line_length
self.next_line_length = xy_lengths[l+1]
def check_distance(self, spline, line):
msg = "The spline start distance does not match"
if line <= 1.0:
line_dist = self.start_line_dist + self.line_length * line
else:
line_dist = self.start_next_line_dist + self.next_line_length * (line-1.0)
assert self.spline.distance(spline) <= line_dist and \
self.spline.distance(spline) == pytest.approx(line_dist, abs=0.1), \
msg
def check_start_point_start(self):
msg = "The start point of the spline segment does not match the line start point"
assert_array_almost_equal(self.spline(self.spline_start), self.start_point,
err_msg=msg)
self.check_distance(self.spline_start, 0)
def check_start_point_on(self):
msg = "The start point of the spline segment is not on the line"
assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_start)) == \
pytest.approx(0, abs=1e-12), msg
def check_line_start_point_middle(self):
msg = "The start point of the spline segment is not on the middle of the line"
assert point_on_middle_of_line(self.start_point, self.end_point,
self.spline(self.spline_start)) == pytest.approx(0, abs=1e-3), msg
self.check_distance(self.spline_start, 0.5)
def check_line_start_point_end(self):
msg = "The start point of the spline segment is not on the end of the line"
assert_array_almost_equal(self.spline(self.spline_start), self.end_point, err_msg=msg)
self.check_distance(self.spline_start, 1.0)
def check_point_on_middle_of_line(self):
msg = "The middle point of the spline segment is not on the middle of the line"
assert point_on_middle_of_line(self.start_point, self.end_point,
self.spline(self.spline_mid)) == pytest.approx(0, abs=1e-12), msg
self.check_distance(self.spline_mid, 0.5)
def check_point_on_line(self):
msg = "The middle point of the spline segment is not on the line"
assert point_on_line(self.start_point, self.end_point,
self.spline(self.spline_mid)) == pytest.approx(0, abs=1e-12), msg
def check_end_point_end(self):
msg = "The end point of the spline segment does not match the line end point"
        assert_array_almost_equal(self.spline(self.spline_end), self.end_point, err_msg=msg)
self.check_distance(self.spline_end, 1.0)
end_error_segment = "The end point of the spline segment is not on the line"
def check_end_point_on(self):
assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_end)) == \
pytest.approx(0, abs=1e-12), SegmentChecker.end_error_segment
def check_corner_end_point_on(self):
assert point_on_line(self.next_start_point, self.next_end_point,
self.spline(self.spline_end)) == pytest.approx(0, abs=1e-12),\
SegmentChecker.end_error_segment
end_error_segment_middle = "The end point of the spline segment is not on the middle of the line"
def check_end_point_middle(self):
assert point_on_middle_of_line(self.start_point, self.end_point,
self.spline(self.spline_end)) == pytest.approx(0, abs=1e-3),\
SegmentChecker.end_error_segment_middle
self.check_distance(self.spline_end, 0.5)
def check_corner_end_point_middle(self):
assert point_on_middle_of_line(self.next_start_point, self.next_end_point,
self.spline(self.spline_end)) == pytest.approx(0, abs=1e-3),\
SegmentChecker.end_error_segment_middle
self.check_distance(self.spline_end, 1.5)
def check_continuity(self):
msg = "There's a discontinuity at the end of the spline segment"
if self.s > 0:
prev_end = self.data.smoothed_toolpath.segment_end[self.s-1]
assert prev_end == self.spline_start, \
"The previous segment does not end where the current one starts"
assert_array_almost_equal(self.spline(self.spline_start-1e-12), self.spline(self.spline_start),
err_msg=msg)
assert self.spline.distance(self.spline_start-1e-12) <=\
self.spline.distance(self.spline_start) and \
self.spline.distance(self.spline_start-1e-12) == \
pytest.approx(self.spline.distance(self.spline_start), abs=0.001), \
"The previous segment end distance and the current segment start do not match up"
def check_corner_spline_order(self):
assert self.spline_end > self.spline_mid, \
"The endpoint of the corner spline is before the line segment end"
corner_error = "The closest point of the corner is not close enough"
def check_corner_middle_normal(self):
assert np.linalg.norm(self.end_point - self.spline(self.spline_mid)) <= 0.01,\
SegmentChecker.corner_error
self.check_distance(self.spline_mid, 1.0)
def check_corner_middle_short(self):
assert np.linalg.norm(self.end_point - self.spline(self.spline_mid)) ==\
pytest.approx(0.01, abs=1e-12), \
SegmentChecker.corner_error
self.check_distance(self.spline_mid, 1.0)
def straight_segment(data, l, s, start, end):
checker = SegmentChecker(data, l, s, start, end, False)
if start == "start":
checker.check_start_point_start()
elif start == "on":
checker.check_start_point_on()
elif start == "middle":
checker.check_line_start_point_middle()
elif start == "end":
checker.check_line_start_point_end()
else:
assert False, "Invalid start type"
if start == "start" and end == "end":
checker.check_point_on_middle_of_line()
else:
checker.check_point_on_line()
if end == "end":
checker.check_end_point_end()
elif end == "on":
checker.check_end_point_on()
elif end == "middle":
checker.check_end_point_middle()
else:
assert False, "Invalid end type"
checker.check_continuity()
def corner_segment(data, l, s, start, end):
checker = SegmentChecker(data, l, s, start, end, True)
checker.check_corner_spline_order()
if start == "on":
checker.check_start_point_on()
elif start == "middle":
checker.check_line_start_point_middle()
else:
assert False, "Invalid start type"
if start == "middle" or end == "middle":
checker.check_corner_middle_normal()
else:
checker.check_corner_middle_short()
if end == "on":
checker.check_corner_end_point_on()
elif end == "middle":
checker.check_corner_end_point_middle()
else:
assert False, "Invalid end type"
checker.check_continuity()
def check_distances(data):
t = data.smoothed_toolpath.fixed_distances(0, data.smoothed_toolpath.total_distance(), 10)
assert_array_almost_equal(data.smoothed_toolpath.distance(t),
np.linspace(0, data.smoothed_toolpath.total_distance(), 10))
def test_straight_line(plotter):
data = generate_curves([
"G1 X100 Y200"
], maximum_error=0.01)
assert data.smoothed_toolpath.segment_start.shape[0] == 1
straight_segment(data, l=0, s=0, start="start", end="end")
assert np.sum(data.smoothed_toolpath.segment_lengths) ==\
pytest.approx(np.linalg.norm([100, 200]))
check_distances(data)
plotter(data)
def test_two_straight_lines(plotter):
data = generate_curves([
"G1 X50 Y50",
"G1 X100 Y100"
], maximum_error=0.01)
assert data.smoothed_toolpath.segment_start.shape[0] == 2
straight_segment(data, l=0, s=0, start="start", end="end")
straight_segment(data, l=1, s=1, start="start", end="end")
assert np.sum(data.smoothed_toolpath.segment_lengths) == \
pytest.approx(
np.linalg.norm([50, 50]) + np.linalg.norm([50, 50])
)
check_distances(data)
plotter(data)
def test_90_corner(plotter):
data = generate_curves([
"G1 X100 Y0",
"G1 X100 Y100"
], maximum_error=0.01)
assert data.smoothed_toolpath.segment_start.shape[0] == 3
straight_segment(data, l=0, s=0, start="start", end="on")
corner_segment(data, l=0, s=1, start="on", end="on")
straight_segment(data, l=1, s=2, start="on", end="end")
assert np.sum(data.smoothed_toolpath.segment_lengths) < 200.0
assert np.sum(data.smoothed_toolpath.segment_lengths) == pytest.approx(200, abs=0.1)
check_distances(data)
plotter(data)
def test_45_corner(plotter):
data = generate_curves([
"G1 X100 Y0",
"G1 X0 Y100"
], maximum_error=0.01)
assert data.smoothed_toolpath.segment_start.shape[0] == 3
straight_segment(data, l=0, s=0, start="start", end="on")
corner_segment(data, l=0, s=1, start="on", end="on")
straight_segment(data, l=1, s=2, start="on", end="end")
assert np.sum(data.smoothed_toolpath.segment_lengths) < 100 + np.linalg.norm([100, 100])
assert np.sum(data.smoothed_toolpath.segment_lengths) == \
pytest.approx(100 + np.linalg.norm([100, 100]), abs=0.1)
check_distances(data)
plotter(data)
def test_very_acute_corner(plotter):
data = generate_curves([
"G1 X100 Y0",
"G1 X0 Y1"
], maximum_error=0.01)
assert data.smoothed_toolpath.segment_start.shape[0] == 3
straight_segment(data, l=0, s=0, start="start", end="on")
corner_segment(data, l=0, s=1, start="on", end="on")
straight_segment(data, l=1, s=2, start="on", end="end")
assert np.sum(data.smoothed_toolpath.segment_lengths) < 100 + np.linalg.norm([100, 1])
assert np.sum(data.smoothed_toolpath.segment_lengths) == \
pytest.approx(100 + np.linalg.norm([100, 1]), abs=0.1)
check_distances(data)
plotter(data)
def test_135_corner(plotter):
data = generate_curves([
"G1 X100 Y0",
"G1 X200 Y100"
], maximum_error=0.01)
assert data.smoothed_toolpath.segment_start.shape[0] == 3
straight_segment(data, l=0, s=0, start="start", end="on")
straight_segment(data, l=0, s=0, start="start", end="on")
corner_segment(data, l=0, s=1, start="on", end="on")
straight_segment(data, l=1, s=2, start="on", end="end")
assert np.sum(data.smoothed_toolpath.segment_lengths) < 100 + np.linalg.norm([100, 100])
assert np.sum(data.smoothed_toolpath.segment_lengths) == \
pytest.approx(100 + np.linalg.norm([100, 100]), abs=0.1)
check_distances(data)
plotter(data)
def test_135_corner_counter_clockwise(plotter):
data = generate_curves([
"G1 X-100 Y-100",
"G1 X-200 Y-100"
], maximum_error=0.01)
assert data.smoothed_toolpath.segment_start.shape[0] == 3
straight_segment(data, l=0, s=0, start="start", end="on")
straight_segment(data, l=0, s=0, start="start", end="on")
corner_segment(data, l=0, s=1, start="on", end="on")
straight_segment(data, l=1, s=2, start="on", end="end")
assert np.sum(data.smoothed_toolpath.segment_lengths) < 100 + np.linalg.norm([100, 100])
assert np.sum(data.smoothed_toolpath.segment_lengths) == \
pytest.approx(100 + np.linalg.norm([100, 100]), abs=0.1)
check_distances(data)
plotter(data)
def test_very_obtuse_corner(plotter):
data = generate_curves([
"G1 X100 Y0",
"G1 X200 Y1"
], maximum_error=0.01)
assert data.smoothed_toolpath.segment_start.shape[0] == 3
straight_segment(data, l=0, s=0, start="start", end="on")
corner_segment(data, l=0, s=1, start="on", end="on")
straight_segment(data, l=1, s=2, start="on", end="end")
assert np.sum(data.smoothed_toolpath.segment_lengths) < 100 + np.linalg.norm([100, 1])
assert np.sum(data.smoothed_toolpath.segment_lengths) == \
pytest.approx(100 + np.linalg.norm([100, 1]), abs=0.1)
check_distances(data)
plotter(data)
def test_obtuse_corner_with_short_lines(plotter):
data = generate_curves([
"G1 X10 Y0",
"G1 X20 Y0.1"
], maximum_error=0.01)
assert data.smoothed_toolpath.segment_start.shape[0] == 3
straight_segment(data, l=0, s=0, start="start", end="middle")
corner_segment(data, l=0, s=1, start="middle", end="middle")
straight_segment(data, l=1, s=2, start="middle", end="end")
assert np.sum(data.smoothed_toolpath.segment_lengths) < 10 + np.linalg.norm([10, 0.1])
assert np.sum(data.smoothed_toolpath.segment_lengths) == \
pytest.approx(10 + np.linalg.norm([10, 0.1]), abs=0.1)
check_distances(data)
plotter(data)
def test_obtuse_corner_with_shorter_and_longer_line(plotter):
data = generate_curves([
"G1 X10 Y0",
"G1 X30 Y0.1"
], maximum_error=0.01)
assert data.smoothed_toolpath.segment_start.shape[0] == 3
straight_segment(data, l=0, s=0, start="start", end="middle")
corner_segment(data, l=0, s=1, start="middle", end="on")
straight_segment(data, l=1, s=2, start="on", end="end")
assert np.sum(data.smoothed_toolpath.segment_lengths) < 10 + np.linalg.norm([20, 0.1])
assert np.sum(data.smoothed_toolpath.segment_lengths) == \
pytest.approx(10 + np.linalg.norm([20, 0.1]), abs=0.1)
check_distances(data)
plotter(data)
def test_obtuse_corner_with_longer_and_shorter_line(plotter):
data = generate_curves([
"G1 X20 Y0",
"G1 X30 Y-0.1"
], maximum_error=0.01)
assert data.smoothed_toolpath.segment_start.shape[0] == 3
straight_segment(data, l=0, s=0, start="start", end="on")
corner_segment(data, l=0, s=1, start="on", end="middle")
straight_segment(data, l=1, s=2, start="middle", end="end")
assert np.sum(data.smoothed_toolpath.segment_lengths) < 20 + np.linalg.norm([10, 0.1])
assert np.sum(data.smoothed_toolpath.segment_lengths) == \
pytest.approx(20 + np.linalg.norm([10, 0.1]), abs=0.1)
check_distances(data)
plotter(data)
def test_three_long_lines(plotter):
data = generate_curves([
"G1 X100 Y0",
"G1 X100 Y100",
"G1 X0 Y100"
], maximum_error=0.01)
assert data.smoothed_toolpath.segment_start.shape[0] == 5
straight_segment(data, l=0, s=0, start="start", end="on")
corner_segment(data, l=0, s=1, start="on", end="on")
straight_segment(data, l=1, s=2, start="on", end="on")
corner_segment(data, l=1, s=3, start="on", end="on")
straight_segment(data, l=2, s=4, start="on", end="end")
assert np.sum(data.smoothed_toolpath.segment_lengths) < 300
assert np.sum(data.smoothed_toolpath.segment_lengths) == \
pytest.approx(300, abs=0.1)
check_distances(data)
plotter(data)
def test_three_short_lines(plotter):
data = generate_curves([
"G1 X10 Y0",
"G1 X20 Y0.1",
"G1 X30 Y0.3"
], maximum_error=0.01)
assert data.smoothed_toolpath.segment_start.shape[0] == 5
straight_segment(data, l=0, s=0, start="start", end="middle")
corner_segment(data, l=0, s=1, start="middle", end="middle")
# Note that this line is very short
straight_segment(data, l=1, s=2, start="middle", end="middle")
corner_segment(data, l=1, s=3, start="middle", end="middle")
straight_segment(data, l=2, s=4, start="middle", end="end")
assert np.sum(data.smoothed_toolpath.segment_lengths) <\
10 + np.linalg.norm([10, 0.1]) + np.linalg.norm([10, 0.2])
assert np.sum(data.smoothed_toolpath.segment_lengths) == \
pytest.approx(10 + np.linalg.norm([10, 0.1]) + np.linalg.norm([10, 0.2]), abs=0.1)
check_distances(data)
plotter(data)
def test_three_long_lines_with_z_move(plotter):
data = generate_curves([
"G1 X100 Y0",
"G1 X100 Y100",
"G1 Z10",
"G1 X0 Y100"
], maximum_error=0.01)
assert data.smoothed_toolpath.segment_start.shape[0] == 5
straight_segment(data, l=0, s=0, start="start", end="on")
corner_segment(data, l=0, s=1, start="on", end="on")
straight_segment(data, l=1, s=2, start="on", end="end")
straight_segment(data, l=1, s=3, start="end", end="end")
straight_segment(data, l=3, s=4, start="start", end="end")
assert np.sum(data.smoothed_toolpath.segment_lengths) < 300
assert np.sum(data.smoothed_toolpath.segment_lengths) == \
pytest.approx(300, abs=0.1)
check_distances(data)
plotter(data)
|
[
"numpy.sum",
"os.makedirs",
"vibration_compensation.read_gcode",
"os.path.realpath",
"pytest.fixture",
"vibration_compensation.bokeh_imports.Figure",
"numpy.linalg.norm",
"os.path.splitext",
"vibration_compensation.bokeh_imports.save",
"pytest.approx",
"os.path.join"
] |
[((205, 235), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (219, 235), False, 'import pytest\n'), ((641, 673), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (655, 673), False, 'import pytest\n'), ((328, 356), 'os.path.join', 'os.path.join', (['path', '"""output"""'], {}), "(path, 'output')\n", (340, 356), False, 'import os\n'), ((362, 394), 'os.makedirs', 'os.makedirs', (['path'], {'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (373, 394), False, 'import os\n'), ((511, 524), 'vibration_compensation.bokeh_imports.save', 'plt.save', (['ret'], {}), '(ret)\n', (519, 524), True, 'import vibration_compensation.bokeh_imports as plt\n'), ((585, 617), 'vibration_compensation.read_gcode', 'read_gcode', (['gcode', 'maximum_error'], {}), '(gcode, maximum_error)\n', (595, 617), False, 'from vibration_compensation import read_gcode, Data\n'), ((2045, 2072), 'numpy.linalg.norm', 'np.linalg.norm', (['(point - mid)'], {}), '(point - mid)\n', (2059, 2072), True, 'import numpy as np\n'), ((288, 314), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (304, 314), False, 'import os\n'), ((746, 907), 'vibration_compensation.bokeh_imports.Figure', 'plt.Figure', ([], {'plot_width': '(1000)', 'plot_height': '(1000)', 'x_range': '(-250, 250)', 'y_range': '(-250, 250)', 'match_aspect': '(True)', 'lod_threshold': 'None', 'title': 'request.node.name'}), '(plot_width=1000, plot_height=1000, x_range=(-250, 250), y_range=\n (-250, 250), match_aspect=True, lod_threshold=None, title=request.node.name\n )\n', (756, 907), True, 'import vibration_compensation.bokeh_imports as plt\n'), ((1907, 1936), 'numpy.linalg.norm', 'np.linalg.norm', (['(linea - lineb)'], {}), '(linea - lineb)\n', (1921, 1936), True, 'import numpy as np\n'), ((3002, 3053), 'numpy.linalg.norm', 'np.linalg.norm', (['(data.end_xy - data.start_xy)'], {'axis': '(1)'}), '(data.end_xy - data.start_xy, axis=1)\n', (3016, 3053), True, 'import numpy as np\n'), ((3086, 3108), 'numpy.sum', 'np.sum', (['xy_lengths[:l]'], {}), '(xy_lengths[:l])\n', (3092, 3108), True, 'import numpy as np\n'), ((10870, 10916), 'numpy.sum', 'np.sum', (['data.smoothed_toolpath.segment_lengths'], {}), '(data.smoothed_toolpath.segment_lengths)\n', (10876, 10916), True, 'import numpy as np\n'), ((11372, 11418), 'numpy.sum', 'np.sum', (['data.smoothed_toolpath.segment_lengths'], {}), '(data.smoothed_toolpath.segment_lengths)\n', (11378, 11418), True, 'import numpy as np\n'), ((11975, 12021), 'numpy.sum', 'np.sum', (['data.smoothed_toolpath.segment_lengths'], {}), '(data.smoothed_toolpath.segment_lengths)\n', (11981, 12021), True, 'import numpy as np\n'), ((12042, 12088), 'numpy.sum', 'np.sum', (['data.smoothed_toolpath.segment_lengths'], {}), '(data.smoothed_toolpath.segment_lengths)\n', (12048, 12088), True, 'import numpy as np\n'), ((12092, 12119), 'pytest.approx', 'pytest.approx', (['(200)'], {'abs': '(0.1)'}), '(200, abs=0.1)\n', (12105, 12119), False, 'import pytest\n'), ((12560, 12606), 'numpy.sum', 'np.sum', (['data.smoothed_toolpath.segment_lengths'], {}), '(data.smoothed_toolpath.segment_lengths)\n', (12566, 12606), True, 'import numpy as np\n'), ((12654, 12700), 'numpy.sum', 'np.sum', (['data.smoothed_toolpath.segment_lengths'], {}), '(data.smoothed_toolpath.segment_lengths)\n', (12660, 12700), True, 'import numpy as np\n'), ((13219, 13265), 'numpy.sum', 'np.sum', (['data.smoothed_toolpath.segment_lengths'], {}), '(data.smoothed_toolpath.segment_lengths)\n', 
(13225, 13265), True, 'import numpy as np\n'), ((13311, 13357), 'numpy.sum', 'np.sum', (['data.smoothed_toolpath.segment_lengths'], {}), '(data.smoothed_toolpath.segment_lengths)\n', (13317, 13357), True, 'import numpy as np\n'), ((13936, 13982), 'numpy.sum', 'np.sum', (['data.smoothed_toolpath.segment_lengths'], {}), '(data.smoothed_toolpath.segment_lengths)\n', (13942, 13982), True, 'import numpy as np\n'), ((14030, 14076), 'numpy.sum', 'np.sum', (['data.smoothed_toolpath.segment_lengths'], {}), '(data.smoothed_toolpath.segment_lengths)\n', (14036, 14076), True, 'import numpy as np\n'), ((14679, 14725), 'numpy.sum', 'np.sum', (['data.smoothed_toolpath.segment_lengths'], {}), '(data.smoothed_toolpath.segment_lengths)\n', (14685, 14725), True, 'import numpy as np\n'), ((14773, 14819), 'numpy.sum', 'np.sum', (['data.smoothed_toolpath.segment_lengths'], {}), '(data.smoothed_toolpath.segment_lengths)\n', (14779, 14819), True, 'import numpy as np\n'), ((15343, 15389), 'numpy.sum', 'np.sum', (['data.smoothed_toolpath.segment_lengths'], {}), '(data.smoothed_toolpath.segment_lengths)\n', (15349, 15389), True, 'import numpy as np\n'), ((15435, 15481), 'numpy.sum', 'np.sum', (['data.smoothed_toolpath.segment_lengths'], {}), '(data.smoothed_toolpath.segment_lengths)\n', (15441, 15481), True, 'import numpy as np\n'), ((16031, 16077), 'numpy.sum', 'np.sum', (['data.smoothed_toolpath.segment_lengths'], {}), '(data.smoothed_toolpath.segment_lengths)\n', (16037, 16077), True, 'import numpy as np\n'), ((16123, 16169), 'numpy.sum', 'np.sum', (['data.smoothed_toolpath.segment_lengths'], {}), '(data.smoothed_toolpath.segment_lengths)\n', (16129, 16169), True, 'import numpy as np\n'), ((16723, 16769), 'numpy.sum', 'np.sum', (['data.smoothed_toolpath.segment_lengths'], {}), '(data.smoothed_toolpath.segment_lengths)\n', (16729, 16769), True, 'import numpy as np\n'), ((16815, 16861), 'numpy.sum', 'np.sum', (['data.smoothed_toolpath.segment_lengths'], {}), '(data.smoothed_toolpath.segment_lengths)\n', (16821, 16861), True, 'import numpy as np\n'), ((17416, 17462), 'numpy.sum', 'np.sum', (['data.smoothed_toolpath.segment_lengths'], {}), '(data.smoothed_toolpath.segment_lengths)\n', (17422, 17462), True, 'import numpy as np\n'), ((17508, 17554), 'numpy.sum', 'np.sum', (['data.smoothed_toolpath.segment_lengths'], {}), '(data.smoothed_toolpath.segment_lengths)\n', (17514, 17554), True, 'import numpy as np\n'), ((18217, 18263), 'numpy.sum', 'np.sum', (['data.smoothed_toolpath.segment_lengths'], {}), '(data.smoothed_toolpath.segment_lengths)\n', (18223, 18263), True, 'import numpy as np\n'), ((18282, 18328), 'numpy.sum', 'np.sum', (['data.smoothed_toolpath.segment_lengths'], {}), '(data.smoothed_toolpath.segment_lengths)\n', (18288, 18328), True, 'import numpy as np\n'), ((18346, 18373), 'pytest.approx', 'pytest.approx', (['(300)'], {'abs': '(0.1)'}), '(300, abs=0.1)\n', (18359, 18373), False, 'import pytest\n'), ((19037, 19083), 'numpy.sum', 'np.sum', (['data.smoothed_toolpath.segment_lengths'], {}), '(data.smoothed_toolpath.segment_lengths)\n', (19043, 19083), True, 'import numpy as np\n'), ((19170, 19216), 'numpy.sum', 'np.sum', (['data.smoothed_toolpath.segment_lengths'], {}), '(data.smoothed_toolpath.segment_lengths)\n', (19176, 19216), True, 'import numpy as np\n'), ((19946, 19992), 'numpy.sum', 'np.sum', (['data.smoothed_toolpath.segment_lengths'], {}), '(data.smoothed_toolpath.segment_lengths)\n', (19952, 19992), True, 'import numpy as np\n'), ((20011, 20057), 'numpy.sum', 'np.sum', 
(['data.smoothed_toolpath.segment_lengths'], {}), '(data.smoothed_toolpath.segment_lengths)\n', (20017, 20057), True, 'import numpy as np\n'), ((20075, 20102), 'pytest.approx', 'pytest.approx', (['(300)'], {'abs': '(0.1)'}), '(300, abs=0.1)\n', (20088, 20102), False, 'import pytest\n'), ((1830, 1859), 'numpy.linalg.norm', 'np.linalg.norm', (['(linea - point)'], {}), '(linea - point)\n', (1844, 1859), True, 'import numpy as np\n'), ((1862, 1891), 'numpy.linalg.norm', 'np.linalg.norm', (['(lineb - point)'], {}), '(lineb - point)\n', (1876, 1891), True, 'import numpy as np\n'), ((4341, 4368), 'pytest.approx', 'pytest.approx', (['(0)'], {'abs': '(1e-12)'}), '(0, abs=1e-12)\n', (4354, 4368), False, 'import pytest\n'), ((4632, 4659), 'pytest.approx', 'pytest.approx', (['(0)'], {'abs': '(0.001)'}), '(0, abs=0.001)\n', (4645, 4659), False, 'import pytest\n'), ((5253, 5280), 'pytest.approx', 'pytest.approx', (['(0)'], {'abs': '(1e-12)'}), '(0, abs=1e-12)\n', (5266, 5280), False, 'import pytest\n'), ((5560, 5587), 'pytest.approx', 'pytest.approx', (['(0)'], {'abs': '(1e-12)'}), '(0, abs=1e-12)\n', (5573, 5587), False, 'import pytest\n'), ((6086, 6113), 'pytest.approx', 'pytest.approx', (['(0)'], {'abs': '(1e-12)'}), '(0, abs=1e-12)\n', (6099, 6113), False, 'import pytest\n'), ((6312, 6339), 'pytest.approx', 'pytest.approx', (['(0)'], {'abs': '(1e-12)'}), '(0, abs=1e-12)\n', (6325, 6339), False, 'import pytest\n'), ((6652, 6679), 'pytest.approx', 'pytest.approx', (['(0)'], {'abs': '(0.001)'}), '(0, abs=0.001)\n', (6665, 6679), False, 'import pytest\n'), ((6963, 6990), 'pytest.approx', 'pytest.approx', (['(0)'], {'abs': '(0.001)'}), '(0, abs=0.001)\n', (6976, 6990), False, 'import pytest\n'), ((8602, 8632), 'pytest.approx', 'pytest.approx', (['(0.01)'], {'abs': '(1e-12)'}), '(0.01, abs=1e-12)\n', (8615, 8632), False, 'import pytest\n'), ((10947, 10973), 'numpy.linalg.norm', 'np.linalg.norm', (['[100, 200]'], {}), '([100, 200])\n', (10961, 10973), True, 'import numpy as np\n'), ((12615, 12641), 'numpy.linalg.norm', 'np.linalg.norm', (['[100, 100]'], {}), '([100, 100])\n', (12629, 12641), True, 'import numpy as np\n'), ((13274, 13298), 'numpy.linalg.norm', 'np.linalg.norm', (['[100, 1]'], {}), '([100, 1])\n', (13288, 13298), True, 'import numpy as np\n'), ((13991, 14017), 'numpy.linalg.norm', 'np.linalg.norm', (['[100, 100]'], {}), '([100, 100])\n', (14005, 14017), True, 'import numpy as np\n'), ((14734, 14760), 'numpy.linalg.norm', 'np.linalg.norm', (['[100, 100]'], {}), '([100, 100])\n', (14748, 14760), True, 'import numpy as np\n'), ((15398, 15422), 'numpy.linalg.norm', 'np.linalg.norm', (['[100, 1]'], {}), '([100, 1])\n', (15412, 15422), True, 'import numpy as np\n'), ((16085, 16110), 'numpy.linalg.norm', 'np.linalg.norm', (['[10, 0.1]'], {}), '([10, 0.1])\n', (16099, 16110), True, 'import numpy as np\n'), ((16777, 16802), 'numpy.linalg.norm', 'np.linalg.norm', (['[20, 0.1]'], {}), '([20, 0.1])\n', (16791, 16802), True, 'import numpy as np\n'), ((17470, 17495), 'numpy.linalg.norm', 'np.linalg.norm', (['[10, 0.1]'], {}), '([10, 0.1])\n', (17484, 17495), True, 'import numpy as np\n'), ((19132, 19157), 'numpy.linalg.norm', 'np.linalg.norm', (['[10, 0.2]'], {}), '([10, 0.2])\n', (19146, 19157), True, 'import numpy as np\n'), ((3743, 3776), 'pytest.approx', 'pytest.approx', (['line_dist'], {'abs': '(0.1)'}), '(line_dist, abs=0.1)\n', (3756, 3776), False, 'import pytest\n'), ((11467, 11491), 'numpy.linalg.norm', 'np.linalg.norm', (['[50, 50]'], {}), '([50, 50])\n', (11481, 11491), True, 'import numpy as 
np\n'), ((11494, 11518), 'numpy.linalg.norm', 'np.linalg.norm', (['[50, 50]'], {}), '([50, 50])\n', (11508, 11518), True, 'import numpy as np\n'), ((12738, 12764), 'numpy.linalg.norm', 'np.linalg.norm', (['[100, 100]'], {}), '([100, 100])\n', (12752, 12764), True, 'import numpy as np\n'), ((13395, 13419), 'numpy.linalg.norm', 'np.linalg.norm', (['[100, 1]'], {}), '([100, 1])\n', (13409, 13419), True, 'import numpy as np\n'), ((14114, 14140), 'numpy.linalg.norm', 'np.linalg.norm', (['[100, 100]'], {}), '([100, 100])\n', (14128, 14140), True, 'import numpy as np\n'), ((14857, 14883), 'numpy.linalg.norm', 'np.linalg.norm', (['[100, 100]'], {}), '([100, 100])\n', (14871, 14883), True, 'import numpy as np\n'), ((15519, 15543), 'numpy.linalg.norm', 'np.linalg.norm', (['[100, 1]'], {}), '([100, 1])\n', (15533, 15543), True, 'import numpy as np\n'), ((16206, 16231), 'numpy.linalg.norm', 'np.linalg.norm', (['[10, 0.1]'], {}), '([10, 0.1])\n', (16220, 16231), True, 'import numpy as np\n'), ((16898, 16923), 'numpy.linalg.norm', 'np.linalg.norm', (['[20, 0.1]'], {}), '([20, 0.1])\n', (16912, 16923), True, 'import numpy as np\n'), ((17591, 17616), 'numpy.linalg.norm', 'np.linalg.norm', (['[10, 0.1]'], {}), '([10, 0.1])\n', (17605, 17616), True, 'import numpy as np\n'), ((19104, 19129), 'numpy.linalg.norm', 'np.linalg.norm', (['[10, 0.1]'], {}), '([10, 0.1])\n', (19118, 19129), True, 'import numpy as np\n'), ((19281, 19306), 'numpy.linalg.norm', 'np.linalg.norm', (['[10, 0.2]'], {}), '([10, 0.2])\n', (19295, 19306), True, 'import numpy as np\n'), ((435, 461), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (451, 461), False, 'import os\n'), ((19253, 19278), 'numpy.linalg.norm', 'np.linalg.norm', (['[10, 0.1]'], {}), '([10, 0.1])\n', (19267, 19278), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
import numpy as np
import copy
import rospy
import rospkg
import rosparam
import threading
import argparse
from geometry_msgs.msg import Vector3
from std_msgs.msg import Header, Float64
from sub8_msgs.msg import Thrust, ThrusterStatus
from mil_ros_tools import wait_for_param, thread_lock, numpy_to_point
from sub8_msgs.srv import ThrusterInfo, ThrusterInfoResponse, FailThruster, UnfailThruster
from sub8_thruster_comm import thruster_comm_factory
from ros_alarms import AlarmBroadcaster, AlarmListener
lock = threading.Lock()
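# module-level lock used by the @thread_lock-decorated methods to serialize access to the
# thruster ports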
class BusVoltageMonitor(object):
'''
Class that estimates sub8's thruster bus voltage.
As of May 2017, this is just a simple rolling average with a constant width sliding
    window. However, the add_reading and get_voltage_estimate methods are kept separate
    so that smarter filtering can be added later.
'''
VMAX = 50 # volts
VMIN = 0 # volts
class VoltageReading(object):
def __init__(self, voltage, time):
self.v = voltage
self.t = time
def __init__(self, window_duration):
'''
        window_duration - float (number of seconds for which to keep a reading in the buffer)
'''
self.bus_voltage_alarm = AlarmBroadcaster("bus-voltage")
self.bus_voltage_pub = rospy.Publisher('bus_voltage', Float64, queue_size=1)
self.warn_voltage = rospy.get_param("/battery/warn_voltage", 44.5)
self.kill_voltage = rospy.get_param("/battery/kill_voltage", 44.0)
self.last_estimate_time = rospy.Time.now()
self.WINDOW_DURATION = rospy.Duration(window_duration)
self.ESTIMATION_PERIOD = rospy.Duration(0.2)
self.cached_severity = 0
self.buffer = []
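        # rolling buffer of VoltageReading samples; prune_buffer drops entries older
        # than WINDOW_DURATION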
def add_reading(self, voltage, time):
''' Adds voltage readings to buffer '''
voltage = float(voltage)
# Only add if it makes sense (the M5's will give nonsense feedback at times)
if voltage >= self.VMIN and voltage <= self.VMAX:
self.buffer.append(self.VoltageReading(voltage, time))
self.prune_buffer()
# check bus voltage if enough time has passed
if rospy.Time.now() - self.last_estimate_time > self.ESTIMATION_PERIOD:
self.check_bus_voltage()
def prune_buffer(self):
''' Removes readings older than the window_duration from buffer '''
for reading in self.buffer:
age = rospy.Time.now() - reading.t
if age > self.WINDOW_DURATION:
self.buffer.remove(reading)
def get_voltage_estimate(self):
''' Returns average voltage in buffer '''
voltages = []
if len(self.buffer) == 0:
return None
for r in self.buffer:
voltages.append(r.v)
return np.mean(voltages)
def check_bus_voltage(self):
''' Publishes bus_voltage estimate and raises alarm if necessary '''
bus_voltage = self.get_voltage_estimate()
if bus_voltage is None:
return
self.bus_voltage_pub.publish(Float64(bus_voltage))
severity = None
if bus_voltage < self.warn_voltage:
severity = 3
if bus_voltage < self.kill_voltage:
severity = 5
if severity is not None and self.cached_severity != severity:
self.bus_voltage_alarm.raise_alarm(
problem_description='Bus voltage has fallen to {}'.format(bus_voltage),
parameters={'bus_voltage': bus_voltage},
severity=severity
)
self.cached_severity = severity
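# ---------------------------------------------------------------------------
# Editor's sketch (hedged, defined but never called): BusVoltageMonitor keeps
# only the readings newer than WINDOW_DURATION and averages them.  The helper
# below reproduces that sliding-window mean with plain floats standing in for
# rospy.Time, which is purely an illustrative assumption.
def _example_windowed_voltage_mean(readings=((47.9, 0.0), (48.1, 1.0), (47.5, 40.0)),
                                   now=41.0, window=30.0):
    # readings are (voltage, timestamp) pairs; drop anything older than the window
    fresh = [v for v, t in readings if now - t <= window]
    return sum(fresh) / len(fresh) if fresh else None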
class ThrusterDriver(object):
_dropped_timeout = 1.0 # s
_window_duration = 30.0 # s
_NODE_NAME = rospy.get_name()
def __init__(self, ports_layout, thruster_definitions):
'''Thruster driver, an object for commanding all of the sub's thrusters
- Gather configuration data and make it available to other nodes
- Instantiate ThrusterPorts, (Either simulated or real), for communicating with thrusters
- Track a thrust_dict, which maps thruster names to the appropriate port
- Given a command message, route that command to the appropriate port/thruster
- Send a thruster status message describing the status of the particular thruster
'''
self.failed_thrusters = set() # This is only determined by comms
self.deactivated_thrusters = set() # These will not come back online even if comms are good (user managed)
# Alarms
self.thruster_out_alarm = AlarmBroadcaster("thruster-out")
AlarmListener("thruster-out", self.check_alarm_status, call_when_raised=False) # Prevent outside interference
# Create ThrusterPort objects in a dict indexed by port name
self.load_thruster_ports(ports_layout, thruster_definitions)
# Feedback on thrusters (thruster mapper blocks until it can use this service)
self.thruster_info_service = rospy.Service('thrusters/thruster_info', ThrusterInfo, self.get_thruster_info)
self.status_publishers = {name: rospy.Publisher('thrusters/status/' + name, ThrusterStatus, queue_size=10)
for name in self.thruster_to_port_map.keys()}
# These alarms require this service to be available before things will work
rospy.wait_for_service("update_thruster_layout")
self.update_thruster_out_alarm()
# Bus voltage
self.bus_voltage_monitor = BusVoltageMonitor(self._window_duration)
# Command thrusters
self.thrust_sub = rospy.Subscriber('thrusters/thrust', Thrust, self.thrust_cb, queue_size=1)
# To programmatically deactivate thrusters
self.fail_thruster_server = rospy.Service('fail_thruster', FailThruster, self.fail_thruster)
self.unfail_thruster_server = rospy.Service('unfail_thruster', UnfailThruster, self.unfail_thruster)
@thread_lock(lock)
def load_thruster_ports(self, ports_layout, thruster_definitions):
        ''' Loads a dictionary of ThrusterPort objects '''
self.ports = {} # ThrusterPort objects
self.thruster_to_port_map = {} # node_id to ThrusterPort
rospack = rospkg.RosPack()
self.make_fake = rospy.get_param('simulate', False)
if self.make_fake:
rospy.logwarn("Running fake thrusters for simulation, based on parameter '/simulate'")
# Instantiate thruster comms port
for port_info in ports_layout:
port_name = port_info['port']
self.ports[port_name] = thruster_comm_factory(port_info, thruster_definitions, fake=self.make_fake)
# Add the thrusters to the thruster dict and configure if present
for thruster_name in port_info['thruster_names']:
self.thruster_to_port_map[thruster_name] = port_info['port']
if thruster_name not in self.ports[port_name].online_thruster_names:
rospy.logerr("ThrusterDriver: {} IS MISSING!".format(thruster_name))
else:
rospy.loginfo("ThrusterDriver: {} registered".format(thruster_name))
# Set firmware settings
port = self.ports[port_name]
node_id = thruster_definitions[thruster_name]['node_id']
config_path = (rospack.get_path('sub8_videoray_m5_thruster') + '/config/firmware_settings/' +
thruster_name + '.yaml')
rospy.loginfo('Configuring {} with settings specified in {}.'.format(thruster_name,
config_path))
port.set_registers_from_dict(node_id=node_id,
reg_dict=rosparam.load_file(config_path)[0][0])
port.reboot_thruster(node_id) # Necessary for some settings to take effect
def get_thruster_info(self, srv):
''' Get the thruster info for a particular thruster name '''
query_name = srv.thruster_name
info = self.ports[self.thruster_to_port_map[query_name]].thruster_info[query_name]
thruster_info = ThrusterInfoResponse(
node_id=info.node_id,
min_force=info.thrust_bounds[0],
max_force=info.thrust_bounds[1],
position=numpy_to_point(info.position),
direction=Vector3(*info.direction)
)
return thruster_info
def check_alarm_status(self, alarm):
# If someone else cleared this alarm, we need to make sure to raise it again
if not alarm.raised and alarm.node_name != self._NODE_NAME:
self.update_thruster_out_alarm()
def update_thruster_out_alarm(self):
'''
Raises or clears the thruster out alarm
Updates the 'offline_thruster_names' parameter accordingly
Sets the severity to the number of failed thrusters (clipped at 5)
'''
offline_names = list(self.failed_thrusters)
if len(self.failed_thrusters) > 0:
self.thruster_out_alarm.raise_alarm(
node_name=self._NODE_NAME,
parameters={'offline_thruster_names': offline_names},
severity=int(np.clip(len(self.failed_thrusters), 1, 5)))
else:
self.thruster_out_alarm.clear_alarm(
node_name=self._NODE_NAME,
parameters={'offline_thruster_names': offline_names})
@thread_lock(lock)
def command_thruster(self, name, thrust):
'''
        Issue a force command (in Newtons) to a named thruster
Example names are BLR, FLH, etc.
Raises RuntimeError if a thrust value outside of the configured thrust bounds is commanded
Raises UnavailableThrusterException if a thruster that is offline is commanded a non-zero thrust
'''
port_name = self.thruster_to_port_map[name]
target_port = self.ports[port_name]
thruster_model = target_port.thruster_info[name]
if thrust < thruster_model.thrust_bounds[0] or thrust > thruster_model.thrust_bounds[1]:
rospy.logwarn('Tried to command thrust ({}) outside of physical thrust bounds ({})'.format(
thrust, thruster_model.thrust_bounds))
if name in self.failed_thrusters:
if not np.isclose(thrust, 0):
rospy.logwarn('ThrusterDriver: commanding non-zero thrust to offline thruster (' + name + ')')
effort = target_port.thruster_info[name].get_effort_from_thrust(thrust)
# We immediately get thruster_status back
thruster_status = target_port.command_thruster(name, effort)
# Keep track of thrusters going online or offline
offline_on_port = target_port.get_offline_thruster_names()
for offline in offline_on_port:
if offline not in self.failed_thrusters:
self.failed_thrusters.add(offline) # Thruster went offline
for failed in copy.deepcopy(self.failed_thrusters):
if (failed in target_port.get_declared_thruster_names() and
failed not in offline_on_port and
failed not in self.deactivated_thrusters):
self.failed_thrusters.remove(failed) # Thruster came online
# Don't try to do anything if the thruster status is bad
if thruster_status is None:
return
message_contents = [
'rpm',
'bus_v',
'bus_i',
'temp',
'fault',
'command_tx_count',
'status_rx_count',
'command_latency_avg'
]
message_keyword_args = {key: thruster_status[key] for key in message_contents}
power = thruster_status['bus_v'] * thruster_status['bus_i']
self.status_publishers[name].publish(
ThrusterStatus(
header=Header(stamp=rospy.Time.now()),
name=name,
node_id=thruster_model.node_id,
power=power,
effort=effort,
thrust=thrust,
**message_keyword_args
)
)
# Will publish bus_voltage and raise alarm if necessary
self.bus_voltage_monitor.add_reading(message_keyword_args['bus_v'], rospy.Time.now())
# Undervolt/overvolt faults are unreliable (might not still be true - David)
if message_keyword_args['fault'] > 2:
fault_codes = {
(1 << 0): 'UNDERVOLT',
                (1 << 1): 'OVERVOLT',
(1 << 2): 'OVERCURRENT',
(1 << 3): 'OVERTEMP',
(1 << 4): 'STALL',
(1 << 5): 'STALL_WARN',
}
fault = int(message_keyword_args['fault'])
faults = []
for code, fault_name in fault_codes.items():
if code & fault != 0:
faults.append(fault_name)
rospy.logwarn("Thruster: {} has entered fault with status {}".format(name, message_keyword_args))
rospy.logwarn("Fault causes are: {}".format(faults))
return
def thrust_cb(self, msg):
'''
Callback for receiving thrust commands
These messages contain a list of instructions, one for each thruster
        If there are any updates to the list of failed thrusters, it will raise an alarm
'''
failed_before = {x for x in self.failed_thrusters}
for thrust_cmd in list(msg.thruster_commands):
self.command_thruster(thrust_cmd.name, thrust_cmd.thrust)
# Raise or clear 'thruster-out' alarm
if not self.failed_thrusters == failed_before:
rospy.logdebug('Failed thrusters:', self.failed_thrusters)
self.update_thruster_out_alarm()
def stop(self):
''' Commands 0 thrust to all thrusters '''
for port in self.ports.values():
for thruster_name in port.online_thruster_names.copy():
self.command_thruster(thruster_name, 0.0)
def fail_thruster(self, srv):
''' Makes a thruster unavailable for thrust allocation '''
# So that thrust is not allocated to the thruster
self.failed_thrusters.add(srv.thruster_name)
# So that it won't come back online even if comms are good
self.deactivated_thrusters.add(srv.thruster_name)
# So that thruster_mapper updates the B-matrix
self.update_thruster_out_alarm()
return {}
def unfail_thruster(self, srv):
''' Undoes effect of self.fail_thruster '''
self.failed_thrusters.remove(srv.thruster_name)
self.deactivated_thrusters.remove(srv.thruster_name)
self.update_thruster_out_alarm()
return {}
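# ---------------------------------------------------------------------------
# Editor's sketch (hedged, defined but never called): the 'fault' field reported
# by each thruster is a bitmask, decoded in command_thruster() above.  The helper
# below runs the same decoding on a made-up fault value (0b10100 is an
# illustrative assumption, not real telemetry) so it can be checked without ROS.
def _example_decode_fault(fault=0b10100):
    fault_codes = {
        (1 << 0): 'UNDERVOLT',
        (1 << 1): 'OVERVOLT',
        (1 << 2): 'OVERCURRENT',
        (1 << 3): 'OVERTEMP',
        (1 << 4): 'STALL',
        (1 << 5): 'STALL_WARN',
    }
    # A set bit means the corresponding fault is active
    return [name for code, name in fault_codes.items() if code & fault]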
if __name__ == '__main__':
PKG = 'sub8_videoray_m5_thruster'
usage_msg = "Interface to Sub8's VideoRay M5 thrusters"
desc_msg = "Specify a path to the configuration.json file containing the thrust calibration data"
parser = argparse.ArgumentParser(usage=usage_msg, description=desc_msg)
args = parser.parse_args(rospy.myargv()[1:])
rospy.init_node('videoray_m5_thruster_driver')
layout_parameter = '/thruster_layout'
rospy.loginfo("Thruster Driver waiting for parameter, {}".format(layout_parameter))
thruster_layout = wait_for_param(layout_parameter)
if thruster_layout is None:
raise IOError('/thruster_layout rosparam needs to be set before launching the thruster driver')
thruster_driver = ThrusterDriver(thruster_layout['thruster_ports'], thruster_layout['thrusters'])
rospy.spin()
|
[
"geometry_msgs.msg.Vector3",
"rosparam.load_file",
"rospy.Subscriber",
"argparse.ArgumentParser",
"mil_ros_tools.thread_lock",
"std_msgs.msg.Float64",
"numpy.mean",
"numpy.isclose",
"rospy.get_name",
"mil_ros_tools.numpy_to_point",
"ros_alarms.AlarmBroadcaster",
"rospy.Duration",
"rospy.logwarn",
"mil_ros_tools.wait_for_param",
"rospy.Time.now",
"threading.Lock",
"rospy.init_node",
"rospy.wait_for_service",
"copy.deepcopy",
"rospy.logdebug",
"sub8_thruster_comm.thruster_comm_factory",
"rospy.Service",
"ros_alarms.AlarmListener",
"rospkg.RosPack",
"rospy.Publisher",
"rospy.get_param",
"rospy.spin",
"rospy.myargv"
] |
[((533, 549), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (547, 549), False, 'import threading\n'), ((3696, 3712), 'rospy.get_name', 'rospy.get_name', ([], {}), '()\n', (3710, 3712), False, 'import rospy\n'), ((5932, 5949), 'mil_ros_tools.thread_lock', 'thread_lock', (['lock'], {}), '(lock)\n', (5943, 5949), False, 'from mil_ros_tools import wait_for_param, thread_lock, numpy_to_point\n'), ((9489, 9506), 'mil_ros_tools.thread_lock', 'thread_lock', (['lock'], {}), '(lock)\n', (9500, 9506), False, 'from mil_ros_tools import wait_for_param, thread_lock, numpy_to_point\n'), ((15046, 15108), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'usage': 'usage_msg', 'description': 'desc_msg'}), '(usage=usage_msg, description=desc_msg)\n', (15069, 15108), False, 'import argparse\n'), ((15163, 15209), 'rospy.init_node', 'rospy.init_node', (['"""videoray_m5_thruster_driver"""'], {}), "('videoray_m5_thruster_driver')\n", (15178, 15209), False, 'import rospy\n'), ((15363, 15395), 'mil_ros_tools.wait_for_param', 'wait_for_param', (['layout_parameter'], {}), '(layout_parameter)\n', (15377, 15395), False, 'from mil_ros_tools import wait_for_param, thread_lock, numpy_to_point\n'), ((15639, 15651), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (15649, 15651), False, 'import rospy\n'), ((1222, 1253), 'ros_alarms.AlarmBroadcaster', 'AlarmBroadcaster', (['"""bus-voltage"""'], {}), "('bus-voltage')\n", (1238, 1253), False, 'from ros_alarms import AlarmBroadcaster, AlarmListener\n'), ((1285, 1338), 'rospy.Publisher', 'rospy.Publisher', (['"""bus_voltage"""', 'Float64'], {'queue_size': '(1)'}), "('bus_voltage', Float64, queue_size=1)\n", (1300, 1338), False, 'import rospy\n'), ((1367, 1413), 'rospy.get_param', 'rospy.get_param', (['"""/battery/warn_voltage"""', '(44.5)'], {}), "('/battery/warn_voltage', 44.5)\n", (1382, 1413), False, 'import rospy\n'), ((1442, 1488), 'rospy.get_param', 'rospy.get_param', (['"""/battery/kill_voltage"""', '(44.0)'], {}), "('/battery/kill_voltage', 44.0)\n", (1457, 1488), False, 'import rospy\n'), ((1523, 1539), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (1537, 1539), False, 'import rospy\n'), ((1571, 1602), 'rospy.Duration', 'rospy.Duration', (['window_duration'], {}), '(window_duration)\n', (1585, 1602), False, 'import rospy\n'), ((1636, 1655), 'rospy.Duration', 'rospy.Duration', (['(0.2)'], {}), '(0.2)\n', (1650, 1655), False, 'import rospy\n'), ((2773, 2790), 'numpy.mean', 'np.mean', (['voltages'], {}), '(voltages)\n', (2780, 2790), True, 'import numpy as np\n'), ((4562, 4594), 'ros_alarms.AlarmBroadcaster', 'AlarmBroadcaster', (['"""thruster-out"""'], {}), "('thruster-out')\n", (4578, 4594), False, 'from ros_alarms import AlarmBroadcaster, AlarmListener\n'), ((4603, 4681), 'ros_alarms.AlarmListener', 'AlarmListener', (['"""thruster-out"""', 'self.check_alarm_status'], {'call_when_raised': '(False)'}), "('thruster-out', self.check_alarm_status, call_when_raised=False)\n", (4616, 4681), False, 'from ros_alarms import AlarmBroadcaster, AlarmListener\n'), ((4978, 5056), 'rospy.Service', 'rospy.Service', (['"""thrusters/thruster_info"""', 'ThrusterInfo', 'self.get_thruster_info'], {}), "('thrusters/thruster_info', ThrusterInfo, self.get_thruster_info)\n", (4991, 5056), False, 'import rospy\n'), ((5345, 5393), 'rospy.wait_for_service', 'rospy.wait_for_service', (['"""update_thruster_layout"""'], {}), "('update_thruster_layout')\n", (5367, 5393), False, 'import rospy\n'), ((5589, 5663), 'rospy.Subscriber', 'rospy.Subscriber', (['"""thrusters/thrust"""', 
'Thrust', 'self.thrust_cb'], {'queue_size': '(1)'}), "('thrusters/thrust', Thrust, self.thrust_cb, queue_size=1)\n", (5605, 5663), False, 'import rospy\n'), ((5752, 5816), 'rospy.Service', 'rospy.Service', (['"""fail_thruster"""', 'FailThruster', 'self.fail_thruster'], {}), "('fail_thruster', FailThruster, self.fail_thruster)\n", (5765, 5816), False, 'import rospy\n'), ((5855, 5925), 'rospy.Service', 'rospy.Service', (['"""unfail_thruster"""', 'UnfailThruster', 'self.unfail_thruster'], {}), "('unfail_thruster', UnfailThruster, self.unfail_thruster)\n", (5868, 5925), False, 'import rospy\n'), ((6234, 6250), 'rospkg.RosPack', 'rospkg.RosPack', ([], {}), '()\n', (6248, 6250), False, 'import rospkg\n'), ((6277, 6311), 'rospy.get_param', 'rospy.get_param', (['"""simulate"""', '(False)'], {}), "('simulate', False)\n", (6292, 6311), False, 'import rospy\n'), ((11021, 11057), 'copy.deepcopy', 'copy.deepcopy', (['self.failed_thrusters'], {}), '(self.failed_thrusters)\n', (11034, 11057), False, 'import copy\n'), ((3041, 3061), 'std_msgs.msg.Float64', 'Float64', (['bus_voltage'], {}), '(bus_voltage)\n', (3048, 3061), False, 'from std_msgs.msg import Header, Float64\n'), ((5097, 5171), 'rospy.Publisher', 'rospy.Publisher', (["('thrusters/status/' + name)", 'ThrusterStatus'], {'queue_size': '(10)'}), "('thrusters/status/' + name, ThrusterStatus, queue_size=10)\n", (5112, 5171), False, 'import rospy\n'), ((6351, 6442), 'rospy.logwarn', 'rospy.logwarn', (['"""Running fake thrusters for simulation, based on parameter \'/simulate\'"""'], {}), '(\n "Running fake thrusters for simulation, based on parameter \'/simulate\'")\n', (6364, 6442), False, 'import rospy\n'), ((6598, 6673), 'sub8_thruster_comm.thruster_comm_factory', 'thruster_comm_factory', (['port_info', 'thruster_definitions'], {'fake': 'self.make_fake'}), '(port_info, thruster_definitions, fake=self.make_fake)\n', (6619, 6673), False, 'from sub8_thruster_comm import thruster_comm_factory\n'), ((12340, 12356), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (12354, 12356), False, 'import rospy\n'), ((13742, 13800), 'rospy.logdebug', 'rospy.logdebug', (['"""Failed thrusters:"""', 'self.failed_thrusters'], {}), "('Failed thrusters:', self.failed_thrusters)\n", (13756, 13800), False, 'import rospy\n'), ((15138, 15152), 'rospy.myargv', 'rospy.myargv', ([], {}), '()\n', (15150, 15152), False, 'import rospy\n'), ((2147, 2163), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (2161, 2163), False, 'import rospy\n'), ((2412, 2428), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (2426, 2428), False, 'import rospy\n'), ((8364, 8393), 'mil_ros_tools.numpy_to_point', 'numpy_to_point', (['info.position'], {}), '(info.position)\n', (8378, 8393), False, 'from mil_ros_tools import wait_for_param, thread_lock, numpy_to_point\n'), ((8417, 8441), 'geometry_msgs.msg.Vector3', 'Vector3', (['*info.direction'], {}), '(*info.direction)\n', (8424, 8441), False, 'from geometry_msgs.msg import Vector3\n'), ((10363, 10384), 'numpy.isclose', 'np.isclose', (['thrust', '(0)'], {}), '(thrust, 0)\n', (10373, 10384), True, 'import numpy as np\n'), ((10402, 10505), 'rospy.logwarn', 'rospy.logwarn', (["('ThrusterDriver: commanding non-zero thrust to offline thruster (' + name +\n ')')"], {}), "(\n 'ThrusterDriver: commanding non-zero thrust to offline thruster (' +\n name + ')')\n", (10415, 10505), False, 'import rospy\n'), ((11951, 11967), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (11965, 11967), False, 'import rospy\n'), ((7799, 7830), 
'rosparam.load_file', 'rosparam.load_file', (['config_path'], {}), '(config_path)\n', (7817, 7830), False, 'import rosparam\n')]
|
import torch
import numpy as np
from torch.autograd import Variable
import torch.optim as optim
import argparse
import random
import os
import models
import torchvision.utils as vutils
import utils
import dataLoader
from torch.utils.data import DataLoader
parser = argparse.ArgumentParser()
# The location of the training set
parser.add_argument('--dataRoot', default='/home/zhl/SiggraphAsia18/Data/train/', help='path to images')
parser.add_argument('--experiment', default=None, help='the path to store samples and models')
# The basic training setting
parser.add_argument('--nepoch', type=int, default=18, help='the number of epochs for training')
parser.add_argument('--batchSize', type=int, default=16, help='input batch size')
parser.add_argument('--imageSize', type=int, default=256, help='the height / width of the input image to network')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--deviceIds', type=int, nargs='+', default=[0], help='the gpus used for training network')
# The training weight
parser.add_argument('--globalIllu2', type=float, default=1, help='the weight of global illumination prediction 2')
parser.add_argument('--globalIllu3', type=float, default=1, help='the weight of global illumination prediction 3')
# Fine Tune the network
parser.add_argument('--isFineTune', action = 'store_true', help='whether to fine-tune the network or not')
parser.add_argument('--epochId', type=int, default = -1, help='the training epoch of the network')
# The detail network setting
parser.add_argument('--cascadeLevel', type=int, default=0, help='how many levels of cascade to use')
opt = parser.parse_args()
print(opt)
assert(opt.cascadeLevel == 0 )
if opt.experiment is None:
opt.experiment = 'check_globalillumination'
os.system('mkdir {0}'.format(opt.experiment) )
os.system('cp *.py %s' % opt.experiment )
g2W, g3W = opt.globalIllu2, opt.globalIllu3
opt.gpuId = opt.deviceIds[0]
opt.seed = random.randint(1, 10000)
print("Random Seed: ", opt.seed)
random.seed(opt.seed)
torch.manual_seed(opt.seed)
if torch.cuda.is_available() and not opt.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
####################################
# initialize tensors
albedoBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) )
normalBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) )
roughBatch = Variable(torch.FloatTensor(opt.batchSize, 1, opt.imageSize, opt.imageSize) )
segBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) )
depthBatch = Variable(torch.FloatTensor(opt.batchSize, 1, opt.imageSize, opt.imageSize) )
imP1Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) )
imP2Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) )
imP3Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) )
# Global illumination
globIllu1to2 = models.globalIllumination()
globIllu2to3 = models.globalIllumination()
#########################################
if opt.isFineTune:
globIllu1to2.load_state_dict(torch.load('{0}/globIllu1to2_{1}.pth'.format(opt.experiment, opt.epochId) ) )
globIllu2to3.load_state_dict(torch.load('{0}/globIllu2to3_{1}.pth'.format(opt.experiment, opt.epochId) ) )
############## ######################
# Send things into GPU
if opt.cuda:
albedoBatch = albedoBatch.cuda(opt.gpuId)
normalBatch = normalBatch.cuda(opt.gpuId)
roughBatch = roughBatch.cuda(opt.gpuId)
depthBatch = depthBatch.cuda(opt.gpuId)
segBatch = segBatch.cuda(opt.gpuId)
imP1Batch = imP1Batch.cuda(opt.gpuId)
imP2Batch = imP2Batch.cuda(opt.gpuId)
imP3Batch = imP3Batch.cuda(opt.gpuId)
globIllu1to2 = globIllu1to2.cuda(opt.gpuId)
globIllu2to3 = globIllu2to3.cuda(opt.gpuId)
####################################
####################################
# Global Optimizer
opGlobalIllu1to2 = optim.Adam(globIllu1to2.parameters(), lr=2e-4, betas=(0.5, 0.999) )
opGlobalIllu2to3 = optim.Adam(globIllu2to3.parameters(), lr=2e-4, betas=(0.5, 0.999) )
#####################################
####################################
brdfDataset = dataLoader.BatchLoader(opt.dataRoot, imSize = opt.imageSize)
brdfLoader = DataLoader(brdfDataset, batch_size = opt.batchSize, num_workers = 8, shuffle = False)
j = 0
globalIllu1ErrsNpList= np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32)
globalIllu2ErrsNpList = np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32)
globalIllu3ErrsNpList= np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32)
renderedErrsNpList = np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32)
for epoch in list(range(opt.epochId+1, opt.nepoch) ):
trainingLog = open('{0}/trainingLog_{1}.txt'.format(opt.experiment, epoch), 'w')
for i, dataBatch in enumerate(brdfLoader):
j += 1
# Load data from cpu to gpu
albedo_cpu = dataBatch['albedo']
albedoBatch.data.resize_(albedo_cpu.shape)
albedoBatch.data.copy_(albedo_cpu )
normal_cpu = dataBatch['normal']
normalBatch.data.resize_(normal_cpu.shape)
normalBatch.data.copy_(normal_cpu )
rough_cpu = dataBatch['rough']
roughBatch.data.resize_(rough_cpu.shape)
roughBatch.data.copy_(rough_cpu )
seg_cpu = dataBatch['seg']
segBatch.data.resize_(seg_cpu.shape)
segBatch.data.copy_(seg_cpu )
depth_cpu = dataBatch['depth']
depthBatch.data.resize_(depth_cpu.shape)
depthBatch.data.copy_(depth_cpu )
imP1_cpu = dataBatch['imP1']
imP1Batch.data.resize_(imP1_cpu.shape)
imP1Batch.data.copy_(imP1_cpu )
imP2_cpu = dataBatch['imP2']
imP2Batch.data.resize_(imP2_cpu.shape)
imP2Batch.data.copy_(imP2_cpu )
imP3_cpu = dataBatch['imP3']
imP3Batch.data.resize_(imP3_cpu.shape)
imP3Batch.data.copy_(imP3_cpu )
opGlobalIllu1to2.zero_grad()
opGlobalIllu2to3.zero_grad()
########################################################
# Build the cascade network architecture #
globalIllu2s = []
globalIllu3s = []
n = 0
inputGlob2 = torch.cat([imP1Batch, albedoBatch,
normalBatch, roughBatch, depthBatch, segBatch], dim=1)
globalIllu2 = globIllu1to2(inputGlob2)
globalIllu2s.append(globalIllu2 )
inputGlob3 = torch.cat([globalIllu2s[n], albedoBatch,
normalBatch, roughBatch, depthBatch, segBatch], dim=1)
globalIllu3 = globIllu2to3(inputGlob3.detach() )
globalIllu3s.append(globalIllu3)
########################################################
globalIllu2Errs = []
globalIllu3Errs = []
pixelNum = torch.sum(segBatch ).cpu().data.item()
for m in range(0, n + 1):
globalIllu2Errs.append( torch.sum( (globalIllu2s[m] - imP2Batch)
* (globalIllu2s[m] - imP2Batch) * segBatch.expand_as(imP2Batch) ) / pixelNum / 3.0 )
globalIllu3Errs.append(torch.sum( (globalIllu3s[m] - imP3Batch)
* (globalIllu3s[m] - imP3Batch) * segBatch.expand_as(imP3Batch) ) / pixelNum / 3.0 )
globalIllu2ErrSum = sum(globalIllu2Errs)
globalIllu3ErrSum = sum(globalIllu3Errs)
totalErr = g2W * globalIllu2ErrSum + g3W * globalIllu3ErrSum
totalErr.backward()
opGlobalIllu1to2.step()
opGlobalIllu2to3.step()
# Output training error
utils.writeErrToScreen('globalIllu2', globalIllu2Errs, epoch, j)
utils.writeErrToScreen('globalIllu3', globalIllu3Errs, epoch, j)
utils.writeErrToFile('globalIllu2', globalIllu2Errs, trainingLog, epoch, j)
utils.writeErrToFile('globalIllu3', globalIllu3Errs, trainingLog, epoch, j)
globalIllu2ErrsNpList = np.concatenate( [globalIllu2ErrsNpList, utils.turnErrorIntoNumpy(globalIllu2Errs)], axis=0)
globalIllu3ErrsNpList = np.concatenate( [globalIllu3ErrsNpList, utils.turnErrorIntoNumpy(globalIllu3Errs)], axis=0)
if j < 1000:
utils.writeNpErrToScreen('globalIllu2_Accu:', np.mean(globalIllu2ErrsNpList[1:j+1, :], axis=0), epoch, j)
utils.writeNpErrToScreen('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[1:j+1, :], axis=0), epoch, j)
utils.writeNpErrToFile('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[1:j+1, :], axis=0), trainingLog, epoch, j)
utils.writeNpErrToFile('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[1:j+1, :], axis=0), trainingLog, epoch, j)
else:
utils.writeNpErrToScreen('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[j-999:j+1, :], axis=0), epoch, j)
utils.writeNpErrToScreen('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[j-999:j+1, :], axis=0), epoch, j)
utils.writeNpErrToFile('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[j-999:j+1, :], axis=0), trainingLog, epoch, j)
utils.writeNpErrToFile('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[j-999:j+1, :], axis=0), trainingLog, epoch, j)
if j == 1 or j == 1000 or j% 2000 == 0:
# Save the ground truth and the input
vutils.save_image( (0.5*(albedoBatch + 1)*segBatch.expand_as(albedoBatch) ).data,
'{0}/{1}_albedoGt.png'.format(opt.experiment, j) )
vutils.save_image( (0.5*(normalBatch + 1)*segBatch.expand_as(normalBatch) ).data,
'{0}/{1}_normalGt.png'.format(opt.experiment, j) )
vutils.save_image( (0.5*(roughBatch + 1)*segBatch.expand_as(roughBatch) ).data,
'{0}/{1}_roughGt.png'.format(opt.experiment, j) )
depthOut = 1 / torch.clamp(depthBatch, 1e-6, 10) * segBatch.expand_as(depthBatch)
depthOut = (depthOut - 0.25) /0.8
vutils.save_image( ( depthOut*segBatch.expand_as(depthBatch) ).data,
'{0}/{1}_depthGt.png'.format(opt.experiment, j) )
vutils.save_image( ( ( 0.5*(imP1Batch + 1)*segBatch.expand_as(imP1Batch))**(1.0/2.2) ).data ,
'{0}/{1}_imP1.png'.format(opt.experiment, j) )
vutils.save_image( ( ( 0.5*(imP2Batch + 1)*segBatch.expand_as(imP2Batch))**(1.0/2.2) ).data ,
'{0}/{1}_imP2.png'.format(opt.experiment, j) )
vutils.save_image( ( ( 0.5*(imP3Batch + 1)*segBatch.expand_as(imP3Batch))**(1.0/2.2) ).data ,
'{0}/{1}_imP3.png'.format(opt.experiment, j) )
# Save the predicted results
for n in range(0, opt.cascadeLevel + 1):
vutils.save_image( ( ( 0.5*(globalIllu2s[n] + 1)*segBatch.expand_as(imP2Batch) )**(1.0/2.2) ).data,
'{0}/{1}_imP2Pred_{2}.png'.format(opt.experiment, j, n) )
vutils.save_image( ( ( 0.5*(globalIllu3s[n] + 1)*segBatch.expand_as(imP3Batch) )**(1.0/2.2) ).data,
'{0}/{1}_imP3Pred_{2}.png'.format(opt.experiment, j, n) )
trainingLog.close()
    # Update the learning rate
if (epoch + 1) % 2 == 0:
for param_group in opGlobalIllu1to2.param_groups:
param_group['lr'] /= 2
for param_group in opGlobalIllu2to3.param_groups:
param_group['lr'] /= 2
np.save('{0}/globalIllu2_{1}.npy'.format(opt.experiment, epoch), globalIllu2ErrsNpList )
np.save('{0}/globalIllu3_{1}.npy'.format(opt.experiment, epoch), globalIllu3ErrsNpList )
torch.save(globIllu1to2.state_dict(), '{0}/globIllu1to2_{1}.pth'.format(opt.experiment, epoch) )
torch.save(globIllu2to3.state_dict(), '{0}/globIllu2to3_{1}.pth'.format(opt.experiment, epoch) )
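# ---------------------------------------------------------------------------
# Editor's sketch (hedged, defined but never called): the j < 1000 branch above
# averages every iteration recorded so far, and afterwards only the most recent
# 1000 iterations.  The helper restates that windowed-mean rule on the error
# arrays (whose row 0 is the np.ones placeholder), as an illustration only.
def _windowed_mean(err_np_list, j, window=1000):
    if j < window:
        return np.mean(err_np_list[1:j + 1, :], axis=0)
    return np.mean(err_np_list[j - window + 1:j + 1, :], axis=0)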
|
[
"random.randint",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"torch.manual_seed",
"torch.FloatTensor",
"os.system",
"numpy.ones",
"torch.cat",
"models.globalIllumination",
"utils.writeErrToFile",
"utils.turnErrorIntoNumpy",
"numpy.mean",
"random.seed",
"torch.cuda.is_available",
"torch.clamp",
"utils.writeErrToScreen",
"dataLoader.BatchLoader",
"torch.sum"
] |
[((266, 291), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (289, 291), False, 'import argparse\n'), ((1842, 1882), 'os.system', 'os.system', (["('cp *.py %s' % opt.experiment)"], {}), "('cp *.py %s' % opt.experiment)\n", (1851, 1882), False, 'import os\n'), ((1970, 1994), 'random.randint', 'random.randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (1984, 1994), False, 'import random\n'), ((2028, 2049), 'random.seed', 'random.seed', (['opt.seed'], {}), '(opt.seed)\n', (2039, 2049), False, 'import random\n'), ((2050, 2077), 'torch.manual_seed', 'torch.manual_seed', (['opt.seed'], {}), '(opt.seed)\n', (2067, 2077), False, 'import torch\n'), ((3026, 3053), 'models.globalIllumination', 'models.globalIllumination', ([], {}), '()\n', (3051, 3053), False, 'import models\n'), ((3069, 3096), 'models.globalIllumination', 'models.globalIllumination', ([], {}), '()\n', (3094, 3096), False, 'import models\n'), ((4260, 4318), 'dataLoader.BatchLoader', 'dataLoader.BatchLoader', (['opt.dataRoot'], {'imSize': 'opt.imageSize'}), '(opt.dataRoot, imSize=opt.imageSize)\n', (4282, 4318), False, 'import dataLoader\n'), ((4334, 4413), 'torch.utils.data.DataLoader', 'DataLoader', (['brdfDataset'], {'batch_size': 'opt.batchSize', 'num_workers': '(8)', 'shuffle': '(False)'}), '(brdfDataset, batch_size=opt.batchSize, num_workers=8, shuffle=False)\n', (4344, 4413), False, 'from torch.utils.data import DataLoader\n'), ((4450, 4502), 'numpy.ones', 'np.ones', (['[1, 1 + opt.cascadeLevel]'], {'dtype': 'np.float32'}), '([1, 1 + opt.cascadeLevel], dtype=np.float32)\n', (4457, 4502), True, 'import numpy as np\n'), ((4528, 4580), 'numpy.ones', 'np.ones', (['[1, 1 + opt.cascadeLevel]'], {'dtype': 'np.float32'}), '([1, 1 + opt.cascadeLevel], dtype=np.float32)\n', (4535, 4580), True, 'import numpy as np\n'), ((4605, 4657), 'numpy.ones', 'np.ones', (['[1, 1 + opt.cascadeLevel]'], {'dtype': 'np.float32'}), '([1, 1 + opt.cascadeLevel], dtype=np.float32)\n', (4612, 4657), True, 'import numpy as np\n'), ((4680, 4732), 'numpy.ones', 'np.ones', (['[1, 1 + opt.cascadeLevel]'], {'dtype': 'np.float32'}), '([1, 1 + opt.cascadeLevel], dtype=np.float32)\n', (4687, 4732), True, 'import numpy as np\n'), ((2082, 2107), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2105, 2107), False, 'import torch\n'), ((2292, 2357), 'torch.FloatTensor', 'torch.FloatTensor', (['opt.batchSize', '(3)', 'opt.imageSize', 'opt.imageSize'], {}), '(opt.batchSize, 3, opt.imageSize, opt.imageSize)\n', (2309, 2357), False, 'import torch\n'), ((2384, 2449), 'torch.FloatTensor', 'torch.FloatTensor', (['opt.batchSize', '(3)', 'opt.imageSize', 'opt.imageSize'], {}), '(opt.batchSize, 3, opt.imageSize, opt.imageSize)\n', (2401, 2449), False, 'import torch\n'), ((2474, 2539), 'torch.FloatTensor', 'torch.FloatTensor', (['opt.batchSize', '(1)', 'opt.imageSize', 'opt.imageSize'], {}), '(opt.batchSize, 1, opt.imageSize, opt.imageSize)\n', (2491, 2539), False, 'import torch\n'), ((2562, 2627), 'torch.FloatTensor', 'torch.FloatTensor', (['opt.batchSize', '(3)', 'opt.imageSize', 'opt.imageSize'], {}), '(opt.batchSize, 3, opt.imageSize, opt.imageSize)\n', (2579, 2627), False, 'import torch\n'), ((2652, 2717), 'torch.FloatTensor', 'torch.FloatTensor', (['opt.batchSize', '(1)', 'opt.imageSize', 'opt.imageSize'], {}), '(opt.batchSize, 1, opt.imageSize, opt.imageSize)\n', (2669, 2717), False, 'import torch\n'), ((2742, 2807), 'torch.FloatTensor', 'torch.FloatTensor', (['opt.batchSize', '(3)', 'opt.imageSize', 'opt.imageSize'], {}), 
'(opt.batchSize, 3, opt.imageSize, opt.imageSize)\n', (2759, 2807), False, 'import torch\n'), ((2831, 2896), 'torch.FloatTensor', 'torch.FloatTensor', (['opt.batchSize', '(3)', 'opt.imageSize', 'opt.imageSize'], {}), '(opt.batchSize, 3, opt.imageSize, opt.imageSize)\n', (2848, 2896), False, 'import torch\n'), ((2920, 2985), 'torch.FloatTensor', 'torch.FloatTensor', (['opt.batchSize', '(3)', 'opt.imageSize', 'opt.imageSize'], {}), '(opt.batchSize, 3, opt.imageSize, opt.imageSize)\n', (2937, 2985), False, 'import torch\n'), ((6274, 6367), 'torch.cat', 'torch.cat', (['[imP1Batch, albedoBatch, normalBatch, roughBatch, depthBatch, segBatch]'], {'dim': '(1)'}), '([imP1Batch, albedoBatch, normalBatch, roughBatch, depthBatch,\n segBatch], dim=1)\n', (6283, 6367), False, 'import torch\n'), ((6486, 6585), 'torch.cat', 'torch.cat', (['[globalIllu2s[n], albedoBatch, normalBatch, roughBatch, depthBatch, segBatch]'], {'dim': '(1)'}), '([globalIllu2s[n], albedoBatch, normalBatch, roughBatch,\n depthBatch, segBatch], dim=1)\n', (6495, 6585), False, 'import torch\n'), ((7574, 7638), 'utils.writeErrToScreen', 'utils.writeErrToScreen', (['"""globalIllu2"""', 'globalIllu2Errs', 'epoch', 'j'], {}), "('globalIllu2', globalIllu2Errs, epoch, j)\n", (7596, 7638), False, 'import utils\n'), ((7647, 7711), 'utils.writeErrToScreen', 'utils.writeErrToScreen', (['"""globalIllu3"""', 'globalIllu3Errs', 'epoch', 'j'], {}), "('globalIllu3', globalIllu3Errs, epoch, j)\n", (7669, 7711), False, 'import utils\n'), ((7720, 7795), 'utils.writeErrToFile', 'utils.writeErrToFile', (['"""globalIllu2"""', 'globalIllu2Errs', 'trainingLog', 'epoch', 'j'], {}), "('globalIllu2', globalIllu2Errs, trainingLog, epoch, j)\n", (7740, 7795), False, 'import utils\n'), ((7804, 7879), 'utils.writeErrToFile', 'utils.writeErrToFile', (['"""globalIllu3"""', 'globalIllu3Errs', 'trainingLog', 'epoch', 'j'], {}), "('globalIllu3', globalIllu3Errs, trainingLog, epoch, j)\n", (7824, 7879), False, 'import utils\n'), ((7952, 7993), 'utils.turnErrorIntoNumpy', 'utils.turnErrorIntoNumpy', (['globalIllu2Errs'], {}), '(globalIllu2Errs)\n', (7976, 7993), False, 'import utils\n'), ((8076, 8117), 'utils.turnErrorIntoNumpy', 'utils.turnErrorIntoNumpy', (['globalIllu3Errs'], {}), '(globalIllu3Errs)\n', (8100, 8117), False, 'import utils\n'), ((8208, 8258), 'numpy.mean', 'np.mean', (['globalIllu2ErrsNpList[1:j + 1, :]'], {'axis': '(0)'}), '(globalIllu2ErrsNpList[1:j + 1, :], axis=0)\n', (8215, 8258), True, 'import numpy as np\n'), ((8325, 8375), 'numpy.mean', 'np.mean', (['globalIllu3ErrsNpList[1:j + 1, :]'], {'axis': '(0)'}), '(globalIllu3ErrsNpList[1:j + 1, :], axis=0)\n', (8332, 8375), True, 'import numpy as np\n'), ((8440, 8490), 'numpy.mean', 'np.mean', (['globalIllu2ErrsNpList[1:j + 1, :]'], {'axis': '(0)'}), '(globalIllu2ErrsNpList[1:j + 1, :], axis=0)\n', (8447, 8490), True, 'import numpy as np\n'), ((8568, 8618), 'numpy.mean', 'np.mean', (['globalIllu3ErrsNpList[1:j + 1, :]'], {'axis': '(0)'}), '(globalIllu3ErrsNpList[1:j + 1, :], axis=0)\n', (8575, 8618), True, 'import numpy as np\n'), ((8712, 8768), 'numpy.mean', 'np.mean', (['globalIllu2ErrsNpList[j - 999:j + 1, :]'], {'axis': '(0)'}), '(globalIllu2ErrsNpList[j - 999:j + 1, :], axis=0)\n', (8719, 8768), True, 'import numpy as np\n'), ((8833, 8889), 'numpy.mean', 'np.mean', (['globalIllu3ErrsNpList[j - 999:j + 1, :]'], {'axis': '(0)'}), '(globalIllu3ErrsNpList[j - 999:j + 1, :], axis=0)\n', (8840, 8889), True, 'import numpy as np\n'), ((8952, 9008), 'numpy.mean', 'np.mean', (['globalIllu2ErrsNpList[j - 
999:j + 1, :]'], {'axis': '(0)'}), '(globalIllu2ErrsNpList[j - 999:j + 1, :], axis=0)\n', (8959, 9008), True, 'import numpy as np\n'), ((9084, 9140), 'numpy.mean', 'np.mean', (['globalIllu3ErrsNpList[j - 999:j + 1, :]'], {'axis': '(0)'}), '(globalIllu3ErrsNpList[j - 999:j + 1, :], axis=0)\n', (9091, 9140), True, 'import numpy as np\n'), ((9780, 9814), 'torch.clamp', 'torch.clamp', (['depthBatch', '(1e-06)', '(10)'], {}), '(depthBatch, 1e-06, 10)\n', (9791, 9814), False, 'import torch\n'), ((6835, 6854), 'torch.sum', 'torch.sum', (['segBatch'], {}), '(segBatch)\n', (6844, 6854), False, 'import torch\n')]
|
import numpy
""" Utility variables and functions
"""
aa2au = 1.8897261249935897 # bohr / AA
# converts nuclear charge to atom label
Z2LABEL = {
1: 'H', 2: 'He',
3: 'Li', 4: 'Be', 5: 'B', 6: 'C', 7: 'N', 8: 'O', 9: 'F', 10: 'Ne',
11: 'NA', 12: 'Mg', 13: 'Al', 14: 'Si', 15: 'P', 16: 'S', 17: 'Cl', 18: 'Ar'
}
# converts an atomic label to a nuclear charge
LABEL2Z = {}
for key in Z2LABEL:
LABEL2Z[Z2LABEL[key]] = key
# masses from IUPAC: http://www.chem.qmul.ac.uk/iupac/AtWt/
MASSES = {0: 0.00,
1: 1.00784, 2: 4.002602,
3: 6.938, 4: 9.01218, 5: 10.806, 6: 12.0096, 7: 14.00643, 8: 15.99903, 9: 18.998403, 10: 20.1797,
11: 22.9898, 12: 24.304, 13: 26.9815, 14: 28.084, 15: 30.973, 16: 32.059, 17: 35.446, 18: 39.948
}
# Van der Waals radii from Alvarez (2013), DOI: 10.1039/c3dt50599e
# all values in Angstrom
VDWRADII = {0: 0.00,
1: 1.20, 2: 1.43,
3: 2.12, 4: 1.98, 5: 1.91, 6: 1.77, 7: 1.66, 8: 1.50, 9: 1.46, 10: 1.58,
11: 2.50, 12: 2.51, 13: 2.25, 14: 2.19, 15: 1.90, 16: 1.89, 17: 1.82, 18: 1.83
}
# Covalent radii from Pyykkö and Atsumi (2009), DOI: 10.1002/chem.200800987
# all values in Angstrom
COVALENTRADII = {0: 0.00,
1: 0.32, 2: 0.46,
3: 1.33, 4: 1.02, 5: 0.85, 6: 0.75, 7: 0.71, 8: 0.63, 9: 0.64, 10: 0.67,
11: 1.55, 12: 1.39, 13: 1.26, 14: 1.16, 15: 1.11, 16: 1.03, 17: 0.99, 18: 0.96
}
# Coordination numbers from Pyykkö and Atsumi (2009), DOI: 10.1002/chem.200800987
COORDINATION = {0: 0,
1: 1, 2: 1,
3: 1, 4: 2, 5: 3, 6: 4, 7: 3, 8: 2, 9: 1, 10: 1,
11: 1, 12: 2, 13: 3, 14: 4, 15: 3, 16: 2, 17: 1, 18: 1
}
def idamax(a):
""" Returns the index of maximum absolute value (positive or negative)
in the input array a.
    Note: Loosely based on a subroutine in GAMESS with the same name
Arguments:
a -- a numpy array where we are to find the maximum
value in (either positive or negative)
Returns:
the index in the array where the maximum value is.
"""
idx = -1
v = 0.0
for i, value in enumerate(numpy.abs(a)):
if value > v:
idx = i
v = value
return idx
def idamin(a):
""" Returns the index of minimum absolute value (positive or negative)
in the input array a.
Arguments:
a -- a numpy array where we are to find the minimum
value in (either positive or negative)
Returns:
        the index in the array where the minimum value is.
"""
idx = -1
v = 1.0e30
for i, value in enumerate(numpy.abs(a)):
if value < v:
idx = i
v = value
return idx
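# Editor's sketch (hedged, defined but never called): idamax/idamin behave like
# numpy.argmax/argmin applied to numpy.abs.  The tiny array below is made up
# purely to illustrate the equivalence.
def _example_idamax_idamin():
    a = numpy.array([0.3, -2.5, 1.7])
    assert idamax(a) == numpy.argmax(numpy.abs(a)) == 1
    assert idamin(a) == numpy.argmin(numpy.abs(a)) == 0
    return idamax(a), idamin(a)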
|
[
"numpy.abs"
] |
[((2360, 2372), 'numpy.abs', 'numpy.abs', (['a'], {}), '(a)\n', (2369, 2372), False, 'import numpy\n'), ((2851, 2863), 'numpy.abs', 'numpy.abs', (['a'], {}), '(a)\n', (2860, 2863), False, 'import numpy\n')]
|
from __future__ import print_function, division
import os
from os.path import exists
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from collections import OrderedDict
from lib.model import ImMatchNet
from lib.pf_willow_dataset import PFDataset
from lib.normalization import NormalizeImageDict
from lib.torch_util import BatchTensorToVars, str_to_bool
from lib.point_tnf import corr_to_matches
from lib.eval_util import pck_metric
from lib.dataloader import default_collate
from lib.torch_util import collate_custom
import argparse
print('NCNet evaluation script - PF Willow dataset')
use_cuda = torch.cuda.is_available()
# Argument parsing
parser = argparse.ArgumentParser(description='Compute PF Willow matches')
parser.add_argument('--checkpoint', type=str, default='')
parser.add_argument('--image_size', type=int, default=400)
parser.add_argument('--eval_dataset_path', type=str, default='datasets/', help='path to PF Willow dataset')
args = parser.parse_args()
# Create model
print('Creating CNN model...')
model = ImMatchNet(use_cuda=use_cuda,
checkpoint=args.checkpoint)
# Dataset and dataloader
Dataset = PFDataset
collate_fn = default_collate
csv_file = 'PF-dataset/test_pairs_pf.csv'
cnn_image_size = (args.image_size, args.image_size)
dataset = Dataset(csv_file=os.path.join(args.eval_dataset_path, csv_file),
dataset_path=args.eval_dataset_path,
transform=NormalizeImageDict(['source_image', 'target_image']),
output_size=cnn_image_size)
dataset.pck_procedure = 'scnet'
# Only batch_size=1 is supported for evaluation
batch_size = 1
dataloader = DataLoader(dataset, batch_size=batch_size,
shuffle=False, num_workers=0,
collate_fn=collate_fn)
batch_tnf = BatchTensorToVars(use_cuda=use_cuda)
model.eval()
# initialize vector for storing results
stats = {}
stats['point_tnf'] = {}
stats['point_tnf']['pck'] = np.zeros((len(dataset), 1))
# Compute
for i, batch in enumerate(dataloader):
batch = batch_tnf(batch)
batch_start_idx = batch_size * i
corr4d = model(batch)
# get matches
xA, yA, xB, yB, sB = corr_to_matches(corr4d, do_softmax=True)
matches = (xA, yA, xB, yB)
stats = pck_metric(batch, batch_start_idx, matches, stats, args, use_cuda)
print('Batch: [{}/{} ({:.0f}%)]'.format(i, len(dataloader), 100. * i / len(dataloader)))
# Print results
results = stats['point_tnf']['pck']
good_idx = np.flatnonzero((results != -1) * ~np.isnan(results))
print('Total: ' + str(results.size))
print('Valid: ' + str(good_idx.size))
filtered_results = results[good_idx]
print('PCK:', '{:.2%}'.format(np.mean(filtered_results)))
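# Editor's sketch (hedged, defined but never called): the aggregation above
# drops entries that are -1 (unassigned) or NaN before averaging the PCK.  The
# made-up result vector below illustrates that filter.
def _example_filter_pck(results=np.array([0.8, -1.0, np.nan, 0.6])):
    good_idx = np.flatnonzero((results != -1) * ~np.isnan(results))
    return results[good_idx].mean()  # -> 0.7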
|
[
"lib.torch_util.BatchTensorToVars",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"os.path.join",
"lib.normalization.NormalizeImageDict",
"numpy.isnan",
"lib.point_tnf.corr_to_matches",
"numpy.mean",
"torch.cuda.is_available",
"lib.model.ImMatchNet",
"lib.eval_util.pck_metric"
] |
[((656, 681), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (679, 681), False, 'import torch\n'), ((711, 775), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Compute PF Willow matches"""'}), "(description='Compute PF Willow matches')\n", (734, 775), False, 'import argparse\n'), ((1084, 1141), 'lib.model.ImMatchNet', 'ImMatchNet', ([], {'use_cuda': 'use_cuda', 'checkpoint': 'args.checkpoint'}), '(use_cuda=use_cuda, checkpoint=args.checkpoint)\n', (1094, 1141), False, 'from lib.model import ImMatchNet\n'), ((1700, 1799), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(0)', 'collate_fn': 'collate_fn'}), '(dataset, batch_size=batch_size, shuffle=False, num_workers=0,\n collate_fn=collate_fn)\n', (1710, 1799), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((1857, 1893), 'lib.torch_util.BatchTensorToVars', 'BatchTensorToVars', ([], {'use_cuda': 'use_cuda'}), '(use_cuda=use_cuda)\n', (1874, 1893), False, 'from lib.torch_util import BatchTensorToVars, str_to_bool\n'), ((2227, 2267), 'lib.point_tnf.corr_to_matches', 'corr_to_matches', (['corr4d'], {'do_softmax': '(True)'}), '(corr4d, do_softmax=True)\n', (2242, 2267), False, 'from lib.point_tnf import corr_to_matches\n'), ((2312, 2378), 'lib.eval_util.pck_metric', 'pck_metric', (['batch', 'batch_start_idx', 'matches', 'stats', 'args', 'use_cuda'], {}), '(batch, batch_start_idx, matches, stats, args, use_cuda)\n', (2322, 2378), False, 'from lib.eval_util import pck_metric\n'), ((1359, 1405), 'os.path.join', 'os.path.join', (['args.eval_dataset_path', 'csv_file'], {}), '(args.eval_dataset_path, csv_file)\n', (1371, 1405), False, 'import os\n'), ((1490, 1542), 'lib.normalization.NormalizeImageDict', 'NormalizeImageDict', (["['source_image', 'target_image']"], {}), "(['source_image', 'target_image'])\n", (1508, 1542), False, 'from lib.normalization import NormalizeImageDict\n'), ((2732, 2757), 'numpy.mean', 'np.mean', (['filtered_results'], {}), '(filtered_results)\n', (2739, 2757), True, 'import numpy as np\n'), ((2571, 2588), 'numpy.isnan', 'np.isnan', (['results'], {}), '(results)\n', (2579, 2588), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid.axislines import SubplotZero
import numpy as np
import cyllene.f_functionclass as f_funct
import sympy as sp
'''
A few problems still need to be resolved:
1) Can we keep a record of the graphs that have been drawn? This could be done by keeping the numpy arrays.
2) We need to handle poles of functions (for example 1/x at x = 0, or 1/(x^2 - 1) at x = -1 and x = 1, etc.)
'''
class graph():
def __init__(self):
self.fig = plt.figure(1)
self.ax = SubplotZero(self.fig,111)
self.fig.add_subplot(self.ax)
for direction in ["xzero","yzero"]:
self.ax.axis[direction].set_axisline_style("-|>")
self.ax.axis[direction].set_visible(True)
for direction in ["left","right","bottom","top"]:
self.ax.axis[direction].set_visible(False)
def make_graph(self, f):
I = f.behaviour("largest interval")
ps = float(I.args[0])
pe = float(I.args[1])
t = np.arange(ps, pe, 0.01)
self.ax.plot(t, f.eval_np(t))
def make_graphs(self, *functions,Interval=None):
if(Interval == None):
f = functions[0]
I = f.behaviour("largest interval")
l,r = float(I.args[0]), float(I.args[1])
for f in functions:
I = f.behaviour("largest interval")
l,r = min(l,float(I.args[0])), max(r,float(I.args[1]))
else:
l,r = float(Interval.args[0]), float(Interval.args[1])
self.Interval = sp.Interval(l,r)
t = np.arange(l,r,.01)
for f in functions:
self.ax.plot(t,f.eval_np(t))
def make_secent(self,f,x1,x2):
I = f.behaviour("largest interval")
ps = float(I.args[0])
pe = float(I.args[1])
t = np.arange(ps, pe, 0.01)
sec = f.secent_line(x1,x2)
self.ax.plot(t, sec.eval_np(t))
self.plot_point(x1, sec.eval_np(x1))
self.plot_point(x2,sec.eval_np(x2))
def make_tangent(self,f,x):
I = f.behaviour("largest interval")
ps = float(I.args[0])
pe = float(I.args[1])
t = np.arange(ps, pe, 0.01)
tan = f.tangent_line(x)
self.ax.plot(t, tan.eval_np(t))
self.plot_point(x, tan.eval_np(x))
def plot_point(self, x, y):
self.ax.plot(np.array([x]), np.array([y]), 'ro')
    def zoom(self, f, I):
        # Zoom both axes: x to the interval I, y to the range of f over I
        # (renamed from a duplicate zoom_y definition that was being shadowed below)
        self.zoom_x(I)
        self.zoom_y(f.range(I))
def zoom_x(self,I):
ps = float(I.args[0])
pe = float(I.args[1])
self.ax.set_xlim(ps,pe)
def zoom_y(self,I):
ps = float(I.args[0])
pe = float(I.args[1])
self.ax.set_ylim(ps,pe)
def show(self):
return self.fig
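# Editor's sketch (hedged, defined but never called): minimal driving of the
# graph class using only the interval/point helpers visible above; make_graph
# and make_tangent additionally need a cyllene f_functionclass object, which is
# not constructed here to avoid guessing that API.
def _example_usage():
    g = graph()
    g.plot_point(1.0, 2.0)
    g.zoom_x(sp.Interval(-5, 5))
    g.zoom_y(sp.Interval(-3, 3))
    return g.show()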
|
[
"mpl_toolkits.axes_grid.axislines.SubplotZero",
"sympy.Interval",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.arange"
] |
[((486, 499), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (496, 499), True, 'import matplotlib.pyplot as plt\n'), ((512, 538), 'mpl_toolkits.axes_grid.axislines.SubplotZero', 'SubplotZero', (['self.fig', '(111)'], {}), '(self.fig, 111)\n', (523, 538), False, 'from mpl_toolkits.axes_grid.axislines import SubplotZero\n'), ((930, 953), 'numpy.arange', 'np.arange', (['ps', 'pe', '(0.01)'], {}), '(ps, pe, 0.01)\n', (939, 953), True, 'import numpy as np\n'), ((1375, 1392), 'sympy.Interval', 'sp.Interval', (['l', 'r'], {}), '(l, r)\n', (1386, 1392), True, 'import sympy as sp\n'), ((1405, 1426), 'numpy.arange', 'np.arange', (['l', 'r', '(0.01)'], {}), '(l, r, 0.01)\n', (1414, 1426), True, 'import numpy as np\n'), ((1607, 1630), 'numpy.arange', 'np.arange', (['ps', 'pe', '(0.01)'], {}), '(ps, pe, 0.01)\n', (1616, 1630), True, 'import numpy as np\n'), ((1896, 1919), 'numpy.arange', 'np.arange', (['ps', 'pe', '(0.01)'], {}), '(ps, pe, 0.01)\n', (1905, 1919), True, 'import numpy as np\n'), ((2065, 2078), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (2073, 2078), True, 'import numpy as np\n'), ((2080, 2093), 'numpy.array', 'np.array', (['[y]'], {}), '([y])\n', (2088, 2093), True, 'import numpy as np\n')]
|
import sc_utils
import model_factory
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
INPUT_LENGTH = 100
# Prepare data
X_train, Y_train, X_test, Y_test = sc_utils.load_data()
X_train, Y_train, X_val, Y_val, X_test, Y_test, tokenizer = sc_utils.preprocess_data(X_train, Y_train, X_test, Y_test, INPUT_LENGTH)
embedding_matrix = sc_utils.create_embedding_matrix(tokenizer)
print("X_train.shape: " + str(X_train.shape))
print("Y_train.shape: " + str(Y_train.shape))
print("X_val.shape: " + str(X_val.shape))
print("Y_val.shape: " + str(Y_val.shape))
print("X_test.shape: " + str(X_test.shape))
print("Y_test.shape: " + str(Y_test.shape))
print("embedding_matrix.shape: " + str(embedding_matrix.shape))
# Create model
#model = model_factory.create_baseline_model(embedding_matrix, INPUT_LENGTH)
model = model_factory.create_rnn_model(embedding_matrix, INPUT_LENGTH)
#model = model_factory.create_bidir_rnn_model(embedding_matrix, INPUT_LENGTH)
#model = model_factory.create_train_emb_rnn_model(embedding_matrix, INPUT_LENGTH)
model.summary()
# Train model
model.fit(X_train, Y_train, batch_size=200, epochs=30)
# Evaluate model on validation set
val_loss, val_accuracy = model.evaluate(X_val, Y_val, verbose=0)
print("Accuracy on validation set: " + str(val_accuracy * 100) + "%")
# Evaluate model on test set
test_loss, test_accuracy = model.evaluate(X_test, Y_test, verbose=0)
print("Accuracy on test set: " + str(test_accuracy * 100) + "%")
# Test model on my own texts
reviews = [
"This movie is bad. I don't like it it all. It's terrible.",
"I love this movie. I've seen it many times and it's still awesome.",
"I don't think this movie is as bad as most people say. It's actually pretty good."
]
print("Testing model on my own texts:")
print(reviews)
reviews = tokenizer.texts_to_sequences(reviews)
reviews = pad_sequences(reviews, maxlen=INPUT_LENGTH, padding="post")
reviews = np.array(reviews)
pred = model.predict(reviews)
print(pred)
print("The model predicts:")
sentiment_str = "Negative" if pred[0][0] < 0.5 else "Positive"
print(sentiment_str + " on the first text")
sentiment_str = "Negative" if pred[1][0] < 0.5 else "Positive"
print(sentiment_str + " on the second text")
sentiment_str = "Negative" if pred[2][0] < 0.5 else "Positive"
print(sentiment_str + " on the third text")
|
[
"keras.preprocessing.sequence.pad_sequences",
"sc_utils.load_data",
"sc_utils.create_embedding_matrix",
"numpy.array",
"model_factory.create_rnn_model",
"sc_utils.preprocess_data"
] |
[((229, 249), 'sc_utils.load_data', 'sc_utils.load_data', ([], {}), '()\n', (247, 249), False, 'import sc_utils\n'), ((310, 382), 'sc_utils.preprocess_data', 'sc_utils.preprocess_data', (['X_train', 'Y_train', 'X_test', 'Y_test', 'INPUT_LENGTH'], {}), '(X_train, Y_train, X_test, Y_test, INPUT_LENGTH)\n', (334, 382), False, 'import sc_utils\n'), ((402, 445), 'sc_utils.create_embedding_matrix', 'sc_utils.create_embedding_matrix', (['tokenizer'], {}), '(tokenizer)\n', (434, 445), False, 'import sc_utils\n'), ((876, 938), 'model_factory.create_rnn_model', 'model_factory.create_rnn_model', (['embedding_matrix', 'INPUT_LENGTH'], {}), '(embedding_matrix, INPUT_LENGTH)\n', (906, 938), False, 'import model_factory\n'), ((1908, 1967), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['reviews'], {'maxlen': 'INPUT_LENGTH', 'padding': '"""post"""'}), "(reviews, maxlen=INPUT_LENGTH, padding='post')\n", (1921, 1967), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((1978, 1995), 'numpy.array', 'np.array', (['reviews'], {}), '(reviews)\n', (1986, 1995), True, 'import numpy as np\n')]
|
#%%
import os
import pickle
import cloudpickle
import itertools
import glob
import numpy as np
import scipy as sp
import pandas as pd
import git
# Import matplotlib stuff for plotting
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib as mpl
# Seaborn, useful for graphics
import seaborn as sns
# Import the project utils
import ccutils
# Set PBoC plotting format
ccutils.viz.set_plotting_style()
# Increase dpi
#%%
# Find home directory for repo
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
# Define directories for data and figure
figdir = f'{homedir}/fig/si/'
datadir = f'{homedir}/data/mRNA_FISH/'
mcmcdir = f'{homedir}/data/mcmc/'
# %%
# Read the data
df = pd.read_csv(f'{datadir}Jones_Brewster_2014.csv', index_col=0)
# Extract the lacUV5 data
dfUV5 = df[df.experiment == 'UV5']
# Load the flat-chain
with open(f'{mcmcdir}lacUV5_constitutive_mRNA_prior.pkl', 'rb') as file:
unpickler = pickle.Unpickler(file)
gauss_flatchain = unpickler.load()
gauss_flatlnprobability = unpickler.load()
# Generate a Pandas Data Frame with the mcmc chain
index = ['kp_on', 'kp_off', 'rm']
# Generate a data frame out of the MCMC chains
df_mcmc = pd.DataFrame(gauss_flatchain, columns=index)
# redefine the index with the new entries
index = df_mcmc.columns
# map value of the parameters
max_idx = np.argmax(gauss_flatlnprobability, axis=0)
kp_on, kp_off, rm = df_mcmc.iloc[max_idx, :]
# Define bins
bins = np.arange(0, dfUV5.mRNA_cell.max())
logp_mRNA = ccutils.model.log_p_m_unreg(bins, kp_on, kp_off, 1, rm)
# Plot the histogram of the data with bins of width 1
_ = plt.hist(dfUV5.mRNA_cell, bins=bins, density=1, histtype='stepfilled',
alpha=1, label='sm-FISH data', align='left', lw=0)
plt.step(bins, np.exp(logp_mRNA), color='r', ls='-', lw=1.5,
label='two-state promoter fit')
# Label the plot
plt.xlabel('mRNA / cell')
plt.ylabel('probability')
plt.legend()
plt.tight_layout()
plt.savefig(f'{figdir}/figS03.pdf', bbox_inches='tight')
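# Editor's sketch (hedged, defined but never called): the MAP parameters above
# are just the MCMC sample with the highest log-posterior; the made-up arrays
# below restate that selection rule in isolation.
def _example_map_from_chain():
    chain = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
    log_post = np.array([-10.0, -2.0, -7.0])
    return chain[np.argmax(log_post)]  # -> array([3., 4.])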
|
[
"pandas.DataFrame",
"matplotlib.pyplot.hist",
"numpy.argmax",
"pandas.read_csv",
"matplotlib.pyplot.legend",
"git.Repo",
"ccutils.model.log_p_m_unreg",
"ccutils.viz.set_plotting_style",
"numpy.exp",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tight_layout",
"pickle.Unpickler",
"matplotlib.pyplot.savefig"
] |
[((394, 426), 'ccutils.viz.set_plotting_style', 'ccutils.viz.set_plotting_style', ([], {}), '()\n', (424, 426), False, 'import ccutils\n'), ((486, 532), 'git.Repo', 'git.Repo', (['"""./"""'], {'search_parent_directories': '(True)'}), "('./', search_parent_directories=True)\n", (494, 532), False, 'import git\n'), ((733, 794), 'pandas.read_csv', 'pd.read_csv', (['f"""{datadir}Jones_Brewster_2014.csv"""'], {'index_col': '(0)'}), "(f'{datadir}Jones_Brewster_2014.csv', index_col=0)\n", (744, 794), True, 'import pandas as pd\n'), ((1226, 1270), 'pandas.DataFrame', 'pd.DataFrame', (['gauss_flatchain'], {'columns': 'index'}), '(gauss_flatchain, columns=index)\n', (1238, 1270), True, 'import pandas as pd\n'), ((1379, 1421), 'numpy.argmax', 'np.argmax', (['gauss_flatlnprobability'], {'axis': '(0)'}), '(gauss_flatlnprobability, axis=0)\n', (1388, 1421), True, 'import numpy as np\n'), ((1538, 1593), 'ccutils.model.log_p_m_unreg', 'ccutils.model.log_p_m_unreg', (['bins', 'kp_on', 'kp_off', '(1)', 'rm'], {}), '(bins, kp_on, kp_off, 1, rm)\n', (1565, 1593), False, 'import ccutils\n'), ((1653, 1778), 'matplotlib.pyplot.hist', 'plt.hist', (['dfUV5.mRNA_cell'], {'bins': 'bins', 'density': '(1)', 'histtype': '"""stepfilled"""', 'alpha': '(1)', 'label': '"""sm-FISH data"""', 'align': '"""left"""', 'lw': '(0)'}), "(dfUV5.mRNA_cell, bins=bins, density=1, histtype='stepfilled',\n alpha=1, label='sm-FISH data', align='left', lw=0)\n", (1661, 1778), True, 'import matplotlib.pyplot as plt\n'), ((1909, 1934), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""mRNA / cell"""'], {}), "('mRNA / cell')\n", (1919, 1934), True, 'import matplotlib.pyplot as plt\n'), ((1935, 1960), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""probability"""'], {}), "('probability')\n", (1945, 1960), True, 'import matplotlib.pyplot as plt\n'), ((1961, 1973), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1971, 1973), True, 'import matplotlib.pyplot as plt\n'), ((1974, 1992), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1990, 1992), True, 'import matplotlib.pyplot as plt\n'), ((1993, 2049), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{figdir}/figS03.pdf"""'], {'bbox_inches': '"""tight"""'}), "(f'{figdir}/figS03.pdf', bbox_inches='tight')\n", (2004, 2049), True, 'import matplotlib.pyplot as plt\n'), ((969, 991), 'pickle.Unpickler', 'pickle.Unpickler', (['file'], {}), '(file)\n', (985, 991), False, 'import pickle\n'), ((1804, 1821), 'numpy.exp', 'np.exp', (['logp_mRNA'], {}), '(logp_mRNA)\n', (1810, 1821), True, 'import numpy as np\n')]
|
import unittest
import numpy as np
from frozendict import frozendict
from msdm.core.distributions import DictDistribution
from msdm.algorithms import ValueIteration, PolicyIteration, LRTDP
from msdm.tests.domains import Counter, GNTFig6_6, Geometric, VaryingActionNumber, make_russell_norvig_grid
from msdm.domains import GridWorld
class MyTestCase(unittest.TestCase):
def test_policy_iteration(self):
mdp = Counter(3)
res = PolicyIteration().plan_on(mdp)
out = res.policy.run_on(mdp)
assert out.state_traj == (0, 1, 2)
assert out.action_traj == (1, 1, 1)
assert res.policy.action(0) == 1
assert res.policy.action(1) == 1
assert res.policy.action(2) == 1
def test_policy_iteration_geometric(self):
mdp = Geometric(p=1/13)
res = PolicyIteration(iterations=500).plan_on(mdp)
assert np.isclose(res.V[0], -13), res.V
def test_policy_iteration_varying_action_number(self):
mdp = VaryingActionNumber()
res = PolicyIteration().plan_on(mdp)
assert np.isclose(res.V[0], -2), res.V
assert res.policy.run_on(mdp).action_traj == (+1, +1)
def test_equal_value(self):
'''
In this MDP, the value at the non-initial, non-terminal corners is equal.
This means the policy at the start state should assign equal probability
to either.
'''
mdp = GridWorld(
tile_array=[
'.g',
's.',
],
feature_rewards={'g': 0},
step_cost=-1,
)
res = PolicyIteration().plan_on(mdp)
assert np.isclose(res.V[frozendict(x=0, y=1)], res.V[frozendict(x=1, y=0)])
assert res.policy.action_dist(frozendict(x=0, y=0)).\
isclose(DictDistribution({
frozendict({'dx': 0, 'dy': 0}): 0,
frozendict({'dx': 1, 'dy': 0}): 1/2,
frozendict({'dx': -1, 'dy': 0}): 0,
frozendict({'dy': 1, 'dx': 0}): 1/2,
frozendict({'dy': -1, 'dx': 0}): 0
}))
assert res.policy.action_dist(frozendict(x=0, y=1)).isclose(DictDistribution({
frozendict({'dx': 1, 'dy': 0}): 1,
}))
def test_policy_iteration_gridworld(self):
gw = GridWorld(
tile_array=[
'......g',
'...####',
'..##...',
'..#....',
'.......',
'####...',
's......',
])
pi_res = PolicyIteration()(gw)
vi_res = ValueIteration()(gw)
lrtdp = LRTDP()(gw)
assert pi_res.initial_value == vi_res.initial_value == lrtdp.initial_value
def test_policy_iteration_gridworld2(self):
gw = GridWorld((
'..g..',
'.###.',
'..#..',
'..s..'
), discount_rate=1 - 1e-5)
pi = PolicyIteration().plan_on(gw)
vi = ValueIteration().plan_on(gw)
reachable = sorted(gw.reachable_states(),
key=lambda s: (s['x'], s['y']))
pi_mat = pi.policy.as_matrix(reachable, gw.action_list)
vi_mat = vi.policy.as_matrix(reachable, gw.action_list)
assert (pi_mat == vi_mat).all()
assert all([np.isclose(pi.valuefunc[s], vi.valuefunc[s])
for s in reachable])
def test_policy_iteration_and_value_iteration_russell_norvig(self):
for discount_rate in [i/10 for i in range(1, 10)] + [.95, .99, 1.0]:
for slip_prob in [i/10 for i in range(1, 10)] + [.95, .99, 1.0]:
gw = make_russell_norvig_grid(
discount_rate=discount_rate,
slip_prob=slip_prob,
)
vi_res = ValueIteration(iterations=int(1e3)).plan_on(gw)
pi_res = PolicyIteration(iterations=int(1e3)).plan_on(gw)
assert np.isclose(vi_res._qvaluemat, pi_res._qvaluemat, atol=5e-4).all()
def test_policy_iteration_heavenorhell(self):
# technically a pomdp, but we can solve underlying mdp
from msdm.domains.heavenorhell import HeavenOrHell
for discount_rate in [i/10 for i in range(1, 10, 2)] + [.95, .99, .99999]:
for coherence in [i/10 for i in range(1, 10, 2)] + [.95, .99, .99999]:
print(discount_rate, coherence)
hh = HeavenOrHell(
coherence=coherence,
grid=
"""
hcg
#.#
#s#
""",
discount_rate=discount_rate,
heaven_reward=50,
hell_reward=-50,
)
pi = PolicyIteration().plan_on(hh)
vi = ValueIteration().plan_on(hh)
reachable = sorted(hh.reachable_states())
pi_mat = pi.policy.as_matrix(reachable, hh.action_list)
vi_mat = vi.policy.as_matrix(reachable, hh.action_list)
assert (pi_mat == vi_mat).all()
assert all([np.isclose(pi.valuefunc[s], vi.valuefunc[s])
for s in reachable])
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"msdm.algorithms.PolicyIteration",
"msdm.tests.domains.Counter",
"msdm.tests.domains.Geometric",
"msdm.algorithms.ValueIteration",
"msdm.tests.domains.make_russell_norvig_grid",
"msdm.algorithms.LRTDP",
"numpy.isclose",
"msdm.domains.GridWorld",
"msdm.domains.heavenorhell.HeavenOrHell",
"msdm.tests.domains.VaryingActionNumber",
"frozendict.frozendict"
] |
[((5289, 5304), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5302, 5304), False, 'import unittest\n'), ((422, 432), 'msdm.tests.domains.Counter', 'Counter', (['(3)'], {}), '(3)\n', (429, 432), False, 'from msdm.tests.domains import Counter, GNTFig6_6, Geometric, VaryingActionNumber, make_russell_norvig_grid\n'), ((787, 806), 'msdm.tests.domains.Geometric', 'Geometric', ([], {'p': '(1 / 13)'}), '(p=1 / 13)\n', (796, 806), False, 'from msdm.tests.domains import Counter, GNTFig6_6, Geometric, VaryingActionNumber, make_russell_norvig_grid\n'), ((879, 904), 'numpy.isclose', 'np.isclose', (['res.V[0]', '(-13)'], {}), '(res.V[0], -13)\n', (889, 904), True, 'import numpy as np\n'), ((986, 1007), 'msdm.tests.domains.VaryingActionNumber', 'VaryingActionNumber', ([], {}), '()\n', (1005, 1007), False, 'from msdm.tests.domains import Counter, GNTFig6_6, Geometric, VaryingActionNumber, make_russell_norvig_grid\n'), ((1068, 1092), 'numpy.isclose', 'np.isclose', (['res.V[0]', '(-2)'], {}), '(res.V[0], -2)\n', (1078, 1092), True, 'import numpy as np\n'), ((1415, 1489), 'msdm.domains.GridWorld', 'GridWorld', ([], {'tile_array': "['.g', 's.']", 'feature_rewards': "{'g': 0}", 'step_cost': '(-1)'}), "(tile_array=['.g', 's.'], feature_rewards={'g': 0}, step_cost=-1)\n", (1424, 1489), False, 'from msdm.domains import GridWorld\n'), ((2297, 2400), 'msdm.domains.GridWorld', 'GridWorld', ([], {'tile_array': "['......g', '...####', '..##...', '..#....', '.......', '####...', 's......']"}), "(tile_array=['......g', '...####', '..##...', '..#....', '.......',\n '####...', 's......'])\n", (2306, 2400), False, 'from msdm.domains import GridWorld\n'), ((2787, 2859), 'msdm.domains.GridWorld', 'GridWorld', (["('..g..', '.###.', '..#..', '..s..')"], {'discount_rate': '(1 - 1e-05)'}), "(('..g..', '.###.', '..#..', '..s..'), discount_rate=1 - 1e-05)\n", (2796, 2859), False, 'from msdm.domains import GridWorld\n'), ((2554, 2571), 'msdm.algorithms.PolicyIteration', 'PolicyIteration', ([], {}), '()\n', (2569, 2571), False, 'from msdm.algorithms import ValueIteration, PolicyIteration, LRTDP\n'), ((2593, 2609), 'msdm.algorithms.ValueIteration', 'ValueIteration', ([], {}), '()\n', (2607, 2609), False, 'from msdm.algorithms import ValueIteration, PolicyIteration, LRTDP\n'), ((2630, 2637), 'msdm.algorithms.LRTDP', 'LRTDP', ([], {}), '()\n', (2635, 2637), False, 'from msdm.algorithms import ValueIteration, PolicyIteration, LRTDP\n'), ((447, 464), 'msdm.algorithms.PolicyIteration', 'PolicyIteration', ([], {}), '()\n', (462, 464), False, 'from msdm.algorithms import ValueIteration, PolicyIteration, LRTDP\n'), ((819, 850), 'msdm.algorithms.PolicyIteration', 'PolicyIteration', ([], {'iterations': '(500)'}), '(iterations=500)\n', (834, 850), False, 'from msdm.algorithms import ValueIteration, PolicyIteration, LRTDP\n'), ((1022, 1039), 'msdm.algorithms.PolicyIteration', 'PolicyIteration', ([], {}), '()\n', (1037, 1039), False, 'from msdm.algorithms import ValueIteration, PolicyIteration, LRTDP\n'), ((1598, 1615), 'msdm.algorithms.PolicyIteration', 'PolicyIteration', ([], {}), '()\n', (1613, 1615), False, 'from msdm.algorithms import ValueIteration, PolicyIteration, LRTDP\n'), ((1661, 1681), 'frozendict.frozendict', 'frozendict', ([], {'x': '(0)', 'y': '(1)'}), '(x=0, y=1)\n', (1671, 1681), False, 'from frozendict import frozendict\n'), ((1690, 1710), 'frozendict.frozendict', 'frozendict', ([], {'x': '(1)', 'y': '(0)'}), '(x=1, y=0)\n', (1700, 1710), False, 'from frozendict import frozendict\n'), ((2930, 2947), 
'msdm.algorithms.PolicyIteration', 'PolicyIteration', ([], {}), '()\n', (2945, 2947), False, 'from msdm.algorithms import ValueIteration, PolicyIteration, LRTDP\n'), ((2973, 2989), 'msdm.algorithms.ValueIteration', 'ValueIteration', ([], {}), '()\n', (2987, 2989), False, 'from msdm.algorithms import ValueIteration, PolicyIteration, LRTDP\n'), ((3299, 3343), 'numpy.isclose', 'np.isclose', (['pi.valuefunc[s]', 'vi.valuefunc[s]'], {}), '(pi.valuefunc[s], vi.valuefunc[s])\n', (3309, 3343), True, 'import numpy as np\n'), ((3633, 3707), 'msdm.tests.domains.make_russell_norvig_grid', 'make_russell_norvig_grid', ([], {'discount_rate': 'discount_rate', 'slip_prob': 'slip_prob'}), '(discount_rate=discount_rate, slip_prob=slip_prob)\n', (3657, 3707), False, 'from msdm.tests.domains import Counter, GNTFig6_6, Geometric, VaryingActionNumber, make_russell_norvig_grid\n'), ((4420, 4649), 'msdm.domains.heavenorhell.HeavenOrHell', 'HeavenOrHell', ([], {'coherence': 'coherence', 'grid': '"""\n hcg\n #.#\n #s#\n """', 'discount_rate': 'discount_rate', 'heaven_reward': '(50)', 'hell_reward': '(-50)'}), '(coherence=coherence, grid=\n """\n hcg\n #.#\n #s#\n """\n , discount_rate=discount_rate, heaven_reward=50, hell_reward=-50)\n', (4432, 4649), False, 'from msdm.domains.heavenorhell import HeavenOrHell\n'), ((1751, 1771), 'frozendict.frozendict', 'frozendict', ([], {'x': '(0)', 'y': '(0)'}), '(x=0, y=0)\n', (1761, 1771), False, 'from frozendict import frozendict\n'), ((1830, 1860), 'frozendict.frozendict', 'frozendict', (["{'dx': 0, 'dy': 0}"], {}), "({'dx': 0, 'dy': 0})\n", (1840, 1860), False, 'from frozendict import frozendict\n'), ((1881, 1911), 'frozendict.frozendict', 'frozendict', (["{'dx': 1, 'dy': 0}"], {}), "({'dx': 1, 'dy': 0})\n", (1891, 1911), False, 'from frozendict import frozendict\n'), ((1934, 1965), 'frozendict.frozendict', 'frozendict', (["{'dx': -1, 'dy': 0}"], {}), "({'dx': -1, 'dy': 0})\n", (1944, 1965), False, 'from frozendict import frozendict\n'), ((1986, 2016), 'frozendict.frozendict', 'frozendict', (["{'dy': 1, 'dx': 0}"], {}), "({'dy': 1, 'dx': 0})\n", (1996, 2016), False, 'from frozendict import frozendict\n'), ((2039, 2070), 'frozendict.frozendict', 'frozendict', (["{'dy': -1, 'dx': 0}"], {}), "({'dy': -1, 'dx': 0})\n", (2049, 2070), False, 'from frozendict import frozendict\n'), ((2124, 2144), 'frozendict.frozendict', 'frozendict', ([], {'x': '(0)', 'y': '(1)'}), '(x=0, y=1)\n', (2134, 2144), False, 'from frozendict import frozendict\n'), ((2189, 2219), 'frozendict.frozendict', 'frozendict', (["{'dx': 1, 'dy': 0}"], {}), "({'dx': 1, 'dy': 0})\n", (2199, 2219), False, 'from frozendict import frozendict\n'), ((3945, 4006), 'numpy.isclose', 'np.isclose', (['vi_res._qvaluemat', 'pi_res._qvaluemat'], {'atol': '(0.0005)'}), '(vi_res._qvaluemat, pi_res._qvaluemat, atol=0.0005)\n', (3955, 4006), True, 'import numpy as np\n'), ((4805, 4822), 'msdm.algorithms.PolicyIteration', 'PolicyIteration', ([], {}), '()\n', (4820, 4822), False, 'from msdm.algorithms import ValueIteration, PolicyIteration, LRTDP\n'), ((4856, 4872), 'msdm.algorithms.ValueIteration', 'ValueIteration', ([], {}), '()\n', (4870, 4872), False, 'from msdm.algorithms import ValueIteration, PolicyIteration, LRTDP\n'), ((5163, 5207), 'numpy.isclose', 'np.isclose', (['pi.valuefunc[s]', 'vi.valuefunc[s]'], {}), '(pi.valuefunc[s], vi.valuefunc[s])\n', (5173, 5207), True, 'import numpy as np\n')]
|
"""
Minimal character-level Vanilla RNN model. Written by <NAME> (@karpathy)
BSD License
"""
import numpy as np
import unicodedata
import string
import codecs
# data I/O
data = codecs.open('data/potter.txt', 'r', encoding='utf8', errors='ignore').read()
fake = codecs.open('data/output.txt', 'w', encoding='utf8')
chars = list(set(data))
data_size = len(data) #
vocab_size = len(chars)
print(f'data has {data_size} characters,{vocab_size} unique.') # data has 1109177 characters,80 unique.
char_to_ix = { ch:i for i,ch in enumerate(chars) }
ix_to_char = { i:ch for i,ch in enumerate(chars) }
print(char_to_ix)
print(ix_to_char)
# hyperparameters
hidden_size = 256 # size of hidden layer of neurons
seq_length = 128 # number of steps to unroll the RNN for
learning_rate = 1e-1
# model parameters
W_xh = np.random.randn(hidden_size, vocab_size) * 0.01 # weight: input to hidden
W_hh = np.random.randn(hidden_size, hidden_size) * 0.01 # weight: hidden to hidden
W_hy = np.random.randn(vocab_size, hidden_size) * 0.01 # weight: hidden to output
b_h = np.zeros((hidden_size, 1)) # hidden bias
b_y = np.zeros((vocab_size, 1)) # output bias
all_letters = string.ascii_letters + " .,;'"  # assumed whitelist (letters plus basic punctuation); the original file never defined all_letters
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
and c in all_letters
)
def lossFun(inputs, targets, hprev):
"""
  inputs, targets are both lists of integers (indices into the character vocabulary).
  inputs: a list of length seq_length
hprev is (H x 1) array of initial hidden state
returns the loss, gradients on model parameters, and last hidden state
"""
  xs, hs, ys, ps = {}, {}, {}, {} # xs[t] = ys[t] = ps[t] size = vocab_size x 1
hs[-1] = np.copy(hprev) # hs[t] size = hidden_size * 1
  loss = 0 # xs: inputs; ys: outputs; hs: hidden states, one per time step;
           # the weights are shared across steps, but each step keeps its own state.
# forward pass
for t in range(len(inputs)):
xs[t] = np.zeros((vocab_size,1)) # encode in 1-of-k representation
xs[t][inputs[t]] = 1 # inputs[t] is a index number, xs[t] is a vector
hs[t] = np.tanh(np.dot(W_xh, xs[t]) + np.dot(W_hh, hs[t-1]) + b_h) # hidden state
ys[t] = np.dot(W_hy, hs[t]) + b_y # unnormalized log probabilities for next chars
ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t])) # (normalized) probabilities for next chars
loss += -np.log(ps[t][targets[t],0]) # softmax (cross-entropy loss)
print(f'loss: {loss}')
# print(f'xs:{len(xs[t])}->{xs[t]}\n hs:{len(hs[t])}->{hs[t]}\n ys:{len(ys[t])}->{ys[t]}\n ps:{len(ps[t])}->{ps[t]}')
# backward pass: compute gradients going backwards
dW_xh = np.zeros_like(W_xh) # gradient of W_xh, same shape as W_xh
dW_hh = np.zeros_like(W_hh) # gradient of W_hh, same shape as W_hh
dW_hy = np.zeros_like(W_hy) # gradient of W_hy, same shape as W_hy
db_h = np.zeros_like(b_h) # gradient of b_h, same shape as b_h
db_y = np.zeros_like(b_y) # gradient of b_y, same shape as b_y
dhnext = np.zeros_like(hs[0])
for t in reversed(range(len(inputs))):
dy = np.copy(ps[t])
dy[targets[t]] -= 1
# backprop into y. see http://cs231n.github.io/neural-networks-case-study/#grad if confused here
dW_hy += np.dot(dy, hs[t].T)
db_y += dy
dh = np.dot(W_hy.T, dy) + dhnext # backprop into h
dhraw = (1 - hs[t] * hs[t]) * dh # backprop through tanh nonlinearity
db_h += dhraw
dW_xh += np.dot(dhraw, xs[t].T)
dW_hh += np.dot(dhraw, hs[t-1].T)
dhnext = np.dot(W_hh.T, dhraw)
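    # dhnext carries the gradient that flows into h[t-1] through the recurrent weights;
    # it is added to dh on the next (earlier-time) pass of this loop (backprop through time).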
for dparam in [dW_xh, dW_hh, dW_hy, db_h, db_y]:
np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients
return loss, dW_xh, dW_hh, dW_hy, db_h, db_y, hs[len(inputs)-1]
def sample(h, seed_ix, n):
"""
sample a sequence of integers from the model
h is memory state, seed_ix is seed letter for first time step
i.e. do predictions :)
"""
x = np.zeros((vocab_size, 1))
x[seed_ix] = 1
ixes = []
for t in range(n):
h = np.tanh(np.dot(W_xh, x) + np.dot(W_hh, h) + b_h)
y = np.dot(W_hy, h) + b_y
p = np.exp(y) / np.sum(np.exp(y))
ix = np.random.choice(range(vocab_size), p=p.ravel())
x = np.zeros((vocab_size, 1))
x[ix] = 1
ixes.append(ix)
return ixes
n, p = 0, 0
mW_xh, mW_hh, mW_hy = np.zeros_like(W_xh), np.zeros_like(W_hh), np.zeros_like(W_hy)
mb_h, mb_y = np.zeros_like(b_h), np.zeros_like(b_y) # memory variables for Adagrad
smooth_loss = -np.log(1.0 / vocab_size) * seq_length # loss at iteration 0
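# At iteration 0 the softmax is roughly uniform over the vocabulary, so the expected
# cross-entropy per character is -log(1/vocab_size); multiplying by seq_length gives
# the expected loss of one unrolled sequence, which seeds the running average above.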
while True:
try:
# prepare inputs (we're sweeping from left to right in steps seq_length long)
if p + seq_length + 1 >= len(data) or n == 0:
hprev = np.zeros((hidden_size,1)) # reset RNN memory
p = 0 # go from start of data
inputs = [char_to_ix[ch] for ch in data[p:p+seq_length]]
targets = [char_to_ix[ch] for ch in data[p+1:p+seq_length+1]]
# sample from the model now and then
if n % 100 == 0:
sample_ix = sample(hprev, inputs[0], 200)
txt = ''.join(ix_to_char[ix] for ix in sample_ix)
print('----\n %s \n----' % (txt, ))
# forward seq_length characters through the net and fetch gradient
loss, dW_xh, dW_hh, dW_hy, db_h, db_y, hprev = lossFun(inputs, targets, hprev)
smooth_loss = smooth_loss * 0.999 + loss * 0.001
if n % 100 == 0:
print(f'iter{n}, loss: {smooth_loss}') # print progress
# perform parameter update with Adagrad
for param, dparam, mem in zip([W_xh, W_hh, W_hy, b_h, b_y],
[dW_xh, dW_hh, dW_hy, db_h, db_y],
[mW_xh, mW_hh, mW_hy, mb_h, mb_y]):
mem += dparam * dparam
param += -learning_rate * dparam / np.sqrt(mem + 1e-8) # adagrad update
p += seq_length # move data pointer
n += 1 # iteration counter
except KeyboardInterrupt:
sample_ix = sample(hprev, inputs[0], data_size)
txt = ''.join(ix_to_char[ix] for ix in sample_ix)
fake.write(txt)
break
fake.close()
|
[
"unicodedata.normalize",
"numpy.zeros_like",
"codecs.open",
"numpy.copy",
"numpy.random.randn",
"numpy.log",
"unicodedata.category",
"numpy.zeros",
"numpy.clip",
"numpy.exp",
"numpy.dot",
"numpy.sqrt"
] |
[((263, 315), 'codecs.open', 'codecs.open', (['"""data/output.txt"""', '"""w"""'], {'encoding': '"""utf8"""'}), "('data/output.txt', 'w', encoding='utf8')\n", (274, 315), False, 'import codecs\n'), ((1301, 1327), 'numpy.zeros', 'np.zeros', (['(hidden_size, 1)'], {}), '((hidden_size, 1))\n', (1309, 1327), True, 'import numpy as np\n'), ((1391, 1416), 'numpy.zeros', 'np.zeros', (['(vocab_size, 1)'], {}), '((vocab_size, 1))\n', (1399, 1416), True, 'import numpy as np\n'), ((994, 1034), 'numpy.random.randn', 'np.random.randn', (['hidden_size', 'vocab_size'], {}), '(hidden_size, vocab_size)\n', (1009, 1034), True, 'import numpy as np\n'), ((1096, 1137), 'numpy.random.randn', 'np.random.randn', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (1111, 1137), True, 'import numpy as np\n'), ((1199, 1239), 'numpy.random.randn', 'np.random.randn', (['vocab_size', 'hidden_size'], {}), '(vocab_size, hidden_size)\n', (1214, 1239), True, 'import numpy as np\n'), ((2084, 2098), 'numpy.copy', 'np.copy', (['hprev'], {}), '(hprev)\n', (2091, 2098), True, 'import numpy as np\n'), ((3430, 3449), 'numpy.zeros_like', 'np.zeros_like', (['W_xh'], {}), '(W_xh)\n', (3443, 3449), True, 'import numpy as np\n'), ((3545, 3564), 'numpy.zeros_like', 'np.zeros_like', (['W_hh'], {}), '(W_hh)\n', (3558, 3564), True, 'import numpy as np\n'), ((3660, 3679), 'numpy.zeros_like', 'np.zeros_like', (['W_hy'], {}), '(W_hy)\n', (3673, 3679), True, 'import numpy as np\n'), ((3774, 3792), 'numpy.zeros_like', 'np.zeros_like', (['b_h'], {}), '(b_h)\n', (3787, 3792), True, 'import numpy as np\n'), ((3887, 3905), 'numpy.zeros_like', 'np.zeros_like', (['b_y'], {}), '(b_y)\n', (3900, 3905), True, 'import numpy as np\n'), ((4002, 4022), 'numpy.zeros_like', 'np.zeros_like', (['hs[0]'], {}), '(hs[0])\n', (4015, 4022), True, 'import numpy as np\n'), ((5068, 5093), 'numpy.zeros', 'np.zeros', (['(vocab_size, 1)'], {}), '((vocab_size, 1))\n', (5076, 5093), True, 'import numpy as np\n'), ((5482, 5501), 'numpy.zeros_like', 'np.zeros_like', (['W_xh'], {}), '(W_xh)\n', (5495, 5501), True, 'import numpy as np\n'), ((5503, 5522), 'numpy.zeros_like', 'np.zeros_like', (['W_hh'], {}), '(W_hh)\n', (5516, 5522), True, 'import numpy as np\n'), ((5524, 5543), 'numpy.zeros_like', 'np.zeros_like', (['W_hy'], {}), '(W_hy)\n', (5537, 5543), True, 'import numpy as np\n'), ((5557, 5575), 'numpy.zeros_like', 'np.zeros_like', (['b_h'], {}), '(b_h)\n', (5570, 5575), True, 'import numpy as np\n'), ((5577, 5595), 'numpy.zeros_like', 'np.zeros_like', (['b_y'], {}), '(b_y)\n', (5590, 5595), True, 'import numpy as np\n'), ((179, 248), 'codecs.open', 'codecs.open', (['"""data/potter.txt"""', '"""r"""'], {'encoding': '"""utf8"""', 'errors': '"""ignore"""'}), "('data/potter.txt', 'r', encoding='utf8', errors='ignore')\n", (190, 248), False, 'import codecs\n'), ((2544, 2569), 'numpy.zeros', 'np.zeros', (['(vocab_size, 1)'], {}), '((vocab_size, 1))\n', (2552, 2569), True, 'import numpy as np\n'), ((4080, 4094), 'numpy.copy', 'np.copy', (['ps[t]'], {}), '(ps[t])\n', (4087, 4094), True, 'import numpy as np\n'), ((4245, 4264), 'numpy.dot', 'np.dot', (['dy', 'hs[t].T'], {}), '(dy, hs[t].T)\n', (4251, 4264), True, 'import numpy as np\n'), ((4530, 4552), 'numpy.dot', 'np.dot', (['dhraw', 'xs[t].T'], {}), '(dhraw, xs[t].T)\n', (4536, 4552), True, 'import numpy as np\n'), ((4570, 4596), 'numpy.dot', 'np.dot', (['dhraw', 'hs[t - 1].T'], {}), '(dhraw, hs[t - 1].T)\n', (4576, 4596), True, 'import numpy as np\n'), ((4612, 4633), 'numpy.dot', 'np.dot', (['W_hh.T', 
'dhraw'], {}), '(W_hh.T, dhraw)\n', (4618, 4633), True, 'import numpy as np\n'), ((4696, 4730), 'numpy.clip', 'np.clip', (['dparam', '(-5)', '(5)'], {'out': 'dparam'}), '(dparam, -5, 5, out=dparam)\n', (4703, 4730), True, 'import numpy as np\n'), ((5361, 5386), 'numpy.zeros', 'np.zeros', (['(vocab_size, 1)'], {}), '((vocab_size, 1))\n', (5369, 5386), True, 'import numpy as np\n'), ((5666, 5690), 'numpy.log', 'np.log', (['(1.0 / vocab_size)'], {}), '(1.0 / vocab_size)\n', (5672, 5690), True, 'import numpy as np\n'), ((2870, 2889), 'numpy.dot', 'np.dot', (['W_hy', 'hs[t]'], {}), '(W_hy, hs[t])\n', (2876, 2889), True, 'import numpy as np\n'), ((2994, 3007), 'numpy.exp', 'np.exp', (['ys[t]'], {}), '(ys[t])\n', (3000, 3007), True, 'import numpy as np\n'), ((3115, 3143), 'numpy.log', 'np.log', (['ps[t][targets[t], 0]'], {}), '(ps[t][targets[t], 0])\n', (3121, 3143), True, 'import numpy as np\n'), ((4297, 4315), 'numpy.dot', 'np.dot', (['W_hy.T', 'dy'], {}), '(W_hy.T, dy)\n', (4303, 4315), True, 'import numpy as np\n'), ((5223, 5238), 'numpy.dot', 'np.dot', (['W_hy', 'h'], {}), '(W_hy, h)\n', (5229, 5238), True, 'import numpy as np\n'), ((5257, 5266), 'numpy.exp', 'np.exp', (['y'], {}), '(y)\n', (5263, 5266), True, 'import numpy as np\n'), ((5931, 5957), 'numpy.zeros', 'np.zeros', (['(hidden_size, 1)'], {}), '((hidden_size, 1))\n', (5939, 5957), True, 'import numpy as np\n'), ((1539, 1570), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFD"""', 's'], {}), "('NFD', s)\n", (1560, 1570), False, 'import unicodedata\n'), ((3017, 3030), 'numpy.exp', 'np.exp', (['ys[t]'], {}), '(ys[t])\n', (3023, 3030), True, 'import numpy as np\n'), ((5276, 5285), 'numpy.exp', 'np.exp', (['y'], {}), '(y)\n', (5282, 5285), True, 'import numpy as np\n'), ((7170, 7190), 'numpy.sqrt', 'np.sqrt', (['(mem + 1e-08)'], {}), '(mem + 1e-08)\n', (7177, 7190), True, 'import numpy as np\n'), ((2787, 2806), 'numpy.dot', 'np.dot', (['W_xh', 'xs[t]'], {}), '(W_xh, xs[t])\n', (2793, 2806), True, 'import numpy as np\n'), ((2809, 2832), 'numpy.dot', 'np.dot', (['W_hh', 'hs[t - 1]'], {}), '(W_hh, hs[t - 1])\n', (2815, 2832), True, 'import numpy as np\n'), ((5170, 5185), 'numpy.dot', 'np.dot', (['W_xh', 'x'], {}), '(W_xh, x)\n', (5176, 5185), True, 'import numpy as np\n'), ((5188, 5203), 'numpy.dot', 'np.dot', (['W_hh', 'h'], {}), '(W_hh, h)\n', (5194, 5203), True, 'import numpy as np\n'), ((1582, 1605), 'unicodedata.category', 'unicodedata.category', (['c'], {}), '(c)\n', (1602, 1605), False, 'import unicodedata\n')]
|
"""Optimization result."""
import warnings
from collections import Counter
from copy import deepcopy
from typing import Sequence, Union
import numpy as np
import pandas as pd
from ..objective import History
from ..problem import Problem
from ..util import assign_clusters, delete_nan_inf
OptimizationResult = Union['OptimizerResult', 'OptimizeResult']
class OptimizerResult(dict):
"""
The result of an optimizer run.
Used as a standardized return value to map from the individual result
objects returned by the employed optimizers to the format understood by
pypesto.
Can be used like a dict.
Attributes
----------
id:
Id of the optimizer run. Usually the start index.
x:
The best found parameters.
fval:
The best found function value, `fun(x)`.
grad:
The gradient at `x`.
hess:
The Hessian at `x`.
res:
The residuals at `x`.
sres:
The residual sensitivities at `x`.
    n_fval:
Number of function evaluations.
n_grad:
Number of gradient evaluations.
n_hess:
Number of Hessian evaluations.
n_res:
Number of residuals evaluations.
n_sres:
Number of residual sensitivity evaluations.
x0:
The starting parameters.
fval0:
The starting function value, `fun(x0)`.
history:
Objective history.
exitflag:
The exitflag of the optimizer.
time:
Execution time.
message: str
Textual comment on the optimization result.
optimizer: str
The optimizer used for optimization.
Notes
-----
Any field not supported by the optimizer is filled with None.
"""
def __init__(
self,
id: str = None,
x: np.ndarray = None,
fval: float = None,
grad: np.ndarray = None,
hess: np.ndarray = None,
res: np.ndarray = None,
sres: np.ndarray = None,
n_fval: int = None,
n_grad: int = None,
n_hess: int = None,
n_res: int = None,
n_sres: int = None,
x0: np.ndarray = None,
fval0: float = None,
history: History = None,
exitflag: int = None,
time: float = None,
message: str = None,
optimizer: str = None,
):
super().__init__()
self.id = id
self.x: np.ndarray = np.array(x) if x is not None else None
self.fval: float = fval
self.grad: np.ndarray = np.array(grad) if grad is not None else None
self.hess: np.ndarray = np.array(hess) if hess is not None else None
self.res: np.ndarray = np.array(res) if res is not None else None
self.sres: np.ndarray = np.array(sres) if sres is not None else None
self.n_fval: int = n_fval
self.n_grad: int = n_grad
self.n_hess: int = n_hess
self.n_res: int = n_res
self.n_sres: int = n_sres
self.x0: np.ndarray = np.array(x0) if x0 is not None else None
self.fval0: float = fval0
self.history: History = history
self.exitflag: int = exitflag
self.time: float = time
self.message: str = message
self.optimizer = optimizer
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
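    # The two assignments above route attribute writes/deletes through the dict protocol,
    # so `res.fval` and `res["fval"]` always refer to the same underlying entry
    # (reads are handled by __getattr__ above).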
def summary(self):
"""Get summary of the object."""
message = (
"### Optimizer Result \n\n"
f"* optimizer used: {self.optimizer} \n"
f"* message: {self.message} \n"
f"* number of evaluations: {self.n_fval} \n"
f"* time taken to optimize: {self.time} \n"
f"* startpoint: {self.x0} \n"
f"* endpoint: {self.x} \n"
)
# add fval, gradient, hessian, res, sres if available
if self.fval is not None:
message += f"* final objective value: {self.fval} \n"
if self.grad is not None:
message += f"* final gradient value: {self.grad} \n"
if self.hess is not None:
message += f"* final hessian value: {self.hess} \n"
if self.res is not None:
message += f"* final residual value: {self.res} \n"
if self.sres is not None:
message += f"* final residual sensitivity: {self.sres} \n"
return message
def update_to_full(self, problem: Problem) -> None:
"""
Update values to full vectors/matrices.
Parameters
----------
problem:
problem which contains info about how to convert to full vectors
or matrices
"""
self.x = problem.get_full_vector(self.x, problem.x_fixed_vals)
self.grad = problem.get_full_vector(self.grad)
self.hess = problem.get_full_matrix(self.hess)
self.x0 = problem.get_full_vector(self.x0, problem.x_fixed_vals)
class OptimizeResult:
"""Result of the :py:func:`pypesto.optimize.minimize` function."""
def __init__(self):
self.list = []
def __deepcopy__(self, memo):
other = OptimizeResult()
other.list = deepcopy(self.list)
return other
def __getattr__(self, key):
"""Define `optimize_result.key`."""
try:
return [res[key] for res in self.list]
except KeyError:
raise AttributeError(key)
def __getitem__(self, index):
"""Define `optimize_result[i]` to access the i-th result."""
try:
return self.list[index]
except IndexError:
raise IndexError(
f"{index} out of range for optimize result of "
f"length {len(self.list)}."
)
def __len__(self):
return len(self.list)
def summary(self):
"""Get summary of the object."""
# perform clustering for better information
clust, clustsize = assign_clusters(delete_nan_inf(self.fval)[1])
counter_message = '\n'.join(
["\tCount\tMessage"]
+ [
f"\t{count}\t{message}"
for message, count in Counter(self.message).most_common()
]
)
times_message = (
f'\n\tMean execution time: {np.mean(self.time)}s\n'
f'\tMaximum execution time: {np.max(self.time)}s,'
f'\tid={self[np.argmax(self.time)].id}\n'
f'\tMinimum execution time: {np.min(self.time)}s,\t'
f'id={self[np.argmin(self.time)].id}'
)
summary = (
"## Optimization Result \n\n"
f"* number of starts: {len(self)} \n"
f"* execution time summary: {times_message}\n"
f"* summary of optimizer messages:\n{counter_message}\n"
f"* best value found (approximately) {clustsize[0]} time(s) \n"
f"* number of plateaus found: "
f"{1 + max(clust) - sum(clustsize == 1)} \n"
f"* best value: {self[0]['fval']}, "
f"worst value: {self[-1]['fval']} \n\n"
f"A summary of the best run:\n\n{self[0].summary()}"
)
return summary
def append(
self,
optimize_result: OptimizationResult,
sort: bool = True,
prefix: str = '',
):
"""
Append an OptimizerResult or an OptimizeResult to the result object.
Parameters
----------
optimize_result:
            The result of one or more (local) optimizer runs.
sort:
Boolean used so we only sort once when appending an
optimize_result.
prefix:
The IDs for all appended results will be prefixed with this.
"""
current_ids = set(self.id)
if isinstance(optimize_result, OptimizeResult):
new_ids = [
prefix + identifier
for identifier in optimize_result.id
if identifier is not None
]
            if not current_ids.isdisjoint(new_ids):
raise ValueError(
"Some id's you want to merge coincide with "
"the existing id's. Please use an "
"appropriate prefix such as 'run_2_'."
)
for optimizer_result in optimize_result.list:
self.append(optimizer_result, sort=False, prefix=prefix)
elif isinstance(optimize_result, OptimizerResult):
# if id is None, append without checking for duplicate ids
if optimize_result.id is None:
self.list.append(optimize_result)
else:
new_id = prefix + optimize_result.id
if new_id in current_ids:
raise ValueError(
"The id you want to merge coincides with "
"the existing id's. Please use an "
"appropriate prefix such as 'run_2_'."
)
optimize_result.id = new_id
self.list.append(optimize_result)
if sort:
self.sort()
def sort(self):
"""Sort the optimizer results by function value fval (ascending)."""
def get_fval(res):
return res.fval if not np.isnan(res.fval) else np.inf
self.list = sorted(self.list, key=get_fval)
def as_dataframe(self, keys=None) -> pd.DataFrame:
"""
Get as pandas DataFrame.
If keys is a list, return only the specified values, otherwise all.
"""
lst = self.as_list(keys)
df = pd.DataFrame(lst)
return df
def as_list(self, keys=None) -> Sequence:
"""
Get as list.
If keys is a list, return only the specified values.
Parameters
----------
keys: list(str), optional
Labels of the field to extract.
"""
lst = self.list
if keys is not None:
lst = [{key: res[key] for key in keys} for res in lst]
return lst
def get_for_key(self, key) -> list:
"""Extract the list of values for the specified key as a list."""
warnings.warn(
"get_for_key() is deprecated in favour of "
"optimize_result['key'] and will be removed in future "
"releases."
)
return [res[key] for res in self.list]
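# Minimal usage sketch (illustrative only, not part of the original module): collect a
# few OptimizerResult objects, let append() sort them by fval, then inspect the runs.
#
#   result = OptimizeResult()
#   for i, fval in enumerate([3.0, 1.0, 2.0]):
#       result.append(OptimizerResult(id=str(i), x=np.array([0.0]), fval=fval))
#   print(result.fval)                        # [1.0, 2.0, 3.0] after sorting
#   print(result.as_dataframe(['id', 'fval']))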
|
[
"pandas.DataFrame",
"copy.deepcopy",
"numpy.argmax",
"collections.Counter",
"numpy.isnan",
"numpy.argmin",
"numpy.max",
"numpy.mean",
"numpy.array",
"numpy.min",
"warnings.warn"
] |
[((5219, 5238), 'copy.deepcopy', 'deepcopy', (['self.list'], {}), '(self.list)\n', (5227, 5238), False, 'from copy import deepcopy\n'), ((9657, 9674), 'pandas.DataFrame', 'pd.DataFrame', (['lst'], {}), '(lst)\n', (9669, 9674), True, 'import pandas as pd\n'), ((10229, 10359), 'warnings.warn', 'warnings.warn', (['"""get_for_key() is deprecated in favour of optimize_result[\'key\'] and will be removed in future releases."""'], {}), '(\n "get_for_key() is deprecated in favour of optimize_result[\'key\'] and will be removed in future releases."\n )\n', (10242, 10359), False, 'import warnings\n'), ((2397, 2408), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2405, 2408), True, 'import numpy as np\n'), ((2500, 2514), 'numpy.array', 'np.array', (['grad'], {}), '(grad)\n', (2508, 2514), True, 'import numpy as np\n'), ((2577, 2591), 'numpy.array', 'np.array', (['hess'], {}), '(hess)\n', (2585, 2591), True, 'import numpy as np\n'), ((2653, 2666), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (2661, 2666), True, 'import numpy as np\n'), ((2728, 2742), 'numpy.array', 'np.array', (['sres'], {}), '(sres)\n', (2736, 2742), True, 'import numpy as np\n'), ((2971, 2983), 'numpy.array', 'np.array', (['x0'], {}), '(x0)\n', (2979, 2983), True, 'import numpy as np\n'), ((6330, 6348), 'numpy.mean', 'np.mean', (['self.time'], {}), '(self.time)\n', (6337, 6348), True, 'import numpy as np\n'), ((6395, 6412), 'numpy.max', 'np.max', (['self.time'], {}), '(self.time)\n', (6401, 6412), True, 'import numpy as np\n'), ((6512, 6529), 'numpy.min', 'np.min', (['self.time'], {}), '(self.time)\n', (6518, 6529), True, 'import numpy as np\n'), ((9336, 9354), 'numpy.isnan', 'np.isnan', (['res.fval'], {}), '(res.fval)\n', (9344, 9354), True, 'import numpy as np\n'), ((6442, 6462), 'numpy.argmax', 'np.argmax', (['self.time'], {}), '(self.time)\n', (6451, 6462), True, 'import numpy as np\n'), ((6559, 6579), 'numpy.argmin', 'np.argmin', (['self.time'], {}), '(self.time)\n', (6568, 6579), True, 'import numpy as np\n'), ((6204, 6225), 'collections.Counter', 'Counter', (['self.message'], {}), '(self.message)\n', (6211, 6225), False, 'from collections import Counter\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from typing import Tuple, Union, TypeVar, Iterable, Dict
from goa import problems
T = TypeVar("T")
def plot_population(
problem: problems.BaseProblem,
X: Union[T, Iterable[T]],
ax: plt.Axes = None,
c: str = "darkblue",
linestyle: str = ":",
marker: str = "X",
markersize: int = 6,
markevery: int = 2,
antialiased: bool = True,
figsize: Tuple[float, float] = (12, 8),
kwargs: Dict = None,
) -> plt.Axes:
knobs = dict()
if kwargs is not None:
knobs.update(kwargs)
if not ax:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(projection="3d")
if X.shape == (2,):
X = [X]
for x, y in X:
ax.plot(
[x, x],
[y, y],
[problem(np.asarray([x, y])), 0],
c=c,
linestyle=linestyle,
marker=marker,
markersize=markersize,
markevery=markevery,
antialiased=antialiased,
**knobs
)
return ax
def root_mean_squared_error(
x: Union[float, np.ndarray], y: Union[float, np.ndarray]
) -> float:
return np.sqrt(np.mean(np.power(np.subtract(x, y), 2)))
def custom_init_view_function(
y: float = 20, x: float = 120, a: float = 30, b: float = 15
) -> Tuple[float, float]:
return a - np.cos(y) * b, x
|
[
"numpy.subtract",
"numpy.asarray",
"matplotlib.pyplot.figure",
"numpy.cos",
"typing.TypeVar"
] |
[((141, 153), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (148, 153), False, 'from typing import Tuple, Union, TypeVar, Iterable, Dict\n'), ((608, 635), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (618, 635), True, 'import matplotlib.pyplot as plt\n'), ((1210, 1227), 'numpy.subtract', 'np.subtract', (['x', 'y'], {}), '(x, y)\n', (1221, 1227), True, 'import numpy as np\n'), ((1372, 1381), 'numpy.cos', 'np.cos', (['y'], {}), '(y)\n', (1378, 1381), True, 'import numpy as np\n'), ((819, 837), 'numpy.asarray', 'np.asarray', (['[x, y]'], {}), '([x, y])\n', (829, 837), True, 'import numpy as np\n')]
|
import numpy as np
from casim.calculations import word_entropy
def test_word_entropy():
test_arr = np.array([1, 0, 0, 1, 1, 0, 1, 0])
assert np.round(word_entropy(test_arr, 3), decimals=1) == 2.5
|
[
"casim.calculations.word_entropy",
"numpy.array"
] |
[((105, 139), 'numpy.array', 'np.array', (['[1, 0, 0, 1, 1, 0, 1, 0]'], {}), '([1, 0, 0, 1, 1, 0, 1, 0])\n', (113, 139), True, 'import numpy as np\n'), ((161, 186), 'casim.calculations.word_entropy', 'word_entropy', (['test_arr', '(3)'], {}), '(test_arr, 3)\n', (173, 186), False, 'from casim.calculations import word_entropy\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import glob
from sys import argv
from os.path import exists as file_exists
methods = ['drude', 'c36']
mol1, mol2 = str(argv[1]), str(argv[2])
sysname = mol1+'_'+mol2
def blockavg(x,nblocks=30):
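    """Block averaging for a correlated time series: split the data into nblocks chunks
    and return the mean of the block means together with the standard deviation of the
    block means, which is used below as a rough error bar."""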
lblock = int(len(x)/nblocks)
m = []
for i in range(nblocks):
start = i*lblock
end = (i+1)*lblock
m.append(np.mean(x[start:end]))
m = np.array(m)
return np.mean(m), np.std(m)
for method in methods:
dirs = sorted(glob.glob('%s_at_*'%(method)))
if len(dirs) == 0:
continue
print(method.upper(),':',mol1.upper(),'-',mol2.upper())
osmp = []
f = open('OSMP_%s_%s_%s.dat'%(mol1,mol2,method), 'w')
f.write('# %8s %10s %10s\n'%('Conc (M)','OsmP (bar)','Error'))
print('# %8s %10s %10s'%('Conc (M)','OsmP (bar)','Error'))
for d in dirs:
c = d.split("_")[2]
r1 = np.loadtxt('%s/osmp.%s_%s_%s.1.dat'%(d,mol1,mol2,c))
r2 = np.loadtxt('%s/osmp.%s_%s_%s.2.dat'%(d,mol1,mol2,c))
r3 = np.loadtxt('%s/osmp.%s_%s_%s.3.dat'%(d,mol1,mol2,c))
r = np.concatenate((r1,r2,r3))/100000.0
m,s = blockavg(r[:,1])
print("%10.1f %10.3f %10.3f"%(float(c),m,s))
f.write("%10.1f %10.3f %10.3f\n"%(float(c),m,s))
osmp.append((float(c),m,s))
osmp = np.array(osmp)
f.close()
# plot
plt.figure()
plt.title(method.upper()+': '+mol1.upper()+' - '+mol2.upper())
plt.errorbar(osmp[:,0],osmp[:,1],yerr=osmp[:,2],marker='o',markersize=5,capsize=3)
plt.xlabel('Concentration (M)')
plt.ylabel('Osmotic Pressure (bar)')
plt.tight_layout()
plt.savefig('OSMP_%s_%s_%s.png'%(mol1,mol2,method))
plt.close()
if file_exists('OSMP_%s_%s_drude.dat'%(mol1,mol2)) and file_exists('OSMP_%s_%s_c36.dat'%(mol1,mol2)):
osmp_drude = np.loadtxt('OSMP_%s_%s_drude.dat'%(mol1,mol2))
osmp_c36 = np.loadtxt('OSMP_%s_%s_c36.dat'%(mol1,mol2))
plt.figure()
plt.title(mol1.upper()+' - '+mol2.upper())
plt.errorbar(osmp_drude[:,0],osmp_drude[:,1],yerr=osmp_drude[:,2],marker='o',markersize=5,capsize=3,label='drude')
plt.errorbar(osmp_c36[:,0],osmp_c36[:,1],yerr=osmp_c36[:,2],marker='o',markersize=5,capsize=3,label='c36')
plt.xlabel('Concentration (M)')
plt.ylabel('Osmotic Pressure (bar)')
plt.legend()
plt.tight_layout()
plt.savefig('OSMP_%s_%s_both.png'%(mol1,mol2))
plt.close()
|
[
"matplotlib.pyplot.savefig",
"numpy.concatenate",
"numpy.std",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"os.path.exists",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.array",
"numpy.loadtxt",
"glob.glob",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.errorbar"
] |
[((420, 431), 'numpy.array', 'np.array', (['m'], {}), '(m)\n', (428, 431), True, 'import numpy as np\n'), ((1330, 1344), 'numpy.array', 'np.array', (['osmp'], {}), '(osmp)\n', (1338, 1344), True, 'import numpy as np\n'), ((1374, 1386), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1384, 1386), True, 'import matplotlib.pyplot as plt\n'), ((1458, 1552), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['osmp[:, 0]', 'osmp[:, 1]'], {'yerr': 'osmp[:, 2]', 'marker': '"""o"""', 'markersize': '(5)', 'capsize': '(3)'}), "(osmp[:, 0], osmp[:, 1], yerr=osmp[:, 2], marker='o',\n markersize=5, capsize=3)\n", (1470, 1552), True, 'import matplotlib.pyplot as plt\n'), ((1545, 1576), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Concentration (M)"""'], {}), "('Concentration (M)')\n", (1555, 1576), True, 'import matplotlib.pyplot as plt\n'), ((1581, 1617), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Osmotic Pressure (bar)"""'], {}), "('Osmotic Pressure (bar)')\n", (1591, 1617), True, 'import matplotlib.pyplot as plt\n'), ((1622, 1640), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1638, 1640), True, 'import matplotlib.pyplot as plt\n'), ((1645, 1700), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('OSMP_%s_%s_%s.png' % (mol1, mol2, method))"], {}), "('OSMP_%s_%s_%s.png' % (mol1, mol2, method))\n", (1656, 1700), True, 'import matplotlib.pyplot as plt\n'), ((1701, 1712), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1710, 1712), True, 'import matplotlib.pyplot as plt\n'), ((1717, 1767), 'os.path.exists', 'file_exists', (["('OSMP_%s_%s_drude.dat' % (mol1, mol2))"], {}), "('OSMP_%s_%s_drude.dat' % (mol1, mol2))\n", (1728, 1767), True, 'from os.path import exists as file_exists\n'), ((1769, 1817), 'os.path.exists', 'file_exists', (["('OSMP_%s_%s_c36.dat' % (mol1, mol2))"], {}), "('OSMP_%s_%s_c36.dat' % (mol1, mol2))\n", (1780, 1817), True, 'from os.path import exists as file_exists\n'), ((1833, 1882), 'numpy.loadtxt', 'np.loadtxt', (["('OSMP_%s_%s_drude.dat' % (mol1, mol2))"], {}), "('OSMP_%s_%s_drude.dat' % (mol1, mol2))\n", (1843, 1882), True, 'import numpy as np\n'), ((1895, 1942), 'numpy.loadtxt', 'np.loadtxt', (["('OSMP_%s_%s_c36.dat' % (mol1, mol2))"], {}), "('OSMP_%s_%s_c36.dat' % (mol1, mol2))\n", (1905, 1942), True, 'import numpy as np\n'), ((1944, 1956), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1954, 1956), True, 'import matplotlib.pyplot as plt\n'), ((2008, 2135), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['osmp_drude[:, 0]', 'osmp_drude[:, 1]'], {'yerr': 'osmp_drude[:, 2]', 'marker': '"""o"""', 'markersize': '(5)', 'capsize': '(3)', 'label': '"""drude"""'}), "(osmp_drude[:, 0], osmp_drude[:, 1], yerr=osmp_drude[:, 2],\n marker='o', markersize=5, capsize=3, label='drude')\n", (2020, 2135), True, 'import matplotlib.pyplot as plt\n'), ((2127, 2247), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['osmp_c36[:, 0]', 'osmp_c36[:, 1]'], {'yerr': 'osmp_c36[:, 2]', 'marker': '"""o"""', 'markersize': '(5)', 'capsize': '(3)', 'label': '"""c36"""'}), "(osmp_c36[:, 0], osmp_c36[:, 1], yerr=osmp_c36[:, 2], marker=\n 'o', markersize=5, capsize=3, label='c36')\n", (2139, 2247), True, 'import matplotlib.pyplot as plt\n'), ((2238, 2269), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Concentration (M)"""'], {}), "('Concentration (M)')\n", (2248, 2269), True, 'import matplotlib.pyplot as plt\n'), ((2274, 2310), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Osmotic Pressure (bar)"""'], {}), "('Osmotic Pressure (bar)')\n", (2284, 
2310), True, 'import matplotlib.pyplot as plt\n'), ((2315, 2327), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2325, 2327), True, 'import matplotlib.pyplot as plt\n'), ((2332, 2350), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2348, 2350), True, 'import matplotlib.pyplot as plt\n'), ((2355, 2404), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('OSMP_%s_%s_both.png' % (mol1, mol2))"], {}), "('OSMP_%s_%s_both.png' % (mol1, mol2))\n", (2366, 2404), True, 'import matplotlib.pyplot as plt\n'), ((2406, 2417), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2415, 2417), True, 'import matplotlib.pyplot as plt\n'), ((443, 453), 'numpy.mean', 'np.mean', (['m'], {}), '(m)\n', (450, 453), True, 'import numpy as np\n'), ((455, 464), 'numpy.std', 'np.std', (['m'], {}), '(m)\n', (461, 464), True, 'import numpy as np\n'), ((507, 536), 'glob.glob', 'glob.glob', (["('%s_at_*' % method)"], {}), "('%s_at_*' % method)\n", (516, 536), False, 'import glob\n'), ((900, 957), 'numpy.loadtxt', 'np.loadtxt', (["('%s/osmp.%s_%s_%s.1.dat' % (d, mol1, mol2, c))"], {}), "('%s/osmp.%s_%s_%s.1.dat' % (d, mol1, mol2, c))\n", (910, 957), True, 'import numpy as np\n'), ((966, 1023), 'numpy.loadtxt', 'np.loadtxt', (["('%s/osmp.%s_%s_%s.2.dat' % (d, mol1, mol2, c))"], {}), "('%s/osmp.%s_%s_%s.2.dat' % (d, mol1, mol2, c))\n", (976, 1023), True, 'import numpy as np\n'), ((1032, 1089), 'numpy.loadtxt', 'np.loadtxt', (["('%s/osmp.%s_%s_%s.3.dat' % (d, mol1, mol2, c))"], {}), "('%s/osmp.%s_%s_%s.3.dat' % (d, mol1, mol2, c))\n", (1042, 1089), True, 'import numpy as np\n'), ((389, 410), 'numpy.mean', 'np.mean', (['x[start:end]'], {}), '(x[start:end])\n', (396, 410), True, 'import numpy as np\n'), ((1101, 1129), 'numpy.concatenate', 'np.concatenate', (['(r1, r2, r3)'], {}), '((r1, r2, r3))\n', (1115, 1129), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import numpy as np
#returns the binding energy predicted by nuclear liquid drop model
def BE_liquidDrop(N,Z): #N=num of neutrons, Z=num of protons
#num of nucleons
A = N+Z
#physical constants (from Alex's notes, in MeV)
a1 = 15.49
a2 = 17.23
a3 = 0.697
a4 = 22.6
#nuclear liquid drop model
    return a1*A - a2*A**(2./3) - a3*(Z**2)/(A**(1./3)) - a4*(N-Z)**2/A
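# Rough sanity check (illustrative, not part of the original script): for Fe-56
# (N=30, Z=26) the terms above give a total binding energy of roughly 486 MeV,
# i.e. about 8.7 MeV per nucleon, close to the experimental value of ~8.8 MeV.
#   print(BE_liquidDrop(30, 26) / 56.0)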
#finds the neutron dripline
def findDripLine(Z):
#test statement for finding dripline
check = False
#start with symmetric nucleus
N=Z
#iterative search for dripline
while (check == False):
BE_i = BE_liquidDrop(N+1,Z)
BE_f = BE_liquidDrop(N,Z)
Q = BE_f-BE_i
if (Q>0):
return N
else:
N = N+1
def makeMatCores(Zrange):
Nstart = 0
Nrange = int(2.3*Zrange)
Zstart = 1
mat = np.zeros((Zrange-Zstart,Nrange-Nstart))
for Z in range(Zstart,Zrange):
for N in range(Nstart,Nrange):
            # Q-value for adding one more neutron; a negative Q marks the nucleus
            # as still neutron-bound at this (N, Z) in the map below.
            BE_i = BE_liquidDrop(N+1,Z)
            BE_f = BE_liquidDrop(N,Z)
            Q = BE_f-BE_i
            if (Q<0):
mat[Z-Zstart, N-Nstart] = 1
else:
mat[Z-Zstart, N-Nstart] = 0
return mat
#plt.matshow(makeMatCores(100,100))
#define range of Z's
Z_low = 2
Z_top = 150
mat = makeMatCores(Z_top)
img2 = plt.imshow(mat,interpolation='nearest',
origin='lower')
plt.show()
#interested in finding the neutron drip line for the range Z=36-44
#Z = range(Z_low, Z_top+1)
#N = []
#
#for z in Z:
# dripline = findDripLine(z)
# print "For", z,"protons, the neutron dripline is",dripline, "neutrons"
# N.append(dripline)
#
#mat = np.zeros((max(Z)+1,max(N)+1))
#
#for i in range(0,len(Z)):
# mat[Z[i],N[i]] = 1
#plt.matshow(mat)
#plt.show()
|
[
"numpy.zeros",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show"
] |
[((1336, 1392), 'matplotlib.pyplot.imshow', 'plt.imshow', (['mat'], {'interpolation': '"""nearest"""', 'origin': '"""lower"""'}), "(mat, interpolation='nearest', origin='lower')\n", (1346, 1392), True, 'import matplotlib.pyplot as plt\n'), ((1412, 1422), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1420, 1422), True, 'import matplotlib.pyplot as plt\n'), ((810, 854), 'numpy.zeros', 'np.zeros', (['(Zrange - Zstart, Nrange - Nstart)'], {}), '((Zrange - Zstart, Nrange - Nstart))\n', (818, 854), True, 'import numpy as np\n')]
|
import pytest
from anndata import AnnData
from pandas.testing import assert_frame_equal
import numpy as np
from squidpy.gr import moran, ripley_k, co_occurrence
MORAN_K = "moranI"
def test_ripley_k(adata: AnnData):
"""Check ripley score and shape."""
ripley_k(adata, cluster_key="leiden")
# assert ripley in adata.uns
assert "ripley_k_leiden" in adata.uns.keys()
# assert clusters intersection
cat_ripley = set(adata.uns["ripley_k_leiden"]["leiden"].unique())
cat_adata = set(adata.obs["leiden"].cat.categories)
assert cat_ripley.isdisjoint(cat_adata) is False
def test_moran_seq_par(dummy_adata: AnnData):
"""Check whether moran results are the same for seq. and parallel computation."""
moran(dummy_adata)
dummy_adata.var["highly_variable"] = np.random.choice([True, False], size=dummy_adata.var_names.shape)
df = moran(dummy_adata, copy=True, n_jobs=1, seed=42, n_perms=50)
df_parallel = moran(dummy_adata, copy=True, n_jobs=2, seed=42, n_perms=50)
idx_df = df.index.values
idx_adata = dummy_adata[:, dummy_adata.var.highly_variable.values].var_names.values
assert MORAN_K in dummy_adata.uns.keys()
assert "pval_sim_fdr_bh" in dummy_adata.uns[MORAN_K]
assert dummy_adata.uns[MORAN_K].columns.shape == (4,)
# test highly variable
assert dummy_adata.uns[MORAN_K].shape != df.shape
# assert idx are sorted and contain same elements
assert not np.array_equal(idx_df, idx_adata)
np.testing.assert_array_equal(sorted(idx_df), sorted(idx_adata))
# check parallel gives same results
with pytest.raises(AssertionError, match=r'.*\(column name="pval_sim"\) are different.*'):
# because the seeds will be different, we don't expect the pval_sim values to be the same
assert_frame_equal(df, df_parallel)
@pytest.mark.parametrize("n_jobs", [1, 2])
def test_moran_reproducibility(dummy_adata: AnnData, n_jobs: int):
"""Check moran reproducibility results."""
moran(dummy_adata)
dummy_adata.var["highly_variable"] = np.random.choice([True, False], size=dummy_adata.var_names.shape)
# seed will work only when multiprocessing/loky
df_1 = moran(dummy_adata, copy=True, n_jobs=n_jobs, seed=42, n_perms=50)
df_2 = moran(dummy_adata, copy=True, n_jobs=n_jobs, seed=42, n_perms=50)
idx_df = df_1.index.values
idx_adata = dummy_adata[:, dummy_adata.var.highly_variable.values].var_names.values
assert MORAN_K in dummy_adata.uns.keys()
# assert fdr correction in adata.uns
assert "pval_sim_fdr_bh" in dummy_adata.uns[MORAN_K]
assert dummy_adata.uns[MORAN_K].columns.shape == (4,)
# test highly variable
assert dummy_adata.uns[MORAN_K].shape != df_1.shape
# assert idx are sorted and contain same elements
assert not np.array_equal(idx_df, idx_adata)
np.testing.assert_array_equal(sorted(idx_df), sorted(idx_adata))
# check parallel gives same results
assert_frame_equal(df_1, df_2)
def test_co_occurrence(adata: AnnData):
"""
check ripley score and shape
"""
co_occurrence(adata, cluster_key="leiden")
# assert occurrence in adata.uns
assert "leiden_co_occurrence" in adata.uns.keys()
assert "occ" in adata.uns["leiden_co_occurrence"].keys()
assert "interval" in adata.uns["leiden_co_occurrence"].keys()
# assert shapes
arr = adata.uns["leiden_co_occurrence"]["occ"]
assert arr.ndim == 3
assert arr.shape[2] == 49
assert arr.shape[1] == arr.shape[0] == adata.obs["leiden"].unique().shape[0]
# @pytest.mark.parametrize(("ys", "xs"), [(10, 10), (None, None), (10, 20)])
@pytest.mark.parametrize(("n_jobs", "n_splits"), [(1, 2), (2, 2)])
def test_co_occurrence_reproducibility(adata: AnnData, n_jobs: int, n_splits: int):
"""Check co_occurrence reproducibility results."""
arr_1, interval_1 = co_occurrence(adata, cluster_key="leiden", copy=True, n_jobs=n_jobs, n_splits=n_splits)
arr_2, interval_2 = co_occurrence(adata, cluster_key="leiden", copy=True, n_jobs=n_jobs, n_splits=n_splits)
np.testing.assert_array_equal(sorted(interval_1), sorted(interval_2))
np.testing.assert_allclose(arr_1, arr_2)
|
[
"pandas.testing.assert_frame_equal",
"squidpy.gr.co_occurrence",
"numpy.testing.assert_allclose",
"pytest.raises",
"squidpy.gr.ripley_k",
"numpy.random.choice",
"numpy.array_equal",
"pytest.mark.parametrize",
"squidpy.gr.moran"
] |
[((1825, 1866), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_jobs"""', '[1, 2]'], {}), "('n_jobs', [1, 2])\n", (1848, 1866), False, 'import pytest\n'), ((3614, 3679), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('n_jobs', 'n_splits')", '[(1, 2), (2, 2)]'], {}), "(('n_jobs', 'n_splits'), [(1, 2), (2, 2)])\n", (3637, 3679), False, 'import pytest\n'), ((265, 302), 'squidpy.gr.ripley_k', 'ripley_k', (['adata'], {'cluster_key': '"""leiden"""'}), "(adata, cluster_key='leiden')\n", (273, 302), False, 'from squidpy.gr import moran, ripley_k, co_occurrence\n'), ((738, 756), 'squidpy.gr.moran', 'moran', (['dummy_adata'], {}), '(dummy_adata)\n', (743, 756), False, 'from squidpy.gr import moran, ripley_k, co_occurrence\n'), ((798, 863), 'numpy.random.choice', 'np.random.choice', (['[True, False]'], {'size': 'dummy_adata.var_names.shape'}), '([True, False], size=dummy_adata.var_names.shape)\n', (814, 863), True, 'import numpy as np\n'), ((873, 933), 'squidpy.gr.moran', 'moran', (['dummy_adata'], {'copy': '(True)', 'n_jobs': '(1)', 'seed': '(42)', 'n_perms': '(50)'}), '(dummy_adata, copy=True, n_jobs=1, seed=42, n_perms=50)\n', (878, 933), False, 'from squidpy.gr import moran, ripley_k, co_occurrence\n'), ((952, 1012), 'squidpy.gr.moran', 'moran', (['dummy_adata'], {'copy': '(True)', 'n_jobs': '(2)', 'seed': '(42)', 'n_perms': '(50)'}), '(dummy_adata, copy=True, n_jobs=2, seed=42, n_perms=50)\n', (957, 1012), False, 'from squidpy.gr import moran, ripley_k, co_occurrence\n'), ((1985, 2003), 'squidpy.gr.moran', 'moran', (['dummy_adata'], {}), '(dummy_adata)\n', (1990, 2003), False, 'from squidpy.gr import moran, ripley_k, co_occurrence\n'), ((2045, 2110), 'numpy.random.choice', 'np.random.choice', (['[True, False]'], {'size': 'dummy_adata.var_names.shape'}), '([True, False], size=dummy_adata.var_names.shape)\n', (2061, 2110), True, 'import numpy as np\n'), ((2174, 2239), 'squidpy.gr.moran', 'moran', (['dummy_adata'], {'copy': '(True)', 'n_jobs': 'n_jobs', 'seed': '(42)', 'n_perms': '(50)'}), '(dummy_adata, copy=True, n_jobs=n_jobs, seed=42, n_perms=50)\n', (2179, 2239), False, 'from squidpy.gr import moran, ripley_k, co_occurrence\n'), ((2251, 2316), 'squidpy.gr.moran', 'moran', (['dummy_adata'], {'copy': '(True)', 'n_jobs': 'n_jobs', 'seed': '(42)', 'n_perms': '(50)'}), '(dummy_adata, copy=True, n_jobs=n_jobs, seed=42, n_perms=50)\n', (2256, 2316), False, 'from squidpy.gr import moran, ripley_k, co_occurrence\n'), ((2938, 2968), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['df_1', 'df_2'], {}), '(df_1, df_2)\n', (2956, 2968), False, 'from pandas.testing import assert_frame_equal\n'), ((3064, 3106), 'squidpy.gr.co_occurrence', 'co_occurrence', (['adata'], {'cluster_key': '"""leiden"""'}), "(adata, cluster_key='leiden')\n", (3077, 3106), False, 'from squidpy.gr import moran, ripley_k, co_occurrence\n'), ((3843, 3934), 'squidpy.gr.co_occurrence', 'co_occurrence', (['adata'], {'cluster_key': '"""leiden"""', 'copy': '(True)', 'n_jobs': 'n_jobs', 'n_splits': 'n_splits'}), "(adata, cluster_key='leiden', copy=True, n_jobs=n_jobs,\n n_splits=n_splits)\n", (3856, 3934), False, 'from squidpy.gr import moran, ripley_k, co_occurrence\n'), ((3955, 4046), 'squidpy.gr.co_occurrence', 'co_occurrence', (['adata'], {'cluster_key': '"""leiden"""', 'copy': '(True)', 'n_jobs': 'n_jobs', 'n_splits': 'n_splits'}), "(adata, cluster_key='leiden', copy=True, n_jobs=n_jobs,\n n_splits=n_splits)\n", (3968, 4046), False, 'from squidpy.gr import moran, ripley_k, co_occurrence\n'), ((4122, 
4162), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['arr_1', 'arr_2'], {}), '(arr_1, arr_2)\n', (4148, 4162), True, 'import numpy as np\n'), ((1442, 1475), 'numpy.array_equal', 'np.array_equal', (['idx_df', 'idx_adata'], {}), '(idx_df, idx_adata)\n', (1456, 1475), True, 'import numpy as np\n'), ((1594, 1684), 'pytest.raises', 'pytest.raises', (['AssertionError'], {'match': '""".*\\\\(column name="pval_sim"\\\\) are different.*"""'}), '(AssertionError, match=\n \'.*\\\\(column name="pval_sim"\\\\) are different.*\')\n', (1607, 1684), False, 'import pytest\n'), ((1786, 1821), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['df', 'df_parallel'], {}), '(df, df_parallel)\n', (1804, 1821), False, 'from pandas.testing import assert_frame_equal\n'), ((2791, 2824), 'numpy.array_equal', 'np.array_equal', (['idx_df', 'idx_adata'], {}), '(idx_df, idx_adata)\n', (2805, 2824), True, 'import numpy as np\n')]
|
import gym
import numpy as np
import torch
import torch.optim as optim
from utils_main import make_env, save_files
from neural_network import ActorCritic
from ppo_method import ppo
from common.multiprocessing_env import SubprocVecEnv
from itertools import count
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
num_envs = 2
env_name = "CustomEnv-v0"
envs = [make_env(env_name) for i in range(num_envs)]
envs = SubprocVecEnv(envs)
num_inputs = envs.observation_space.shape[0]
num_outputs = envs.action_space.shape[0]
# Hyper params:
hidden_size = 400
lr = 3e-6
num_steps = 20
mini_batch_size = 5
ppo_epochs = 4
threshold_reward = -0.01
model = ActorCritic(num_inputs, num_outputs, hidden_size).to(device)
env = gym.make(env_name)
my_ppo = ppo(model, env)
optimizer = optim.Adam(model.parameters(), lr=lr)
max_frames = 15_000_000
frame_idx = 0
test_rewards = []
save_iteration = 1000
model_save_iteration = 1000
state = envs.reset()
early_stop = False
def trch_ft_device(input, device):
output = torch.FloatTensor(input).to(device)
return output
saver_model = save_files()
while frame_idx < max_frames and not early_stop:
log_probs = []
values = []
states = []
actions = []
rewards = []
masks = []
entropy = 0
for _ in range(num_steps):
state = trch_ft_device(state, device)
dist, value = model(state)
action = dist.sample()
next_state, reward, done, _ = envs.step(action.cpu().numpy())
log_prob = dist.log_prob(action)
entropy += dist.entropy().mean()
# appending
log_probs.append(log_prob)
values.append(value)
rewards.append(torch.FloatTensor(reward).unsqueeze(1).to(device))
masks.append(torch.FloatTensor(1 - done).unsqueeze(1).to(device))
states.append(state)
actions.append(action)
# next iteration init.
state = next_state
frame_idx += 1
if frame_idx % save_iteration == 0:
test_reward = np.mean([my_ppo.test_env() for _ in range(num_envs)])
test_rewards.append(test_reward)
# plot(frame_idx, test_rewards)
if test_reward > threshold_reward:
early_stop = True
if frame_idx % model_save_iteration == 0:
saver_model.model_save(model)
next_state = trch_ft_device(next_state, device)
_, next_value = model(next_state)
returns = my_ppo.compute_gae(next_value, rewards, masks, values)
returns = torch.cat(returns).detach()
log_probs = torch.cat(log_probs).detach()
values = torch.cat(values).detach()
states = torch.cat(states)
actions = torch.cat(actions)
advantage = returns - values
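    # The advantage (GAE-style returns minus the critic's value estimates) is the
    # signal that weights the clipped PPO policy update on the next line.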
my_ppo.ppo_update(ppo_epochs, mini_batch_size, states, actions, log_probs, returns, advantage, optimizer)
max_expert_num = 50000
num_steps = 0
expert_traj = []
# building an episode based on the current model.
for i_episode in count():
state = env.reset()
done = False
total_reward = 0
while not done:
state = torch.FloatTensor(state).unsqueeze(0).to(device)
dist, _ = model(state)
action = dist.sample().cpu().numpy()[0]
next_state, reward, done, _ = env.step(action)
state = next_state
total_reward += reward
expert_traj.append(np.hstack([state, action]))
num_steps += 1
print("episode:", i_episode, "reward:", total_reward)
if num_steps >= max_expert_num:
break
expert_traj = np.stack(expert_traj)
print()
print(expert_traj.shape)
print()
np.save("expert_traj.npy", expert_traj)
|
[
"numpy.stack",
"ppo_method.ppo",
"utils_main.save_files",
"gym.make",
"common.multiprocessing_env.SubprocVecEnv",
"numpy.save",
"neural_network.ActorCritic",
"torch.FloatTensor",
"torch.cat",
"itertools.count",
"numpy.hstack",
"torch.cuda.is_available",
"torch.device",
"utils_main.make_env"
] |
[((274, 299), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (297, 299), False, 'import torch\n'), ((309, 352), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (321, 352), False, 'import torch\n'), ((456, 475), 'common.multiprocessing_env.SubprocVecEnv', 'SubprocVecEnv', (['envs'], {}), '(envs)\n', (469, 475), False, 'from common.multiprocessing_env import SubprocVecEnv\n'), ((760, 778), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (768, 778), False, 'import gym\n'), ((789, 804), 'ppo_method.ppo', 'ppo', (['model', 'env'], {}), '(model, env)\n', (792, 804), False, 'from ppo_method import ppo\n'), ((1121, 1133), 'utils_main.save_files', 'save_files', ([], {}), '()\n', (1131, 1133), False, 'from utils_main import make_env, save_files\n'), ((2985, 2992), 'itertools.count', 'count', ([], {}), '()\n', (2990, 2992), False, 'from itertools import count\n'), ((3537, 3558), 'numpy.stack', 'np.stack', (['expert_traj'], {}), '(expert_traj)\n', (3545, 3558), True, 'import numpy as np\n'), ((3600, 3639), 'numpy.save', 'np.save', (['"""expert_traj.npy"""', 'expert_traj'], {}), "('expert_traj.npy', expert_traj)\n", (3607, 3639), True, 'import numpy as np\n'), ((404, 422), 'utils_main.make_env', 'make_env', (['env_name'], {}), '(env_name)\n', (412, 422), False, 'from utils_main import make_env, save_files\n'), ((2668, 2685), 'torch.cat', 'torch.cat', (['states'], {}), '(states)\n', (2677, 2685), False, 'import torch\n'), ((2700, 2718), 'torch.cat', 'torch.cat', (['actions'], {}), '(actions)\n', (2709, 2718), False, 'import torch\n'), ((693, 742), 'neural_network.ActorCritic', 'ActorCritic', (['num_inputs', 'num_outputs', 'hidden_size'], {}), '(num_inputs, num_outputs, hidden_size)\n', (704, 742), False, 'from neural_network import ActorCritic\n'), ((1051, 1075), 'torch.FloatTensor', 'torch.FloatTensor', (['input'], {}), '(input)\n', (1068, 1075), False, 'import torch\n'), ((2541, 2559), 'torch.cat', 'torch.cat', (['returns'], {}), '(returns)\n', (2550, 2559), False, 'import torch\n'), ((2585, 2605), 'torch.cat', 'torch.cat', (['log_probs'], {}), '(log_probs)\n', (2594, 2605), False, 'import torch\n'), ((2628, 2645), 'torch.cat', 'torch.cat', (['values'], {}), '(values)\n', (2637, 2645), False, 'import torch\n'), ((3361, 3387), 'numpy.hstack', 'np.hstack', (['[state, action]'], {}), '([state, action])\n', (3370, 3387), True, 'import numpy as np\n'), ((3093, 3117), 'torch.FloatTensor', 'torch.FloatTensor', (['state'], {}), '(state)\n', (3110, 3117), False, 'import torch\n'), ((1706, 1731), 'torch.FloatTensor', 'torch.FloatTensor', (['reward'], {}), '(reward)\n', (1723, 1731), False, 'import torch\n'), ((1778, 1805), 'torch.FloatTensor', 'torch.FloatTensor', (['(1 - done)'], {}), '(1 - done)\n', (1795, 1805), False, 'import torch\n')]
|
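The PPO record above calls a local helper, my_ppo.compute_gae(next_value, rewards, masks, values), whose body is not shown. The sketch below is a standard generalized-advantage-estimation routine consistent with that call signature; the name compute_gae_sketch and the gamma/tau defaults are assumptions, not the repository's actual implementation.
def compute_gae_sketch(next_value, rewards, masks, values, gamma=0.99, tau=0.95):
    # rewards, masks and values are per-step lists of [num_envs, 1] tensors,
    # exactly as collected in the rollout loop above.
    values = values + [next_value]
    gae = 0
    returns = []
    for step in reversed(range(len(rewards))):
        delta = rewards[step] + gamma * values[step + 1] * masks[step] - values[step]
        gae = delta + gamma * tau * masks[step] * gae
        returns.insert(0, gae + values[step])
    return returns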
import cotk
from cotk._utils.file_utils import get_resource_file_path
from cotk.dataloader.dataloader import *
from collections import Counter
import numpy as np
from itertools import chain
class Score(DataField):
def get_next(self, dataset):
r"""read text and returns the next label(integer). Note that it may raise StopIteration.
Args:{DataField.GET_NEXT_ARG}
Examples:
>>> dataset = iter(["1\n", "0\n"])
>>> field = Label()
>>> field.get_next(dataset)
1
>>> field.get_next(dataset)
0
"""
score = next(dataset)
return float(score.strip())
def _map_fun(self, element, convert_ids_to_tokens=None):
"""
Returns the element itself.
Args:
element: An element of a dataset.
convert_ids_to_tokens: It's useless. This argument exists, just to keep the signature the same as that of super class.
"""
return element
class TranslationWithScore(cotk.dataloader.SingleTurnDialog):
@cotk._utils.hooks.hook_dataloader
def __init__(self, file_id, min_vocab_times, \
max_sent_length, invalid_vocab_times, \
tokenizer, remains_capital
):
super().__init__(file_id, min_vocab_times, \
max_sent_length, invalid_vocab_times, \
tokenizer, remains_capital)
def _load_data(self):
data_fields = {
'train': [['post', 'Sentence'], ['resp', 'Sentence'], ['score', Score]],
'dev': [['post', 'Sentence'], ['resp', 'Sentence']],
'test': [['post', 'Sentence'], ['resp', 'Sentence']],
}
return self._general_load_data(self._file_path, data_fields, \
self._min_vocab_times, self._max_sent_length, None, self._invalid_vocab_times)
def _general_load_data(self, file_path, data_fields, min_vocab_times, max_sent_length, max_turn_length,
invalid_vocab_times):
r'''This function implements a general loading process.
Arguments:
file_path (str): A string indicating the path of dataset.
data_fields (dict, list, tuple): If it's a list(tuple), it must be a list of (key, field) pairs.
Field must be a DataField instance,
or a subclass of DataField(in this case, its instance will be used, assuming its constructor accepts no arguments),
or a string(in this case, the instance of the class, whose __name__ is field, will be used).
For example, data_fields=[['post', 'Sentence'], ['label', Label]] means that,
in the raw file, the first line is a sentence and the second line is a label. They are saved in a dict.
dataset = {'post': [line1, line3, line5, ...], 'label': [line2, line4, line6, ...]}
data_fields=[['key1', 'Session'], ['key2', Label()]], means that, in the raw file, the first *several lines*
is a session, *followed by an empty line*, and the next line is a label.
dataset = {'key1': [session1, session2, ...], 'key2': [label1, label2, ...]}
If it's a dict, different datasets may have different formats.(If `data_fields` is a list or a tuple, different datasets have the same format).
Its keys are the same as `self.key_name` that indicate the datasets, and the values are lists as mentioned above.
For example, data_fields = {'train': [['sess', 'Session'], ['label', 'Label']], 'test': [['sess', 'session']]},
means that the train set contains sessions and labels, but the test set only contains sessions.
min_vocab_times (int): A cut-off threshold of valid tokens. All tokens appear
not less than `min_vocab_times` in **training set** will be marked as valid words.
            max_sent_length (int): All sentences longer than ``max_sent_length`` will be shortened
                to the first ``max_sent_length`` tokens.
            max_turn_length (int): All sessions whose turn length is longer than ``max_turn_length`` will be shortened to
                the first ``max_turn_length`` sentences. If the dataset doesn't contain sessions, this parameter will be ignored.
invalid_vocab_times (int): A cut-off threshold of invalid tokens. All tokens appear
not less than ``invalid_vocab_times`` in the **whole dataset** (except valid words) will be
marked as invalid words. Otherwise, they are unknown words, which are ignored both for
model or metrics.
Returns:
(tuple): containing:
* **all_vocab_list** (list): vocabulary list of the datasets,
including valid and invalid vocabs.
* **valid_vocab_len** (int): the number of valid vocab.
``vocab_list[:valid_vocab_len]`` will be regarded as valid vocabs,
while ``vocab_list[valid_vocab_len:]`` regarded as invalid vocabs.
* **data** (dict): a dict contains data.
* **data_size** (dict): a dict contains size of each item in data.
'''
def get_fields(fields):
assert isinstance(fields, list) or isinstance(fields, tuple)
return [(data_key, DataField.get_field(field)) for data_key, field in fields]
if isinstance(data_fields, dict):
no_field_keys = [key for key in self.key_name if key not in data_fields]
if no_field_keys:
raise ValueError('There is no data fields for dataset(%s) ' % ', '.join(no_field_keys))
try:
data_fields = {key: get_fields(data_fields[key]) for key in self.key_name}
except AssertionError:
raise TypeError('If `data_field` is a dict, its value must be a list(or tuple) of lists(or tuples).')
elif isinstance(data_fields, list) or isinstance(data_fields, tuple):
data_fields = get_fields(data_fields)
data_fields = {key: data_fields for key in self.key_name}
else:
raise TypeError('`data_fields` must be a dict, or a list, or a tuple.')
# now data_fields is a dict. Keys are the same as self.key_name('train', 'test', 'dev', etc.). Each value is
# a list(tuple) of lists(tuples), which means (data_key(str), data_field(DataField)) pairs.
# For example,
# data_fields == {'train': [['sent', Sentence()], ['label', Label()]],
# 'test': [['sent', Sentence()], ['label', Label()]]}.
# Note, different dataset may have different fields.
special_tokens = set(self.ext_vocab)
origin_data = {}
for key in self.key_name:
origin_data[key] = {data_key: [] for data_key, _ in data_fields[key]}
with open("%s/%s.txt" % (file_path, key), encoding='utf-8') as f_file:
while True:
try:
for data_key, field in data_fields[key]:
element = field.convert_to_tokens(field.get_next(f_file), self.tokenize)
for token in field.iter_tokens(element):
if token in special_tokens:
raise RuntimeError(
'The dataset contains special token "%s". This is not allowed.' % token)
origin_data[key][data_key].append(element)
except StopIteration:
break
def chain_allvocab(dic, fields):
vocabs = []
for data_key, field in fields:
for element in dic[data_key]:
vocabs.extend(field.iter_tokens(element))
return vocabs
raw_vocab_list = chain_allvocab(origin_data['train'], data_fields['train'])
# Important: Sort the words preventing the index changes between
# different runs
vocab = sorted(Counter(raw_vocab_list).most_common(), \
key=lambda pair: (-pair[1], pair[0]))
left_vocab = [x[0] for x in vocab if x[1] >= min_vocab_times]
vocab_list = self.ext_vocab + list(left_vocab)
valid_vocab_len = len(vocab_list)
valid_vocab_set = set(vocab_list)
for key in self.key_name:
if key == 'train':
continue
raw_vocab_list.extend(chain_allvocab(origin_data[key], data_fields[key]))
vocab = sorted(Counter(raw_vocab_list).most_common(), \
key=lambda pair: (-pair[1], pair[0]))
left_vocab = [x[0] for x in vocab if x[1] >= invalid_vocab_times and x[0] not in valid_vocab_set]
vocab_list.extend(left_vocab)
print("valid vocab list length = %d" % valid_vocab_len)
print("vocab list length = %d" % len(vocab_list))
word2id = {w: i for i, w in enumerate(vocab_list)}
data = {}
data_size = {}
for key in self.key_name:
data[key] = {}
for data_key, field in data_fields[key]:
origin_data[key][data_key] = [field.convert_to_ids(element, word2id, self) for element in
origin_data[key][data_key]]
data[key][data_key] = [
field.cut(element, max_sent_length=max_sent_length, max_turn_length=max_turn_length) for element in
origin_data[key][data_key]]
if key not in data_size:
data_size[key] = len(data[key][data_key])
elif data_size[key] != len(data[key][data_key]):
raise RuntimeError(
"The data of input %s.txt contains different numbers of fields" % key)
vocab = chain_allvocab(origin_data[key], data_fields[key])
vocab_num = len(vocab)
oov_num = sum([word not in word2id for word in vocab])
invalid_num = sum([word not in valid_vocab_set for word in vocab]) - oov_num
sent_length = []
for data_key, field in data_fields[key]:
sent_length.extend(
[len(sent) for element in origin_data[key][data_key] for sent in field.iter_sentence(element)])
cut_word_num = np.sum(np.maximum(np.array(sent_length) - max_sent_length, 0))
session_keys = [data_key for data_key, field in data_fields[key] if field.__class__ == Session]
if session_keys:
turn_length = list(
map(len, chain.from_iterable((origin_data[key][sess_key] for sess_key in session_keys))))
max_turn_length_before_cut = max(turn_length)
sent_num = sum(turn_length)
cut_sentence_rate = np.sum(np.maximum(np.array(turn_length) - max_turn_length, 0)) / sent_num
else:
max_turn_length_before_cut = 1
cut_sentence_rate = 0
print(("%s set. invalid rate: %f, unknown rate: %f, max sentence length before cut: %d, " + \
"cut word rate: %f\n\tmax turn length before cut: %d, cut sentence rate: %f") % \
(key, invalid_num / vocab_num, oov_num / vocab_num, max(sent_length), \
cut_word_num / vocab_num, max_turn_length_before_cut, cut_sentence_rate))
# calculate hash value
hash_value = DataloaderHash(ignore_tokens=(self.go_id, self.eos_id, self.pad_id),
unk_id=self.unk_id).hash_datasets(data, data_fields, vocab_list[len(
self.ext_vocab):valid_vocab_len])
self.__hash_value = hash_value
return vocab_list, valid_vocab_len, data, data_size
def get_batch(self, key, indexes):
'''{LanguageProcessingBase.GET_BATCH_DOC_WITHOUT_RETURNS}
Returns:
(dict): A dict at least contains:
* **post_length** (:class:`numpy.ndarray`): A 1-d array, the length of post in each batch.
Size: ``[batch_size]``
* **post** (:class:`numpy.ndarray`): A 2-d padded array containing words of id form in posts.
Only provide valid words. ``unk_id`` will be used if a word is not valid.
Size: ``[batch_size, max(sent_length)]``
* **post_allvocabs** (:class:`numpy.ndarray`): A 2-d padded array containing words of id
form in posts. Provide both valid and invalid vocabs.
Size: ``[batch_size, max(sent_length)]``
* **resp_length** (:class:`numpy.ndarray`): A 1-d array, the length of response in each batch.
Size: ``[batch_size]``
* **resp** (:class:`numpy.ndarray`): A 2-d padded array containing words of id form
in responses. Only provide valid vocabs. ``unk_id`` will be used if a word is not valid.
Size: ``[batch_size, max(sent_length)]``
* **resp_allvocabs** (:class:`numpy.ndarray`):
A 2-d padded array containing words of id form in responses.
Provide both valid and invalid vocabs.
Size: ``[batch_size, max(sent_length)]``
Examples:
>>> # all_vocab_list = ["<pad>", "<unk>", "<go>", "<eos>", "how", "are", "you",
>>> # "hello", "i", "am", "fine"]
>>> # vocab_size = 9
>>> # vocab_list = ["<pad>", "<unk>", "<go>", "<eos>", "how", "are", "you", "hello", "i"]
>>> dataloader.get_batch('train', [0, 1])
{
"post_allvocabs": numpy.array([
[2, 5, 6, 10, 3], # first post: <go> are you fine <eos>
[2, 7, 3, 0, 0], # second post: <go> hello <eos> <pad> <pad>
]),
"post": numpy.array([
[2, 5, 6, 1, 3], # first post: <go> are you <unk> <eos>
[2, 7, 3, 0, 0], # second post: <go> hello <eos> <pad> <pad>
]),
"resp_allvocabs": numpy.array([
[2, 8, 9, 10, 3], # first response: <go> i am fine <eos>
[2, 7, 3, 0, 0], # second response: <go> hello <eos> <pad> <pad>
]),
"resp": numpy.array([
[2, 8, 1, 1, 3], # first response: <go> i <unk> <unk> <eos>
[2, 7, 3, 0, 0], # second response: <go> hello <eos> <pad> <pad>
]),
"post_length": numpy.array([5, 3]), # length of posts
"resp_length": numpy.array([5, 3]), # length of responses
}
'''
if key not in self.key_name:
raise ValueError("No set named %s." % key)
res = {}
batch_size = len(indexes)
res["post_length"] = np.array(list(map(lambda i: len(self.data[key]['post'][i]), indexes)), dtype=int)
res["resp_length"] = np.array(list(map(lambda i: len(self.data[key]['resp'][i]), indexes)), dtype=int)
res_post = res["post"] = np.zeros((batch_size, np.max(res["post_length"])), dtype=int)
res_resp = res["resp"] = np.zeros((batch_size, np.max(res["resp_length"])), dtype=int)
for i, j in enumerate(indexes):
post = self.data[key]['post'][j]
resp = self.data[key]['resp'][j]
res_post[i, :len(post)] = post
res_resp[i, :len(resp)] = resp
res["post_allvocabs"] = res_post.copy()
res["resp_allvocabs"] = res_resp.copy()
res_post[res_post >= self.valid_vocab_len] = self.unk_id
res_resp[res_resp >= self.valid_vocab_len] = self.unk_id
if key=='train':
res['score']=np.array([self.data[key]['score'][i] for i in indexes])
return res
def main():
max_sent_length = 50
loader = TranslationWithScore('./data/iwslt14_raml', 10, max_sent_length, 0, 'nltk', False)
loader.restart("train",batch_size=2,shuffle=True)
q=loader.get_next_batch("train")
print(len(q['score']))
print(q)
if __name__ == '__main__':
main()
|
[
"itertools.chain.from_iterable",
"collections.Counter",
"numpy.max",
"numpy.array"
] |
[((15676, 15731), 'numpy.array', 'np.array', (["[self.data[key]['score'][i] for i in indexes]"], {}), "([self.data[key]['score'][i] for i in indexes])\n", (15684, 15731), True, 'import numpy as np\n'), ((15048, 15074), 'numpy.max', 'np.max', (["res['post_length']"], {}), "(res['post_length'])\n", (15054, 15074), True, 'import numpy as np\n'), ((15143, 15169), 'numpy.max', 'np.max', (["res['resp_length']"], {}), "(res['resp_length'])\n", (15149, 15169), True, 'import numpy as np\n'), ((8020, 8043), 'collections.Counter', 'Counter', (['raw_vocab_list'], {}), '(raw_vocab_list)\n', (8027, 8043), False, 'from collections import Counter\n'), ((8532, 8555), 'collections.Counter', 'Counter', (['raw_vocab_list'], {}), '(raw_vocab_list)\n', (8539, 8555), False, 'from collections import Counter\n'), ((10352, 10373), 'numpy.array', 'np.array', (['sent_length'], {}), '(sent_length)\n', (10360, 10373), True, 'import numpy as np\n'), ((10600, 10676), 'itertools.chain.from_iterable', 'chain.from_iterable', (['(origin_data[key][sess_key] for sess_key in session_keys)'], {}), '(origin_data[key][sess_key] for sess_key in session_keys)\n', (10619, 10676), False, 'from itertools import chain\n'), ((10841, 10862), 'numpy.array', 'np.array', (['turn_length'], {}), '(turn_length)\n', (10849, 10862), True, 'import numpy as np\n')]
|
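The Score field in the record above implies a train split laid out as repeating (post, response, score) lines. The tiny illustration below shows that assumed layout and the float parsing that Score.get_next performs; the example sentences and score value are made up.
# Hypothetical excerpt of a train.txt: one post, one response, one score per example.
sample_file = iter([
    "how are you\n",   # 'post'  -> Sentence field
    "i am fine\n",     # 'resp'  -> Sentence field
    "0.75\n",          # 'score' -> Score field
])
post = next(sample_file).strip()
resp = next(sample_file).strip()
score = float(next(sample_file).strip())   # mirrors Score.get_next
print(post, "|", resp, "|", score)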
import string
import numpy as np
import pandas as pd
import pytest
from plotnine import (ggplot, aes, geom_point, geom_jitter, geom_bar,
geom_col, geom_boxplot, geom_text, geom_rect,
after_stat, position_dodge, position_dodge2,
position_jitter, position_jitterdodge,
position_nudge, position_stack, theme)
from plotnine.positions.position import position
from plotnine.exceptions import PlotnineError
n = 6
m = 10
random_state = np.random.RandomState(1234567890)
df1 = pd.DataFrame({'x': [1, 2, 1, 2],
'y': [1, 1, 2, 2]})
df2 = pd.DataFrame({'x': np.repeat(range(n+1), range(n+1)),
'z': np.repeat(range(n//2), range(3, n*2, 4))})
df3 = pd.DataFrame({
'x': random_state.choice(['A', 'B'], n*m),
'y': random_state.randint(0, 20, n*m),
'c': random_state.choice([False, False, True, False], n*m)
})
random_state.seed(1234567890)
_theme = theme(subplots_adjust={'right': 0.85})
def test_jitter():
df1 = pd.DataFrame({'x': [1, 2, 1, 2],
'y': [1, 1, 2, 2]})
p = (ggplot(df1, aes('x', 'y')) +
geom_point(size=10) +
geom_jitter(size=10, color='red', random_state=random_state) +
geom_jitter(size=10, color='blue', width=0.1,
height=0.1, random_state=random_state))
assert p + _theme == 'jitter'
with pytest.raises(PlotnineError):
geom_jitter(position=position_jitter(), width=0.1)
def test_nudge():
p = (ggplot(df1, aes('x', 'y')) +
geom_point(size=10) +
geom_point(size=10, color='red',
position=position_nudge(.25, .25)))
assert p + _theme == 'nudge'
def test_stack():
p = (ggplot(df2, aes('factor(z)')) +
geom_bar(aes(fill='factor(x)'), position='stack'))
assert p + _theme == 'stack'
def test_stack_negative():
df = df1.copy()
_loc = df.columns.get_loc
df.iloc[0, _loc('y')] *= -1
df.iloc[len(df)-1, _loc('y')] *= -1
p = (ggplot(df)
+ geom_col(aes('factor(x)', 'y', fill='factor(y)'),
position='stack')
+ geom_text(aes('factor(x)', 'y', label='y'),
position=position_stack(vjust=0.5))
)
assert p + _theme == 'stack-negative'
def test_fill():
p = (ggplot(df2, aes('factor(z)')) +
geom_bar(aes(fill='factor(x)'), position='fill'))
assert p + _theme == 'fill'
def test_dodge():
p = (ggplot(df2, aes('factor(z)')) +
geom_bar(aes(fill='factor(x)'), position='dodge'))
assert p + _theme == 'dodge'
def test_dodge_preserve_single():
df1 = pd.DataFrame({'x': ['a', 'b', 'b'],
'y': ['a', 'a', 'b']})
p = (ggplot(df1, aes('x', fill='y')) +
geom_bar(position=position_dodge(preserve='single')))
assert p + _theme == 'dodge_preserve_single'
def test_dodge_preserve_single_text():
df1 = pd.DataFrame({'x': ['a', 'b', 'b', 'b'],
'y': ['a', 'a', 'b', 'b']})
d = position_dodge(preserve='single', width=0.9)
p = (ggplot(df1, aes('x', fill='y'))
+ geom_bar(position=d)
+ geom_text(
aes(y=after_stat('count'), label=after_stat('count')),
stat='count',
position=d,
va='bottom')
)
assert p + _theme == 'dodge_preserve_single_text'
def test_dodge2():
p = (ggplot(df3, aes('x', 'y', color='c')) +
geom_boxplot(position='dodge2', size=2))
assert p + _theme == 'dodge2'
def test_dodge2_varwidth():
p = (ggplot(df3, aes('x', 'y', color='c')) +
geom_boxplot(
position=position_dodge2(preserve='single'),
varwidth=True,
size=2)
)
assert p + _theme == 'dodge2_varwidth'
def test_jitterdodge():
df = pd.DataFrame({
'x': np.ones(n*2),
'y': np.repeat(np.arange(n), 2),
'letters': np.repeat(list(string.ascii_lowercase[:n]), 2)})
position = position_jitterdodge(random_state=random_state)
p = (ggplot(df, aes('x', 'y', fill='letters')) +
geom_point(size=10, fill='black') +
geom_point(size=10, position=position))
assert p + _theme == 'jitterdodge'
def test_position_from_geom():
geom = geom_point(position='jitter')
assert isinstance(position.from_geom(geom), position_jitter)
geom = geom_point(position='position_jitter')
assert isinstance(position.from_geom(geom), position_jitter)
geom = geom_point(position=position_jitter())
assert isinstance(position.from_geom(geom), position_jitter)
geom = geom_point(position=position_jitter)
assert isinstance(position.from_geom(geom), position_jitter)
def test_dodge_empty_data():
empty_df = pd.DataFrame({'x': [], 'y': []})
p = (ggplot(df1, aes('x', 'y'))
+ geom_point()
+ geom_rect(
empty_df,
aes(xmin='x', xmax='x+1', ymin='y', ymax='y+1'),
position='dodge')
)
p.draw_test()
|
[
"plotnine.geom_boxplot",
"plotnine.position_dodge2",
"numpy.ones",
"numpy.arange",
"plotnine.position_dodge",
"plotnine.aes",
"plotnine.position_stack",
"pandas.DataFrame",
"plotnine.position_jitter",
"plotnine.after_stat",
"numpy.random.RandomState",
"plotnine.position_nudge",
"pytest.raises",
"plotnine.ggplot",
"plotnine.position_jitterdodge",
"plotnine.theme",
"plotnine.positions.position.position.from_geom",
"plotnine.geom_bar",
"plotnine.geom_point",
"plotnine.geom_jitter"
] |
[((520, 553), 'numpy.random.RandomState', 'np.random.RandomState', (['(1234567890)'], {}), '(1234567890)\n', (541, 553), True, 'import numpy as np\n'), ((560, 612), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': [1, 2, 1, 2], 'y': [1, 1, 2, 2]}"], {}), "({'x': [1, 2, 1, 2], 'y': [1, 1, 2, 2]})\n", (572, 612), True, 'import pandas as pd\n'), ((977, 1015), 'plotnine.theme', 'theme', ([], {'subplots_adjust': "{'right': 0.85}"}), "(subplots_adjust={'right': 0.85})\n", (982, 1015), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((1047, 1099), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': [1, 2, 1, 2], 'y': [1, 1, 2, 2]}"], {}), "({'x': [1, 2, 1, 2], 'y': [1, 1, 2, 2]})\n", (1059, 1099), True, 'import pandas as pd\n'), ((2675, 2733), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': ['a', 'b', 'b'], 'y': ['a', 'a', 'b']}"], {}), "({'x': ['a', 'b', 'b'], 'y': ['a', 'a', 'b']})\n", (2687, 2733), True, 'import pandas as pd\n'), ((2964, 3032), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': ['a', 'b', 'b', 'b'], 'y': ['a', 'a', 'b', 'b']}"], {}), "({'x': ['a', 'b', 'b', 'b'], 'y': ['a', 'a', 'b', 'b']})\n", (2976, 3032), True, 'import pandas as pd\n'), ((3066, 3110), 'plotnine.position_dodge', 'position_dodge', ([], {'preserve': '"""single"""', 'width': '(0.9)'}), "(preserve='single', width=0.9)\n", (3080, 3110), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((4035, 4082), 'plotnine.position_jitterdodge', 'position_jitterdodge', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (4055, 4082), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((4314, 4343), 'plotnine.geom_point', 'geom_point', ([], {'position': '"""jitter"""'}), "(position='jitter')\n", (4324, 4343), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((4421, 4459), 'plotnine.geom_point', 'geom_point', ([], {'position': '"""position_jitter"""'}), "(position='position_jitter')\n", (4431, 4459), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((4653, 4689), 'plotnine.geom_point', 'geom_point', ([], {'position': 'position_jitter'}), '(position=position_jitter)\n', (4663, 4689), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((4801, 4833), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': [], 'y': []}"], {}), "({'x': [], 'y': []})\n", (4813, 4833), True, 'import pandas as pd\n'), ((1274, 1363), 'plotnine.geom_jitter', 'geom_jitter', ([], 
{'size': '(10)', 'color': '"""blue"""', 'width': '(0.1)', 'height': '(0.1)', 'random_state': 'random_state'}), "(size=10, color='blue', width=0.1, height=0.1, random_state=\n random_state)\n", (1285, 1363), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((1425, 1453), 'pytest.raises', 'pytest.raises', (['PlotnineError'], {}), '(PlotnineError)\n', (1438, 1453), False, 'import pytest\n'), ((3496, 3535), 'plotnine.geom_boxplot', 'geom_boxplot', ([], {'position': '"""dodge2"""', 'size': '(2)'}), "(position='dodge2', size=2)\n", (3508, 3535), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((4191, 4229), 'plotnine.geom_point', 'geom_point', ([], {'size': '(10)', 'position': 'position'}), '(size=10, position=position)\n', (4201, 4229), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((4366, 4390), 'plotnine.positions.position.position.from_geom', 'position.from_geom', (['geom'], {}), '(geom)\n', (4384, 4390), False, 'from plotnine.positions.position import position\n'), ((4482, 4506), 'plotnine.positions.position.position.from_geom', 'position.from_geom', (['geom'], {}), '(geom)\n', (4500, 4506), False, 'from plotnine.positions.position import position\n'), ((4598, 4622), 'plotnine.positions.position.position.from_geom', 'position.from_geom', (['geom'], {}), '(geom)\n', (4616, 4622), False, 'from plotnine.positions.position import position\n'), ((4712, 4736), 'plotnine.positions.position.position.from_geom', 'position.from_geom', (['geom'], {}), '(geom)\n', (4730, 4736), False, 'from plotnine.positions.position import position\n'), ((1202, 1262), 'plotnine.geom_jitter', 'geom_jitter', ([], {'size': '(10)', 'color': '"""red"""', 'random_state': 'random_state'}), "(size=10, color='red', random_state=random_state)\n", (1213, 1262), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((1581, 1600), 'plotnine.geom_point', 'geom_point', ([], {'size': '(10)'}), '(size=10)\n', (1591, 1600), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((1775, 1791), 'plotnine.aes', 'aes', (['"""factor(z)"""'], {}), "('factor(z)')\n", (1778, 1791), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((1813, 1834), 'plotnine.aes', 'aes', ([], {'fill': '"""factor(x)"""'}), "(fill='factor(x)')\n", (1816, 1834), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, 
after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((2048, 2058), 'plotnine.ggplot', 'ggplot', (['df'], {}), '(df)\n', (2054, 2058), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((2179, 2211), 'plotnine.aes', 'aes', (['"""factor(x)"""', '"""y"""'], {'label': '"""y"""'}), "('factor(x)', 'y', label='y')\n", (2182, 2211), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((2364, 2380), 'plotnine.aes', 'aes', (['"""factor(z)"""'], {}), "('factor(z)')\n", (2367, 2380), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((2402, 2423), 'plotnine.aes', 'aes', ([], {'fill': '"""factor(x)"""'}), "(fill='factor(x)')\n", (2405, 2423), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((2516, 2532), 'plotnine.aes', 'aes', (['"""factor(z)"""'], {}), "('factor(z)')\n", (2519, 2532), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((2554, 2575), 'plotnine.aes', 'aes', ([], {'fill': '"""factor(x)"""'}), "(fill='factor(x)')\n", (2557, 2575), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((2779, 2797), 'plotnine.aes', 'aes', (['"""x"""'], {'fill': '"""y"""'}), "('x', fill='y')\n", (2782, 2797), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((3163, 3183), 'plotnine.geom_bar', 'geom_bar', ([], {'position': 'd'}), '(position=d)\n', (3171, 3183), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((3459, 3483), 'plotnine.aes', 'aes', (['"""x"""', '"""y"""'], {'color': '"""c"""'}), "('x', 'y', color='c')\n", (3462, 3483), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((3622, 3646), 'plotnine.aes', 'aes', (['"""x"""', '"""y"""'], {'color': '"""c"""'}), "('x', 'y', color='c')\n", (3625, 3646), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, 
geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((3897, 3911), 'numpy.ones', 'np.ones', (['(n * 2)'], {}), '(n * 2)\n', (3904, 3911), True, 'import numpy as np\n'), ((4146, 4179), 'plotnine.geom_point', 'geom_point', ([], {'size': '(10)', 'fill': '"""black"""'}), "(size=10, fill='black')\n", (4156, 4179), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((4557, 4574), 'plotnine.position_jitter', 'position_jitter', ([], {}), '()\n', (4572, 4574), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((4881, 4893), 'plotnine.geom_point', 'geom_point', ([], {}), '()\n', (4891, 4893), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((4952, 4999), 'plotnine.aes', 'aes', ([], {'xmin': '"""x"""', 'xmax': '"""x+1"""', 'ymin': '"""y"""', 'ymax': '"""y+1"""'}), "(xmin='x', xmax='x+1', ymin='y', ymax='y+1')\n", (4955, 4999), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((1171, 1190), 'plotnine.geom_point', 'geom_point', ([], {'size': '(10)'}), '(size=10)\n', (1181, 1190), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((1484, 1501), 'plotnine.position_jitter', 'position_jitter', ([], {}), '()\n', (1499, 1501), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((1555, 1568), 'plotnine.aes', 'aes', (['"""x"""', '"""y"""'], {}), "('x', 'y')\n", (1558, 1568), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((1674, 1700), 'plotnine.position_nudge', 'position_nudge', (['(0.25)', '(0.25)'], {}), '(0.25, 0.25)\n', (1688, 1700), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((2079, 2118), 'plotnine.aes', 'aes', (['"""factor(x)"""', '"""y"""'], {'fill': '"""factor(y)"""'}), "('factor(x)', 'y', fill='factor(y)')\n", (2082, 2118), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, 
position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((2243, 2268), 'plotnine.position_stack', 'position_stack', ([], {'vjust': '(0.5)'}), '(vjust=0.5)\n', (2257, 2268), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((2828, 2861), 'plotnine.position_dodge', 'position_dodge', ([], {'preserve': '"""single"""'}), "(preserve='single')\n", (2842, 2861), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((3132, 3150), 'plotnine.aes', 'aes', (['"""x"""'], {'fill': '"""y"""'}), "('x', fill='y')\n", (3135, 3150), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((3695, 3729), 'plotnine.position_dodge2', 'position_dodge2', ([], {'preserve': '"""single"""'}), "(preserve='single')\n", (3710, 3729), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((3934, 3946), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (3943, 3946), True, 'import numpy as np\n'), ((4104, 4133), 'plotnine.aes', 'aes', (['"""x"""', '"""y"""'], {'fill': '"""letters"""'}), "('x', 'y', fill='letters')\n", (4107, 4133), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((4855, 4868), 'plotnine.aes', 'aes', (['"""x"""', '"""y"""'], {}), "('x', 'y')\n", (4858, 4868), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((1145, 1158), 'plotnine.aes', 'aes', (['"""x"""', '"""y"""'], {}), "('x', 'y')\n", (1148, 1158), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((3225, 3244), 'plotnine.after_stat', 'after_stat', (['"""count"""'], {}), "('count')\n", (3235, 3244), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((3252, 3271), 'plotnine.after_stat', 'after_stat', (['"""count"""'], {}), "('count')\n", (3262, 3271), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n')]
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
import warnings
from math import exp
import numpy as np
def fit_factory(discard=1):
def fit(x, y):
p = np.polyfit(x, y, 1)
v = np.polyval(p, x)
e = np.abs(y - v)
        drop_idxs = np.argsort(e)[-discard:]  # indices of the 'discard' largest residuals
return np.polyfit(np.delete(x, drop_idxs),
np.delete(y, drop_idxs), 1)
return fit
def integrate_tolerance_series(odesys, atols, rtols, x, y0, params=(),
fit=lambda x, y: np.polyfit(x, y, 1), val=np.polyval, **kwargs):
"""
Parameters
----------
odesys : :class:`ODESys`
atols : array_like
Positive, monotonically increasing 1D array.
rtols : array_like
Positive, monotonically increasing 1D array.
x : array_like
Passed on to ``odesys.integrate`` for first set of tolerances.
(subsequent calls will use xout from first integration).
y0 : array_like
Passed on to ``odesys.integrate``.
params : array_like
Passed on to ``odesys.integrate``.
fit : callable
val : callable
\\*\\*kwargs:
Passed on to ``odesys.integrate``.
Returns
-------
result0 : Result
results : list of Result instances
extra : dict
errest : 2D array of error estimates for result0.yout
"""
if atols is None:
atols = rtols
if rtols is None:
rtols = atols
atols, rtols = map(np.asarray, (atols, rtols))
if atols.ndim != 1:
raise NotImplementedError("Assuming 1-dimensional array")
if atols.shape != rtols.shape:
raise ValueError("atols & rtols need to be of same length")
if 'atol' in kwargs or 'rtol' in kwargs:
raise ValueError("Neither atol nor rtol are allowed in kwargs")
if not np.all(atols > 0) or not np.all(rtols > 0):
raise ValueError("atols & rtols need to > 0")
if not np.all(np.diff(atols) > 0) or not np.all(np.diff(rtols) > 0):
raise ValueError("atols & rtols need to obey strict positive monotonicity")
if atols.size < 4:
raise ValueError("Pointless doing linear interpolation on less than 3 points")
if atols.size < 6:
warnings.warn("Statistics will be (very) shaky when doing linear "
"interpolation on less than 5 points.")
ntols = atols.size
result0 = odesys.integrate(x, y0, params, atol=atols[0], rtol=rtols[0], **kwargs)
results = [odesys.integrate(result0.xout, y0, params, atol=atols[i], rtol=rtols[i], **kwargs)
for i in range(1, ntols)]
errest = []
for ix, vx in enumerate(result0.xout):
diffs = np.array([result0.yout[ix, :] - r.yout[ix, :] for r in results])
tols = np.array([atol + rtol*np.abs(r.yout[ix, :]) for r, atol, rtol in
zip([result0] + results, atols, rtols)])
ln_tols = np.log(tols).astype(np.float64)
ln_absd = np.log(np.abs(diffs)).astype(np.float64)
yerrs = []
for iy in range(result0.yout.shape[-1]):
if np.all(diffs[:, iy] == 0):
yerrs.append(0)
else:
p = fit(ln_tols[1:, iy], ln_absd[:, iy])
yerrs.append(exp(val(p, ln_tols[0, iy])))
errest.append(yerrs)
return result0, results, {'errest': np.array(errest)}
|
[
"numpy.abs",
"numpy.log",
"numpy.polyfit",
"numpy.polyval",
"numpy.argsort",
"numpy.diff",
"numpy.array",
"warnings.warn",
"numpy.delete",
"numpy.all"
] |
[((210, 229), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (220, 229), True, 'import numpy as np\n'), ((242, 258), 'numpy.polyval', 'np.polyval', (['p', 'x'], {}), '(p, x)\n', (252, 258), True, 'import numpy as np\n'), ((271, 284), 'numpy.abs', 'np.abs', (['(y - v)'], {}), '(y - v)\n', (277, 284), True, 'import numpy as np\n'), ((570, 589), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (580, 589), True, 'import numpy as np\n'), ((2252, 2365), 'warnings.warn', 'warnings.warn', (['"""Statistics will be (very) shaky when doing linear interpolation on less than 5 points."""'], {}), "(\n 'Statistics will be (very) shaky when doing linear interpolation on less than 5 points.'\n )\n", (2265, 2365), False, 'import warnings\n'), ((2704, 2770), 'numpy.array', 'np.array', (['[(result0.yout[ix, :] - r.yout[ix, :]) for r in results]'], {}), '([(result0.yout[ix, :] - r.yout[ix, :]) for r in results])\n', (2712, 2770), True, 'import numpy as np\n'), ((305, 318), 'numpy.argsort', 'np.argsort', (['e'], {}), '(e)\n', (315, 318), True, 'import numpy as np\n'), ((355, 378), 'numpy.delete', 'np.delete', (['x', 'drop_idxs'], {}), '(x, drop_idxs)\n', (364, 378), True, 'import numpy as np\n'), ((406, 429), 'numpy.delete', 'np.delete', (['y', 'drop_idxs'], {}), '(y, drop_idxs)\n', (415, 429), True, 'import numpy as np\n'), ((1856, 1873), 'numpy.all', 'np.all', (['(atols > 0)'], {}), '(atols > 0)\n', (1862, 1873), True, 'import numpy as np\n'), ((1881, 1898), 'numpy.all', 'np.all', (['(rtols > 0)'], {}), '(rtols > 0)\n', (1887, 1898), True, 'import numpy as np\n'), ((3107, 3132), 'numpy.all', 'np.all', (['(diffs[:, iy] == 0)'], {}), '(diffs[:, iy] == 0)\n', (3113, 3132), True, 'import numpy as np\n'), ((3368, 3384), 'numpy.array', 'np.array', (['errest'], {}), '(errest)\n', (3376, 3384), True, 'import numpy as np\n'), ((2933, 2945), 'numpy.log', 'np.log', (['tols'], {}), '(tols)\n', (2939, 2945), True, 'import numpy as np\n'), ((1972, 1986), 'numpy.diff', 'np.diff', (['atols'], {}), '(atols)\n', (1979, 1986), True, 'import numpy as np\n'), ((2006, 2020), 'numpy.diff', 'np.diff', (['rtols'], {}), '(rtols)\n', (2013, 2020), True, 'import numpy as np\n'), ((2990, 3003), 'numpy.abs', 'np.abs', (['diffs'], {}), '(diffs)\n', (2996, 3003), True, 'import numpy as np\n'), ((2806, 2827), 'numpy.abs', 'np.abs', (['r.yout[ix, :]'], {}), '(r.yout[ix, :])\n', (2812, 2827), True, 'import numpy as np\n')]
|
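The per-component error estimate in integrate_tolerance_series above comes from a linear fit of log|Δy| against log(tolerance), extrapolated to the tightest tolerance. A self-contained numeric illustration of that step (the tolerance and difference values below are made up):
import numpy as np
tols = np.array([1e-8, 1e-7, 1e-6, 1e-5])   # atol/rtol series, tightest first
diffs = 0.5 * tols[1:]                     # synthetic |y(tols[0]) - y(tols[i])|, i >= 1
p = np.polyfit(np.log(tols[1:]), np.log(diffs), 1)
errest = np.exp(np.polyval(p, np.log(tols[0])))
print(errest)   # ~5e-9, i.e. 0.5 * tols[0]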
#!/usr/bin/env python3
import os, numpy as np, argparse
def relFit(nu, eps): return 7.33972668 * np.power(eps, 1/6.0) / np.sqrt(nu)
def etaFit(nu, eps): return np.power(eps, -0.25) * np.power(nu, 0.75)
def lambdaFit(nu, eps): return 5.35507603 * np.power(eps, -1/6.0) * np.sqrt(nu)
def runspec(nu, eps, run, cs=None):
  if cs is not None:
    return "HITBND_LES_EXT2pi_EPS%.03f_NU%.04f_CS%.02f_RUN%d" \
           % (eps, nu, cs, run)
else:
return "HITBND_DNS_EXT2pi_EPS%.03f_NU%.04f_RUN%d" \
% (eps, nu, run)
def getSettings(nu, eps, cs):
if cs is not None:
options = '-sgs SSM -cs %f -bpdx 4 -bpdy 4 -bpdz 4 -CFL 0.1 ' % cs
else:
options = '-bpdx 12 -bpdy 12 -bpdz 12 -CFL 0.02 '
tAnalysis = np.sqrt(nu / eps)
return options + '-extentx 6.2831853072 -dump2D 0 -dump3D 0 ' \
'-tdump 1 -BC_x periodic -BC_y periodic -BC_z periodic ' \
'-spectralIC fromFit -initCond HITurbulence -tAnalysis %f ' \
'-compute-dissipation 1 -nprocsx 1 -nprocsy 1 -nprocsz 1 ' \
'-spectralForcing 1 -tend 100 -keepMomentumConstant 1 ' \
'-analysis HIT -nu %f -energyInjectionRate %f ' \
% (tAnalysis, nu, eps)
def launchEuler(nu, eps, run):
runname = runspec(nu, eps, run)
print(runname)
tAnalysis = np.sqrt(nu / eps)
os.system("export NU=%f \n export EPS=%f \n export TANALYSIS=%f \n " \
"echo $NU $EPS \n ./launchEuler.sh settingsHIT_DNS.sh %s " \
% (nu, eps, tAnalysis, runname) )
def launchDaint(nCases, les):
SCRATCH = os.getenv('SCRATCH')
HOME = os.getenv('HOME')
f = open('HIT_sbatch','w')
f.write('#!/bin/bash -l \n')
if les: f.write('#SBATCH --job-name=LES_HIT \n')
else: f.write('#SBATCH --job-name=DNS_HIT \n')
f.write('#SBATCH --time=24:00:00 \n')
f.write('#SBATCH --output=out.%j.%a.txt \n')
f.write('#SBATCH --error=err.%j.%a.txt \n')
f.write('#SBATCH --constraint=gpu \n')
f.write('#SBATCH --account=s929 \n')
f.write('#SBATCH --array=0-%d \n' % (nCases-1))
#f.write('#SBATCH --partition=normal \n')
#f.write('#SBATCH --ntasks-per-node=1 \n')
f.write('ind=$SLURM_ARRAY_TASK_ID \n')
if les:
f.write('RUNDIRN=`./launchLESHIT.py --LES --case ${ind} --printName` \n')
f.write('OPTIONS=`./launchLESHIT.py --LES --case ${ind} --printOptions` \n')
else:
f.write('RUNDIRN=`./launchLESHIT.py --case ${ind} --printName` \n')
f.write('OPTIONS=`./launchLESHIT.py --case ${ind} --printOptions` \n')
f.write('mkdir -p %s/CubismUP3D/${RUNDIRN} \n' % SCRATCH)
f.write('cd %s/CubismUP3D/${RUNDIRN} \n' % SCRATCH)
f.write('cp %s/CubismUP_3D/bin/simulation ./exec \n' % HOME)
f.write('export OMP_NUM_THREADS=12 \n')
f.write('export CRAY_CUDA_MPS=1 \n')
f.write('srun --ntasks 1 --ntasks-per-node=1 ./exec ${OPTIONS} \n')
f.close()
os.system('sbatch HIT_sbatch')
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description = "Compute a target file for RL agent from DNS data.")
parser.add_argument('--printName', dest='printName',
action='store_true', help="Only print run name.")
parser.set_defaults(printName=False)
parser.add_argument('--printOptions', dest='printOptions',
action='store_true', help="Only print run options.")
parser.set_defaults(printOptions=False)
parser.add_argument('--launchDaint', dest='launchDaint',
        action='store_true', help="Write and submit the Piz Daint array job.")
parser.set_defaults(launchDaint=False)
parser.add_argument('--launchEuler', dest='launchEuler',
        action='store_true', help="Launch the selected runs on Euler.")
parser.set_defaults(launchEuler=False)
parser.add_argument('--LES', dest='LES', action='store_true',
help="Triggers LES modeling.")
parser.set_defaults(LES=False)
parser.add_argument('--case', type = int, default = -1,
help="Simulation case.")
args = parser.parse_args()
if args.LES: rangeles = np.linspace(0.16, 0.24, 9)
else: rangeles = [None]
NUS, EPS, RUN, CSS = [], [], [], []
h = 2 * np.pi / 16 / 12
for nu in np.logspace(np.log10(0.002), np.log10(0.02), 16) :
for eps in np.logspace(np.log10(0.01), np.log10(2.0), 16) :
if relFit(nu, eps) > 100 or relFit(nu, eps) < 20: continue
if lambdaFit(nu, eps) > 0.1 * 2 * np.pi: continue
if etaFit(nu, eps) > h or etaFit(nu, eps) < h/8: continue
for les in rangeles :
for i in [0, 1, 2] :
NUS,EPS,RUN,CSS = NUS+[nu], EPS+[eps], RUN+[i], CSS+[les]
nCases = len(NUS)
#print('Defined %d cases' % nCases)
if args.launchDaint: launchDaint(nCases, args.LES)
if args.case < 0: cases = range(nCases)
else: cases = [args.case]
for i in cases:
if args.printOptions:
print( getSettings(NUS[i], EPS[i], CSS[i]) )
if args.printName:
print( runspec(NUS[i], EPS[i], RUN[i], CSS[i]) )
if args.launchEuler:
launchEuler(NUS[i], EPS[i], RUN[i])
#for nu in [0.002, 0.004, 0.008] :
# for eps in [0.02, 0.04, 0.08, 0.16, 0.32] :
# tke0 = 2.77578963 * np.power(eps, (2.0/3.0) )
# for scal in [2, 3] :
# tke0 = 2.77578963 * np.power(eps, (2.0/3.0) )
# for scal in [2] :
# ext = scal * np.pi
# os.system("\
# export NU=%f \n\
# export EPS=%f \n\
# export TKE0=%f \n\
# export EXT=%f \n\
# echo $NU $EPS $TKE0 $EXT \n\
# ./launchEuler.sh settingsHIT_DNS.sh HIT_DNS_EXT%dpi_EPS%.02f_NU%.03f"
# % (nu, eps, tke0, ext, scal, eps, nu))
#for nu in [0.001, 0.002, 0.004, 0.008, 0.016] :
# for eps in [0.02, 0.04, 0.08, 0.16, 0.32, 0.64] :
|
[
"argparse.ArgumentParser",
"numpy.power",
"os.system",
"numpy.linspace",
"numpy.log10",
"os.getenv",
"numpy.sqrt"
] |
[((762, 779), 'numpy.sqrt', 'np.sqrt', (['(nu / eps)'], {}), '(nu / eps)\n', (769, 779), True, 'import os, numpy as np, argparse\n'), ((1307, 1324), 'numpy.sqrt', 'np.sqrt', (['(nu / eps)'], {}), '(nu / eps)\n', (1314, 1324), True, 'import os, numpy as np, argparse\n'), ((1329, 1496), 'os.system', 'os.system', (['("""export NU=%f \n export EPS=%f \n export TANALYSIS=%f \n echo $NU $EPS \n ./launchEuler.sh settingsHIT_DNS.sh %s """\n % (nu, eps, tAnalysis, runname))'], {}), '(\n """export NU=%f \n export EPS=%f \n export TANALYSIS=%f \n echo $NU $EPS \n ./launchEuler.sh settingsHIT_DNS.sh %s """\n % (nu, eps, tAnalysis, runname))\n', (1338, 1496), False, 'import os, numpy as np, argparse\n'), ((1569, 1589), 'os.getenv', 'os.getenv', (['"""SCRATCH"""'], {}), "('SCRATCH')\n", (1578, 1589), False, 'import os, numpy as np, argparse\n'), ((1601, 1618), 'os.getenv', 'os.getenv', (['"""HOME"""'], {}), "('HOME')\n", (1610, 1618), False, 'import os, numpy as np, argparse\n'), ((2900, 2930), 'os.system', 'os.system', (['"""sbatch HIT_sbatch"""'], {}), "('sbatch HIT_sbatch')\n", (2909, 2930), False, 'import os, numpy as np, argparse\n'), ((2972, 3065), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Compute a target file for RL agent from DNS data."""'}), "(description=\n 'Compute a target file for RL agent from DNS data.')\n", (2995, 3065), False, 'import os, numpy as np, argparse\n'), ((124, 135), 'numpy.sqrt', 'np.sqrt', (['nu'], {}), '(nu)\n', (131, 135), True, 'import os, numpy as np, argparse\n'), ((168, 188), 'numpy.power', 'np.power', (['eps', '(-0.25)'], {}), '(eps, -0.25)\n', (176, 188), True, 'import os, numpy as np, argparse\n'), ((191, 209), 'numpy.power', 'np.power', (['nu', '(0.75)'], {}), '(nu, 0.75)\n', (199, 209), True, 'import os, numpy as np, argparse\n'), ((278, 289), 'numpy.sqrt', 'np.sqrt', (['nu'], {}), '(nu)\n', (285, 289), True, 'import os, numpy as np, argparse\n'), ((4009, 4035), 'numpy.linspace', 'np.linspace', (['(0.16)', '(0.24)', '(9)'], {}), '(0.16, 0.24, 9)\n', (4020, 4035), True, 'import os, numpy as np, argparse\n'), ((4159, 4174), 'numpy.log10', 'np.log10', (['(0.002)'], {}), '(0.002)\n', (4167, 4174), True, 'import os, numpy as np, argparse\n'), ((4176, 4190), 'numpy.log10', 'np.log10', (['(0.02)'], {}), '(0.02)\n', (4184, 4190), True, 'import os, numpy as np, argparse\n'), ((101, 123), 'numpy.power', 'np.power', (['eps', '(1 / 6.0)'], {}), '(eps, 1 / 6.0)\n', (109, 123), True, 'import os, numpy as np, argparse\n'), ((255, 278), 'numpy.power', 'np.power', (['eps', '(-1 / 6.0)'], {}), '(eps, -1 / 6.0)\n', (263, 278), True, 'import os, numpy as np, argparse\n'), ((4227, 4241), 'numpy.log10', 'np.log10', (['(0.01)'], {}), '(0.01)\n', (4235, 4241), True, 'import os, numpy as np, argparse\n'), ((4243, 4256), 'numpy.log10', 'np.log10', (['(2.0)'], {}), '(2.0)\n', (4251, 4256), True, 'import os, numpy as np, argparse\n')]
|
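The launcher above scans (nu, eps) pairs and keeps only those passing three checks built from the fits named relFit, etaFit and lambdaFit. A worked evaluation of those checks for one admissible pair (nu = 0.005, eps = 0.3 is an arbitrary sample inside the scanned ranges):
import numpy as np
def relFit(nu, eps): return 7.33972668 * np.power(eps, 1/6.0) / np.sqrt(nu)
def etaFit(nu, eps): return np.power(eps, -0.25) * np.power(nu, 0.75)
def lambdaFit(nu, eps): return 5.35507603 * np.power(eps, -1/6.0) * np.sqrt(nu)
nu, eps = 0.005, 0.3
h = 2 * np.pi / 16 / 12                            # grid spacing used in the scan above
print(20 <= relFit(nu, eps) <= 100)              # first filter  -> True (~85)
print(lambdaFit(nu, eps) <= 0.1 * 2 * np.pi)     # second filter -> True (~0.46)
print(h / 8 <= etaFit(nu, eps) <= h)             # third filter  -> True (~0.025)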
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import numpy as np
def find_smallest_positive(alist):
# find first positive value
minpos = -1
for x in alist:
if x > 0:
minpos = x
break
if minpos > 0:
# find smallest positive value
for x in alist:
if x > 0 and x < minpos:
minpos = x
return minpos
def rebase_to_smallest_positive(alist):
base = find_smallest_positive(alist)
if base == -1:
return None
else:
return [x - base for x in alist]
def compute_maximum_subarray(score_vector=None):
begin_temp = begin = end = 0
start_val = score_vector[0]
max_ending_here = max_so_far = start_val
for pos, x in enumerate(score_vector[1:], 1):
if max_ending_here < 0:
max_ending_here = x
begin_temp = pos
else:
max_ending_here = max_ending_here + x
if max_ending_here > max_so_far:
max_so_far = max_ending_here
begin = begin_temp
end = pos
return begin, end
def compute_iterated_maximum_subarray(seq=None, score=None, min_subarray_size=None, max_subarray_size=None, output='minimal', margin=1):
original_score = score
while True:
# find (begin,end) of subarray in each element
begin, end = compute_maximum_subarray(score_vector=score)
# check that the retrieved subarray is larger than min_subarray_size
if end - begin < min_subarray_size - 1:
break
else:
# extract maximum subarray
# NOTE: in order to account for border effects we expand on the left and on the right by 'margin'
first = max(0, begin - margin)
# NOTE: we return + 1 for the rightmost postition to be compliant with the 'one after the end' semantics
last = min(len(seq), end + margin + 1)
subarray = seq[first: last]
subarray_size = len(subarray)
if max_subarray_size == -1 or subarray_size <= max_subarray_size:
# store data
acc = 0
for x in original_score[begin: end + 1]:
acc += x
if output == 'minimal':
subarray = {'subarray_string': ''.join(subarray)}
else:
subarray = {'subarray_string': ''.join(subarray), 'subarray': subarray, 'begin': first,
'end': last, 'size': subarray_size, 'seq': seq, 'score': acc}
yield subarray
if subarray_size > max_subarray_size:
# if the subarray is too large then rebase the score list, i.e. offset by the smallest positive value
score = rebase_to_smallest_positive(score)
if score is None:
break
else:
# remove current subarray by zeroing importance values of subarray
score[first: last] = [0.0] * subarray_size
# iterate after removal of current subarray
def extract_sequence_and_score(graph=None):
# make dict with positions as keys and lists of ids as values
pos_to_ids = defaultdict(list)
for u in graph.nodes():
if 'position' not in graph.node[u]: # no position attributes in graph, use the vertex id instead
raise Exception('Missing "position" attribute in node:%s %s' % (u, graph.node[u]))
else:
pos = graph.node[u]['position']
# accumulate all node ids
pos_to_ids[pos] += [u]
# extract sequence of labels and importances
seq = [None] * len(pos_to_ids)
score = [0] * len(pos_to_ids)
for pos in sorted(pos_to_ids):
ids = pos_to_ids[pos]
labels = [graph.node[u].get('label', 'N/A') for u in ids]
# check that all labels for the same position are identical
assert(sum([1 for label in labels if label == labels[0]]) == len(labels)
), 'ERROR: non identical labels referring to same position: %s %s' % (pos, labels)
seq[pos] = labels[0]
# average all importance score for the same position
importances = [graph.node[u].get('importance', 0) for u in ids]
score[pos] = np.mean(importances)
return seq, score
def compute_max_subarrays_sequence(seq=None, score=None, min_subarray_size=None, max_subarray_size=None, output='minimal', margin=1):
# extract subarrays
for subarray in compute_iterated_maximum_subarray(seq=seq, score=score, min_subarray_size=min_subarray_size, max_subarray_size=max_subarray_size, output=output, margin=margin):
yield subarray
def compute_max_subarrays(graph=None, min_subarray_size=None, max_subarray_size=None, output='minimal', margin=1):
seq, score = extract_sequence_and_score(graph)
for subarray in compute_max_subarrays_sequence(seq=seq, score=score, min_subarray_size=min_subarray_size, max_subarray_size=max_subarray_size, output=output, margin=margin):
yield subarray
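# --- Usage sketch (added for illustration; not part of the original module) ---
# Assuming only the functions defined above, compute_max_subarrays_sequence can be
# exercised on a toy sequence: the high-scoring positions around 'c', 'd', 'e' are
# returned as a single subarray, expanded by `margin` on each side.
if __name__ == '__main__':
    toy_seq = list('abcdefgh')
    toy_score = [0.1, 0.2, 0.9, 1.5, 1.1, 0.2, 0.1, 0.05]
    for sub in compute_max_subarrays_sequence(seq=toy_seq, score=toy_score,
                                           min_subarray_size=2, max_subarray_size=5,
                                           output='full'):
        # each item carries the extracted substring plus its boundaries and accumulated score
        print(sub['subarray_string'], sub['begin'], sub['end'], sub['score'])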
|
[
"collections.defaultdict",
"numpy.mean"
] |
[((3311, 3328), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3322, 3328), False, 'from collections import defaultdict\n'), ((4362, 4382), 'numpy.mean', 'np.mean', (['importances'], {}), '(importances)\n', (4369, 4382), True, 'import numpy as np\n')]
|
from __future__ import print_function
from create_tree import *
import numpy as np
import random
DATA_DIR = "../data/"
def curriculum_depth(i, num_examples, max_depth):
curriculum_max_depth= int((max_depth*i)/num_examples)
#print(i, curriculum_max_depth,)
if curriculum_max_depth > 0:
random_depth = 2 + np.random.randint(curriculum_max_depth)
else:
random_depth = 2
#print("DEPTH = ", random_depth)
return random_depth
def copy_t2t(depth):
my_tree = generate_data(depth-1)
change_nts(my_tree)
my_list = convert_to_list_inorder(my_tree,[])
infix_tree = ' '.join(str(e) for e in my_list)
#print my_tree
return ([infix_tree, infix_tree])
def create_examples(num_examples, max_depth, function):
data = []
for i in range(num_examples):
depth = max_depth
if np.random.randint(2) == 0:
depth = curriculum_depth(i, num_examples, max_depth)
data.append(function(depth))
return data
if __name__ == "__main__":
num_examples = 1000
max_depth = 5
data_subset = "train"
t2t_operation = "COPY"
seed = 0
#NOTE: we need both -- for reproducible trees...
#numpy.random.seed(seed)
#random.seed(seed)
if t2t_operation == "COPY":
data = create_examples(num_examples,max_depth, function=copy_t2t)
trans = open(DATA_DIR + data_subset + '.copy', 'w')
elif t2t_operation == "RELABEL_1":
data = create_examples(num_examples,max_depth, function=copy_t2t)
trans = open(DATA_DIR + data_subset + '.copy', 'w')
orig = open(DATA_DIR + data_subset + '.orig', 'w')
for i in range(num_examples):
print(data[i][0], file=orig)
print(data[i][1], file=trans)
#orig_vocab = open(DATA_DIR + 'vocab.train.orig', 'w')
#trans_vocab = open(DATA_DIR + 'vocab.train.copy', 'w')
#max_num = 256
#operators = ['+','-','*','/']
#for i in range(1, max_num+1):
# print >> orig_vocab, i, i
# print >> trans_vocab, i, i
#for i in range(len(operators)):
# print >> orig_vocab, operators[i], max_num+i+1
# print >> trans_vocab, operators[i], max_num+i+1
#print >> orig_vocab, '(', max_num + len(operators) + 1
#print >> orig_vocab, ')', max_num + len(operators) + 2
#print >> trans_vocab, '(', max_num + len(operators) + 1
#print >> trans_vocab, ')', max_num + len(operators) + 2
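# --- Usage sketch (added for illustration; not part of the original script) ---
# curriculum_depth only needs numpy, so it can be sanity-checked without the
# create_tree module: sampled depths start at 2 and grow (stochastically)
# towards max_depth as the example index i approaches num_examples.
def demo_curriculum_depth(num_examples=1000, max_depth=5):
    sampled = [curriculum_depth(i, num_examples, max_depth)
               for i in range(0, num_examples, 100)]
    print(sampled)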
|
[
"numpy.random.randint"
] |
[((326, 365), 'numpy.random.randint', 'np.random.randint', (['curriculum_max_depth'], {}), '(curriculum_max_depth)\n', (343, 365), True, 'import numpy as np\n'), ((857, 877), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (874, 877), True, 'import numpy as np\n')]
|
# Network
import numpy as np
import pandas as pd
import simulator
import random
from igraph import *
import matplotlib.pyplot as plt
class Network():
"""docstring for Network"""
def __init__(self, simulator):
		# Generate a random graph
self.g = Graph.Erdos_Renyi(simulator.num_nodi,simulator.p_link)
		# Initialize the time-step vector and the epidemic-state vector
self.t_state = np.zeros((simulator.num_nodi,1))
self.e_state = np.zeros((simulator.num_nodi,1),dtype=np.int8)
		# Random initial assignment of the exposed nodes
np.put(self.e_state,np.random.choice(range(simulator.num_nodi*1), simulator.exp0, replace=False),1)
		self.states = {} # List of the states
		self.data = pd.DataFrame(columns=["index","days","exposed","infected","severe infected","recovered","dead","susceptible","total"]) # State table
	def update_states(self,i,simulator): # Update the states
		"""List of the states:
- Susceptible = 0
- Exposed = 1
- Infected = 2
- Severe Infected = 3
- Recovered = 4
- Dead = 5
"""
		# Copy the epidemic states from the state arrays into the dictionary
self.states = { 'exposed':np.where(np.copy(self.e_state)==1,self.e_state,0),
'infected':np.where(np.copy(self.e_state)==2,self.e_state,0),
'recovered':np.where(np.copy(self.e_state)==4,self.e_state,0),
'severe_infected':np.where(np.copy(self.e_state)==3,self.e_state,0),
'dead':np.where(np.copy(self.e_state)==5,self.e_state,0),
'susceptible':(simulator.num_nodi - np.count_nonzero(np.copy(self.e_state))),
'total_cases':np.count_nonzero(np.copy(self.e_state)) }
		# Insert the count of each epidemic state into the dataframe
self.data.loc[i,:] = [i, i*simulator.dt_state,np.count_nonzero(self.states['exposed']), np.count_nonzero(self.states['infected']),
np.count_nonzero(self.states['severe_infected']), np.count_nonzero(self.states['recovered']),
np.count_nonzero(self.states['dead']), self.states['susceptible'], self.states['total_cases']]
#print(self.data)
	def plot(self,i,simulator): # Create the plots
plt.clf()
ax = plt.gca()
self.data.plot(x = 'days', y = 'susceptible', kind = 'line', color = 'cyan', ax = ax)
self.data.plot(x = 'days', y = 'exposed', kind = 'line', color = 'yellow', ax = ax)
self.data.plot(x = 'days', y = 'infected', kind = 'line', color = 'blue', ax = ax)
self.data.plot(x = 'days', y = 'severe infected', kind = 'line', color = 'magenta', ax = ax)
self.data.plot(x = 'days', y = 'recovered', kind = 'line', color = 'green', ax = ax)
self.data.plot(x = 'days', y = 'dead', kind = 'line', color = 'brown', ax = ax)
plt.title('link_p: {}; exp0: {}; t_inc: {}; t_inf: {}\n alpha: {}; beta: {}; gamma: {}'.format(simulator.p_link, simulator.exp0,simulator.t_exp,simulator.t_inf,simulator.alfa,simulator.beta,simulator.gamma))
plt.xlabel('Time (days)')
plt.ylabel('Number of nodes')
plt.savefig('./plots/states.png')
	def update_nodes(self,i,simulator): # Update the network nodes (remove dead nodes and isolate the severely infected ones)
pass
	def get_new_cases(self,i,simulator): # New cases (update the nodes that spread the epidemic)
		# Find the neighbours of the exposed, infected and severely infected nodes
		# Compute the probability that the neighbours get infected with rate alfa
		# Exposed nodes
n_exp = np.array(np.nonzero(self.states['exposed'])[0])
		# Infected nodes
n_inf = np.array(np.nonzero(self.states['infected'])[0])
		# Severely infected nodes
n_g_inf = np.array(np.nonzero(self.states['severe_infected'])[0])
		# Recovered nodes
n_rec = np.array(np.nonzero(self.states['recovered'])[0])
		# Dead nodes
n_dead = np.array(np.nonzero(self.states['dead'])[0])
new_cases = []
		# Loop over the exposed, infected and severely infected nodes and find their susceptible neighbours, which get infected with rate alfa
contaggiosi = np.concatenate((n_exp,n_inf,n_g_inf), axis=None)
for x in contaggiosi:
for n in self.g.neighbors(x):
Rand = np.random.random()
				# Condition for becoming a new exposed case (within the probability, not already infectious, not recovered, not dead, and not a duplicate)
if (Rand<simulator.alfa) and (n not in contaggiosi) and (n not in n_rec) and (n not in n_dead) and (n not in new_cases):
new_cases.append(n)
#print(new_cases)
return new_cases
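# --- Usage sketch (added for illustration; not part of the original module) ---
# The class expects a `simulator` object exposing the attributes used above
# (num_nodi, p_link, exp0, dt_state, alfa, ...). A minimal stand-in namespace with
# arbitrary example values is enough to run a few steps, provided igraph and the
# project's `simulator` module (imported above) are available.
if __name__ == '__main__':
    from types import SimpleNamespace
    sim = SimpleNamespace(num_nodi=200, p_link=0.05, exp0=5, dt_state=1,
                          t_exp=5, t_inf=10, alfa=0.05, beta=0.1, gamma=0.02)
    net = Network(sim)
    for step in range(10):
        net.update_states(step, sim)
        for n in net.get_new_cases(step, sim):
            net.e_state[n] = 1  # mark the new cases as exposed
    print(net.data.tail())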
|
[
"pandas.DataFrame",
"numpy.count_nonzero",
"numpy.concatenate",
"numpy.copy",
"matplotlib.pyplot.clf",
"numpy.zeros",
"numpy.nonzero",
"numpy.random.random",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((408, 441), 'numpy.zeros', 'np.zeros', (['(simulator.num_nodi, 1)'], {}), '((simulator.num_nodi, 1))\n', (416, 441), True, 'import numpy as np\n'), ((458, 506), 'numpy.zeros', 'np.zeros', (['(simulator.num_nodi, 1)'], {'dtype': 'np.int8'}), '((simulator.num_nodi, 1), dtype=np.int8)\n', (466, 506), True, 'import numpy as np\n'), ((718, 848), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['index', 'days', 'exposed', 'infected', 'severe infected', 'recovered',\n 'dead', 'susceptible', 'total']"}), "(columns=['index', 'days', 'exposed', 'infected',\n 'severe infected', 'recovered', 'dead', 'susceptible', 'total'])\n", (730, 848), True, 'import pandas as pd\n'), ((2123, 2132), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2130, 2132), True, 'import matplotlib.pyplot as plt\n'), ((2141, 2150), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2148, 2150), True, 'import matplotlib.pyplot as plt\n'), ((2888, 2913), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (days)"""'], {}), "('Time (days)')\n", (2898, 2913), True, 'import matplotlib.pyplot as plt\n'), ((2916, 2945), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of nodes"""'], {}), "('Number of nodes')\n", (2926, 2945), True, 'import matplotlib.pyplot as plt\n'), ((2948, 2981), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./plots/states.png"""'], {}), "('./plots/states.png')\n", (2959, 2981), True, 'import matplotlib.pyplot as plt\n'), ((3934, 3984), 'numpy.concatenate', 'np.concatenate', (['(n_exp, n_inf, n_g_inf)'], {'axis': 'None'}), '((n_exp, n_inf, n_g_inf), axis=None)\n', (3948, 3984), True, 'import numpy as np\n'), ((1769, 1809), 'numpy.count_nonzero', 'np.count_nonzero', (["self.states['exposed']"], {}), "(self.states['exposed'])\n", (1785, 1809), True, 'import numpy as np\n'), ((1811, 1852), 'numpy.count_nonzero', 'np.count_nonzero', (["self.states['infected']"], {}), "(self.states['infected'])\n", (1827, 1852), True, 'import numpy as np\n'), ((1856, 1904), 'numpy.count_nonzero', 'np.count_nonzero', (["self.states['severe_infected']"], {}), "(self.states['severe_infected'])\n", (1872, 1904), True, 'import numpy as np\n'), ((1906, 1948), 'numpy.count_nonzero', 'np.count_nonzero', (["self.states['recovered']"], {}), "(self.states['recovered'])\n", (1922, 1948), True, 'import numpy as np\n'), ((1952, 1989), 'numpy.count_nonzero', 'np.count_nonzero', (["self.states['dead']"], {}), "(self.states['dead'])\n", (1968, 1989), True, 'import numpy as np\n'), ((1628, 1649), 'numpy.copy', 'np.copy', (['self.e_state'], {}), '(self.e_state)\n', (1635, 1649), True, 'import numpy as np\n'), ((3409, 3443), 'numpy.nonzero', 'np.nonzero', (["self.states['exposed']"], {}), "(self.states['exposed'])\n", (3419, 3443), True, 'import numpy as np\n'), ((3484, 3519), 'numpy.nonzero', 'np.nonzero', (["self.states['infected']"], {}), "(self.states['infected'])\n", (3494, 3519), True, 'import numpy as np\n'), ((3573, 3615), 'numpy.nonzero', 'np.nonzero', (["self.states['severe_infected']"], {}), "(self.states['severe_infected'])\n", (3583, 3615), True, 'import numpy as np\n'), ((3656, 3692), 'numpy.nonzero', 'np.nonzero', (["self.states['recovered']"], {}), "(self.states['recovered'])\n", (3666, 3692), True, 'import numpy as np\n'), ((3732, 3763), 'numpy.nonzero', 'np.nonzero', (["self.states['dead']"], {}), "(self.states['dead'])\n", (3742, 3763), True, 'import numpy as np\n'), ((4052, 4070), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (4068, 4070), True, 'import numpy as np\n'), ((1184, 1205), 
'numpy.copy', 'np.copy', (['self.e_state'], {}), '(self.e_state)\n', (1191, 1205), True, 'import numpy as np\n'), ((1252, 1273), 'numpy.copy', 'np.copy', (['self.e_state'], {}), '(self.e_state)\n', (1259, 1273), True, 'import numpy as np\n'), ((1322, 1343), 'numpy.copy', 'np.copy', (['self.e_state'], {}), '(self.e_state)\n', (1329, 1343), True, 'import numpy as np\n'), ((1398, 1419), 'numpy.copy', 'np.copy', (['self.e_state'], {}), '(self.e_state)\n', (1405, 1419), True, 'import numpy as np\n'), ((1463, 1484), 'numpy.copy', 'np.copy', (['self.e_state'], {}), '(self.e_state)\n', (1470, 1484), True, 'import numpy as np\n'), ((1565, 1586), 'numpy.copy', 'np.copy', (['self.e_state'], {}), '(self.e_state)\n', (1572, 1586), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# @Author: wqshen
# @Email: <EMAIL>
# @Date: 2020/6/10 14:43
# @Last Modified by: wqshen
import numpy as np
from logzero import logger
from .point_stat_base import PointStatBase
class ContinuousVariableVerification(PointStatBase):
def __init__(self, forecast=None, obs=None, fcsterr=None, group=None):
if (forecast is None or obs is None) and fcsterr is None:
raise Exception("Initialize failed, check forecast and obs and fcsterr values.")
elif forecast is not None and obs is not None and fcsterr is not None:
logger.warning("You give forecast, obs and fcsterr, but the fcsterr will be ignored.")
fcsterr = None
self._available_score = ['N', 'ME', 'ME2', 'MSE', 'RMSE', 'ESTDEV', 'BCMSE', 'MAE', 'IQR', 'MAD', 'EPCT']
if fcsterr is not None:
self._error = fcsterr[~np.isnan(fcsterr)]
if forecast is None:
forecast = fcsterr + obs
if obs is None:
obs = forecast - fcsterr
# Not Available, 'BAGSS', 'ANOM_CORR'
self._available_score += ['FBAR', 'OBAR', 'FSTDEV', 'OSTDEV', 'PR_CORR', 'SP_CORR', 'KT_CORR', 'MBIAS', ]
super(ContinuousVariableVerification, self).__init__(forecast, obs, group)
@property
def FBAR(self):
"""**The sample mean forecast, FBAR**"""
return self.mean_forecast(self._f)
@staticmethod
def mean_forecast(forecast):
r"""**The sample mean forecast, FBAR**
the sample mean forecast (FBAR) is defined as,
.. math::
\bar{f} = \frac{1}{n}\sum_{i=1}^{n}f_i
Returns
------
numpy.ndarray, the sample mean forecast (FBAR)
"""
return np.average(forecast)
@property
def OBAR(self):
"""**The sample mean observation, OBAR**"""
return self.mean_observation(self._o)
@staticmethod
def mean_observation(obs):
r"""**The sample mean observation, OBAR**
the sample mean observation (OBAR) is defined as,
.. math::
\bar{o} = \frac{1}{n}\sum_{i=1}^{n}o_i
Returns
-------
numpy.ndarray, the sample mean observation (OBAR)
"""
return np.average(obs)
@property
def FSTDEV(self):
"""**The forecast standard deviation (FSTDEV)**"""
return self.forecast_standard_deviation(self._f)
@staticmethod
def forecast_standard_deviation(forecast):
r"""**The forecast standard deviation (FSTDEV)**
The sample variance of the forecasts is defined as
.. math::
s^{2}_{f} = \frac{1}{T-1}\sum_{i=1}^{T}(f_i - \bar{f})^2
The forecast standard deviation, FSTDEV, is defined as
.. math::
s_{f} = \sqrt{s^{2}_{f}}
Returns
-------
numpy.ndarray, the forecast standard deviation (FSTDEV)
"""
return np.std(forecast)
@property
def OSTDEV(self):
r"""**The observed standard deviation (OSTDEV)**"""
return self.observation_standard_deviation(self._o)
@staticmethod
def observation_standard_deviation(obs):
r"""**The observed standard deviation (OSTDEV)**
The sample variance of the observations is defined as
.. math::
s^{2}_{o} = \frac{1}{T-1}\sum_{i=1}^{T}(o_i - \bar{o})^2
The observed standard deviation, OSTDEV, is defined as
.. math::
s_{o} = \sqrt{s^{2}_{o}}
Returns
-------
numpy.ndarray, the observed standard deviation (OSTDEV)
"""
return np.std(obs)
@property
def PR_CORR(self):
r"""**The Pearson correlation coefficient ( :math:`r` , PR_CORR)**"""
return self.pearson_correlation_coefficient(self._f, self._o)
@staticmethod
def pearson_correlation_coefficient(forecast, obs):
r"""**The Pearson correlation coefficient ( :math:`r` , PR_CORR)**
The Pearson correlation coefficient, **r**,
measures the strength of linear association between the forecasts and observations.
The Pearson correlation coefficient is defined as:
.. math::
r = \frac{\sum^{T}_{i=1}(f_i - \bar{f})(o_i - \bar{o})}{\sqrt{\sum{(f_i - \bar{f})^2}}\sqrt{\sum{(o_i - \bar{o})^2}}}
r can range between -1 and 1;
a value of 1 indicates perfect correlation and
a value of -1 indicates perfect negative correlation.
A value of 0 indicates that the forecasts and observations are not correlated.
Returns
-------
numpy.ndarray, the Pearson correlation coefficient (PR_CORR)
"""
return np.corrcoef(forecast, obs)[1, 0]
@property
def SP_CORR(self):
r"""**The Spearman rank correlation coefficient ( :math:`\rho_s` , SP_CORR)**"""
return self.spearman_rank_correlation_cofficient(self._f, self._o)
@staticmethod
def spearman_rank_correlation_cofficient(forecast, obs):
r"""**The Spearman rank correlation coefficient ( :math:`\rho_s` , SP_CORR)**
        The Spearman rank correlation coefficient ( :math:`\rho_s` ) is a robust measure of association
that is based on the ranks of the forecast and observed values rather than the actual values.
That is, the forecast and observed samples are ordered from smallest to largest
and rank values (from 1 to **n**, where **n** is the total number of pairs) are assigned.
        The pairs of forecast-observed ranks are then used to compute a correlation coefficient,
        analogous to the Pearson correlation coefficient, **r**.
A simpler formulation of the Spearman-rank correlation is based on differences
between the each of the pairs of ranks (denoted as ( :math:`d_i` ) ):
.. math::
\rho_s = \frac{6}{n(n^2 - 1)}\sum^{n}_{i=1}d^{2}_{i}
        Like **r**, the Spearman rank correlation coefficient ranges between -1 and 1;
a value of 1 indicates perfect correlation and
a value of -1 indicates perfect negative correlation.
A value of 0 indicates that the forecasts and observations are not correlated.
Returns
-------
numpy.ndarray, the Spearman correlation coefficient (SP_CORR)
"""
from scipy.stats import spearmanr
return spearmanr(forecast, obs)
@property
def KT_CORR(self):
r"""**Kendall's Tau statistic ( :math:`\tau` , KT_CORR)**"""
return self.kendall_tau_statistic(self._f, self._o)
@staticmethod
def kendall_tau_statistic(forecast, obs):
r"""**Kendall's Tau statistic ( :math:`\tau` , KT_CORR)**
Kendall's Tau statistic ( :math:`\tau` ) is a robust measure of the level of association
between the forecast and observation pairs. It is defined as
.. math::
            \tau = \frac{N_c - N_d}{n(n-1)/2}
        where :math:`N_c` is the number of "concordant" pairs and :math:`N_d` is the number of "discordant" pairs.
        Concordant pairs are identified by comparing each pair with all other pairs in the sample;
this can be done most easily by ordering all of the ( :math:`f_i, o_i` ) pairs
according to :math:`f_i`, in which case the :math:`o_i`, values won't necessarily be in order.
The number of concordant matches of a particular pair with other pairs is computed by
counting the number of pairs (with larger values)
        for which the value of :math:`o_i` for the current pair is exceeded (that is, pairs for which
the values of **f** and **o** are both larger than the value for the current pair).
        Once this is done, :math:`N_c` is computed by summing the counts for all pairs.
        The total number of possible pairs is :math:`n(n-1)/2`; thus, the number of discordant pairs is :math:`N_d = n(n-1)/2 - N_c`.
Like **r** and :math:`\rho_s` , Kendall's Tau ( :math:`\tau` ) ranges between -1 and 1;
a value of 1 indicates perfect association (concor-dance) and
a value of -1 indicates perfect negative association.
A value of 0 indicates that the forecasts and observations are not associated.
Returns
-------
numpy.ndarray, Kendall's Tau statistic ( :math:`\tau` , KT_CORR)
"""
from scipy.stats import kendalltau
return kendalltau(forecast, obs)
@property
def ME(self):
"""**The Mean Error (ME)**"""
return self.mean_error(self.error)
@staticmethod
def mean_error(error):
r"""**The Mean Error (ME)**
The Mean Error, ME, is a measure of overall bias for continuous variables;
in particular ME = Bias. It is defined as
.. math::
ME = \frac{1}{n}\sum^{n}_{i=1}(f_i - o_i) = \bar{f} - \bar{o}
A perfect forecast has ME = 0.
Returns
-------
numpy.ndarray, The Mean Error (ME)
"""
return np.average(error)
@property
def ME2(self):
"""**The Mean Error Squared** (ME2)"""
return self.mean_error_squared(self.error)
@staticmethod
def mean_error_squared(error):
"""**The Mean Error Squared** (ME2)
The Mean Error Squared, ME2, is provided to give a complete breakdown of MSE
in terms of squared Bias plus estimated variance of the error,
        as detailed below in the section on BCMSE. It is defined as :math:`ME2 = ME^2`.
A perfect forecast has ME2 = 0.
Returns
-------
        numpy.ndarray, The Mean Error Squared (ME2)
"""
return np.square(np.average(error))
@property
def MBIAS(self):
"""**Multiplicative bias (MBIAS)**"""
return self.multiplicative_bias(self._f, self._o)
@staticmethod
    def multiplicative_bias(forecast, obs):
r"""**Multiplicative bias (MBIAS)**
Multiplicative bias is simply the ratio of the means of the forecasts and the observations:
.. math::
MBIAS = \frac{\bar{f}}{\bar{o}}
Returns
-------
numpy.ndarray, Multiplicative bias (MBIAS)
"""
        return np.average(forecast) / np.average(obs)
@property
def MSE(self):
"""**Mean-squared error (MSE)**"""
return self.mean_squared_error(self.error)
@staticmethod
def mean_squared_error(error):
r"""**Mean-squared error (MSE)**
MSE measures the average squared error of the forecasts. Specifically,
.. math::
MSE = \frac{1}{n}\sum{(f_i - o_i)^2}
Returns
-------
numpy.ndarray, Mean-squared error (MSE)
"""
return np.average(error ** 2)
@property
def RMSE(self):
"""**Root-mean-squared error (RMSE)**"""
return self.root_mean_squared_error(self.error)
@staticmethod
def root_mean_squared_error(error):
"""**Root-mean-squared error (RMSE)**
RMSE is simply the square root of the MSE, :math:`RMSE = \sqrt{MSE}`
Returns
-------
numpy.ndarray, Root-mean-squared error (RMSE)
"""
return np.sqrt(np.average(error ** 2))
@property
def ESTDEV(self):
"""**Standard deviation of the error** (ESTDEV)"""
return self.standard_deviation_of_error(self.error)
@staticmethod
def standard_deviation_of_error(error):
"""**Standard deviation of the error** (ESTDEV)
Returns
-------
numpy.ndaray, Standard deviation of the error
"""
return np.std(error)
@property
def BCMSE(self):
"""**Bias-Corrected MSE (BCMSE)**"""
return self.bias_corrected_mse(self.error)
@staticmethod
def bias_corrected_mse(error):
r"""**Bias-Corrected MSE (BCMSE)**
MSE and RMSE are strongly impacted by large errors.
They also are strongly impacted by large bias (ME) values.
MSE and RMSE can range from 0 to infinity.
A perfect forecast would have MSE = RMSE = 0.
MSE can be re-written as,
.. math::
MSE = (\bar{f} - \bar{o})^2 + s^{2}_{f} + s^{2}_{o} -2 s_f s_o r_{fo}
where :math:`\bar{f} - \bar{o} = ME` and :math:`s^{2}_{f} + s^{2}_{o} -2 s_f s_o r_{fo}` is
the estimated variance of the error, :math:`s^{2}_{fo}` . Thus, :math:`MSE = ME^2 + s^{2}_{f-o}`
To understand the behavior of MSE, it is important to examine both of the terms of MSE,
rather than examining MSE alone. Moreover, MSE can be strongly influenced by ME,
as shown by this decomposition.
The standard deviation of the error, :math:`s_{f-o}` , is
.. math::
s_{f-o}=\sqrt{s^{2}_{f-o}}=\sqrt{s^{2}_{f} + s^{2}_{o} -2 s_f s_o r_{fo}}
Note that the square of the standard deviation of the error (ESTDEV2) is
sometimes called the "Bias-corrected MSE" (BCMSE)
because it removes the effect of overall bias from the forecast-observation squared differences.
Returns
-------
numpy.ndarray, Bias-Corrected MSE (BCMSE)
"""
return np.square(np.std(error))
@property
def MAE(self):
"""**Mean Absolute Error (MAE)**"""
return self.mean_absolute_error(self.error)
@staticmethod
def mean_absolute_error(error):
r"""**Mean Absolute Error (MAE)**
The Mean Absolute Error (MAE) is defined as :math:`MAE = \frac{1}{n}\sum{|f_i - o_i|}`
        MAE is less influenced by large errors and also does not depend on the mean error.
A perfect forecast would have MAE = 0.
Returns
-------
numpy.ndarray, Mean Absolute Error (MAE)
"""
return np.average(np.abs(error))
@property
def IQR(self):
""""**Inter Quartile Range of the Errors (IQR)**"""
return self.inter_quartile_range_of_errors(self.error)
@staticmethod
def inter_quartile_range_of_errors(error):
r"""**Inter Quartile Range of the Errors (IQR)**
The Inter Quartile Range of the Errors (IQR) is the difference
between the 75th and 25th percentiles of the errors. It is dened as
.. math::
IQR = p_{75} (f_i - o_i) - p_{25}(f_i - o_i)
IQR is another estimate of spread, similar to standard error,
        but is less influenced by large errors and also does not depend on the mean error.
A perfect forecast would have IQR = 0.
Returns
-------
nupmy.ndarray, Inter Quartile Range of the Errors (IQR)
"""
return np.percentile(error, 75) - np.percentile(error, 25)
@property
def MAD(self):
"""Median Absolute Deviation (MAD)"""
return self.median_absolute_deviation(self.error)
@staticmethod
def median_absolute_deviation(error):
"""Median Absolute Deviation (MAD)
The Median Absolute Deviation (MAD) is defined as :math:`MAD=median|f_i - o_i|`
MAD is an estimate of spread, similar to standard error,
        but is less influenced by large errors and also does not depend on the mean error.
A perfect forecast would have MAD = 0.
Returns
-------
numpy.ndarray, Median Absolute Deviation (MAD)
"""
return np.median(np.abs(error))
@property
def BAGSS(self):
"""Bias Adjusted Gilbert Skill Score (BAGSS)"""
return self.bias_adjusted_gilbert_skill_score(self._f, self._o)
@staticmethod
def bias_adjusted_gilbert_skill_score(forecast, obs):
"""Bias Adjusted Gilbert Skill Score (BAGSS)
The Bias Adjusted Gilbert Skill Score (BAGSS) is the Gilbert Skill Score,
but with the contingency table counts adjusted to eliminate
as much bias in the forecast as possible.
For details, see `Brill and Messinger, 2009. <https://www.adv-geosci.net/16/137/2008/>`_
Returns
-------
Not implemented
numpy.ndarray, Bias Adjusted Gilbert Skill Score (BAGSS)
"""
return
@property
def EPCT(self):
"""Percentiles (0.1, 0.25, 0.5, 0.75, 0.9) of the errors"""
return self.percentile_errors(self.error)
@staticmethod
def percentile_errors(error):
"""Percentiles of the errors
Percentiles of the errors provide more information about the distribution of errors
than can be obtained from the mean and standard deviations of the errors.
Percentiles are computed by ordering the errors from smallest to largest
and computing the rank location of each percentile in the ordering,
and matching the rank to the actual value.
Percentiles can also be used to create box plots of the errors.
The 0.10th, 0.25th, 0.50th, 0.75th, and 0.90th quantile values of the errors are computed.
Returns
-------
numpy.ndarray, Percentiles of the errors
"""
quantiles = np.array([0.1, 0.25, 0.5, 0.75, 0.9])
return np.quantile(error, quantiles)
@property
def ANOM_CORR(self):
"""The Anomaly correlation coefficient (ANOM_CORR)"""
return self.anomaly_correlation_coefficient(self._f, self._o, None)
@staticmethod
def anomaly_correlation_coefficient(forecast, obs, climate):
r"""The Anomaly correlation coefficient (ANOM_CORR)
        The Anomaly correlation coefficient is equivalent to the Pearson correlation coefficient,
except that both the forecasts and observations are first adjusted according to a climatology value.
The anomaly is the difference between the individual forecast or observation and the typical situation,
as measured by a climatology (**c**) of some variety.
        It measures the strength of linear association between the forecast anomalies and observed anomalies.
The Anomaly correlation coefficient is defined as:
.. math::
            Anomaly Correlation = \frac{\sum{(f_i - c)(o_i - c)}} {\sqrt{\sum{(f_i - c)^2}} \sqrt{\sum{(o_i - c)^2}}}
Anomaly correlation can range between -1 and 1;
- a value of 1 indicates perfect correlation and
- a value of -1 indicates perfect negative correlation.
- A value of 0 indicates that the forecast and observed anomalies are not correlated.
Returns
-------
Not implemented
"""
return
def list_score(self):
"""list all available score"""
return {k: np.round(getattr(self, k), self.round) for k in self._available_score}
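# --- Usage sketch (added for illustration; not part of the original module) ---
# Because the metrics are exposed as staticmethods, they can be exercised on
# synthetic forecast/observation pairs without building the full object (which
# needs the surrounding package's PointStatBase). Values below are arbitrary.
def _demo_scores():
    rng = np.random.RandomState(0)
    obs = rng.normal(20.0, 5.0, size=1000)
    fcst = obs + rng.normal(0.5, 2.0, size=1000)  # biased, noisy forecast
    err = fcst - obs
    cvv = ContinuousVariableVerification
    print('ME  ', cvv.mean_error(err))               # close to the injected 0.5 bias
    print('RMSE', cvv.root_mean_squared_error(err))  # roughly sqrt(0.5**2 + 2.0**2)
    print('MAE ', cvv.mean_absolute_error(err))
    print('CORR', cvv.pearson_correlation_coefficient(fcst, obs))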
|
[
"numpy.quantile",
"numpy.average",
"numpy.abs",
"numpy.std",
"numpy.corrcoef",
"scipy.stats.spearmanr",
"numpy.isnan",
"numpy.percentile",
"numpy.array",
"logzero.logger.warning",
"scipy.stats.kendalltau"
] |
[((1751, 1771), 'numpy.average', 'np.average', (['forecast'], {}), '(forecast)\n', (1761, 1771), True, 'import numpy as np\n'), ((2252, 2267), 'numpy.average', 'np.average', (['obs'], {}), '(obs)\n', (2262, 2267), True, 'import numpy as np\n'), ((2936, 2952), 'numpy.std', 'np.std', (['forecast'], {}), '(forecast)\n', (2942, 2952), True, 'import numpy as np\n'), ((3626, 3637), 'numpy.std', 'np.std', (['obs'], {}), '(obs)\n', (3632, 3637), True, 'import numpy as np\n'), ((6365, 6389), 'scipy.stats.spearmanr', 'spearmanr', (['forecast', 'obs'], {}), '(forecast, obs)\n', (6374, 6389), False, 'from scipy.stats import spearmanr\n'), ((8305, 8330), 'scipy.stats.kendalltau', 'kendalltau', (['forecast', 'obs'], {}), '(forecast, obs)\n', (8315, 8330), False, 'from scipy.stats import kendalltau\n'), ((8897, 8914), 'numpy.average', 'np.average', (['error'], {}), '(error)\n', (8907, 8914), True, 'import numpy as np\n'), ((10599, 10621), 'numpy.average', 'np.average', (['(error ** 2)'], {}), '(error ** 2)\n', (10609, 10621), True, 'import numpy as np\n'), ((11480, 11493), 'numpy.std', 'np.std', (['error'], {}), '(error)\n', (11486, 11493), True, 'import numpy as np\n'), ((16891, 16928), 'numpy.array', 'np.array', (['[0.1, 0.25, 0.5, 0.75, 0.9]'], {}), '([0.1, 0.25, 0.5, 0.75, 0.9])\n', (16899, 16928), True, 'import numpy as np\n'), ((16944, 16973), 'numpy.quantile', 'np.quantile', (['error', 'quantiles'], {}), '(error, quantiles)\n', (16955, 16973), True, 'import numpy as np\n'), ((4699, 4725), 'numpy.corrcoef', 'np.corrcoef', (['forecast', 'obs'], {}), '(forecast, obs)\n', (4710, 4725), True, 'import numpy as np\n'), ((9536, 9553), 'numpy.average', 'np.average', (['error'], {}), '(error)\n', (9546, 9553), True, 'import numpy as np\n'), ((10079, 10099), 'numpy.average', 'np.average', (['forecast'], {}), '(forecast)\n', (10089, 10099), True, 'import numpy as np\n'), ((10102, 10119), 'numpy.average', 'np.average', (['error'], {}), '(error)\n', (10112, 10119), True, 'import numpy as np\n'), ((11067, 11089), 'numpy.average', 'np.average', (['(error ** 2)'], {}), '(error ** 2)\n', (11077, 11089), True, 'import numpy as np\n'), ((13060, 13073), 'numpy.std', 'np.std', (['error'], {}), '(error)\n', (13066, 13073), True, 'import numpy as np\n'), ((13656, 13669), 'numpy.abs', 'np.abs', (['error'], {}), '(error)\n', (13662, 13669), True, 'import numpy as np\n'), ((14509, 14533), 'numpy.percentile', 'np.percentile', (['error', '(75)'], {}), '(error, 75)\n', (14522, 14533), True, 'import numpy as np\n'), ((14536, 14560), 'numpy.percentile', 'np.percentile', (['error', '(25)'], {}), '(error, 25)\n', (14549, 14560), True, 'import numpy as np\n'), ((15221, 15234), 'numpy.abs', 'np.abs', (['error'], {}), '(error)\n', (15227, 15234), True, 'import numpy as np\n'), ((584, 675), 'logzero.logger.warning', 'logger.warning', (['"""You give forecast, obs and fcsterr, but the fcsterr will be ignored."""'], {}), "(\n 'You give forecast, obs and fcsterr, but the fcsterr will be ignored.')\n", (598, 675), False, 'from logzero import logger\n'), ((879, 896), 'numpy.isnan', 'np.isnan', (['fcsterr'], {}), '(fcsterr)\n', (887, 896), True, 'import numpy as np\n')]
|
import numpy as np
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt
from numpy.random import randint, rand
from sir import *
def SIR_continuous_reinfected(b,k,time,ii,r):
"""
Simulates continuous SIR model
ii = initial percentage of infected
time = Days of simulation
    b = probability of becoming infectious (transmission rate)
    k = probability of recovery (recovery rate)
    r = reinfection probability
returns sol from solve_ivp
"""
def SIR(t, X):
#The main set of equations
Y = np.zeros((3))
Y[0] = -b * X[0] * X[2]
Y[1] = k * X[2] - r * X[1]
Y[2] = b * X[0] * X[2] - (k * X[2]) + r * X[1]
return Y
t_eval = np.linspace(0, time, time)
sol1 = solve_ivp(SIR, [0, time], [1-ii, 0, ii], method='RK45', t_eval=t_eval) # solve the equation
return sol1
## Discrete
class Person_reinfection(Person):
"""
An agent representing a person.
    By default, a person is susceptible but not infectious. They can become infected through exposure via the infection method.
Status: 0 = susceptible 1 = infected 2 = removed
"""
def __init__(self,startpos=None):
self.status = 0
if startpos==None:
self.pos = np.random.rand(2)
else:
self.pos = np.array(startpos)
self.reinfection=1
def reinfectionrate(self):
return self.reinfection
def immunization(self,p):
q=self.reinfection-p
if q<0:
q=0
self.reinfection=q
def count_susceptible(pop):
"""
counts number of susceptible
"""
return sum(p.is_susceptible() for p in pop)
def count_infected(pop):
"""
counts number of infected
"""
return sum(p.is_infected() for p in pop)
def count_removed(pop):
"""
counts number of removed
"""
return sum(p.is_removed() for p in pop)
def SIR_discrete_reinfection(N,ii,b,T,k):
"""
Simulates discrete SIR model
N = Total number of people
ii = initial percentage of infected
b = number of contacts per day
T = Days of simulation
    k = probability that an infected person recovers (per day)
returns list of s,i,r
"""
pop = [Person_reinfection() for i in range(N)]
initial_infection = randint(N,size=np.int(N*ii))
for i in initial_infection:
pop[i].infection()
counts_susceptible = [count_susceptible(pop)]
counts_infected = [count_infected(pop)]
counts_removed = [count_removed(pop)]
for t in range(T):
# update the population
for i in range(N):
if pop[i].is_infected():
# person i infected all their contacts
contacts = randint(N, size=b)
for j in contacts:
if not pop[j].is_removed():
pop[j].infection()
#if rand() < p:
# pop[j].infection()
if pop[j].is_removed():
if rand()<pop[j].reinfectionrate():
pop[j].infection()
if rand()< k:
pop[i].remove()
pop[i].immunization(rand())
# add to our counts
counts_susceptible.append(count_susceptible(pop))
counts_infected.append(count_infected(pop))
counts_removed.append(count_removed(pop))
return np.array([counts_susceptible,counts_infected,counts_removed])
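# --- Usage sketch (added for illustration; not part of the original module) ---
# Assuming the accompanying `sir` module (imported above) is available, both
# models can be run directly; the parameter values are arbitrary examples.
# (Note: SIR_discrete_reinfection uses np.int, which requires an older NumPy.)
if __name__ == '__main__':
    sol = SIR_continuous_reinfected(b=0.3, k=0.1, time=100, ii=0.01, r=0.01)
    print(sol.y.shape)   # (3, 100): susceptible, recovered, infected trajectories
    counts = SIR_discrete_reinfection(N=500, ii=0.01, b=5, T=30, k=0.2)
    print(counts.shape)  # (3, 31): susceptible, infected, removed counts per day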
|
[
"numpy.zeros",
"scipy.integrate.solve_ivp",
"numpy.random.randint",
"numpy.array",
"numpy.int",
"numpy.linspace",
"numpy.random.rand"
] |
[((717, 743), 'numpy.linspace', 'np.linspace', (['(0)', 'time', 'time'], {}), '(0, time, time)\n', (728, 743), True, 'import numpy as np\n'), ((755, 827), 'scipy.integrate.solve_ivp', 'solve_ivp', (['SIR', '[0, time]', '[1 - ii, 0, ii]'], {'method': '"""RK45"""', 't_eval': 't_eval'}), "(SIR, [0, time], [1 - ii, 0, ii], method='RK45', t_eval=t_eval)\n", (764, 827), False, 'from scipy.integrate import solve_ivp\n'), ((3439, 3502), 'numpy.array', 'np.array', (['[counts_susceptible, counts_infected, counts_removed]'], {}), '([counts_susceptible, counts_infected, counts_removed])\n', (3447, 3502), True, 'import numpy as np\n'), ((551, 562), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (559, 562), True, 'import numpy as np\n'), ((1284, 1301), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (1298, 1301), True, 'import numpy as np\n'), ((1339, 1357), 'numpy.array', 'np.array', (['startpos'], {}), '(startpos)\n', (1347, 1357), True, 'import numpy as np\n'), ((2329, 2343), 'numpy.int', 'np.int', (['(N * ii)'], {}), '(N * ii)\n', (2335, 2343), True, 'import numpy as np\n'), ((2741, 2759), 'numpy.random.randint', 'randint', (['N'], {'size': 'b'}), '(N, size=b)\n', (2748, 2759), False, 'from numpy.random import randint, rand\n'), ((3144, 3150), 'numpy.random.rand', 'rand', ([], {}), '()\n', (3148, 3150), False, 'from numpy.random import randint, rand\n'), ((3231, 3237), 'numpy.random.rand', 'rand', ([], {}), '()\n', (3235, 3237), False, 'from numpy.random import randint, rand\n'), ((3045, 3051), 'numpy.random.rand', 'rand', ([], {}), '()\n', (3049, 3051), False, 'from numpy.random import randint, rand\n')]
|
"""
==================================================================
Compare LogisticRegression solver with sklearn's liblinear backend
==================================================================
"""
import time
import warnings
import numpy as np
from numpy.linalg import norm
import matplotlib.pyplot as plt
from sklearn import linear_model
from libsvmdata import fetch_libsvm
from celer import LogisticRegression
warnings.filterwarnings("ignore", message="Objective did not converge")
warnings.filterwarnings("ignore", message="Liblinear failed to converge")
X, y = fetch_libsvm("news20.binary")
C_min = 2 / norm(X.T @ y, ord=np.inf)
C = 20 * C_min
def pobj_logreg(w):
return np.sum(np.log(1 + np.exp(-y * (X @ w)))) + 1. / C * norm(w, ord=1)
pobj_celer = []
t_celer = []
for n_iter in range(10):
t0 = time.time()
clf = LogisticRegression(
C=C, solver="celer-pn", max_iter=n_iter, tol=0).fit(X, y)
t_celer.append(time.time() - t0)
w_celer = clf.coef_.ravel()
pobj_celer.append(pobj_logreg(w_celer))
pobj_celer = np.array(pobj_celer)
pobj_libl = []
t_libl = []
for n_iter in np.arange(0, 50, 10):
t0 = time.time()
clf = linear_model.LogisticRegression(
C=C, solver="liblinear", penalty='l1', fit_intercept=False,
max_iter=n_iter, random_state=0, tol=1e-10).fit(X, y)
t_libl.append(time.time() - t0)
w_libl = clf.coef_.ravel()
pobj_libl.append(pobj_logreg(w_libl))
pobj_libl = np.array(pobj_libl)
p_star = min(pobj_celer.min(), pobj_libl.min())
plt.close("all")
fig = plt.figure(figsize=(4, 2), constrained_layout=True)
plt.semilogy(t_celer, pobj_celer - p_star, label="Celer-PN")
plt.semilogy(t_libl, pobj_libl - p_star, label="liblinear")
plt.legend()
plt.xlabel("Time (s)")
plt.ylabel("objective suboptimality")
plt.show(block=False)
|
[
"celer.LogisticRegression",
"matplotlib.pyplot.show",
"libsvmdata.fetch_libsvm",
"warnings.filterwarnings",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"time.time",
"matplotlib.pyplot.figure",
"sklearn.linear_model.LogisticRegression",
"numpy.arange",
"numpy.array",
"numpy.linalg.norm",
"numpy.exp",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.semilogy",
"matplotlib.pyplot.xlabel"
] |
[((427, 498), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'message': '"""Objective did not converge"""'}), "('ignore', message='Objective did not converge')\n", (450, 498), False, 'import warnings\n'), ((499, 572), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'message': '"""Liblinear failed to converge"""'}), "('ignore', message='Liblinear failed to converge')\n", (522, 572), False, 'import warnings\n'), ((581, 610), 'libsvmdata.fetch_libsvm', 'fetch_libsvm', (['"""news20.binary"""'], {}), "('news20.binary')\n", (593, 610), False, 'from libsvmdata import fetch_libsvm\n'), ((1066, 1086), 'numpy.array', 'np.array', (['pobj_celer'], {}), '(pobj_celer)\n', (1074, 1086), True, 'import numpy as np\n'), ((1131, 1151), 'numpy.arange', 'np.arange', (['(0)', '(50)', '(10)'], {}), '(0, 50, 10)\n', (1140, 1151), True, 'import numpy as np\n'), ((1469, 1488), 'numpy.array', 'np.array', (['pobj_libl'], {}), '(pobj_libl)\n', (1477, 1488), True, 'import numpy as np\n'), ((1539, 1555), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (1548, 1555), True, 'import matplotlib.pyplot as plt\n'), ((1562, 1613), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 2)', 'constrained_layout': '(True)'}), '(figsize=(4, 2), constrained_layout=True)\n', (1572, 1613), True, 'import matplotlib.pyplot as plt\n'), ((1614, 1674), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['t_celer', '(pobj_celer - p_star)'], {'label': '"""Celer-PN"""'}), "(t_celer, pobj_celer - p_star, label='Celer-PN')\n", (1626, 1674), True, 'import matplotlib.pyplot as plt\n'), ((1675, 1734), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['t_libl', '(pobj_libl - p_star)'], {'label': '"""liblinear"""'}), "(t_libl, pobj_libl - p_star, label='liblinear')\n", (1687, 1734), True, 'import matplotlib.pyplot as plt\n'), ((1735, 1747), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1745, 1747), True, 'import matplotlib.pyplot as plt\n'), ((1748, 1770), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (1758, 1770), True, 'import matplotlib.pyplot as plt\n'), ((1771, 1808), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""objective suboptimality"""'], {}), "('objective suboptimality')\n", (1781, 1808), True, 'import matplotlib.pyplot as plt\n'), ((1809, 1830), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (1817, 1830), True, 'import matplotlib.pyplot as plt\n'), ((624, 649), 'numpy.linalg.norm', 'norm', (['(X.T @ y)'], {'ord': 'np.inf'}), '(X.T @ y, ord=np.inf)\n', (628, 649), False, 'from numpy.linalg import norm\n'), ((831, 842), 'time.time', 'time.time', ([], {}), '()\n', (840, 842), False, 'import time\n'), ((1162, 1173), 'time.time', 'time.time', ([], {}), '()\n', (1171, 1173), False, 'import time\n'), ((750, 764), 'numpy.linalg.norm', 'norm', (['w'], {'ord': '(1)'}), '(w, ord=1)\n', (754, 764), False, 'from numpy.linalg import norm\n'), ((853, 919), 'celer.LogisticRegression', 'LogisticRegression', ([], {'C': 'C', 'solver': '"""celer-pn"""', 'max_iter': 'n_iter', 'tol': '(0)'}), "(C=C, solver='celer-pn', max_iter=n_iter, tol=0)\n", (871, 919), False, 'from celer import LogisticRegression\n'), ((958, 969), 'time.time', 'time.time', ([], {}), '()\n', (967, 969), False, 'import time\n'), ((1184, 1323), 'sklearn.linear_model.LogisticRegression', 'linear_model.LogisticRegression', ([], {'C': 'C', 'solver': '"""liblinear"""', 'penalty': '"""l1"""', 'fit_intercept': '(False)', 
'max_iter': 'n_iter', 'random_state': '(0)', 'tol': '(1e-10)'}), "(C=C, solver='liblinear', penalty='l1',\n fit_intercept=False, max_iter=n_iter, random_state=0, tol=1e-10)\n", (1215, 1323), False, 'from sklearn import linear_model\n'), ((1365, 1376), 'time.time', 'time.time', ([], {}), '()\n', (1374, 1376), False, 'import time\n'), ((716, 736), 'numpy.exp', 'np.exp', (['(-y * (X @ w))'], {}), '(-y * (X @ w))\n', (722, 736), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Minibatching utilities."""
import itertools
import operator
import os
import pickle
import numpy as np
import torch
from sklearn.utils import shuffle
from torch.autograd import Variable
# Change to python3+.
# from itertools import zip
class DataIterator(object):
"""Data Iterator."""
@staticmethod
def _trim_vocab(vocab, vocab_size):
"""Discard start, end, pad and unk tokens if already present.
Args:
vocab(list): Vocabulary.
vocab_size(int): The size of the vocabulary.
Returns:
word2id(list): Word to index list.
id2word(list): Index to word list.
"""
if "<s>" in vocab:
del vocab["<s>"]
if "<pad>" in vocab:
del vocab["<pad>"]
if "</s>" in vocab:
del vocab["</s>"]
if "<unk>" in vocab:
del vocab["<unk>"]
word2id = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
id2word = {0: "<s>", 1: "<pad>", 2: "</s>", 3: "<unk>"}
sorted_word2id = sorted(
vocab.items(), key=operator.itemgetter(1), reverse=True
)
if vocab_size != -1:
sorted_words = [x[0] for x in sorted_word2id[:vocab_size]]
else:
sorted_words = [x[0] for x in sorted_word2id]
for ind, word in enumerate(sorted_words):
word2id[word] = ind + 4
for ind, word in enumerate(sorted_words):
id2word[ind + 4] = word
return word2id, id2word
def construct_vocab(
self, sentences, vocab_size, lowercase=False, charlevel=False
):
"""Create vocabulary.
Args:
sentences(list): The list of sentences.
vocab_size(int): The size of vocabulary.
lowercase(bool): If lowercase the sentences.
charlevel(bool): If need to split the sentence with space.
Returns:
word2id(list): Word to index list.
id2word(list): Index to word list.
"""
vocab = {}
for sentence in sentences:
if isinstance(sentence, str):
if lowercase:
sentence = sentence.lower()
if not charlevel:
sentence = sentence.split()
for word in sentence:
if word not in vocab:
vocab[word] = 1
else:
vocab[word] += 1
word2id, id2word = self._trim_vocab(vocab, vocab_size)
return word2id, id2word
class BufferedDataIterator(DataIterator):
"""Multi Parallel corpus data iterator."""
def __init__(
self,
src,
trg,
src_vocab_size,
trg_vocab_size,
tasknames,
save_dir,
buffer_size=1e6,
lowercase=False,
seed=0,
):
"""Initialize params.
Args:
src(list): source dataset.
trg(list): target dataset.
src_vocab_size(int): The size of source vocab.
trg_vocab_size(int): The size of target vocab.
tasknames(list): The list of task names.
save_dir(str): The saving dir.
buffer_size(float): Buffer size.
lowercase(bool): if lowercase the data.
"""
self.seed = seed
self.fname_src = src
self.fname_trg = trg
self.src_vocab_size = src_vocab_size
self.trg_vocab_size = trg_vocab_size
self.tasknames = tasknames
self.save_dir = save_dir
self.buffer_size = buffer_size
self.lowercase = lowercase
# Open a list of file pointers to all the files.
self.f_src = [
open(fname, "r", encoding="utf-8") for fname in self.fname_src
]
self.f_trg = [
open(fname, "r", encoding="utf-8") for fname in self.fname_trg
]
# Initialize dictionaries that contain sentences & word mapping dicts
self.src = [
{"data": [], "word2id": None, "id2word": None}
for i in range(len(self.fname_src))
]
self.trg = [
{"data": [], "word2id": None, "id2word": None}
for i in range(len(self.fname_trg))
]
self.build_vocab()
"""Reset file pointers to the start after reading the file to
build vocabularies."""
for idx in range(len(self.src)):
self._reset_filepointer(idx)
for idx in range(len(self.src)):
self.fetch_buffer(idx)
def _reset_filepointer(self, idx):
"""Reset file pointer.
Args:
idx(int): Index used to reset file pointer.
"""
self.f_src[idx] = open(self.fname_src[idx], "r", encoding="utf-8")
self.f_trg[idx] = open(self.fname_trg[idx], "r", encoding="utf-8")
def fetch_buffer(self, idx, reset=True):
"""Fetch sentences from the file into the buffer.
Args:
idx(int): Index used to fetch the sentences.
reset(bool): If need to reset the contents of the current buffer.
"""
# Reset the contents of the current buffer.
if reset:
self.src[idx]["data"] = []
self.trg[idx]["data"] = []
# Populate buffer
for src, trg in zip(self.f_src[idx], self.f_trg[idx]):
if len(self.src[idx]["data"]) == self.buffer_size:
break
if self.lowercase:
self.src[idx]["data"].append(src.lower().split())
self.trg[idx]["data"].append(trg.lower().split())
else:
self.src[idx]["data"].append(src.split())
self.trg[idx]["data"].append(trg.split())
# Sort sentences by decreasing length (hacky bucketing)
self.src[idx]["data"], self.trg[idx]["data"] = zip(
*sorted(
zip(self.src[idx]["data"], self.trg[idx]["data"]),
key=lambda x: len(x[0]),
reverse=True,
)
)
"""If buffer isn't full after reading the contents of the file,
cycle around. """
if len(self.src[idx]["data"]) < self.buffer_size:
assert len(self.src[idx]["data"]) == len(self.trg[idx]["data"])
# Cast things to list to avoid issue with calling .append above
self.src[idx]["data"] = list(self.src[idx]["data"])
self.trg[idx]["data"] = list(self.trg[idx]["data"])
self._reset_filepointer(idx)
self.fetch_buffer(idx, reset=False)
def build_vocab(self):
"""Build a memory efficient vocab."""
# Construct common source vocab.
# Check if save directory exists.
if not os.path.exists(self.save_dir):
raise ValueError("Could not find save dir : %s" % self.save_dir)
# Check if a cached vocab file exists.
if os.path.exists(os.path.join(self.save_dir, "src_vocab.pkl")):
vocab = pickle.load(
open(os.path.join(self.save_dir, "src_vocab.pkl"), "rb")
)
word2id, id2word = vocab["word2id"], vocab["id2word"]
# If not, compute the vocab from scratch and store a cache.
else:
word2id, id2word = self.construct_vocab(
itertools.chain.from_iterable(self.f_src),
self.src_vocab_size,
self.lowercase,
)
pickle.dump(
{"word2id": word2id, "id2word": id2word},
open(os.path.join(self.save_dir, "src_vocab.pkl"), "wb"),
)
for corpus in self.src:
corpus["word2id"], corpus["id2word"] = word2id, id2word
# Do the same for the target vocabulary.
if os.path.exists(os.path.join(self.save_dir, "trg_vocab.pkl")):
vocab = pickle.load(
open(os.path.join(self.save_dir, "trg_vocab.pkl"), "rb")
)
for idx, (corpus, fname) in enumerate(zip(self.trg, self.f_trg)):
word2id, id2word = (
vocab[self.tasknames[idx]]["word2id"],
vocab[self.tasknames[idx]]["id2word"],
)
corpus["word2id"], corpus["id2word"] = word2id, id2word
else:
trg_vocab_dump = {}
for idx, (corpus, fname) in enumerate(zip(self.trg, self.f_trg)):
word2id, id2word = self.construct_vocab(
fname, self.trg_vocab_size, self.lowercase
)
corpus["word2id"], corpus["id2word"] = word2id, id2word
trg_vocab_dump[self.tasknames[idx]] = {}
trg_vocab_dump[self.tasknames[idx]]["word2id"] = word2id
trg_vocab_dump[self.tasknames[idx]]["id2word"] = id2word
pickle.dump(
trg_vocab_dump,
open(os.path.join(self.save_dir, "trg_vocab.pkl"), "wb"),
)
def shuffle_dataset(self, idx):
"""Shuffle current buffer."""
self.src[idx]["data"], self.trg[idx]["data"] = shuffle(
self.src[idx]["data"],
self.trg[idx]["data"],
random_state=self.seed,
)
def get_parallel_minibatch(
self, corpus_idx, index, batch_size, max_len_src, max_len_trg
):
"""Prepare minibatch.
Args:
corpus_idx(int): Corpus Index.
index(int): Index.
batch_size(int): Batch Size.
            max_len_src(int): Max length for source.
            max_len_trg(int): Max length for target.
Returns: minibatch of src-trg pairs(dict).
"""
src_lines = [
["<s>"] + line[: max_len_src - 2] + ["</s>"]
for line in self.src[corpus_idx]["data"][
index : index + batch_size
]
]
trg_lines = [
["<s>"] + line[: max_len_trg - 2] + ["</s>"]
for line in self.trg[corpus_idx]["data"][
index : index + batch_size
]
]
"""Sort sentences by decreasing length within a minibatch for
`torch.nn.utils.packed_padded_sequence`"""
src_lens = [len(line) for line in src_lines]
sorted_indices = np.argsort(src_lens)[::-1]
sorted_src_lines = [src_lines[idx] for idx in sorted_indices]
sorted_trg_lines = [trg_lines[idx] for idx in sorted_indices]
sorted_src_lens = [len(line) for line in sorted_src_lines]
sorted_trg_lens = [len(line) for line in sorted_trg_lines]
max_src_len = max(sorted_src_lens)
max_trg_len = max(sorted_trg_lens)
# Map words to indices
input_lines_src = [
[
self.src[corpus_idx]["word2id"][w]
if w in self.src[corpus_idx]["word2id"]
else self.src[corpus_idx]["word2id"]["<unk>"]
for w in line
]
+ [self.src[corpus_idx]["word2id"]["<pad>"]]
* (max_src_len - len(line))
for line in sorted_src_lines
]
input_lines_trg = [
[
self.trg[corpus_idx]["word2id"][w]
if w in self.trg[corpus_idx]["word2id"]
else self.trg[corpus_idx]["word2id"]["<unk>"]
for w in line[:-1]
]
+ [self.trg[corpus_idx]["word2id"]["<pad>"]]
* (max_trg_len - len(line))
for line in sorted_trg_lines
]
output_lines_trg = [
[
self.trg[corpus_idx]["word2id"][w]
if w in self.trg[corpus_idx]["word2id"]
else self.trg[corpus_idx]["word2id"]["<unk>"]
for w in line[1:]
]
+ [self.trg[corpus_idx]["word2id"]["<pad>"]]
* (max_trg_len - len(line))
for line in sorted_trg_lines
]
# Cast lists to torch tensors
input_lines_src = Variable(torch.LongTensor(input_lines_src)).cuda()
input_lines_trg = Variable(torch.LongTensor(input_lines_trg)).cuda()
output_lines_trg = Variable(torch.LongTensor(output_lines_trg)).cuda()
sorted_src_lens = (
Variable(torch.LongTensor(sorted_src_lens), volatile=True)
.squeeze()
.cuda()
)
# Return minibatch of src-trg pairs
return {
"input_src": input_lines_src,
"input_trg": input_lines_trg,
"output_trg": output_lines_trg,
"src_lens": sorted_src_lens,
"type": "seq2seq",
}
class NLIIterator(DataIterator):
"""Data iterator for tokenized NLI datasets."""
def __init__(
self, train, dev, test, vocab_size, lowercase=True, vocab=None, seed=0
):
"""Initialize params.
Each of train/dev/test is a tab-separate file of the form
premise \t hypothesis \t label.
Args:
train(torch.Tensor): Training dataset.
dev(torch.Tensor): Validation dataset.
test(torch.Tensor): Testing dataset.
vocab_size(int): The size of the vocabulary.
lowercase(bool): If lowercase the dataset.
            vocab(Union[bytes, str]): Path to a cached vocabulary pickle.
"""
self.seed = seed
self.train = train
self.dev = dev
self.test = test
self.vocab_size = vocab_size
self.lowercase = lowercase
self.vocab = vocab
self.train_lines = [
line.strip().lower().split("\t")
for line in open(self.train, encoding="utf-8")
]
self.dev_lines = [
line.strip().lower().split("\t")
for line in open(self.dev, encoding="utf-8")
]
self.test_lines = [
line.strip().lower().split("\t")
for line in open(self.test, encoding="utf-8")
]
if self.vocab is not None:
# binary mode doesn't take an encoding argument
self.vocab = pickle.load(open(self.vocab, "rb"))
self.word2id = self.vocab["word2id"]
self.id2word = self.vocab["id2word"]
self.vocab_size = len(self.word2id)
else:
self.word2id, self.id2word = self.construct_vocab(
[x[0] for x in self.train_lines]
+ [x[1] for x in self.train_lines],
self.vocab_size,
lowercase=self.lowercase,
)
# Label text to class mapping.
self.text2label = {"entailment": 0, "neutral": 1, "contradiction": 2}
self.shuffle_dataset()
def shuffle_dataset(self):
"""Shuffle training data."""
self.train_lines = shuffle(self.train_lines, random_state=self.seed)
def get_parallel_minibatch(self, index, batch_size, sent_type="train"):
"""Prepare minibatch.
Args:
index(int): The index for line.
batch_size(int): Batch size.
sent_type(str): Type of dataset.
Returns:
dict for batch training.
"""
if sent_type == "train":
lines = self.train_lines
elif sent_type == "dev":
lines = self.dev_lines
else:
lines = self.test_lines
sent1 = [
["<s>"] + line[0].split() + ["</s>"]
for line in lines[index : index + batch_size]
]
sent2 = [
["<s>"] + line[1].split() + ["</s>"]
for line in lines[index : index + batch_size]
]
labels = [
self.text2label[line[2]]
for line in lines[index : index + batch_size]
]
sent1_lens = [len(line) for line in sent1]
sorted_sent1_indices = np.argsort(sent1_lens)[::-1]
sorted_sent1_lines = [sent1[idx] for idx in sorted_sent1_indices]
rev_sent1 = np.argsort(sorted_sent1_indices)
sent2_lens = [len(line) for line in sent2]
sorted_sent2_indices = np.argsort(sent2_lens)[::-1]
sorted_sent2_lines = [sent2[idx] for idx in sorted_sent2_indices]
rev_sent2 = np.argsort(sorted_sent2_indices)
sorted_sent1_lens = [len(line) for line in sorted_sent1_lines]
sorted_sent2_lens = [len(line) for line in sorted_sent2_lines]
max_sent1_len = max(sorted_sent1_lens)
max_sent2_len = max(sorted_sent2_lens)
sent1 = [
[
self.word2id[w] if w in self.word2id else self.word2id["<unk>"]
for w in line
]
+ [self.word2id["<pad>"]] * (max_sent1_len - len(line))
for line in sorted_sent1_lines
]
sent2 = [
[
self.word2id[w] if w in self.word2id else self.word2id["<unk>"]
for w in line
]
+ [self.word2id["<pad>"]] * (max_sent2_len - len(line))
for line in sorted_sent2_lines
]
sent1 = Variable(torch.LongTensor(sent1)).cuda()
sent2 = Variable(torch.LongTensor(sent2)).cuda()
labels = Variable(torch.LongTensor(labels)).cuda()
sent1_lens = (
Variable(torch.LongTensor(sorted_sent1_lens), requires_grad=False)
.squeeze()
.cuda()
)
sent2_lens = (
Variable(torch.LongTensor(sorted_sent2_lens), requires_grad=False)
.squeeze()
.cuda()
)
rev_sent1 = (
Variable(torch.LongTensor(rev_sent1), requires_grad=False)
.squeeze()
.cuda()
)
rev_sent2 = (
Variable(torch.LongTensor(rev_sent2), requires_grad=False)
.squeeze()
.cuda()
)
return {
"sent1": sent1,
"sent2": sent2,
"sent1_lens": sent1_lens,
"sent2_lens": sent2_lens,
"rev_sent1": rev_sent1,
"rev_sent2": rev_sent2,
"labels": labels,
"type": "nli",
}
def get_validation_minibatch(
src, trg, index, batch_size, src_word2id, trg_word2id
):
"""Prepare minibatch.
Args:
src(list): source data.
trg(list): target data.
index(int): index for the file.
batch_size(int): batch size.
src_word2id(list): Word to index for source.
trg_word2id(list): Word to index for target.
Returns:
Dict for seq2seq model.
"""
src_lines = [
["<s>"] + line + ["</s>"] for line in src[index : index + batch_size]
]
trg_lines = [
["<s>"] + line + ["</s>"] for line in trg[index : index + batch_size]
]
src_lens = [len(line) for line in src_lines]
sorted_indices = np.argsort(src_lens)[::-1]
sorted_src_lines = [src_lines[idx] for idx in sorted_indices]
sorted_trg_lines = [trg_lines[idx] for idx in sorted_indices]
sorted_src_lens = [len(line) for line in sorted_src_lines]
sorted_trg_lens = [len(line) for line in sorted_trg_lines]
max_src_len = max(sorted_src_lens)
max_trg_len = max(sorted_trg_lens)
input_lines_src = [
[src_word2id[w] if w in src else src_word2id["<unk>"] for w in line]
+ [src_word2id["<pad>"]] * (max_src_len - len(line))
for line in sorted_src_lines
]
input_lines_trg = [
[
trg_word2id[w] if w in trg_word2id else trg_word2id["<unk>"]
for w in line[:-1]
]
+ [trg_word2id["<pad>"]] * (max_trg_len - len(line))
for line in sorted_trg_lines
]
output_lines_trg = [
[
trg_word2id[w] if w in trg_word2id else trg_word2id["<unk>"]
for w in line[1:]
]
+ [trg_word2id["<pad>"]] * (max_trg_len - len(line))
for line in sorted_trg_lines
]
    # For PyTorch 0.4: build the tensors under no_grad so no autograd graph is kept.
with torch.no_grad():
input_lines_src = Variable(torch.LongTensor(input_lines_src)).cuda()
input_lines_trg = Variable(torch.LongTensor(input_lines_trg)).cuda()
output_lines_trg = Variable(torch.LongTensor(output_lines_trg)).cuda()
# sorted_src_lens = Variable(
# torch.LongTensor(sorted_src_lens)
# ).squeeze().cuda()
sorted_src_lens = (
Variable(torch.LongTensor(sorted_src_lens))
.view(len(sorted_src_lens))
.cuda()
)
return {
"input_src": input_lines_src,
"input_trg": input_lines_trg,
"output_trg": output_lines_trg,
"src_lens": sorted_src_lens,
"type": "seq2seq",
}


def compute_validation_loss(
config, model, train_iterator, criterion, task_idx, lowercase=False
):
"""Compute validation loss for a task.
Args:
        config(dict): configuration dictionary.
        model(MultitaskModel): model to evaluate.
        train_iterator(BufferedDataIterator): multi-parallel corpus data iterator.
        criterion(nn.CrossEntropyLoss): loss criterion.
        task_idx(int): task index.
        lowercase(bool): whether to lowercase the validation data.
    Returns:
        float: mean validation loss over all minibatches.
"""
val_src = config["data"]["paths"][task_idx]["val_src"]
val_trg = config["data"]["paths"][task_idx]["val_trg"]
    # Read and tokenize the validation files, optionally lowercasing,
    # closing the file handles once the lines have been consumed.
    if lowercase:
        with open(val_src, "r", encoding="utf-8") as f:
            val_src = [line.strip().lower().split() for line in f]
        with open(val_trg, "r", encoding="utf-8") as f:
            val_trg = [line.strip().lower().split() for line in f]
    else:
        with open(val_src, "r", encoding="utf-8") as f:
            val_src = [line.strip().split() for line in f]
        with open(val_trg, "r", encoding="utf-8") as f:
            val_trg = [line.strip().split() for line in f]
batch_size = config["training"]["batch_size"]
losses = []
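    # Iterate over the validation set in contiguous minibatches, compute the
    # cross-entropy between decoder logits and shifted targets, and average.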
for j in range(0, len(val_src), batch_size):
minibatch = get_validation_minibatch(
val_src,
val_trg,
j,
batch_size,
train_iterator.src[task_idx]["word2id"],
train_iterator.trg[task_idx]["word2id"],
)
decoder_logit = model(minibatch, task_idx)
loss = criterion(
decoder_logit.contiguous().view(-1, decoder_logit.size(2)),
minibatch["output_trg"].contiguous().view(-1),
)
# losses.append(loss.data[0])
losses.append(loss.item())
return np.mean(losses)
# Original source: https://github.com/Maluuba/gensen
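# Illustrative usage sketch (not part of the original source): assumes a
# `config` dict, a trained `model`, and a `BufferedDataIterator` named
# `train_iterator` have been built elsewhere, and that every entry in
# config["data"]["paths"] is a seq2seq task; adjust the task indices otherwise.
#
#     criterion = torch.nn.CrossEntropyLoss()
#     for task_idx in range(len(config["data"]["paths"])):
#         val_loss = compute_validation_loss(
#             config, model, train_iterator, criterion, task_idx, lowercase=True
#         )
#         print("task %d validation loss: %.4f" % (task_idx, val_loss))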
# APIs used in this module:
#   itertools.chain.from_iterable, torch.LongTensor, os.path.exists,
#   numpy.argsort, numpy.mean, sklearn.utils.shuffle, torch.no_grad,
#   os.path.join, operator.itemgetter