code (string, lengths 31 to 1.05M) | apis (list) | extract_api (string, lengths 97 to 1.91M)
---|---|---|
# -*- coding: utf-8 -*-
import os
import numpy as np
import subprocess as subp
from sklearn.datasets import load_iris
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_digits
from sklearn.utils import shuffle
from tests.utils.Timer import Timer
from tests.estimator.classifier.SeparatedData import SeparatedData
class Classifier(Timer, SeparatedData):
N_RANDOM_FEATURE_SETS = 30
N_EXISTING_FEATURE_SETS = 30
def setUp(self):
np.random.seed(5)
self._init_env()
self._start_test()
def tearDown(self):
self._clear_estimator()
self._stop_test()
def _init_env(self):
for param in ['N_RANDOM_FEATURE_SETS', 'N_EXISTING_FEATURE_SETS']:
n = os.environ.get(param, None)
if n is not None and str(n).strip().isdigit():
n = int(n)
if n > 0:
self.__setattr__(param, n)
def load_binary_data(self, shuffled=True):
samples = load_breast_cancer()
# Shuffle features and labels together so they stay aligned.
self.X, self.y = shuffle(samples.data, samples.target) if shuffled else (samples.data, samples.target)
self.n_features = len(self.X[0])
def load_iris_data(self, shuffled=True):
samples = load_iris()
# Shuffle features and labels together so they stay aligned.
self.X, self.y = shuffle(samples.data, samples.target) if shuffled else (samples.data, samples.target)
self.n_features = len(self.X[0])
def load_digits_data(self, shuffled=True):
samples = load_digits()
# Shuffle features and labels together so they stay aligned.
self.X, self.y = shuffle(samples.data, samples.target) if shuffled else (samples.data, samples.target)
self.n_features = len(self.X[0])
def _clear_estimator(self):
self.estimator = None
cmd = 'rm -rf tmp'.split()
subp.call(cmd)
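# A minimal usage sketch (an illustration, not part of the original file): concrete test
# cases are expected to mix this class into a unittest.TestCase together with a fitted
# estimator, roughly like this:
#
#     class DecisionTreeClassifierTest(Classifier, unittest.TestCase):
#         def setUp(self):
#             super().setUp()
#             self.load_iris_data()
#             self.estimator = DecisionTreeClassifier(random_state=0).fit(self.X, self.y)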
|
[
"sklearn.datasets.load_iris",
"sklearn.datasets.load_digits",
"numpy.random.seed",
"sklearn.datasets.load_breast_cancer",
"os.environ.get",
"subprocess.call",
"sklearn.utils.shuffle"
] |
[((484, 501), 'numpy.random.seed', 'np.random.seed', (['(5)'], {}), '(5)\n', (498, 501), True, 'import numpy as np\n'), ((1007, 1027), 'sklearn.datasets.load_breast_cancer', 'load_breast_cancer', ([], {}), '()\n', (1025, 1027), False, 'from sklearn.datasets import load_breast_cancer\n'), ((1275, 1286), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (1284, 1286), False, 'from sklearn.datasets import load_iris\n'), ((1536, 1549), 'sklearn.datasets.load_digits', 'load_digits', ([], {}), '()\n', (1547, 1549), False, 'from sklearn.datasets import load_digits\n'), ((1839, 1853), 'subprocess.call', 'subp.call', (['cmd'], {}), '(cmd)\n', (1848, 1853), True, 'import subprocess as subp\n'), ((754, 781), 'os.environ.get', 'os.environ.get', (['param', 'None'], {}), '(param, None)\n', (768, 781), False, 'import os\n'), ((1045, 1066), 'sklearn.utils.shuffle', 'shuffle', (['samples.data'], {}), '(samples.data)\n', (1052, 1066), False, 'from sklearn.utils import shuffle\n'), ((1114, 1137), 'sklearn.utils.shuffle', 'shuffle', (['samples.target'], {}), '(samples.target)\n', (1121, 1137), False, 'from sklearn.utils import shuffle\n'), ((1304, 1325), 'sklearn.utils.shuffle', 'shuffle', (['samples.data'], {}), '(samples.data)\n', (1311, 1325), False, 'from sklearn.utils import shuffle\n'), ((1373, 1396), 'sklearn.utils.shuffle', 'shuffle', (['samples.target'], {}), '(samples.target)\n', (1380, 1396), False, 'from sklearn.utils import shuffle\n'), ((1567, 1588), 'sklearn.utils.shuffle', 'shuffle', (['samples.data'], {}), '(samples.data)\n', (1574, 1588), False, 'from sklearn.utils import shuffle\n'), ((1636, 1659), 'sklearn.utils.shuffle', 'shuffle', (['samples.target'], {}), '(samples.target)\n', (1643, 1659), False, 'from sklearn.utils import shuffle\n')]
|
"""
###########################################################################
# @file optimization_utils.py
# @brief Functions for optimizing transformations and parameters.
#
# @author <NAME>
#
# @Link: https://www.cbica.upenn.edu/sbia/software/
#
# @Contact: <EMAIL>
##########################################################################
"""
import numpy as np
from numpy import transpose as Tr
def initialization(x,y,K):
np.random.seed()
D,M = x.shape
N = y.shape[1]
params = {'delta':None,'sigsq':None,'T':None,'t':None}
params['delta'] = np.ones((K,M))/K
sigsq = 0
for n in range(N):
tmp = x - y[:,n].reshape(-1,1)
sigsq = sigsq + np.sum(np.power(tmp,2))
params['sigsq'] = sigsq/D/M/N
params['T'] = np.repeat(np.eye(D).reshape(D,D,1),K,axis=2)
params['t'] = np.random.uniform(size=(D,K))
return params
def transform( x,params ):
T = params['T']
t = params['t']
delta = params['delta']
[D,M] = x.shape
K = T.shape[2]
transformed_x = np.zeros((D,M))
Tym = np.zeros((D,M,K))
for k in range(K):
Tym[:,:,k] = np.dot(T[:,:,k], x) + t[:,k].reshape(-1,1)
for m in range(M):
tmp = np.zeros(D)
for k in range(K):
tmp = tmp + delta[k,m] * Tym[:,m,k]
transformed_x[:,m] = tmp
return transformed_x
def transform2( x,params ):
T = params['T']
delta = params['delta']
D,M = x.shape
K = T.shape[2]
transformed_x = np.zeros((D,M))
Tym = np.zeros((D,M,K))
for k in range(K):
Tym[:,:,k] = np.dot(T[:,:,k],x)
for m in range(M):
tmp = np.zeros(D)
for k in range(K):
tmp = tmp + delta[k,m] * Tym[:,m,k]
transformed_x[:,m] = tmp
return transformed_x
def transform3( x,params ):
T = params['T']
t = params['t']
D,K = t.shape
transformed_x = np.zeros((D,K))
for k in range(K):
transformed_x[:,k] = np.dot(T[:,:,k],x) + t[:,k]
return transformed_x
def Estep(y,yd,ys,tx,xd,xs,sigsq,r,rs):
"""Expectation calculation.
"""
M = tx.shape[1]
N = y.shape[1]
#> calculate RBF kernel distance based on imaging features
D1 = np.diag(np.dot(Tr(y),y))
D2 = np.diag(np.dot(Tr(tx),tx))
Mid = 2 * np.dot(Tr(y),tx)
tmp1 = D1.reshape(-1,1).repeat(M,axis=1) - Mid + D2.reshape(1,-1).repeat(N,axis=0)
#> calculate RBF kernel distance based on covariate features
tmp2 = np.zeros(tmp1.shape)
if r != 0:
D1 = np.diag(np.dot(Tr(yd),yd))
D2 = np.diag(np.dot(Tr(xd),xd))
Mid = 2 * np.dot(Tr(yd),xd)
tmp2 = D1.reshape(-1,1).repeat(M,axis=1) - Mid + D2.reshape(1,-1).repeat(N,axis=0)
#> calculate RBF kernel distance based on set information
tmp3 = np.zeros(tmp1.shape)
if rs != 0:
D1 = np.diag(np.dot(Tr(ys),ys))
D2 = np.diag(np.dot(Tr(xs),xs))
Mid = 2 * np.dot(Tr(ys),xs)
tmp3 = D1.reshape(-1,1).repeat(M,axis=1) - Mid + D2.reshape(1,-1).repeat(N,axis=0)
#> combine distances and normalize to a probability distribution
P = np.exp((-tmp1-r*tmp2-rs*tmp3)/2/sigsq)+np.finfo(np.float64).tiny  # np.float64: np.float was removed in newer NumPy
P = P/np.sum(P,axis=1).reshape(-1,1)
return P
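# Reading of the expression above, stated for clarity: entry P[n, m] is proportional to
#   exp(-(||y_n - tx_m||^2 + r*||yd_n - xd_m||^2 + rs*||ys_n - xs_m||^2) / (2*sigsq))
# and each row is normalized to sum to one, i.e. the responsibility of transformed
# source point m for target point n.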
def Mstep(y,yd,ys,x,tx,xd,xs,P,params,config):
"""Mstep optimization, for different transformation import different modules
"""
if config['transform'] == 'affine':
from Mstep_affine import solve_sigsq,solve_delta,solve_T,solve_t
elif config['transform'] == 'duo':
from Mstep_duo import solve_sigsq,solve_delta,solve_T,solve_t
else:
from Mstep_trans import solve_sigsq,solve_delta,solve_T,solve_t
params['sigsq'] = solve_sigsq(y,yd,ys,tx,xd,xs,P,params,config)
params['delta'] = solve_delta(y,x,P,params)
params['T'] = solve_T(y,x,P,params,config)
params['t'] = solve_t(y,x,P,params,config)
return params
def calc_obj(x,y,xd,yd,xs,ys,P,params,config):
"""Objective function calculation
"""
lambda1 = config['lambda1']
lambda2 = config['lambda2']
r = config['r']
rs = config['rs']
K = config['K']
D,N = y.shape
M = x.shape[1]
d = 0
ds = 0
IM = np.ones((M,1))
IN = np.ones((N,1))
tx = transform(x,params)
tmp = 0
for i in range(K):
tmp = tmp + np.power(np.linalg.norm(params['T'][:,:,i]-np.eye(D),'fro'),2)
P1 = np.diag(np.dot(P,IM).flatten())
P2 = np.diag(np.dot(Tr(P),IN).flatten())
term1 = np.trace(y.dot(P1).dot(Tr(y)) - 2*y.dot(P).dot(Tr(tx)) + tx.dot(P2).dot(Tr(tx)))
term2 = 0
if r != 0:
d = xd.shape[0]
term2 = r * np.trace(yd.dot(P1).dot(Tr(yd)) - 2*yd.dot(P).dot(Tr(xd)) + xd.dot(P2).dot(Tr(xd)))
term3 = 0
if rs != 0:
ds = 1
term3 = rs * np.trace(ys.dot(P1).dot(Tr(ys)) - 2*ys.dot(P).dot(Tr(xs)) + xs.dot(P2).dot(Tr(xs)))
obj = 0.5/params['sigsq'] * ( term1 + term2 + term3 \
+ lambda1*np.power(np.linalg.norm(params['t'],'fro'),2) +lambda2*tmp) \
+ N*(D+d+ds)/2.0*np.log(params['sigsq'])
return obj
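# How the pieces above fit together: a minimal EM-style driver sketch. This loop is an
# illustration only; the Mstep_* solver modules, the data arrays and the config dict
# (with keys 'transform', 'r', 'rs', 'K', 'lambda1', 'lambda2') are assumed to come
# from the rest of the project and are not defined in this file.
def run_em_sketch(x, y, xd, yd, xs, ys, K, config, n_iter=50):
    params = initialization(x, y, K)
    for _ in range(n_iter):
        tx = transform(x, params)
        P = Estep(y, yd, ys, tx, xd, xs, params['sigsq'], config['r'], config['rs'])
        params = Mstep(y, yd, ys, x, tx, xd, xs, P, params, config)
    return params, calc_obj(x, y, xd, yd, xs, ys, P, params, config)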
|
[
"numpy.random.uniform",
"Mstep_trans.solve_delta",
"numpy.random.seed",
"numpy.log",
"numpy.eye",
"Mstep_trans.solve_T",
"numpy.sum",
"Mstep_trans.solve_sigsq",
"numpy.power",
"numpy.zeros",
"numpy.ones",
"numpy.transpose",
"numpy.finfo",
"numpy.linalg.norm",
"numpy.exp",
"Mstep_trans.solve_t",
"numpy.dot"
] |
[((435, 451), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (449, 451), True, 'import numpy as np\n'), ((832, 862), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(D, K)'}), '(size=(D, K))\n', (849, 862), True, 'import numpy as np\n'), ((1040, 1056), 'numpy.zeros', 'np.zeros', (['(D, M)'], {}), '((D, M))\n', (1048, 1056), True, 'import numpy as np\n'), ((1071, 1090), 'numpy.zeros', 'np.zeros', (['(D, M, K)'], {}), '((D, M, K))\n', (1079, 1090), True, 'import numpy as np\n'), ((1508, 1524), 'numpy.zeros', 'np.zeros', (['(D, M)'], {}), '((D, M))\n', (1516, 1524), True, 'import numpy as np\n'), ((1539, 1558), 'numpy.zeros', 'np.zeros', (['(D, M, K)'], {}), '((D, M, K))\n', (1547, 1558), True, 'import numpy as np\n'), ((1919, 1935), 'numpy.zeros', 'np.zeros', (['(D, K)'], {}), '((D, K))\n', (1927, 1935), True, 'import numpy as np\n'), ((2498, 2518), 'numpy.zeros', 'np.zeros', (['tmp1.shape'], {}), '(tmp1.shape)\n', (2506, 2518), True, 'import numpy as np\n'), ((2819, 2839), 'numpy.zeros', 'np.zeros', (['tmp1.shape'], {}), '(tmp1.shape)\n', (2827, 2839), True, 'import numpy as np\n'), ((3733, 3786), 'Mstep_trans.solve_sigsq', 'solve_sigsq', (['y', 'yd', 'ys', 'tx', 'xd', 'xs', 'P', 'params', 'config'], {}), '(y, yd, ys, tx, xd, xs, P, params, config)\n', (3744, 3786), False, 'from Mstep_trans import solve_sigsq, solve_delta, solve_T, solve_t\n'), ((3801, 3829), 'Mstep_trans.solve_delta', 'solve_delta', (['y', 'x', 'P', 'params'], {}), '(y, x, P, params)\n', (3812, 3829), False, 'from Mstep_trans import solve_sigsq, solve_delta, solve_T, solve_t\n'), ((3845, 3877), 'Mstep_trans.solve_T', 'solve_T', (['y', 'x', 'P', 'params', 'config'], {}), '(y, x, P, params, config)\n', (3852, 3877), False, 'from Mstep_trans import solve_sigsq, solve_delta, solve_T, solve_t\n'), ((3892, 3924), 'Mstep_trans.solve_t', 'solve_t', (['y', 'x', 'P', 'params', 'config'], {}), '(y, x, P, params, config)\n', (3899, 3924), False, 'from Mstep_trans import solve_sigsq, solve_delta, solve_T, solve_t\n'), ((4252, 4267), 'numpy.ones', 'np.ones', (['(M, 1)'], {}), '((M, 1))\n', (4259, 4267), True, 'import numpy as np\n'), ((4276, 4291), 'numpy.ones', 'np.ones', (['(N, 1)'], {}), '((N, 1))\n', (4283, 4291), True, 'import numpy as np\n'), ((575, 590), 'numpy.ones', 'np.ones', (['(K, M)'], {}), '((K, M))\n', (582, 590), True, 'import numpy as np\n'), ((1218, 1229), 'numpy.zeros', 'np.zeros', (['D'], {}), '(D)\n', (1226, 1229), True, 'import numpy as np\n'), ((1601, 1622), 'numpy.dot', 'np.dot', (['T[:, :, k]', 'x'], {}), '(T[:, :, k], x)\n', (1607, 1622), True, 'import numpy as np\n'), ((1662, 1673), 'numpy.zeros', 'np.zeros', (['D'], {}), '(D)\n', (1670, 1673), True, 'import numpy as np\n'), ((3142, 3192), 'numpy.exp', 'np.exp', (['((-tmp1 - r * tmp2 - rs * tmp3) / 2 / sigsq)'], {}), '((-tmp1 - r * tmp2 - rs * tmp3) / 2 / sigsq)\n', (3148, 3192), True, 'import numpy as np\n'), ((1133, 1154), 'numpy.dot', 'np.dot', (['T[:, :, k]', 'x'], {}), '(T[:, :, k], x)\n', (1139, 1154), True, 'import numpy as np\n'), ((1992, 2013), 'numpy.dot', 'np.dot', (['T[:, :, k]', 'x'], {}), '(T[:, :, k], x)\n', (1998, 2013), True, 'import numpy as np\n'), ((2253, 2258), 'numpy.transpose', 'Tr', (['y'], {}), '(y)\n', (2255, 2258), True, 'from numpy import transpose as Tr\n'), ((2287, 2293), 'numpy.transpose', 'Tr', (['tx'], {}), '(tx)\n', (2289, 2293), True, 'from numpy import transpose as Tr\n'), ((2320, 2325), 'numpy.transpose', 'Tr', (['y'], {}), '(y)\n', (2322, 2325), True, 'from numpy import transpose as Tr\n'), ((3181, 
3199), 'numpy.finfo', 'np.finfo', (['np.float'], {}), '(np.float)\n', (3189, 3199), True, 'import numpy as np\n'), ((5108, 5131), 'numpy.log', 'np.log', (["params['sigsq']"], {}), "(params['sigsq'])\n", (5114, 5131), True, 'import numpy as np\n'), ((699, 715), 'numpy.power', 'np.power', (['tmp', '(2)'], {}), '(tmp, 2)\n', (707, 715), True, 'import numpy as np\n'), ((779, 788), 'numpy.eye', 'np.eye', (['D'], {}), '(D)\n', (785, 788), True, 'import numpy as np\n'), ((2562, 2568), 'numpy.transpose', 'Tr', (['yd'], {}), '(yd)\n', (2564, 2568), True, 'from numpy import transpose as Tr\n'), ((2602, 2608), 'numpy.transpose', 'Tr', (['xd'], {}), '(xd)\n', (2604, 2608), True, 'from numpy import transpose as Tr\n'), ((2639, 2645), 'numpy.transpose', 'Tr', (['yd'], {}), '(yd)\n', (2641, 2645), True, 'from numpy import transpose as Tr\n'), ((2884, 2890), 'numpy.transpose', 'Tr', (['ys'], {}), '(ys)\n', (2886, 2890), True, 'from numpy import transpose as Tr\n'), ((2924, 2930), 'numpy.transpose', 'Tr', (['xs'], {}), '(xs)\n', (2926, 2930), True, 'from numpy import transpose as Tr\n'), ((2961, 2967), 'numpy.transpose', 'Tr', (['ys'], {}), '(ys)\n', (2963, 2967), True, 'from numpy import transpose as Tr\n'), ((3215, 3232), 'numpy.sum', 'np.sum', (['P'], {'axis': '(1)'}), '(P, axis=1)\n', (3221, 3232), True, 'import numpy as np\n'), ((4469, 4482), 'numpy.dot', 'np.dot', (['P', 'IM'], {}), '(P, IM)\n', (4475, 4482), True, 'import numpy as np\n'), ((4623, 4629), 'numpy.transpose', 'Tr', (['tx'], {}), '(tx)\n', (4625, 4629), True, 'from numpy import transpose as Tr\n'), ((4517, 4522), 'numpy.transpose', 'Tr', (['P'], {}), '(P)\n', (4519, 4522), True, 'from numpy import transpose as Tr\n'), ((4574, 4579), 'numpy.transpose', 'Tr', (['y'], {}), '(y)\n', (4576, 4579), True, 'from numpy import transpose as Tr\n'), ((4423, 4432), 'numpy.eye', 'np.eye', (['D'], {}), '(D)\n', (4429, 4432), True, 'import numpy as np\n'), ((4598, 4604), 'numpy.transpose', 'Tr', (['tx'], {}), '(tx)\n', (4600, 4604), True, 'from numpy import transpose as Tr\n'), ((4780, 4786), 'numpy.transpose', 'Tr', (['xd'], {}), '(xd)\n', (4782, 4786), True, 'from numpy import transpose as Tr\n'), ((4930, 4936), 'numpy.transpose', 'Tr', (['xs'], {}), '(xs)\n', (4932, 4936), True, 'from numpy import transpose as Tr\n'), ((4729, 4735), 'numpy.transpose', 'Tr', (['yd'], {}), '(yd)\n', (4731, 4735), True, 'from numpy import transpose as Tr\n'), ((4879, 4885), 'numpy.transpose', 'Tr', (['ys'], {}), '(ys)\n', (4881, 4885), True, 'from numpy import transpose as Tr\n'), ((5027, 5061), 'numpy.linalg.norm', 'np.linalg.norm', (["params['t']", '"""fro"""'], {}), "(params['t'], 'fro')\n", (5041, 5061), True, 'import numpy as np\n'), ((4755, 4761), 'numpy.transpose', 'Tr', (['xd'], {}), '(xd)\n', (4757, 4761), True, 'from numpy import transpose as Tr\n'), ((4905, 4911), 'numpy.transpose', 'Tr', (['xs'], {}), '(xs)\n', (4907, 4911), True, 'from numpy import transpose as Tr\n')]
|
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
m = 1e-3                                       # milli prefix used for axis scaling
i_load = np.logspace(-5,-3)                   # coarse grid, superseded by the linear grid below
i_load = np.linspace(1e-5,1e-3,200)          # load current sweep: 10 uA to 1 mA
i_s = 1e-12                                    # diode saturation current [A]
i_ph = 1e-3                                    # photo-generated current [A]
V_T = 1.38e-23*300/1.6e-19                     # thermal voltage kT/q at 300 K [V]
V_D = V_T*np.log((i_ph - i_load)/(i_s) + 1)    # diode voltage for each load current (ideal diode equation)
P_load = V_D*i_load                            # power delivered to the load
plt.subplot(2,1,1)
plt.plot(i_load/m,V_D)
plt.ylabel("Diode voltage [V]")
plt.grid()
plt.subplot(2,1,2)
plt.plot(i_load/m,P_load/m)
plt.xlabel("Current load [mA]")
plt.ylabel("Power Load [mW]")
plt.grid()
plt.savefig("pv.pdf")
plt.show()
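# A quick numerical check using the arrays computed above (an added illustration):
# report the load current at which the delivered power peaks, i.e. the approximate
# maximum power point.
i_mpp = i_load[np.argmax(P_load)]
print("Approximate maximum power point: %.3f mA, %.4f mW" % (i_mpp/m, P_load.max()/m))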
|
[
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"numpy.log",
"matplotlib.pyplot.plot",
"numpy.logspace",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig"
] |
[((94, 113), 'numpy.logspace', 'np.logspace', (['(-5)', '(-3)'], {}), '(-5, -3)\n', (105, 113), True, 'import numpy as np\n'), ((122, 152), 'numpy.linspace', 'np.linspace', (['(1e-05)', '(0.001)', '(200)'], {}), '(1e-05, 0.001, 200)\n', (133, 152), True, 'import numpy as np\n'), ((271, 291), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (282, 291), True, 'import matplotlib.pyplot as plt\n'), ((290, 315), 'matplotlib.pyplot.plot', 'plt.plot', (['(i_load / m)', 'V_D'], {}), '(i_load / m, V_D)\n', (298, 315), True, 'import matplotlib.pyplot as plt\n'), ((314, 345), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Diode voltage [V]"""'], {}), "('Diode voltage [V]')\n", (324, 345), True, 'import matplotlib.pyplot as plt\n'), ((346, 356), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (354, 356), True, 'import matplotlib.pyplot as plt\n'), ((357, 377), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (368, 377), True, 'import matplotlib.pyplot as plt\n'), ((376, 408), 'matplotlib.pyplot.plot', 'plt.plot', (['(i_load / m)', '(P_load / m)'], {}), '(i_load / m, P_load / m)\n', (384, 408), True, 'import matplotlib.pyplot as plt\n'), ((404, 435), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Current load [mA]"""'], {}), "('Current load [mA]')\n", (414, 435), True, 'import matplotlib.pyplot as plt\n'), ((436, 465), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Power Load [mW]"""'], {}), "('Power Load [mW]')\n", (446, 465), True, 'import matplotlib.pyplot as plt\n'), ((466, 476), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (474, 476), True, 'import matplotlib.pyplot as plt\n'), ((477, 498), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""pv.pdf"""'], {}), "('pv.pdf')\n", (488, 498), True, 'import matplotlib.pyplot as plt\n'), ((499, 509), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (507, 509), True, 'import matplotlib.pyplot as plt\n'), ((214, 247), 'numpy.log', 'np.log', (['((i_ph - i_load) / i_s + 1)'], {}), '((i_ph - i_load) / i_s + 1)\n', (220, 247), True, 'import numpy as np\n')]
|
# Modified based on the HRNet repo.
from __future__ import absolute_import, division, print_function
import logging
import os
import time
from pathlib import Path
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
class FullModel(nn.Module):
"""
Distribute the loss on multi-gpu to reduce
the memory cost in the main gpu.
You can check the following discussion.
https://discuss.pytorch.org/t/dataparallel-imbalanced-memory-usage/22551/21
"""
def __init__(self, model, loss):
super(FullModel, self).__init__()
self.model = model
self.loss = loss
def forward(self, inputs, labels, train_step=-1, **kwargs):
outputs, jac_loss, sradius = self.model(inputs, train_step=train_step, **kwargs)
loss = self.loss(outputs, labels)
return loss.unsqueeze(0), jac_loss.unsqueeze(0), outputs, sradius
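# A wrapping sketch (assumption: `net`, `criterion`, `images`, `labels` and `step`
# are defined by the training script, which is not part of this file):
#
#     full_model = torch.nn.DataParallel(FullModel(net, criterion)).cuda()
#     loss, jac_loss, outputs, sradius = full_model(images, labels, train_step=step)
#     loss.mean().backward()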
def get_world_size():
if not torch.distributed.is_initialized():
return 1
return torch.distributed.get_world_size()
def get_rank():
if not torch.distributed.is_initialized():
return 0
return torch.distributed.get_rank()
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.initialized = False
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def initialize(self, val, weight):
self.val = val
self.avg = val
self.sum = val * weight
self.count = weight
self.initialized = True
def update(self, val, weight=1):
if not self.initialized:
self.initialize(val, weight)
else:
self.add(val, weight)
def add(self, val, weight):
self.val = val
self.sum += val * weight
self.count += weight
self.avg = self.sum / self.count
def value(self):
return self.val
def average(self):
return self.avg
def create_logger(cfg, cfg_name, phase="train"):
root_output_dir = Path(cfg.OUTPUT_DIR)
# set up logger
if not root_output_dir.exists():
print("=> creating {}".format(root_output_dir))
root_output_dir.mkdir()
dataset = cfg.DATASET.DATASET
model = cfg.MODEL.NAME
cfg_name = os.path.basename(cfg_name).split(".")[0]
final_output_dir = root_output_dir / dataset / cfg_name
print("=> creating {}".format(final_output_dir))
final_output_dir.mkdir(parents=True, exist_ok=True)
time_str = time.strftime("%Y-%m-%d-%H-%M")
log_file = "{}_{}_{}.log".format(cfg_name, time_str, phase)
final_log_file = final_output_dir / log_file
head = "%(asctime)-15s %(message)s"
logging.basicConfig(filename=str(final_log_file), format=head)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
console = logging.StreamHandler()
logging.getLogger("").addHandler(console)
tensorboard_log_dir = Path(cfg.LOG_DIR) / dataset / model / cfg_name
print("=> creating {}".format(tensorboard_log_dir))
tensorboard_log_dir.mkdir(parents=True, exist_ok=True)
return logger, str(final_output_dir), str(tensorboard_log_dir)
def get_optimizer(cfg, model):
optimizer = None
if cfg.TRAIN.OPTIMIZER == "sgd":
optimizer = optim.SGD(
filter(lambda p: p.requires_grad, model.parameters()),
lr=cfg.TRAIN.LR,
momentum=cfg.TRAIN.MOMENTUM,
weight_decay=cfg.TRAIN.WD,
nesterov=cfg.TRAIN.NESTEROV,
)
elif cfg.TRAIN.OPTIMIZER == "adam":
optimizer = optim.Adam(
filter(lambda p: p.requires_grad, model.parameters()),
lr=cfg.TRAIN.LR,
weight_decay=cfg.TRAIN.WD,
)
elif cfg.TRAIN.OPTIMIZER == "rmsprop":
optimizer = optim.RMSprop(
filter(lambda p: p.requires_grad, model.parameters()),
lr=cfg.TRAIN.LR,
momentum=cfg.TRAIN.MOMENTUM,
weight_decay=cfg.TRAIN.WD,
alpha=cfg.TRAIN.RMSPROP_ALPHA,
centered=cfg.TRAIN.RMSPROP_CENTERED,
)
return optimizer
def save_checkpoint(states, is_best, output_dir, filename="checkpoint.pth.tar"):
torch.save(states, os.path.join(output_dir, filename))
if is_best and "state_dict" in states:
torch.save(states["state_dict"], os.path.join(output_dir, "model_best.pth.tar"))
def get_confusion_matrix(label, pred, size, num_class, ignore=-1):
"""
Calculate the confusion matrix for the given label and prediction tensors
"""
output = pred.cpu().numpy().transpose(0, 2, 3, 1)
seg_pred = np.asarray(np.argmax(output, axis=3), dtype=np.uint8)
seg_gt = np.asarray(label.cpu().numpy()[:, : size[-2], : size[-1]], dtype=int)  # plain int: np.int was removed in newer NumPy
ignore_index = seg_gt != ignore
seg_gt = seg_gt[ignore_index]
seg_pred = seg_pred[ignore_index]
index = (seg_gt * num_class + seg_pred).astype("int32")
label_count = np.bincount(index)
confusion_matrix = np.zeros((num_class, num_class))
for i_label in range(num_class):
for i_pred in range(num_class):
cur_index = i_label * num_class + i_pred
if cur_index < len(label_count):
confusion_matrix[i_label, i_pred] = label_count[cur_index]
return confusion_matrix
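# A small companion sketch (not in the original file): per-class IoU can be read off
# the confusion matrix returned above, whose rows index ground truth and whose columns
# index predictions.
def confusion_matrix_to_iou(confusion_matrix):
    pos = confusion_matrix.sum(axis=1)   # ground-truth pixels per class
    res = confusion_matrix.sum(axis=0)   # predicted pixels per class
    tp = np.diag(confusion_matrix)       # correctly classified pixels per class
    return tp / np.maximum(1.0, pos + res - tp)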
def adjust_learning_rate(optimizer, base_lr, max_iters, cur_iters, power=0.9):
lr = base_lr * ((1 - float(cur_iters) / max_iters) ** (power))
optimizer.param_groups[0]["lr"] = lr
return lr
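# Worked example with hypothetical numbers: for base_lr=0.01, max_iters=1000 and
# power=0.9, the learning rate half-way through training is
# 0.01 * (1 - 500/1000) ** 0.9, i.e. about 0.0054.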
################################################################################
# The following functions are based on:
# https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/nets_utils.py
def make_pad_mask(lengths, xs=None, length_dim=-1):
"""Make mask tensor containing indices of padded part.
Args:
lengths (LongTensor or List): Batch of lengths (B,).
xs (Tensor, optional): The reference tensor.
If set, masks will be the same shape as this tensor.
length_dim (int, optional): Dimension indicator of the above tensor.
See the example.
Returns:
Tensor: Mask tensor containing indices of padded part.
dtype=torch.uint8 in PyTorch 1.2-
dtype=torch.bool in PyTorch 1.2+ (including 1.2)
Examples:
With only lengths.
>>> lengths = [5, 3, 2]
>>> make_pad_mask(lengths)
masks = [[0, 0, 0, 0, 0],
[0, 0, 0, 1, 1],
[0, 0, 1, 1, 1]]
With the reference tensor.
>>> xs = torch.zeros((3, 2, 4))
>>> make_pad_mask(lengths, xs)
tensor([[[0, 0, 0, 0],
[0, 0, 0, 0]],
[[0, 0, 0, 1],
[0, 0, 0, 1]],
[[0, 0, 1, 1],
[0, 0, 1, 1]]], dtype=torch.uint8)
>>> xs = torch.zeros((3, 2, 6))
>>> make_pad_mask(lengths, xs)
tensor([[[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1]],
[[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1]],
[[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)
With the reference tensor and dimension indicator.
>>> xs = torch.zeros((3, 6, 6))
>>> make_pad_mask(lengths, xs, 1)
tensor([[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1]],
[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1]],
[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1]]], dtype=torch.uint8)
>>> make_pad_mask(lengths, xs, 2)
tensor([[[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1]],
[[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1]],
[[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)
"""
if length_dim == 0:
raise ValueError("length_dim cannot be 0: {}".format(length_dim))
if not isinstance(lengths, list):
lengths = lengths.tolist()
bs = int(len(lengths))
if xs is None:
maxlen = int(max(lengths))
else:
maxlen = xs.size(length_dim)
seq_range = torch.arange(0, maxlen, dtype=torch.int64)
seq_range_expand = seq_range.unsqueeze(0).expand(bs, maxlen)
seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1)
mask = seq_range_expand >= seq_length_expand
if xs is not None:
assert xs.size(0) == bs, (xs.size(0), bs)
if length_dim < 0:
length_dim = xs.dim() + length_dim
# ind = (:, None, ..., None, :, , None, ..., None)
ind = tuple(
slice(None) if i in (0, length_dim) else None for i in range(xs.dim())
)
mask = mask[ind].expand_as(xs).to(xs.device)
return mask
def make_non_pad_mask(lengths, xs=None, length_dim=-1):
"""Make mask tensor containing indices of non-padded part.
Args:
lengths (LongTensor or List): Batch of lengths (B,).
xs (Tensor, optional): The reference tensor.
If set, masks will be the same shape as this tensor.
length_dim (int, optional): Dimension indicator of the above tensor.
See the example.
Returns:
ByteTensor: mask tensor containing indices of padded part.
dtype=torch.uint8 in PyTorch 1.2-
dtype=torch.bool in PyTorch 1.2+ (including 1.2)
Examples:
With only lengths.
>>> lengths = [5, 3, 2]
>>> make_non_pad_mask(lengths)
masks = [[1, 1, 1, 1, 1],
[1, 1, 1, 0, 0],
[1, 1, 0, 0, 0]]
With the reference tensor.
>>> xs = torch.zeros((3, 2, 4))
>>> make_non_pad_mask(lengths, xs)
tensor([[[1, 1, 1, 1],
[1, 1, 1, 1]],
[[1, 1, 1, 0],
[1, 1, 1, 0]],
[[1, 1, 0, 0],
[1, 1, 0, 0]]], dtype=torch.uint8)
>>> xs = torch.zeros((3, 2, 6))
>>> make_non_pad_mask(lengths, xs)
tensor([[[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0]],
[[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0]],
[[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0]]], dtype=torch.uint8)
With the reference tensor and dimension indicator.
>>> xs = torch.zeros((3, 6, 6))
>>> make_non_pad_mask(lengths, xs, 1)
tensor([[[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0]],
[[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]],
[[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]]], dtype=torch.uint8)
>>> make_non_pad_mask(lengths, xs, 2)
tensor([[[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0]],
[[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0]],
[[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0]]], dtype=torch.uint8)
"""
return ~make_pad_mask(lengths, xs, length_dim)
|
[
"torch.distributed.is_initialized",
"numpy.argmax",
"torch.distributed.get_rank",
"os.path.basename",
"logging.StreamHandler",
"numpy.zeros",
"time.strftime",
"pathlib.Path",
"torch.arange",
"torch.distributed.get_world_size",
"numpy.bincount",
"os.path.join",
"logging.getLogger"
] |
[((1003, 1037), 'torch.distributed.get_world_size', 'torch.distributed.get_world_size', ([], {}), '()\n', (1035, 1037), False, 'import torch\n'), ((1131, 1159), 'torch.distributed.get_rank', 'torch.distributed.get_rank', ([], {}), '()\n', (1157, 1159), False, 'import torch\n'), ((2058, 2078), 'pathlib.Path', 'Path', (['cfg.OUTPUT_DIR'], {}), '(cfg.OUTPUT_DIR)\n', (2062, 2078), False, 'from pathlib import Path\n'), ((2529, 2560), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d-%H-%M"""'], {}), "('%Y-%m-%d-%H-%M')\n", (2542, 2560), False, 'import time\n'), ((2794, 2813), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2811, 2813), False, 'import logging\n'), ((2862, 2885), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (2883, 2885), False, 'import logging\n'), ((4946, 4964), 'numpy.bincount', 'np.bincount', (['index'], {}), '(index)\n', (4957, 4964), True, 'import numpy as np\n'), ((4988, 5020), 'numpy.zeros', 'np.zeros', (['(num_class, num_class)'], {}), '((num_class, num_class))\n', (4996, 5020), True, 'import numpy as np\n'), ((9070, 9112), 'torch.arange', 'torch.arange', (['(0)', 'maxlen'], {'dtype': 'torch.int64'}), '(0, maxlen, dtype=torch.int64)\n', (9082, 9112), False, 'import torch\n'), ((939, 973), 'torch.distributed.is_initialized', 'torch.distributed.is_initialized', ([], {}), '()\n', (971, 973), False, 'import torch\n'), ((1067, 1101), 'torch.distributed.is_initialized', 'torch.distributed.is_initialized', ([], {}), '()\n', (1099, 1101), False, 'import torch\n'), ((4239, 4273), 'os.path.join', 'os.path.join', (['output_dir', 'filename'], {}), '(output_dir, filename)\n', (4251, 4273), False, 'import os\n'), ((4629, 4654), 'numpy.argmax', 'np.argmax', (['output'], {'axis': '(3)'}), '(output, axis=3)\n', (4638, 4654), True, 'import numpy as np\n'), ((2890, 2911), 'logging.getLogger', 'logging.getLogger', (['""""""'], {}), "('')\n", (2907, 2911), False, 'import logging\n'), ((4359, 4405), 'os.path.join', 'os.path.join', (['output_dir', '"""model_best.pth.tar"""'], {}), "(output_dir, 'model_best.pth.tar')\n", (4371, 4405), False, 'import os\n'), ((2301, 2327), 'os.path.basename', 'os.path.basename', (['cfg_name'], {}), '(cfg_name)\n', (2317, 2327), False, 'import os\n'), ((2959, 2976), 'pathlib.Path', 'Path', (['cfg.LOG_DIR'], {}), '(cfg.LOG_DIR)\n', (2963, 2976), False, 'from pathlib import Path\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 14 10:40:39 2016
Converting reflectance spectrum to a CIE coordinate
@author: Bonan
"""
import numpy as np
from scipy import interpolate
import os
# Adobe RGB (1998) D65 as reference white
# http://www.brucelindbloom.com/index.html?Eqn_XYZ_to_RGB.html
_RGB_to_XYZ = np.array([
[0.5767309, 0.1855540, 0.1881852],
[0.2973769, 0.6273491, 0.0752741],
[0.0270343, 0.0706872, 0.9911085], ])
_XYZ_to_RGB = np.array([
[2.0413690, -0.5649464, -0.3446944],
[-0.9692660, 1.8760108, 0.0415560],
[0.0134474, -0.1183897, 1.0154096], ])
# Load the CIE 1931 colour-matching functions and the CIE A / D65 illuminant tables
_dirname = os.path.dirname(__file__)
fn1 = os.path.join(_dirname, 'CIE_1931_XYZ.txt')
fn2 = os.path.join(_dirname, 'CIE_A.txt')
fn3 = os.path.join(_dirname, 'CIE_D65.txt')
CIE_XYZ_table = np.loadtxt(fn1).T # Transpose column into rows
CIE_A = np.loadtxt(fn2).T
CIE_D65 = np.loadtxt(fn3).T
def splineInterp(xNew, xRaw, yRaw):
"""
Compute the spline interpolation(cubic) of the data
"""
tck = interpolate.splrep(xRaw, yRaw)
return interpolate.splev(xNew, tck, der=0, ext=1)
def specToXYZ(spec, SI='D65'):
"""
Calculate the XYZ coordinate of the spectrum input.
It interpolates the tables to every wavelength in the input.
By default the input spectrum is first weighted by a standard
illuminant SPD to simulate illumination.
spec: input spectrum, 2*N ndarray, 1st row must be the wavelength
return: (X,Y,Z)
"""
wl = spec[0] # the input must have the 1st element as the wavelength
XYZ = CIE_XYZ_table
if SI == 'D65':
interpSI = splineInterp(wl, CIE_D65[0], CIE_D65[1])
elif SI == 'A':
interpSI = splineInterp(wl, CIE_A[0], CIE_A[1])
else:
interpSI = np.ones(len(wl))
interpX = splineInterp(wl, XYZ[0], XYZ[1])
interpY = splineInterp(wl, XYZ[0], XYZ[2])
interpZ = splineInterp(wl, XYZ[0], XYZ[3])
interpXYZ = np.array([interpX, interpY, interpZ])
X, Y, Z = np.sum(spec[1] * interpSI * interpXYZ, axis=1)
return X, Y, Z
def specToxyz(spec, SI='D65'):
"""
Transfer spectrum into normalised x,y,z coordinates
Return: (x, y, z)
"""
X, Y, Z = specToXYZ(spec, SI)
x = X / (X + Y + Z)
y = Y / (X + Y + Z)
z = 1 - x - y
return x, y, z
def specToRGB(spec, SI='D65', scale_factor=1):
"""
Convert the spectrum(reflectivity) into an RGB value
Return: (R,G,B)
"""
XYZArray = specToxyz(spec, SI)
RGBArray = np.dot(_XYZ_to_RGB, XYZArray).clip(0, 1)
RGBArray *= scale_factor
return tuple(RGBArray.clip(0, 1))
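# Sanity-check sketch (an added illustration, complementing the D65 test in __main__
# below): a spectrally flat reflector viewed under the chosen illuminant should map to
# a near-neutral colour, i.e. R, G and B roughly equal.
#
#     wl = np.linspace(400, 800, 100)
#     print(specToRGB(np.array([wl, np.ones_like(wl)])))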
if __name__ == '__main__':
# Testing of the module
import matplotlib.pyplot as pl
wlRange = np.linspace(400, 800, 100)
example = np.sin((wlRange - 400) * np.pi / 400)
spec = np.array([wlRange, example])
c = specToRGB(spec)
pl.plot(spec[0], spec[1] / spec[1].max(),
label='Example distribution', color=c)
print(c)
# Use the D65 as the light source
spec = CIE_D65
c = specToRGB(spec, SI='D65')
print('Test using D65 illumination. Should give R=G=B')
print(c)
pl.plot(spec[0], spec[1] / spec[1].max(),
label='D65 distribution', color=np.array(c))
pl.title('Coloured Spectrum')
pl.legend()
|
[
"matplotlib.pyplot.title",
"numpy.sum",
"os.path.join",
"os.path.dirname",
"matplotlib.pyplot.legend",
"numpy.sin",
"numpy.array",
"numpy.loadtxt",
"numpy.linspace",
"scipy.interpolate.splev",
"numpy.dot",
"scipy.interpolate.splrep"
] |
[((313, 432), 'numpy.array', 'np.array', (['[[0.5767309, 0.185554, 0.1881852], [0.2973769, 0.6273491, 0.0752741], [\n 0.0270343, 0.0706872, 0.9911085]]'], {}), '([[0.5767309, 0.185554, 0.1881852], [0.2973769, 0.6273491, \n 0.0752741], [0.0270343, 0.0706872, 0.9911085]])\n', (321, 432), True, 'import numpy as np\n'), ((458, 579), 'numpy.array', 'np.array', (['[[2.041369, -0.5649464, -0.3446944], [-0.969266, 1.8760108, 0.041556], [\n 0.0134474, -0.1183897, 1.0154096]]'], {}), '([[2.041369, -0.5649464, -0.3446944], [-0.969266, 1.8760108, \n 0.041556], [0.0134474, -0.1183897, 1.0154096]])\n', (466, 579), True, 'import numpy as np\n'), ((615, 640), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (630, 640), False, 'import os\n'), ((647, 689), 'os.path.join', 'os.path.join', (['_dirname', '"""CIE_1931_XYZ.txt"""'], {}), "(_dirname, 'CIE_1931_XYZ.txt')\n", (659, 689), False, 'import os\n'), ((696, 731), 'os.path.join', 'os.path.join', (['_dirname', '"""CIE_A.txt"""'], {}), "(_dirname, 'CIE_A.txt')\n", (708, 731), False, 'import os\n'), ((738, 775), 'os.path.join', 'os.path.join', (['_dirname', '"""CIE_D65.txt"""'], {}), "(_dirname, 'CIE_D65.txt')\n", (750, 775), False, 'import os\n'), ((792, 807), 'numpy.loadtxt', 'np.loadtxt', (['fn1'], {}), '(fn1)\n', (802, 807), True, 'import numpy as np\n'), ((848, 863), 'numpy.loadtxt', 'np.loadtxt', (['fn2'], {}), '(fn2)\n', (858, 863), True, 'import numpy as np\n'), ((876, 891), 'numpy.loadtxt', 'np.loadtxt', (['fn3'], {}), '(fn3)\n', (886, 891), True, 'import numpy as np\n'), ((1014, 1044), 'scipy.interpolate.splrep', 'interpolate.splrep', (['xRaw', 'yRaw'], {}), '(xRaw, yRaw)\n', (1032, 1044), False, 'from scipy import interpolate\n'), ((1056, 1098), 'scipy.interpolate.splev', 'interpolate.splev', (['xNew', 'tck'], {'der': '(0)', 'ext': '(1)'}), '(xNew, tck, der=0, ext=1)\n', (1073, 1098), False, 'from scipy import interpolate\n'), ((1926, 1963), 'numpy.array', 'np.array', (['[interpX, interpY, interpZ]'], {}), '([interpX, interpY, interpZ])\n', (1934, 1963), True, 'import numpy as np\n'), ((1978, 2024), 'numpy.sum', 'np.sum', (['(spec[1] * interpSI * interpXYZ)'], {'axis': '(1)'}), '(spec[1] * interpSI * interpXYZ, axis=1)\n', (1984, 2024), True, 'import numpy as np\n'), ((2699, 2725), 'numpy.linspace', 'np.linspace', (['(400)', '(800)', '(100)'], {}), '(400, 800, 100)\n', (2710, 2725), True, 'import numpy as np\n'), ((2740, 2777), 'numpy.sin', 'np.sin', (['((wlRange - 400) * np.pi / 400)'], {}), '((wlRange - 400) * np.pi / 400)\n', (2746, 2777), True, 'import numpy as np\n'), ((2789, 2817), 'numpy.array', 'np.array', (['[wlRange, example]'], {}), '([wlRange, example])\n', (2797, 2817), True, 'import numpy as np\n'), ((3223, 3252), 'matplotlib.pyplot.title', 'pl.title', (['"""Coloured Spectrum"""'], {}), "('Coloured Spectrum')\n", (3231, 3252), True, 'import matplotlib.pyplot as pl\n'), ((3257, 3268), 'matplotlib.pyplot.legend', 'pl.legend', ([], {}), '()\n', (3266, 3268), True, 'import matplotlib.pyplot as pl\n'), ((2485, 2514), 'numpy.dot', 'np.dot', (['_XYZ_to_RGB', 'XYZArray'], {}), '(_XYZ_to_RGB, XYZArray)\n', (2491, 2514), True, 'import numpy as np\n'), ((3206, 3217), 'numpy.array', 'np.array', (['c'], {}), '(c)\n', (3214, 3217), True, 'import numpy as np\n')]
|
import h5py
import tables
import numpy as np
import sys
args=int(sys.argv[1])
# Read hdf5 file
h5file = tables.open_file(f"./data/atraining-{args}.h5", "r")
WaveformTable = h5file.root.Waveform
GroundTruthTable = h5file.root.GroundTruth
sinevet,sinchan,sintime=[],[],[]
# Use the ground truth to pick out events that contain only a single photoelectron
i=1
while i <100000:
if GroundTruthTable[i]['ChannelID']!=GroundTruthTable[i-1]['ChannelID'] and GroundTruthTable[i]['ChannelID']!=GroundTruthTable[i+1]['ChannelID']:
sinevet.append(GroundTruthTable[i]['EventID'])
sintime.append(GroundTruthTable[i]['PETime'])
sinchan.append(GroundTruthTable[i]['ChannelID'])
i+=1
# Accumulate the waveforms of the single-photoelectron events
sumwave=np.zeros(1029,dtype=np.int32)
sinlen=len(sinevet)
for x in range(sinlen):
if x%100==0:
print(f"{x*100/sinlen}%")
posi=0
while True:
if WaveformTable[posi]["EventID"]==sinevet[x] and WaveformTable[posi]["ChannelID"]==sinchan[x]:
break
posi+=1
sumwave+=np.append(WaveformTable[posi]['Waveform'][sintime[x]:],WaveformTable[posi]['Waveform'][:sintime[x]])-972
# Compute the average waveform
averwave=sumwave/sinlen
averzero=np.average(averwave[100:])
spe=averwave-averzero
with h5py.File(f"medium/average{args+1}.h5", "w") as opt1:
opt1.create_dataset("averzero", data=np.array([averzero]))
with h5py.File(f'medium/singlewave{args+1}.h5',"w") as opt2:
opt2.create_dataset("spe",data=spe,compression="gzip", shuffle=True)
# Output files written above; close the input file
h5file.close()
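# Read-back sketch (illustration only; the file names follow the pattern used above):
#
#     with h5py.File(f"medium/singlewave{args+1}.h5", "r") as ipt:
#         spe_template = ipt["spe"][...]
#     with h5py.File(f"medium/average{args+1}.h5", "r") as ipt:
#         pedestal = ipt["averzero"][0]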
|
[
"h5py.File",
"numpy.average",
"numpy.zeros",
"numpy.append",
"numpy.array",
"tables.open_file"
] |
[((105, 157), 'tables.open_file', 'tables.open_file', (['f"""./data/atraining-{args}.h5"""', '"""r"""'], {}), "(f'./data/atraining-{args}.h5', 'r')\n", (121, 157), False, 'import tables\n'), ((665, 695), 'numpy.zeros', 'np.zeros', (['(1029)'], {'dtype': 'np.int32'}), '(1029, dtype=np.int32)\n', (673, 695), True, 'import numpy as np\n'), ((1114, 1140), 'numpy.average', 'np.average', (['averwave[100:]'], {}), '(averwave[100:])\n', (1124, 1140), True, 'import numpy as np\n'), ((1169, 1215), 'h5py.File', 'h5py.File', (['f"""medium/average{args + 1}.h5"""', '"""w"""'], {}), "(f'medium/average{args + 1}.h5', 'w')\n", (1178, 1215), False, 'import h5py\n'), ((1291, 1340), 'h5py.File', 'h5py.File', (['f"""medium/singlewave{args + 1}.h5"""', '"""w"""'], {}), "(f'medium/singlewave{args + 1}.h5', 'w')\n", (1300, 1340), False, 'import h5py\n'), ((968, 1074), 'numpy.append', 'np.append', (["WaveformTable[posi]['Waveform'][sintime[x]:]", "WaveformTable[posi]['Waveform'][:sintime[x]]"], {}), "(WaveformTable[posi]['Waveform'][sintime[x]:], WaveformTable[posi]\n ['Waveform'][:sintime[x]])\n", (977, 1074), True, 'import numpy as np\n'), ((1264, 1284), 'numpy.array', 'np.array', (['[averzero]'], {}), '([averzero])\n', (1272, 1284), True, 'import numpy as np\n')]
|
import tensorflow as tf
import dataIO
import numpy as np
from datetime import datetime
from model import model
from parameters import *
# preprocess input data
def prepareDataTraining(seg_data, somae_data_raw):
somae_data = seg_data.copy()
somae_data[somae_data_raw==0]=0
seg_data = seg_data[:,:network_size,:network_size]
somae_data = somae_data[:,:network_size,:network_size]
# allocate a tensor that stacks, for every slice, its depth*2+1 neighbouring slices as channels
seg_deep = np.zeros((seg_data.shape[0],seg_data.shape[1],seg_data.shape[2],depth*2+1), dtype=np.uint8)
# populate deep segmentation tensor
seg_deep[:,:,:,depth]=seg_data
for d in range(1,depth+1):
seg_deep[:-d,:,:,depth+d]=seg_data[d:,:,:]
seg_deep[d:,:,:,depth-d]=seg_data[:-d,:,:]
# split into training and validation sets
valid_seg = seg_deep[:val_data_size,:,:,:]
valid_mask = somae_data[:val_data_size,:,:]
train_seg = seg_deep[val_data_size:,:,:,:]
train_mask = somae_data[val_data_size:,:,:]
# shuffle both training and validation data
valid_ids = np.random.permutation(valid_seg.shape[0])
train_ids = np.random.permutation(train_seg.shape[0])
valid_seg[:,:,:] = valid_seg[valid_ids,:,:,:]
valid_mask[:,:,:] = valid_mask[valid_ids,:,:]
train_seg[:,:,:] = train_seg[train_ids,:,:,:]
train_mask[:,:,:] = train_mask[train_ids,:,:]
return train_seg, train_mask, valid_seg, valid_mask
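# Illustrative shapes (hypothetical numbers, since depth, network_size and val_data_size
# come from parameters.py): with depth=2, network_size=512 and val_data_size=50, a stack
# of 500 input slices yields
#   train_seg (450, 512, 512, 5), train_mask (450, 512, 512)
#   valid_seg  (50, 512, 512, 5), valid_mask  (50, 512, 512)
# Channel index `depth` holds the current slice; the other channels hold its neighbours.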
# preprocess input data
def prepareDataPrediction(seg_data):
seg_data = seg_data[:,:network_size,:network_size]
# allocate a tensor that stacks, for every slice, its depth*2+1 neighbouring slices as channels
seg_deep = np.zeros((seg_data.shape[0],seg_data.shape[1],seg_data.shape[2],depth*2+1), dtype=np.uint8)
# populate deep segmentation tensor
seg_deep[:,:,:,depth]=seg_data
for d in range(1,depth+1):
seg_deep[:-d,:,:,depth+d]=seg_data[d:,:,:]
seg_deep[d:,:,:,depth-d]=seg_data[:-d,:,:]
# for prediction, use the full stack (no train/validation split)
valid_seg = seg_deep[:,:,:,:]
return valid_seg
# define the weighted loss function
class WeightedBinaryCrossEntropy(tf.losses.Loss):
"""
Args:
pos_weight: Scalar to affect the positive labels of the loss function.
weight: Scalar to affect the entirety of the loss function.
from_logits: Whether to compute loss form logits or the probability.
reduction: Type of tf.losses.Reduction to apply to loss.
name: Name of the loss function.
"""
def __init__(self, pos_weight, weight, from_logits=False,
reduction=tf.losses.Reduction.AUTO,
name='weighted_binary_crossentropy'):
super(WeightedBinaryCrossEntropy, self).__init__(reduction=reduction,
name=name)
self.pos_weight = pos_weight
self.weight = weight
self.from_logits = from_logits
def call(self, y_true, y_pred):
if not self.from_logits:
# Manually calculate the weighted cross entropy.
# Formula is qz * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
# where z are labels, x is logits, and q is the weight.
# Since the values passed are from sigmoid (assuming in this case)
# sigmoid(x) will be replaced by y_pred
# qz * -log(sigmoid(x)) 1e-6 is added as an epsilon to stop passing a zero into the log
x_1 = y_true * self.pos_weight * -tf.math.log(y_pred + 1e-6)
# (1 - z) * -log(1 - sigmoid(x)). Epsilon is added to prevent passing a zero into the log
x_2 = (1 - y_true) * -tf.math.log(1 - y_pred + 1e-6)
return tf.add(x_1, x_2) * self.weight
# Use built in function
return tf.nn.weighted_cross_entropy_with_logits(y_true, y_pred, self.pos_weight) * self.weight
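# A usage sketch (hypothetical toy values; initializeModel() below instantiates this
# loss with pos_weight=12, weight=1):
#
#     loss_fn = WeightedBinaryCrossEntropy(pos_weight=12, weight=1)
#     toy_loss = loss_fn(tf.ones((2, 4, 4, 1)), tf.fill((2, 4, 4, 1), 0.9))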
# model weights
class model_weights:
def __init__(self, shapes):
self.values = []
self.checkpoint_path = './ckpt_'+ datetime.now().strftime("%Y%m%d-%H%M%S")+'/'
initializer = tf.initializers.RandomNormal()
def get_weight( shape , name ):
return tf.Variable( initializer( shape ) , name=name , trainable=True , dtype=tf.float32 )
for i in range( len( shapes ) ):
self.values.append( get_weight( shapes[ i ] , 'weight{}'.format( i ) ) )
self.ckpt = tf.train.Checkpoint(**{f'values{i}': v for i, v in enumerate(self.values)})
def saveWeights(self):
self.ckpt.save(self.checkpoint_path)
def restoreWeights(self, ckpt_restore):
print("restoring weights from: " + str(ckpt_restore))
status = self.ckpt.restore(ckpt_restore)
status.assert_consumed() # Optional check
def initializeModel(restore, ckpt_restore):
# filters for the UNET layers:
# filters = [depth*2+1,64,128,256,512,1024,1] #original UNET
filters = [depth*2+1, 16,32, 64, 128,256,1] # modified, lighter UNET
# shapes of the weight tensors
shapes = [
[ 3, 3, filters[0], filters[1]], #L11 -> L12
[ 3, 3, filters[1], filters[1]], #L12 -> L13
[ 3, 3, filters[1], filters[2]], #L21 -> L22
[ 3, 3, filters[2], filters[2]], #L22 -> L23
[ 3, 3, filters[2], filters[3]], #L31 -> L32
[ 3, 3, filters[3], filters[3]], #L32 -> L33
[ 3, 3, filters[3], filters[4]], #L41 -> L42
[ 3, 3, filters[4], filters[4]], #L42 -> L43
[ 3, 3, filters[4], filters[5]], #L51 -> L52
[ 3, 3, filters[5], filters[5]], #L52 -> L53
[ 2, 2, filters[4], filters[5]], #L53 -> L44
[ 3, 3, 2*filters[4], filters[4]], #L44 -> L45
[ 3, 3, filters[4], filters[4]], #L45 -> L46
[ 2, 2, filters[3], filters[4]], #L46 -> L34
[ 3, 3, 2*filters[3], filters[3]], #L34 -> L35
[ 3, 3, filters[3], filters[3]], #L35 -> L36
[ 2, 2, filters[2], filters[3]], #L36 -> L24
[ 3, 3, 2*filters[2], filters[2]], #L24 -> L25
[ 3, 3, filters[2], filters[2]], #L25 -> L26
[ 2, 2, filters[1], filters[2]], #L25 -> L14
[ 3, 3, 2*filters[1], filters[1]], #L14 -> L15
[ 3, 3, filters[1], filters[1]], #L15 -> L16
[ 1, 1, filters[1], filters[6]], #L16 -> L17
]
weights = model_weights(shapes)
if restore:
weights.restoreWeights(ckpt_restore)
# initialize loss
w_loss = WeightedBinaryCrossEntropy(12, 1)
# initialize optimizer
optimizer = tf.optimizers.Adam(learning_rate)
# initialize accuracy objects
train_acc = tf.metrics.BinaryAccuracy()
valid_acc = tf.metrics.BinaryAccuracy()
train_loss = tf.metrics.Mean()
valid_loss = tf.metrics.Mean()
TP = tf.keras.metrics.TruePositives()
FP = tf.keras.metrics.FalsePositives()
TN = tf.keras.metrics.TrueNegatives()
FN = tf.keras.metrics.FalseNegatives()
return weights, w_loss, optimizer, train_acc, valid_acc, train_loss, valid_loss, TP, FP, TN, FN
# define train step
def train_step(model, weights, inputs, gt, optimizer, w_loss, train_loss, train_acc):
with tf.GradientTape() as tape:
pred = model(inputs, weights)
current_loss = w_loss( gt, pred)
grads = tape.gradient(current_loss, weights.values )
optimizer.apply_gradients(zip(grads , weights.values ) )
train_loss.update_state(current_loss)
train_acc.update_state(gt, pred)
return optimizer
#define prediction step
def predict_step(model, weights, inputs, gt, w_loss, valid_loss, valid_acc, TP, FP, TN, FN):  # TODO: remove passing of model here
pred = model(inputs, weights)
current_loss = w_loss( gt, pred)
valid_loss.update_state(current_loss)
valid_acc.update_state(gt, pred)
TP.update_state(gt,pred)
FP.update_state(gt,pred)
TN.update_state(gt,pred)
FN.update_state(gt,pred)
return pred
def trainOnEpochs(train_seg, train_mask, valid_seg, valid_mask, weights, w_loss, optimizer, train_acc, valid_acc, train_loss, valid_loss, TP, FP, TN, FN):
current_time = datetime.now().strftime("%Y%m%d-%H%M%S")
train_log_dir = 'logs/gradient_tape/' + current_time + '/train'
valid_log_dir = 'logs/gradient_tape/' + current_time + '/valid'
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
valid_summary_writer = tf.summary.create_file_writer(valid_log_dir)
valid_loss_best = 1000000000
for epoch in range(epochs):
print("TP: ")
print(TP.result().numpy())
print("FN: ")
print(FN.result().numpy())
print("FP: ")
print(FP.result().numpy())
print("TN: ")
print(TN.result().numpy())
TPR = TP.result().numpy()/(TP.result().numpy()+FN.result().numpy())
FPR = FP.result().numpy()/(FP.result().numpy()+TN.result().numpy())
print("TPR: ")
print(TPR)
print("FPR: ")
print(FPR)
with train_summary_writer.as_default():
tf.summary.scalar('loss', train_loss.result(), step=epoch)
tf.summary.scalar('accuracy', train_acc.result(), step=epoch)
with valid_summary_writer.as_default():
tf.summary.scalar('loss', valid_loss.result(), step=epoch)
tf.summary.scalar('accuracy', valid_acc.result(), step=epoch)
tf.summary.scalar('TPR', TPR, step=epoch)
tf.summary.scalar('FPR', FPR, step=epoch)
train_acc.reset_states()
valid_acc.reset_states()
train_loss.reset_states()
valid_loss.reset_states()
print("---------------------")
print("Epoch: " + str(epoch))
for k in np.arange(0,train_seg.shape[0],batch_size):
image = train_seg[k:k+batch_size,:,:,:].copy()
mask = train_mask[k:k+batch_size,:,:,None].copy()
# choose random ID
ids_present = np.unique(mask)
if ids_present[0]==0: ids_present=ids_present[1:]
id_rand = np.random.choice(ids_present)
# binarize
image[image!=id_rand]=0
image[image==id_rand]=1
mask[mask!=id_rand]=0
mask[mask==id_rand]=1
image = tf.convert_to_tensor(image, dtype=tf.float32 )
mask_gt = tf.convert_to_tensor(mask, dtype=tf.float32 )
optimizer = train_step(model, weights, image, mask_gt, optimizer, w_loss, train_loss, train_acc)
for j in np.arange(0,valid_seg.shape[0],batch_size):
image = valid_seg[j:j+batch_size,:,:,:].copy()
mask = valid_mask[j:j+batch_size,:,:,None].copy()
# choose random ID
ids_present = np.unique(mask)
if ids_present[0]==0: ids_present=ids_present[1:]
id_rand = np.random.choice(ids_present)
# binarize
image[image!=id_rand]=0
image[image==id_rand]=1
mask[mask!=id_rand]=0
mask[mask==id_rand]=1
image = tf.convert_to_tensor( image , dtype=tf.float32 )
mask_gt = tf.convert_to_tensor( mask , dtype=tf.float32 )
mask_pred = predict_step(model, weights, image, mask_gt, w_loss, valid_loss, valid_acc, TP, FP, TN, FN).numpy()
if epoch%10==0:
with valid_summary_writer.as_default():
tf.summary.image("valid-epoch"+str(epoch)+"j-"+str(j), tf.concat([tf.expand_dims(image[:,:,:,depth],3), mask_gt, mask_pred],axis=1), step=epoch, max_outputs=5)
print("Train loss: " + str(train_loss.result().numpy()))
print("Train accu: " + str(train_acc.result().numpy()))
print("Valid loss: " + str(valid_loss.result().numpy()))
print("Valid accu: " + str(valid_acc.result().numpy()))
weights.saveWeights()
print("Weights saved ------------------")
def Train(restore, ckpt_restore):
# Mouse
seg_filepath = train_seg_in_filepath
somae_filepath = train_somae_in_filepath
seg_data = dataIO.ReadH5File(seg_filepath, [1])
somae_data = dataIO.ReadH5File(somae_filepath, [1])
train_seg, train_mask, valid_seg, valid_mask = prepareDataTraining(seg_data, somae_data)
weights, w_loss, optimizer, train_acc, valid_acc, train_loss, valid_loss, TP, FP, TN, FN = initializeModel(restore=restore, ckpt_restore=ckpt_restore)
trainOnEpochs(train_seg, train_mask, valid_seg, valid_mask, weights, w_loss, optimizer, train_acc, valid_acc, train_loss, valid_loss, TP, FP, TN, FN)
def Predict(ckpt_restore):
# Zebrafinch
seg_filepath = predict_seg_in_filepath
seg_data = dataIO.ReadH5File(seg_filepath, [1])
seg_data = seg_data[:,:network_size,:network_size]
somae_mask_out = np.zeros((seg_data.shape[0],seg_data.shape[1],seg_data.shape[2]), dtype=np.float64)
weights, w_loss, optimizer, train_acc, valid_acc, train_loss, valid_loss, TP, FP, TN, FN = initializeModel(restore=True, ckpt_restore=ckpt_restore)
seg_data_prep = prepareDataPrediction(seg_data)
unique_ids = np.unique(seg_data)
for ID in unique_ids:
print("Processind ID " + str(ID))
seg_data_filtered = seg_data_prep.copy()
seg_data_filtered[seg_data_filtered!=ID]=0
# mask the data to be binary
seg_data_filtered[seg_data_filtered>0]=1
for j in np.arange(0,seg_data_filtered.shape[0],batch_size):
image = seg_data_filtered[j:j+batch_size,:,:,:]
image = tf.convert_to_tensor( image , dtype=tf.float32 )
if np.max(image[:,:,:,depth])!=0:
mask_pred = tf.squeeze(model(image, weights)).numpy()
mask_pred[mask_pred<=0.5]=0
mask_pred[mask_pred>0.5]=1
mask_pred = image[:,:,:,depth]*mask_pred
somae_mask_out[j:j+batch_size,:,:] = somae_mask_out[j:j+batch_size,:,:]+mask_pred[:,:,:]
del seg_data_filtered
somae_mask_out = somae_mask_out.astype(np.uint64)
dataIO.WriteH5File(somae_mask_out, somae_prediction_out_filepath, "main")
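# Entry-point sketch (an assumption; the original file defines no __main__ block and the
# checkpoint path below is a placeholder):
#
#     if __name__ == "__main__":
#         Train(restore=False, ckpt_restore=None)
#         # Predict(ckpt_restore="path/to/checkpoint")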
|
[
"tensorflow.keras.metrics.FalseNegatives",
"numpy.arange",
"numpy.unique",
"tensorflow.math.log",
"tensorflow.keras.metrics.TrueNegatives",
"tensorflow.keras.metrics.FalsePositives",
"model.model",
"numpy.max",
"numpy.random.choice",
"datetime.datetime.now",
"tensorflow.initializers.RandomNormal",
"tensorflow.summary.scalar",
"tensorflow.nn.weighted_cross_entropy_with_logits",
"tensorflow.metrics.Mean",
"tensorflow.add",
"dataIO.ReadH5File",
"tensorflow.optimizers.Adam",
"numpy.random.permutation",
"tensorflow.keras.metrics.TruePositives",
"tensorflow.expand_dims",
"dataIO.WriteH5File",
"tensorflow.convert_to_tensor",
"numpy.zeros",
"tensorflow.metrics.BinaryAccuracy",
"tensorflow.summary.create_file_writer",
"tensorflow.GradientTape"
] |
[((486, 589), 'numpy.zeros', 'np.zeros', (['(seg_data.shape[0], seg_data.shape[1], seg_data.shape[2], depth * 2 + 1)'], {'dtype': 'np.uint8'}), '((seg_data.shape[0], seg_data.shape[1], seg_data.shape[2], depth * \n 2 + 1), dtype=np.uint8)\n', (494, 589), True, 'import numpy as np\n'), ((1085, 1126), 'numpy.random.permutation', 'np.random.permutation', (['valid_seg.shape[0]'], {}), '(valid_seg.shape[0])\n', (1106, 1126), True, 'import numpy as np\n'), ((1143, 1184), 'numpy.random.permutation', 'np.random.permutation', (['train_seg.shape[0]'], {}), '(train_seg.shape[0])\n', (1164, 1184), True, 'import numpy as np\n'), ((1650, 1753), 'numpy.zeros', 'np.zeros', (['(seg_data.shape[0], seg_data.shape[1], seg_data.shape[2], depth * 2 + 1)'], {'dtype': 'np.uint8'}), '((seg_data.shape[0], seg_data.shape[1], seg_data.shape[2], depth * \n 2 + 1), dtype=np.uint8)\n', (1658, 1753), True, 'import numpy as np\n'), ((6791, 6824), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', (['learning_rate'], {}), '(learning_rate)\n', (6809, 6824), True, 'import tensorflow as tf\n'), ((6876, 6903), 'tensorflow.metrics.BinaryAccuracy', 'tf.metrics.BinaryAccuracy', ([], {}), '()\n', (6901, 6903), True, 'import tensorflow as tf\n'), ((6920, 6947), 'tensorflow.metrics.BinaryAccuracy', 'tf.metrics.BinaryAccuracy', ([], {}), '()\n', (6945, 6947), True, 'import tensorflow as tf\n'), ((6965, 6982), 'tensorflow.metrics.Mean', 'tf.metrics.Mean', ([], {}), '()\n', (6980, 6982), True, 'import tensorflow as tf\n'), ((7000, 7017), 'tensorflow.metrics.Mean', 'tf.metrics.Mean', ([], {}), '()\n', (7015, 7017), True, 'import tensorflow as tf\n'), ((7028, 7060), 'tensorflow.keras.metrics.TruePositives', 'tf.keras.metrics.TruePositives', ([], {}), '()\n', (7058, 7060), True, 'import tensorflow as tf\n'), ((7070, 7103), 'tensorflow.keras.metrics.FalsePositives', 'tf.keras.metrics.FalsePositives', ([], {}), '()\n', (7101, 7103), True, 'import tensorflow as tf\n'), ((7113, 7145), 'tensorflow.keras.metrics.TrueNegatives', 'tf.keras.metrics.TrueNegatives', ([], {}), '()\n', (7143, 7145), True, 'import tensorflow as tf\n'), ((7155, 7188), 'tensorflow.keras.metrics.FalseNegatives', 'tf.keras.metrics.FalseNegatives', ([], {}), '()\n', (7186, 7188), True, 'import tensorflow as tf\n'), ((7897, 7919), 'model.model', 'model', (['inputs', 'weights'], {}), '(inputs, weights)\n', (7902, 7919), False, 'from model import model\n'), ((8550, 8594), 'tensorflow.summary.create_file_writer', 'tf.summary.create_file_writer', (['train_log_dir'], {}), '(train_log_dir)\n', (8579, 8594), True, 'import tensorflow as tf\n'), ((8622, 8666), 'tensorflow.summary.create_file_writer', 'tf.summary.create_file_writer', (['valid_log_dir'], {}), '(valid_log_dir)\n', (8651, 8666), True, 'import tensorflow as tf\n'), ((12246, 12282), 'dataIO.ReadH5File', 'dataIO.ReadH5File', (['seg_filepath', '[1]'], {}), '(seg_filepath, [1])\n', (12263, 12282), False, 'import dataIO\n'), ((12300, 12338), 'dataIO.ReadH5File', 'dataIO.ReadH5File', (['somae_filepath', '[1]'], {}), '(somae_filepath, [1])\n', (12317, 12338), False, 'import dataIO\n'), ((12848, 12884), 'dataIO.ReadH5File', 'dataIO.ReadH5File', (['seg_filepath', '[1]'], {}), '(seg_filepath, [1])\n', (12865, 12884), False, 'import dataIO\n'), ((12963, 13053), 'numpy.zeros', 'np.zeros', (['(seg_data.shape[0], seg_data.shape[1], seg_data.shape[2])'], {'dtype': 'np.float64'}), '((seg_data.shape[0], seg_data.shape[1], seg_data.shape[2]), dtype=\n np.float64)\n', (12971, 13053), True, 'import numpy as np\n'), ((13272, 13291), 
'numpy.unique', 'np.unique', (['seg_data'], {}), '(seg_data)\n', (13281, 13291), True, 'import numpy as np\n'), ((14211, 14284), 'dataIO.WriteH5File', 'dataIO.WriteH5File', (['somae_mask_out', 'somae_prediction_out_filepath', '"""main"""'], {}), "(somae_mask_out, somae_prediction_out_filepath, 'main')\n", (14229, 14284), False, 'import dataIO\n'), ((4048, 4078), 'tensorflow.initializers.RandomNormal', 'tf.initializers.RandomNormal', ([], {}), '()\n', (4076, 4078), True, 'import tensorflow as tf\n'), ((7407, 7424), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (7422, 7424), True, 'import tensorflow as tf\n'), ((7449, 7471), 'model.model', 'model', (['inputs', 'weights'], {}), '(inputs, weights)\n', (7454, 7471), False, 'from model import model\n'), ((9928, 9972), 'numpy.arange', 'np.arange', (['(0)', 'train_seg.shape[0]', 'batch_size'], {}), '(0, train_seg.shape[0], batch_size)\n', (9937, 9972), True, 'import numpy as np\n'), ((10709, 10753), 'numpy.arange', 'np.arange', (['(0)', 'valid_seg.shape[0]', 'batch_size'], {}), '(0, valid_seg.shape[0], batch_size)\n', (10718, 10753), True, 'import numpy as np\n'), ((13568, 13620), 'numpy.arange', 'np.arange', (['(0)', 'seg_data_filtered.shape[0]', 'batch_size'], {}), '(0, seg_data_filtered.shape[0], batch_size)\n', (13577, 13620), True, 'import numpy as np\n'), ((3755, 3828), 'tensorflow.nn.weighted_cross_entropy_with_logits', 'tf.nn.weighted_cross_entropy_with_logits', (['y_true', 'y_pred', 'self.pos_weight'], {}), '(y_true, y_pred, self.pos_weight)\n', (3795, 3828), True, 'import tensorflow as tf\n'), ((8346, 8360), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (8358, 8360), False, 'from datetime import datetime\n'), ((9603, 9644), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""TPR"""', 'TPR'], {'step': 'epoch'}), "('TPR', TPR, step=epoch)\n", (9620, 9644), True, 'import tensorflow as tf\n'), ((9657, 9698), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""FPR"""', 'FPR'], {'step': 'epoch'}), "('FPR', FPR, step=epoch)\n", (9674, 9698), True, 'import tensorflow as tf\n'), ((10152, 10167), 'numpy.unique', 'np.unique', (['mask'], {}), '(mask)\n', (10161, 10167), True, 'import numpy as np\n'), ((10252, 10281), 'numpy.random.choice', 'np.random.choice', (['ids_present'], {}), '(ids_present)\n', (10268, 10281), True, 'import numpy as np\n'), ((10467, 10512), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['image'], {'dtype': 'tf.float32'}), '(image, dtype=tf.float32)\n', (10487, 10512), True, 'import tensorflow as tf\n'), ((10536, 10580), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['mask'], {'dtype': 'tf.float32'}), '(mask, dtype=tf.float32)\n', (10556, 10580), True, 'import tensorflow as tf\n'), ((10933, 10948), 'numpy.unique', 'np.unique', (['mask'], {}), '(mask)\n', (10942, 10948), True, 'import numpy as np\n'), ((11033, 11062), 'numpy.random.choice', 'np.random.choice', (['ids_present'], {}), '(ids_present)\n', (11049, 11062), True, 'import numpy as np\n'), ((11248, 11293), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['image'], {'dtype': 'tf.float32'}), '(image, dtype=tf.float32)\n', (11268, 11293), True, 'import tensorflow as tf\n'), ((11319, 11363), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['mask'], {'dtype': 'tf.float32'}), '(mask, dtype=tf.float32)\n', (11339, 11363), True, 'import tensorflow as tf\n'), ((13702, 13747), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['image'], {'dtype': 'tf.float32'}), '(image, dtype=tf.float32)\n', 
(13722, 13747), True, 'import tensorflow as tf\n'), ((3676, 3692), 'tensorflow.add', 'tf.add', (['x_1', 'x_2'], {}), '(x_1, x_2)\n', (3682, 3692), True, 'import tensorflow as tf\n'), ((13767, 13796), 'numpy.max', 'np.max', (['image[:, :, :, depth]'], {}), '(image[:, :, :, depth])\n', (13773, 13796), True, 'import numpy as np\n'), ((3461, 3488), 'tensorflow.math.log', 'tf.math.log', (['(y_pred + 1e-06)'], {}), '(y_pred + 1e-06)\n', (3472, 3488), True, 'import tensorflow as tf\n'), ((3625, 3656), 'tensorflow.math.log', 'tf.math.log', (['(1 - y_pred + 1e-06)'], {}), '(1 - y_pred + 1e-06)\n', (3636, 3656), True, 'import tensorflow as tf\n'), ((3981, 3995), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3993, 3995), False, 'from datetime import datetime\n'), ((13838, 13859), 'model.model', 'model', (['image', 'weights'], {}), '(image, weights)\n', (13843, 13859), False, 'from model import model\n'), ((11662, 11702), 'tensorflow.expand_dims', 'tf.expand_dims', (['image[:, :, :, depth]', '(3)'], {}), '(image[:, :, :, depth], 3)\n', (11676, 11702), True, 'import tensorflow as tf\n')]
|
import numpy as np
# Select dataset
dataset = ['A', 'B', 'C']
dataset_id = 0
print(dataset[dataset_id])
# Select model
models = ['fNIRS-T', 'fNIRS-PreT']
models_id = 0
print(models[models_id])
test_acc = []
for tr in range(1, 26):
path = 'save/' + dataset[dataset_id] + '/KFold/' + models[models_id] + '/' + str(tr)
test_max_acc = open(path + '/test_max_acc.txt', "r")
string = test_max_acc.read()
acc = string.split('best_acc=')[1]
acc = float(acc)
test_acc.append(acc)
test_acc = np.array(test_acc)
print('mean = %.2f' % np.mean(test_acc))
print('std = %.2f' % np.std(test_acc))
|
[
"numpy.std",
"numpy.mean",
"numpy.array"
] |
[((511, 529), 'numpy.array', 'np.array', (['test_acc'], {}), '(test_acc)\n', (519, 529), True, 'import numpy as np\n'), ((552, 569), 'numpy.mean', 'np.mean', (['test_acc'], {}), '(test_acc)\n', (559, 569), True, 'import numpy as np\n'), ((592, 608), 'numpy.std', 'np.std', (['test_acc'], {}), '(test_acc)\n', (598, 608), True, 'import numpy as np\n')]
|
from os.path import join
import cv2
import numpy as np
from numpy.random import uniform
from sys import exit
import tensorflow as tf
model_path = join('models', 'symbol_classifier', 'model.h5')
model = tf.keras.models.load_model(model_path)
path = join('data', 'raw', 'n', '1.jpeg')
image_name = "data"
drawing = False
pt1_x , pt1_y = None , None
img = None
color = None
thickness = None
def draw(event, x, y, r1, r2):
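    # Mouse callback: left-button drag draws strokes, right-button release classifies the canvas, middle-button release starts a new canvas.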
global pt1_x, pt1_y, drawing, img, color
if event==cv2.EVENT_LBUTTONDOWN:
drawing=True
pt1_x,pt1_y=x,y
elif event==cv2.EVENT_LBUTTONUP:
drawing=False
cv2.line(img,(pt1_x,pt1_y),(x,y),color=color,thickness=thickness)
elif event==cv2.EVENT_MOUSEMOVE:
if drawing==True:
cv2.line(img,(pt1_x,pt1_y),(x,y),color=color,thickness=thickness)
pt1_x,pt1_y=x,y
elif event==cv2.EVENT_RBUTTONUP:
image = tf.convert_to_tensor(np.asarray(img, np.uint8), np.uint8)
tensor = tf.io.encode_jpeg(image)
print(predict(tensor))
new_image()
elif event==cv2.EVENT_MBUTTONUP:
new_image()
def new_image():
global img, color, thickness
w_on_b = round(uniform())
thickness = 5 + round(uniform(0, 255))
img = np.ones((512,512,3), np.uint8)
img *= round(uniform(0, 255))
color = (255,255,255) if w_on_b else (0,0,0)
def predict(image):
label = ['n', 'o', 'x']
blob = tf.io.decode_jpeg(image, channels=1)
blob = tf.image.convert_image_dtype(blob, tf.float32)
blob = tf.image.resize(blob, (32, 32))
blob = tf.reshape(blob, (1, 32, 32, 1))
pred = list(model.predict(blob, steps = 1)[0])
index = pred.index(max(pred))
return label[index]
new_image()
cv2.namedWindow(image_name)
cv2.setMouseCallback(image_name, draw)
while(1):
cv2.imshow(image_name, img)
if cv2.waitKey(1)&0xFF==27:
break
cv2.destroyAllWindows()
|
[
"cv2.line",
"numpy.random.uniform",
"tensorflow.keras.models.load_model",
"cv2.waitKey",
"tensorflow.io.encode_jpeg",
"numpy.asarray",
"tensorflow.reshape",
"cv2.imshow",
"numpy.ones",
"tensorflow.io.decode_jpeg",
"cv2.setMouseCallback",
"tensorflow.image.resize",
"cv2.destroyAllWindows",
"os.path.join",
"cv2.namedWindow",
"tensorflow.image.convert_image_dtype"
] |
[((148, 195), 'os.path.join', 'join', (['"""models"""', '"""symbol_classifier"""', '"""model.h5"""'], {}), "('models', 'symbol_classifier', 'model.h5')\n", (152, 195), False, 'from os.path import join\n'), ((204, 242), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['model_path'], {}), '(model_path)\n', (230, 242), True, 'import tensorflow as tf\n'), ((251, 285), 'os.path.join', 'join', (['"""data"""', '"""raw"""', '"""n"""', '"""1.jpeg"""'], {}), "('data', 'raw', 'n', '1.jpeg')\n", (255, 285), False, 'from os.path import join\n'), ((1728, 1755), 'cv2.namedWindow', 'cv2.namedWindow', (['image_name'], {}), '(image_name)\n', (1743, 1755), False, 'import cv2\n'), ((1756, 1794), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['image_name', 'draw'], {}), '(image_name, draw)\n', (1776, 1794), False, 'import cv2\n'), ((1884, 1907), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1905, 1907), False, 'import cv2\n'), ((1249, 1281), 'numpy.ones', 'np.ones', (['(512, 512, 3)', 'np.uint8'], {}), '((512, 512, 3), np.uint8)\n', (1256, 1281), True, 'import numpy as np\n'), ((1423, 1459), 'tensorflow.io.decode_jpeg', 'tf.io.decode_jpeg', (['image'], {'channels': '(1)'}), '(image, channels=1)\n', (1440, 1459), True, 'import tensorflow as tf\n'), ((1471, 1517), 'tensorflow.image.convert_image_dtype', 'tf.image.convert_image_dtype', (['blob', 'tf.float32'], {}), '(blob, tf.float32)\n', (1499, 1517), True, 'import tensorflow as tf\n'), ((1529, 1560), 'tensorflow.image.resize', 'tf.image.resize', (['blob', '(32, 32)'], {}), '(blob, (32, 32))\n', (1544, 1560), True, 'import tensorflow as tf\n'), ((1572, 1604), 'tensorflow.reshape', 'tf.reshape', (['blob', '(1, 32, 32, 1)'], {}), '(blob, (1, 32, 32, 1))\n', (1582, 1604), True, 'import tensorflow as tf\n'), ((1810, 1837), 'cv2.imshow', 'cv2.imshow', (['image_name', 'img'], {}), '(image_name, img)\n', (1820, 1837), False, 'import cv2\n'), ((1185, 1194), 'numpy.random.uniform', 'uniform', ([], {}), '()\n', (1192, 1194), False, 'from numpy.random import uniform\n'), ((1297, 1312), 'numpy.random.uniform', 'uniform', (['(0)', '(255)'], {}), '(0, 255)\n', (1304, 1312), False, 'from numpy.random import uniform\n'), ((618, 689), 'cv2.line', 'cv2.line', (['img', '(pt1_x, pt1_y)', '(x, y)'], {'color': 'color', 'thickness': 'thickness'}), '(img, (pt1_x, pt1_y), (x, y), color=color, thickness=thickness)\n', (626, 689), False, 'import cv2\n'), ((1222, 1237), 'numpy.random.uniform', 'uniform', (['(0)', '(255)'], {}), '(0, 255)\n', (1229, 1237), False, 'from numpy.random import uniform\n'), ((1845, 1859), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1856, 1859), False, 'import cv2\n'), ((759, 830), 'cv2.line', 'cv2.line', (['img', '(pt1_x, pt1_y)', '(x, y)'], {'color': 'color', 'thickness': 'thickness'}), '(img, (pt1_x, pt1_y), (x, y), color=color, thickness=thickness)\n', (767, 830), False, 'import cv2\n'), ((982, 1006), 'tensorflow.io.encode_jpeg', 'tf.io.encode_jpeg', (['image'], {}), '(image)\n', (999, 1006), True, 'import tensorflow as tf\n'), ((928, 953), 'numpy.asarray', 'np.asarray', (['img', 'np.uint8'], {}), '(img, np.uint8)\n', (938, 953), True, 'import numpy as np\n')]
|
import numpy as np
import g2o
class MotionModel(object):
def __init__(self,
timestamp=None,
initial_position=np.zeros(3),
initial_orientation=g2o.Quaternion(),
initial_covariance=None):
self.timestamp = timestamp
self.position = initial_position
self.orientation = initial_orientation
self.covariance = initial_covariance # pose covariance
self.v_linear = np.zeros(3) # linear velocity
self.v_angular_angle = 0
self.v_angular_axis = np.array([1, 0, 0])
self.initialized = False
# damping factor
self.damp = 0.95
def current_pose(self):
'''
Get the current camera pose.
'''
return (g2o.Isometry3d(self.orientation, self.position),
self.covariance)
def predict_pose(self, timestamp):
'''
Predict the next camera pose.
'''
if not self.initialized:
return (g2o.Isometry3d(self.orientation, self.position),
self.covariance)
dt = timestamp - self.timestamp
delta_angle = g2o.AngleAxis(
self.v_angular_angle * dt * self.damp,
self.v_angular_axis)
delta_orientation = g2o.Quaternion(delta_angle)
position = self.position + self.v_linear * dt * self.damp
orientation = self.orientation * delta_orientation
return (g2o.Isometry3d(orientation, position), self.covariance)
def update_pose(self, timestamp,
new_position, new_orientation, new_covariance=None):
'''
Update the motion model when given a new camera pose.
'''
if self.initialized:
dt = timestamp - self.timestamp
assert dt != 0
v_linear = (new_position - self.position) / dt
self.v_linear = v_linear
delta_q = self.orientation.inverse() * new_orientation
delta_q.normalize()
delta_angle = g2o.AngleAxis(delta_q)
angle = delta_angle.angle()
axis = delta_angle.axis()
if angle > np.pi:
axis = axis * -1
angle = 2 * np.pi - angle
self.v_angular_axis = axis
self.v_angular_angle = angle / dt
self.timestamp = timestamp
self.position = new_position
self.orientation = new_orientation
self.covariance = new_covariance
self.initialized = True
def apply_correction(self, correction): # corr: g2o.Isometry3d or matrix44
'''
Reset the model given a new camera pose.
        Note: This method will be called when an abrupt change in the pose occurs (LoopClosing)
'''
if not isinstance(correction, g2o.Isometry3d):
correction = g2o.Isometry3d(correction)
current = g2o.Isometry3d(self.orientation, self.position)
current = current * correction
self.position = current.position()
self.orientation = current.orientation()
self.v_linear = (
correction.inverse().orientation() * self.v_linear)
self.v_angular_axis = (
correction.inverse().orientation() * self.v_angular_axis)
|
[
"g2o.Quaternion",
"g2o.AngleAxis",
"g2o.Isometry3d",
"numpy.zeros",
"numpy.array"
] |
[((143, 154), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (151, 154), True, 'import numpy as np\n'), ((189, 205), 'g2o.Quaternion', 'g2o.Quaternion', ([], {}), '()\n', (203, 205), False, 'import g2o\n'), ((461, 472), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (469, 472), True, 'import numpy as np\n'), ((557, 576), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (565, 576), True, 'import numpy as np\n'), ((1156, 1229), 'g2o.AngleAxis', 'g2o.AngleAxis', (['(self.v_angular_angle * dt * self.damp)', 'self.v_angular_axis'], {}), '(self.v_angular_angle * dt * self.damp, self.v_angular_axis)\n', (1169, 1229), False, 'import g2o\n'), ((1284, 1311), 'g2o.Quaternion', 'g2o.Quaternion', (['delta_angle'], {}), '(delta_angle)\n', (1298, 1311), False, 'import g2o\n'), ((2902, 2949), 'g2o.Isometry3d', 'g2o.Isometry3d', (['self.orientation', 'self.position'], {}), '(self.orientation, self.position)\n', (2916, 2949), False, 'import g2o\n'), ((767, 814), 'g2o.Isometry3d', 'g2o.Isometry3d', (['self.orientation', 'self.position'], {}), '(self.orientation, self.position)\n', (781, 814), False, 'import g2o\n'), ((1455, 1492), 'g2o.Isometry3d', 'g2o.Isometry3d', (['orientation', 'position'], {}), '(orientation, position)\n', (1469, 1492), False, 'import g2o\n'), ((2025, 2047), 'g2o.AngleAxis', 'g2o.AngleAxis', (['delta_q'], {}), '(delta_q)\n', (2038, 2047), False, 'import g2o\n'), ((2856, 2882), 'g2o.Isometry3d', 'g2o.Isometry3d', (['correction'], {}), '(correction)\n', (2870, 2882), False, 'import g2o\n'), ((1001, 1048), 'g2o.Isometry3d', 'g2o.Isometry3d', (['self.orientation', 'self.position'], {}), '(self.orientation, self.position)\n', (1015, 1048), False, 'import g2o\n')]
|
import numpy as np
import string
import pandas as pd
from keras.preprocessing.sequence import pad_sequences
char_limit = 1014
def get_data(path):
labels = []
inputs = []
df = pd.read_csv(path, names=['one','second','third'])
df = df.drop('second', axis=1)
data = df.values
for label,text in data:
inputs.append(text.lower())
if label == 1:
labels.append([1, 0, 0, 0])
elif label == 2:
labels.append([0, 1, 0, 0])
elif label == 3:
labels.append([0, 0, 1, 0])
else:
labels.append([0, 0, 0, 1])
return inputs, np.array(labels, dtype=np.float32)
def create_vocab_set(inputs):
vocab = set()
for i in inputs:
for j in i.split(' '):
vocab.add(j)
vocab_size = len(vocab)
word2idx = {}
for i, c in enumerate(vocab):
word2idx[c] = i
return vocab, vocab_size, word2idx
def _encode_text(s, word2idx):
vec = []
for i in s.split(' '):
vec.append(word2idx[i])
return np.array(vec)
def get_encoded_text(text, word2idx, sent_limit):
encoded_text = []
for single_text in text:
encoded_text.append(_encode_text(single_text, word2idx))
encoded_text = pad_sequences(encoded_text, maxlen=sent_limit, value=0.)
return np.array(encoded_text)
def batch_gen(encoded_text, labels, batch_size):
for ii in range(0, len(encoded_text), batch_size):
x = encoded_text[ii:ii + batch_size]
y = labels[ii:ii + batch_size]
yield (x, y)
|
[
"pandas.read_csv",
"keras.preprocessing.sequence.pad_sequences",
"numpy.array"
] |
[((190, 241), 'pandas.read_csv', 'pd.read_csv', (['path'], {'names': "['one', 'second', 'third']"}), "(path, names=['one', 'second', 'third'])\n", (201, 241), True, 'import pandas as pd\n'), ((1047, 1060), 'numpy.array', 'np.array', (['vec'], {}), '(vec)\n', (1055, 1060), True, 'import numpy as np\n'), ((1248, 1305), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['encoded_text'], {'maxlen': 'sent_limit', 'value': '(0.0)'}), '(encoded_text, maxlen=sent_limit, value=0.0)\n', (1261, 1305), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((1316, 1338), 'numpy.array', 'np.array', (['encoded_text'], {}), '(encoded_text)\n', (1324, 1338), True, 'import numpy as np\n'), ((626, 660), 'numpy.array', 'np.array', (['labels'], {'dtype': 'np.float32'}), '(labels, dtype=np.float32)\n', (634, 660), True, 'import numpy as np\n')]
|
#%%
import matplotlib.pyplot as plt
import numpy as np
Rload = 3300
R_25 = 10000
T_25 = 25 + 273.15 #Kelvin
Beta = 3434
Tmin = 0
Tmax = 140
temps = np.linspace(Tmin, Tmax, 1000)
tempsK = temps + 273.15
# https://en.wikipedia.org/wiki/Thermistor#B_or_%CE%B2_parameter_equation
r_inf = R_25 * np.exp(-Beta/T_25)
R_temps = r_inf * np.exp(Beta/tempsK)
V = Rload / (Rload + R_temps)
fit = np.polyfit(V, temps, 3)
p1 = np.poly1d(fit)
fit_temps = p1(V)
#%%
print(fit)
plt.plot(V, temps, label='actual')
plt.plot(V, fit_temps, label='fit')
plt.xlabel('normalized voltage')
plt.ylabel('Temp [C]')
plt.legend(loc=0)
plt.show()
|
[
"numpy.poly1d",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.polyfit",
"matplotlib.pyplot.legend",
"numpy.exp",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((150, 179), 'numpy.linspace', 'np.linspace', (['Tmin', 'Tmax', '(1000)'], {}), '(Tmin, Tmax, 1000)\n', (161, 179), True, 'import numpy as np\n'), ((388, 411), 'numpy.polyfit', 'np.polyfit', (['V', 'temps', '(3)'], {}), '(V, temps, 3)\n', (398, 411), True, 'import numpy as np\n'), ((417, 431), 'numpy.poly1d', 'np.poly1d', (['fit'], {}), '(fit)\n', (426, 431), True, 'import numpy as np\n'), ((467, 501), 'matplotlib.pyplot.plot', 'plt.plot', (['V', 'temps'], {'label': '"""actual"""'}), "(V, temps, label='actual')\n", (475, 501), True, 'import matplotlib.pyplot as plt\n'), ((502, 537), 'matplotlib.pyplot.plot', 'plt.plot', (['V', 'fit_temps'], {'label': '"""fit"""'}), "(V, fit_temps, label='fit')\n", (510, 537), True, 'import matplotlib.pyplot as plt\n'), ((538, 570), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""normalized voltage"""'], {}), "('normalized voltage')\n", (548, 570), True, 'import matplotlib.pyplot as plt\n'), ((571, 593), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Temp [C]"""'], {}), "('Temp [C]')\n", (581, 593), True, 'import matplotlib.pyplot as plt\n'), ((594, 611), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0)'}), '(loc=0)\n', (604, 611), True, 'import matplotlib.pyplot as plt\n'), ((612, 622), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (620, 622), True, 'import matplotlib.pyplot as plt\n'), ((294, 314), 'numpy.exp', 'np.exp', (['(-Beta / T_25)'], {}), '(-Beta / T_25)\n', (300, 314), True, 'import numpy as np\n'), ((331, 352), 'numpy.exp', 'np.exp', (['(Beta / tempsK)'], {}), '(Beta / tempsK)\n', (337, 352), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# @Date : 2020/5/24
# @Author: Luokun
# @Email : <EMAIL>
import sys
from os.path import dirname, abspath
import matplotlib.pyplot as plt
import numpy as np
sys.path.append(dirname(dirname(abspath(__file__))))
def test_knn():
from models.knn import KNN
x, y = np.random.randn(3, 200, 2), np.zeros([3, 200])
    x[0] += np.array([2, 2])  # shift right by 2, up by 2
    x[1] += np.array([2, -2])  # shift right by 2, down by 2
y[1] = 1
y[2] = 2
plot_scatter(x, 'Real')
x = x.reshape(-1, 2)
y = y.flatten()
# train
knn = KNN(3)
knn.fit(x, y)
pred = knn.predict(x)
plot_scatter([x[pred == i] for i in [0, 1, 2]], 'Pred')
# print accuracy
acc = np.sum(pred == y) / len(pred)
print(f'Acc = {100 * acc:.2f}%')
def plot_scatter(xys, title):
plt.figure(figsize=(10, 10))
for xy, color in zip(xys, ['r', 'g', 'b']):
plt.scatter(xy[:, 0], xy[:, 1], color=color)
plt.title(title)
plt.show()
if __name__ == '__main__':
test_knn()
|
[
"matplotlib.pyplot.title",
"os.path.abspath",
"matplotlib.pyplot.show",
"numpy.sum",
"numpy.random.randn",
"matplotlib.pyplot.scatter",
"numpy.zeros",
"models.knn.KNN",
"matplotlib.pyplot.figure",
"numpy.array"
] |
[((357, 373), 'numpy.array', 'np.array', (['[2, 2]'], {}), '([2, 2])\n', (365, 373), True, 'import numpy as np\n'), ((399, 416), 'numpy.array', 'np.array', (['[2, -2]'], {}), '([2, -2])\n', (407, 416), True, 'import numpy as np\n'), ((553, 559), 'models.knn.KNN', 'KNN', (['(3)'], {}), '(3)\n', (556, 559), False, 'from models.knn import KNN\n'), ((800, 828), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (810, 828), True, 'import matplotlib.pyplot as plt\n'), ((934, 950), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (943, 950), True, 'import matplotlib.pyplot as plt\n'), ((955, 965), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (963, 965), True, 'import matplotlib.pyplot as plt\n'), ((298, 324), 'numpy.random.randn', 'np.random.randn', (['(3)', '(200)', '(2)'], {}), '(3, 200, 2)\n', (313, 324), True, 'import numpy as np\n'), ((326, 344), 'numpy.zeros', 'np.zeros', (['[3, 200]'], {}), '([3, 200])\n', (334, 344), True, 'import numpy as np\n'), ((697, 714), 'numpy.sum', 'np.sum', (['(pred == y)'], {}), '(pred == y)\n', (703, 714), True, 'import numpy as np\n'), ((885, 929), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xy[:, 0]', 'xy[:, 1]'], {'color': 'color'}), '(xy[:, 0], xy[:, 1], color=color)\n', (896, 929), True, 'import matplotlib.pyplot as plt\n'), ((216, 233), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (223, 233), False, 'from os.path import dirname, abspath\n')]
|
from dataclasses import dataclass
import numpy as np
@dataclass
class ObjectTrackingResult:
frame_index: int
tracking_id: int
class_id: int
class_name: str
xmin: int
ymin: int
xmax: int
ymax: int
confidence: float
is_active: bool
def to_txt(self):
return "{} {} {} {} {} {}".format(self.class_name,
self.confidence,
self.xmin,
self.ymin,
self.xmax,
self.ymax)
def to_dict(self):
return self.__dict__
def to_array(self):
return np.array([self.xmin,
self.ymin,
self.xmax,
self.ymax,
self.confidence,
])
def to_list(self):
return [self.xmin,
self.ymin,
self.xmax,
self.ymax,
self.confidence,
]
|
[
"numpy.array"
] |
[((718, 789), 'numpy.array', 'np.array', (['[self.xmin, self.ymin, self.xmax, self.ymax, self.confidence]'], {}), '([self.xmin, self.ymin, self.xmax, self.ymax, self.confidence])\n', (726, 789), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import numpy as np
#%%
def tol2side_x_eq_y(x, y, tol_below=0.0, tol_above=0.0):
    '''Check whether x equals y within the lower tolerance tol_below and upper tolerance tol_above'''
return y - tol_below <= x <= y + tol_above
def tol_eq(x, y, tol=0.0):
    '''Check whether x and y are equal within absolute tolerance tol'''
return abs(x - y) <= tol
def tol_x_big_y(x, y, tol=0.0):
    '''Check whether x is greater than y by more than absolute tolerance tol'''
return x > y and abs(x - y) > tol
def tol_x_big_eq_y(x, y, tol=0.0):
    '''Check whether x is greater than or equal to y within absolute tolerance tol'''
return tol_x_big_y(x, y, tol) or tol_eq(x, y, tol)
def tol_x_sml_y(x, y, tol=0.0):
    '''Check whether x is less than y by more than absolute tolerance tol'''
return x < y and abs(y - x) > tol
def tol_x_sml_eq_y(x, y, tol=0.0):
    '''Check whether x is less than or equal to y within absolute tolerance tol'''
return tol_x_sml_y(x, y, tol) or tol_eq(x, y, tol)
#%%
def get_alts_sml(tgt_sum, alts, sort_type='descend', tol=0.0, add_num=None):
'''
    Pick feasible candidates from the given list alts whose sum is less than or equal to tgt_sum
    Parameters
    ----------
    tgt_sum : float, int
        Target sum
    alts : list
        List of candidate numbers
    sort_type : str
        How to sort alts; default 'descend' (descending), optional 'ascend' (ascending) or None (no sorting)
    tol : float
        Absolute tolerance used when comparing two numbers
    add_num : int, None
        Number of extra candidates kept after the cumulative sum reaches tgt_sum; default None means no limit
    Returns
    -------
    alts : list
        List of feasible candidate numbers
'''
    # Candidates must not exceed the target sum
alts = [x for x in alts if tol_x_sml_eq_y(x, tgt_sum, tol)]
if len(alts) == 0:
return []
if sort_type == 'descend':
alts = sorted(alts, reverse=True)
if sort_type == 'ascend':
alts = sorted(alts, reverse=False)
if add_num is None or add_num >= len(alts):
return alts
cumSum = list(np.cumsum(alts))
tmp = [1 if s >= tgt_sum else 0 for s in cumSum]
try:
strt_idx = tmp.index(1)
if strt_idx+add_num+1 <= len(alts):
return alts[:strt_idx+add_num+1]
else:
return alts
except:
return alts
#%%
def backfind_sml1st_index(tgt_sum, alts, tol=0.0, loop_count=None):
'''
    Search alts from back to front and return the index of the first value less than or equal to tgt_sum
    Parameters
    ----------
    tgt_sum : int, float
        Target value
    alts : list
        List of numbers to compare against
    tol : float
        Absolute tolerance used when comparing two numbers
    loop_count : int
        Initial iteration count, default None; if loop_count is None the iterations are not counted,
        otherwise counting continues from loop_count
    Returns
    -------
    idx : int
        Index of the first value in alts (searching from the back) that is less than or equal to tgt_sum
    loop_count : int
        Iteration count when the search finishes
'''
if len(alts) == 0:
return -1, loop_count
idx = len(alts) - 1
if loop_count is None:
while idx >= 1 and tol_x_big_y(alts[idx], tgt_sum, tol):
idx -= 1
return idx, loop_count
else:
while idx >= 1 and tol_x_big_y(alts[idx], tgt_sum, tol):
idx -= 1
loop_count += 1
return idx, loop_count
#%%
if __name__ == '__main__':
tgt_sum = 10
alts = [2, 5, 12, 11, 7, 8, 6, 3, 1, 10, 13]
sort_type = 'descend'
tol = 1.0
add_num = None
alts_new = get_alts_sml(tgt_sum, alts, sort_type=sort_type, tol=tol,
add_num=add_num)
print(alts_new)
alts = sorted(alts, reverse=False)
idx, loop_count = backfind_sml1st_index(tgt_sum, alts, tol=tol,
loop_count=None)
print(alts)
print(idx, loop_count)
|
[
"numpy.cumsum"
] |
[((1570, 1585), 'numpy.cumsum', 'np.cumsum', (['alts'], {}), '(alts)\n', (1579, 1585), True, 'import numpy as np\n')]
|
import numpy as np
import cv2
from poisson_disk import PoissonDiskSampler
import skimage.morphology
import skimage.measure
import scipy.stats
class Box(object):
"""
This class represents a box in an image. This could be a bounding box of an object or part.
Internally each box is represented by a tuple of 4 integers: (xmin, xmax, ymin, ymax)
"""
POINT_GENERATION_POLECIES = ['poisson_disk']
def __init__(self, xmin, xmax, ymin, ymax):
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
def __repr__(self):
return "%d - %d - %d - %d" % (self.xmin, self.xmax, self.ymin, self.ymax)
def is_valid(self):
return int(self.xmin) != -1
@staticmethod
def box_from_img(img):
"""
        Creates a box from the image
"""
height, width = img.shape[:2]
return Box(0, height, 0, width)
@staticmethod
def box_from_cendim(cen, dim):
"""
Create a box from a pair of center and dimension. Each center or dimension is a tuple. For short we call the center and dimension the `cendim`
Center: (cenX, cenY)
Dimension: (height, width)
"""
cenX, cenY = cen
height, width = dim
height_2 = height / 2.
width_2 = width / 2.
xmin = int(round(cenX - height_2))
xmax = int(round(cenX + height_2))
ymin = int(round(cenY - width_2))
ymax = int(round(cenY + width_2))
return Box(xmin, xmax, ymin, ymax)
def cendim(self):
"""
Convert the box into cendim format. In cendim format the center and dimension are stored as floating point numbers.
"""
cenX = float(self.xmin + self.xmax) / 2
cenY = float(self.ymin + self.ymax) / 2
height = float(self.xmax - self.xmin)
width = float(self.ymax - self.ymin)
cen = (cenX, cenY)
dim = (height, width)
return cen, dim
def trim_to_borders(self, img_shape):
"""
Trims the box with respect to the image provided.
"""
img_h, img_w = img_shape[:2]
self.xmin = max(0, self.xmin)
self.xmax = min(img_h - 1, self.xmax)
self.ymin = max(0, self.ymin)
self.ymax = min(img_w - 1, self.ymax)
return self
def draw_box(self, img, color=(1, 0, 0), width=2):
"""
Annotate the `img` with this Box. This returns a new image with the box annotated on it.
"""
new_img = img.copy()
cv2.rectangle(new_img, (self.ymin, self.xmin), (self.ymax, self.xmax), color, width)
return new_img
def get_sub_image(self, img):
"""
Return a sub-image only containing information inside this Box.
"""
self.trim_to_borders(img.shape)
return img[self.xmin:self.xmax, self.ymin:self.ymax]
@staticmethod
def expand_cendim(cen, dim, alpha):
height, width = dim
height = (2 * alpha) * height
width = (2 * alpha) * width
dim = (height, width)
return cen, dim
def expand(self, alpha=0.666):
cen, dim = self.cendim()
cen, dim = Box.expand_cendim(cen, dim, alpha)
new_box = Box.box_from_cendim(cen, dim)
self.xmin = new_box.xmin
self.xmax = new_box.xmax
self.ymin = new_box.ymin
self.ymax = new_box.ymax
return self
def evalIOU(self, gt_box, source_shape):
# TODO
# making sure not to generate errors further down the line
self.trim_to_borders(source_shape)
gt_box.trim_to_borders(source_shape)
height, width = source_shape[:2]
gt_part = np.zeros((height, width), np.uint8)
gt_part[gt_box.xmin:gt_box.xmax, gt_box.ymin:gt_box.ymax] = 1
sl_part = np.zeros((height, width), np.uint8)
sl_part[self.xmin:self.xmax, self.ymin:self.ymax] = 1
intersection = (gt_part & sl_part).sum()
union = (gt_part | sl_part).sum()
return intersection / float(union)
def evalPCP(self, gt_box, source_shape, thresh=0.5):
iou = self.evalIOU(gt_box, source_shape)
if iou >= thresh:
return 1
else:
return 0
def generate_points_inside(self, policy='poisson_disk', param=None, img=None):
"""
This function generates points inside this rectangle. It uses the poisson disk to do it by default. But there is a policy option that is configurable.
There is an optional `param` parameter that specifies the parameters of the generation policy.
Different Policies:
- `poisson_disk`:
The param is expected to be the radius. The radius is the parameter of the poisson disk sampler.
By default radius is set to be average of 1/10 of width and height of the box.
Each point is a row vector [x, y]. A set of `n` points will be represented as a numpy array of shape (n,2). The dtype is numpy.int.
There can be an optional img option. We can use the image's shape to further prune points that are located outside the boundary of the image.
"""
assert(policy in self.POINT_GENERATION_POLECIES)
cen, dim = self.cendim()
height, width = dim
if policy == 'poisson_disk':
if param is None:
radius = ((height / 10.) + (width / 10.)) / 2.
else:
radius = param
# please note that PoissonDiskSampler does use a flipped version of the axis
# also the algorithm generates points in the range [0, height] but we want [0, height) that is
# the reason behind the "-1".
pds = PoissonDiskSampler(height - 1, width - 1, radius)
samples = pds.get_sample()
points = np.zeros((len(samples), 2), dtype=np.int)
for i, s in enumerate(samples):
points[i, :] = [int(round(s[0])), int(round(s[1]))]
points += np.array([self.xmin, self.ymin])
return points
def draw_points(points, ax, color=None):
if color is None:
color = 'red'
for p in points:
# Notice that in plt the axis are different from what we work with
# namely in plt the horizontal axis is x and vertical axis is y
# whereas in numpy and images that we work with the vertical axis is x
# this is the reason behind the flipping of points here.
ax.plot(p[1], p[0], 'o', color=color)
def filter_points(points, box):
"""
Remove points that lie inside the box from the set.
"""
new_points_ind = []
for i, p in enumerate(points):
if (box.xmin <= p[0] <= box.xmax and box.ymin <= p[1] <= box.ymax):
continue
else:
new_points_ind.append(i)
return points[new_points_ind, :]
def post_process_preds(preds):
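    # Clean the prediction mask: morphological closing with a 10x10 square, then drop connected components smaller than 10 pixels.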
preds = skimage.morphology.closing(preds, skimage.morphology.square(10))
preds = skimage.morphology.remove_small_objects(preds, min_size=10, connectivity=1)
return preds
def find_rect_from_preds(preds):
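    # Label connected components, keep the most common non-background label, and return its bounding box (or an invalid Box if nothing is found).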
L, N = skimage.measure.label(preds, return_num=True, background=0)
if N > 0:
L_no_bg = L[L != 0].flatten()
vals, counts = scipy.stats.mode(L_no_bg)
part_label = int(vals[0])
indices = np.where(L == part_label)
xmin = indices[0].min()
xmax = indices[0].max()
ymin = indices[1].min()
ymax = indices[1].max()
return Box(xmin, xmax, ymin, ymax)
else:
return Box(-1, -1, -1, -1)
|
[
"numpy.zeros",
"poisson_disk.PoissonDiskSampler",
"numpy.where",
"numpy.array",
"cv2.rectangle"
] |
[((2542, 2630), 'cv2.rectangle', 'cv2.rectangle', (['new_img', '(self.ymin, self.xmin)', '(self.ymax, self.xmax)', 'color', 'width'], {}), '(new_img, (self.ymin, self.xmin), (self.ymax, self.xmax),\n color, width)\n', (2555, 2630), False, 'import cv2\n'), ((3701, 3736), 'numpy.zeros', 'np.zeros', (['(height, width)', 'np.uint8'], {}), '((height, width), np.uint8)\n', (3709, 3736), True, 'import numpy as np\n'), ((3826, 3861), 'numpy.zeros', 'np.zeros', (['(height, width)', 'np.uint8'], {}), '((height, width), np.uint8)\n', (3834, 3861), True, 'import numpy as np\n'), ((7345, 7370), 'numpy.where', 'np.where', (['(L == part_label)'], {}), '(L == part_label)\n', (7353, 7370), True, 'import numpy as np\n'), ((5731, 5780), 'poisson_disk.PoissonDiskSampler', 'PoissonDiskSampler', (['(height - 1)', '(width - 1)', 'radius'], {}), '(height - 1, width - 1, radius)\n', (5749, 5780), False, 'from poisson_disk import PoissonDiskSampler\n'), ((6018, 6050), 'numpy.array', 'np.array', (['[self.xmin, self.ymin]'], {}), '([self.xmin, self.ymin])\n', (6026, 6050), True, 'import numpy as np\n')]
|
import numpy as np
# direct cluster
class FCM(object):
def __init__(self, data):
self.lambd = 0
self.data = data
self.cluster = []
self.F_S = []
def standard(self):
data_min, data_max = np.min(self.data, axis=0), np.max(self.data, axis=0)
num_samples, num_shapes = np.shape(self.data)
for i in range(num_samples):
self.data[i, :] = (self.data[i, :])/data_max
for j in range(num_shapes):
self.data[i, j] = round(float(self.data[i, j]), 2)
def matrix_alike(self):
num_samples, num_shapes = np.shape(self.data)
data = self.data
r = np.zeros((num_samples, num_samples))
# using max min method
for i in range(num_samples):
for j in range(num_samples):
r[i, j] = np.sum(self.min(data[i, :], data[j, :]))/np.sum(self.max(data[i, :], data[j, :]))
r[i, j] = round(r[i, j], 2)
return r
def max(self, a, b):
a_or_b = []
for (i, j) in zip(a, b):
if i > j:
a_or_b.append(i)
else:
a_or_b.append(j)
return a_or_b
def min(self, a, b):
a_and_b = []
for (i, j) in zip(a, b):
if i < j:
a_and_b.append(i)
else:
a_and_b.append(j)
return a_and_b
def merge_alike_class(self, a):
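        # Merge sub-lists that share any element into a single group (transitive merge of overlapping clusters).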
b = []
for i in range(len(a)):
temp = []
sign = False
for j in range(len(a[i])):
if len(b) != 0:
for k in range(len(b)):
if a[i][j] in b[k]:
b[k].extend(a[i])
b[k] = list(np.unique(b[k]))
sign = True
break
if sign:
break
temp.append(a[i][j])
if sign:
continue
b.append(temp)
return b
def remove_same_cluster(self):
length = len(self.cluster)
temp = self.cluster.copy()
for i in range(length-1):
if self.cluster[i]['result'] == self.cluster[i+1]['result']:
index = 0
while True:
if temp[index]['lambd'] == self.cluster[i+1]['lambd']:
break
else:
index = index+1
temp.pop(index)
self.cluster = temp
def cluster_t(self, T, lam):
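        # Lambda-cut: keep pairs whose similarity is at least lam, then group the samples that end up connected.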
answer = T >= lam
num_i, num_j = answer.shape
x_index, y_index = [], []
for i in range(num_i):
for j in range(num_j):
if answer[i, j]:
x_index.append(i+1)
y_index.append(j+1)
num = list(np.unique(x_index))
result = []
for i in num:
temp = []
for j, k in zip(x_index, y_index):
if i == j:
temp.append(k)
result.append(temp)
result = self.merge_alike_class(result) # merge alike class
return result
# start cluster
def fcm(self):
self.standard() # data standardization
r = self.matrix_alike() # create fuzzy alike matrix
lambd = np.unique(r) # get confidence level lambda
lambd_length = len(lambd)
for i in range(lambd_length):
temp = {}
temp['lambd'] = round(lambd[lambd_length-i-1], 2)
temp['result'] = self.cluster_t(r, lambd[lambd_length-i-1])
self.cluster.append(temp)
self.remove_same_cluster()
print('The result of cluster is ', self.cluster)
self.select_lambda()
        best = self.F_S.index(min(self.F_S))+1 # use the F-S index as the validation measure for lambda
print('The best lambda is ', self.cluster[best]['lambd'])
print('The best result of cluster is ', self.cluster[best]['result'])
def data_mean(self, data, index):
if len(index) == 1:
return data
else:
return np.mean(data, axis=0)
def select_lambda(self):
total_mean = np.mean(self.data, axis=0)
length = len(self.cluster)
for option in range(1, length-1):
F_S = 0
temp = 0
for i in self.cluster[option]['result']:
i = [j-1 for j in i] # fix list index
vi = self.data_mean(self.data[i, :], i)
temp = 0
for j in i:
temp = temp + (np.sum(np.square(self.data[j, :] - vi)) - np.sum(np.square(vi - total_mean)))
F_S = F_S + temp
self.F_S.append(F_S)
def main():
data = np.array([[80., 10., 6., 2.],
[50., 1., 6., 4.],
[90., 6., 4., 6.],
[40., 5., 7., 3.],
[10., 1., 2., 4.]])
fcm = FCM(data)
fcm.fcm()
if __name__ == '__main__':
main()
|
[
"numpy.square",
"numpy.zeros",
"numpy.shape",
"numpy.min",
"numpy.mean",
"numpy.array",
"numpy.max",
"numpy.unique"
] |
[((4941, 5072), 'numpy.array', 'np.array', (['[[80.0, 10.0, 6.0, 2.0], [50.0, 1.0, 6.0, 4.0], [90.0, 6.0, 4.0, 6.0], [\n 40.0, 5.0, 7.0, 3.0], [10.0, 1.0, 2.0, 4.0]]'], {}), '([[80.0, 10.0, 6.0, 2.0], [50.0, 1.0, 6.0, 4.0], [90.0, 6.0, 4.0, \n 6.0], [40.0, 5.0, 7.0, 3.0], [10.0, 1.0, 2.0, 4.0]])\n', (4949, 5072), True, 'import numpy as np\n'), ((337, 356), 'numpy.shape', 'np.shape', (['self.data'], {}), '(self.data)\n', (345, 356), True, 'import numpy as np\n'), ((628, 647), 'numpy.shape', 'np.shape', (['self.data'], {}), '(self.data)\n', (636, 647), True, 'import numpy as np\n'), ((687, 723), 'numpy.zeros', 'np.zeros', (['(num_samples, num_samples)'], {}), '((num_samples, num_samples))\n', (695, 723), True, 'import numpy as np\n'), ((3459, 3471), 'numpy.unique', 'np.unique', (['r'], {}), '(r)\n', (3468, 3471), True, 'import numpy as np\n'), ((4363, 4389), 'numpy.mean', 'np.mean', (['self.data'], {'axis': '(0)'}), '(self.data, axis=0)\n', (4370, 4389), True, 'import numpy as np\n'), ((249, 274), 'numpy.min', 'np.min', (['self.data'], {'axis': '(0)'}), '(self.data, axis=0)\n', (255, 274), True, 'import numpy as np\n'), ((276, 301), 'numpy.max', 'np.max', (['self.data'], {'axis': '(0)'}), '(self.data, axis=0)\n', (282, 301), True, 'import numpy as np\n'), ((2961, 2979), 'numpy.unique', 'np.unique', (['x_index'], {}), '(x_index)\n', (2970, 2979), True, 'import numpy as np\n'), ((4287, 4308), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (4294, 4308), True, 'import numpy as np\n'), ((1832, 1847), 'numpy.unique', 'np.unique', (['b[k]'], {}), '(b[k])\n', (1841, 1847), True, 'import numpy as np\n'), ((4777, 4808), 'numpy.square', 'np.square', (['(self.data[j, :] - vi)'], {}), '(self.data[j, :] - vi)\n', (4786, 4808), True, 'import numpy as np\n'), ((4819, 4845), 'numpy.square', 'np.square', (['(vi - total_mean)'], {}), '(vi - total_mean)\n', (4828, 4845), True, 'import numpy as np\n')]
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
# This maintains a global state.
# Only two expressions that have the same subscripts and operand shapes
# are considered to be the same einsum expression.
class _EinsumPathCached:
def __init__(self):
self.path = {}
def __call__(self, *args, **kwargs):
subscript = args[0]
operands = args[1:]
key = subscript
key += '|'
for operand in operands:
key += '-'.join([str(dim) for dim in operand.shape])
key += '|'
if key not in self.path:
self.path[key] = np.einsum_path(*args,
**kwargs,
optimize='optimal')[0]
kwargs['optimize'] = self.path[key]
return np.einsum(*args, **kwargs)
einsum_pc = _EinsumPathCached()
# import time
# N = 10
# C = np.random.rand(N, N)
# I = np.random.rand(N, N, N, N)
# begin = time.time()
# for i in range(10):
# einsum_pc('pi,qj,ijkl,rk,sl->pqrs', C, C, I, C, C)
# einsum_pc('pi,qj,ijko,rk,so->pqrs', C, C, I, C, C)
# end = time.time()
# print(einsum_pc.path)
# print(f'{end - begin}')
# begin = time.time()
# for i in range(10):
# np.einsum('pi,qj,ijkl,rk,sl->pqrs', C, C, I, C, C, optimize='optimal')
# end = time.time()
# print(f'{end - begin}')
|
[
"numpy.einsum",
"numpy.einsum_path"
] |
[((1342, 1368), 'numpy.einsum', 'np.einsum', (['*args'], {}), '(*args, **kwargs)\n', (1351, 1368), True, 'import numpy as np\n'), ((1140, 1191), 'numpy.einsum_path', 'np.einsum_path', (['*args'], {'optimize': '"""optimal"""'}), "(*args, **kwargs, optimize='optimal')\n", (1154, 1191), True, 'import numpy as np\n')]
|
# # -*- coding: utf-8 -*-
# from chatterbot import ChatBot
# bot = ChatBot(
# "Math & Time Bot",
# logic_adapters=[
# "chatterbot.logic.MathematicalEvaluation",
# "chatterbot.logic.TimeLogicAdapter"
# ],
# input_adapter="chatterbot.input.VariableInputTypeAdapter",
# output_adapter="chatterbot.output.OutputAdapter",
# trainer='chatterbot.trainers.ChatterBotCorpusTrainer'
# )
# # Print an example of getting one math based response
# response = bot.get_response("What is 4 + 9?")
# print(response)
# # Print an example of getting one time based response
# response = bot.get_response("What time is it?")
# print(response)
import numpy as np
from matplotlib import pyplot as plt
import scipy.io.wavfile as wav
from numpy.lib import stride_tricks
import sys
import os
import pickle
def stft(sig, frameSize, overlapFac=0.5, window=np.hanning):
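    # Short-time Fourier transform: zero-pad the signal, slice it into overlapping frames, window each frame, and take the real FFT.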
win = window(frameSize)
hopSize = int(frameSize - np.floor(overlapFac * frameSize))
samples = np.append(np.zeros(int(np.floor(frameSize/2.0))), sig)
cols = np.ceil( (len(samples) - frameSize) / float(hopSize)) + 1
samples = np.append(samples, np.zeros(frameSize))
frames = stride_tricks.as_strided(samples, shape=(int(cols), frameSize), strides=(samples.strides[0]*hopSize, samples.strides[0])).copy()
frames *= win
return np.fft.rfft(frames)
def logscale_spec(spec, sr=22000, factor=20.):
timebins, freqbins = np.shape(spec)
scale = np.linspace(0, 1, freqbins) ** factor
scale *= (freqbins-1)/max(scale)
scale = np.unique(np.round(scale))
newspec = np.complex128(np.zeros([timebins, len(scale)]))
for i in range(0, len(scale)):
if i == len(scale)-1:
newspec[:,i] = np.sum(spec[:,int(scale[i]):], axis=1)
else:
newspec[:,i] = np.sum(spec[:,int(scale[i]):int(scale[i+1])], axis=1)
allfreqs = np.abs(np.fft.fftfreq(freqbins*2, 1./sr)[:freqbins+1])
freqs = []
for i in range(0, len(scale)):
if i == len(scale)-1:
freqs += [np.mean(allfreqs[int(scale[i]):])]
else:
freqs += [np.mean(allfreqs[int(scale[i]):int(scale[i+1])])]
return newspec, freqs
def plotstft(audiopath, binsize=2**10, plotpath=None, colormap="jet"):
samplerate, samples = wav.read(audiopath)
s = stft(samples, binsize)
sshow, freq = logscale_spec(s, factor=1.0, sr=samplerate)
ims = 20.*np.log10(np.abs(sshow)/10e-6)
timebins, freqbins = np.shape(ims)
freqbins=freqbins/2
print("timebins: ", timebins)
print("freqbins: ", freqbins)
# plt.title('Spectrogram')
# plt.imshow(np.transpose(ims), origin="lower", aspect="auto", cmap=colormap, interpolation="none")
arr=[]
fingerprint = []
min_var=np.median(ims[0])
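    # Fingerprint: the median log-magnitude of every third time bin; min_var tracks the smallest positive median seen.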
for i in range(0,timebins,3):
temp=np.median(ims[i])
arr.append(temp)
plt.plot(temp)
if min_var > temp and temp>0:
min_var = temp
fingerprint.append(temp)
if min_var<0:
min_var = 0
# plt.colorbar()
# plt.xlabel("timebins ")
# plt.ylabel("frequency (hz)")
# plt.xlim([0, timebins-1])
# plt.ylim([0, int(freqbins)])
# plt.plot(arr,'.',color='b')
# plt.show()
# xlocs = np.float32(np.linspace(0, timebins-1, 5))
# plt.xticks(xlocs, ["%.02f" % l for l in ((xlocs*len(samples)/timebins)+(0.5*binsize))/samplerate])
# ylocs = np.int16(np.round(np.linspace(0, freqbins-1, 10)))
# plt.yticks(ylocs, ["%.02f" % freq[i] for i in ylocs])
# if plotpath:
# plt.savefig(plotpath, bbox_inches="tight")
# plt.clf()
return ims,arr,fingerprint
filename1='test.wav'
#ims2,arr2,fingerprint2=plotstft('newSong.wav')
def check_song(filename1,ims2,arr2,fingerprint2):
ims,arr,fingerprint1 = plotstft(filename1)
# ims2,arr2,fingerprint2 = plotstft(filename2)
arrBig = fingerprint1
arrSmall = fingerprint2
l1 = len(fingerprint1)
l2 = len(fingerprint2)
err = 1000
subsong = False
sum1=0
min_sum=20000
newarr=[]
for i in range(0,l1-l2+1):
subArr = np.array(arrBig[i:i+l2])
for j in range(0,l2):
dummy = subArr[j]-arrSmall[j]
if(dummy<0): dummy=dummy*(-1)
newarr.append(dummy)
newarr=np.array(newarr)
sum1 = np.median(newarr)
if sum1<=0:
sum1 = sum1*(-1)
if sum1<err:
subsong=True
newarr=[]
if(min_sum>sum1):
min_sum=sum1
return subsong,min_sum
song_files = os.listdir('./songs')
main_lis={}
#############################
filename1='test.wav'
ims2,arr2,fingerprint1=plotstft(sys.argv[1])
fingerprint1=np.array(fingerprint1[20:])
filename2='db.pkl'
main_dir={}
def check_song1(fingerprint1):
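    # Slide the query fingerprint across every stored fingerprint and record, per song, the smallest median absolute difference.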
with open(filename2,'rb') as inp:
main_lis = pickle.load(inp)
for fprint in main_lis:
arrBig = main_lis[fprint]
arrSmall = fingerprint1
l1 = len(arrBig)
l2 = len(arrSmall)
err = 1000
subsong = False
sum1=0
min_sum=20000
newarr=[]
for i in range(0,l1-l2+1):
subArr = np.array(arrBig[i:i+l2])
for j in range(0,l2):
dummy = subArr[j]-arrSmall[j]
if(dummy<0): dummy=dummy*(-1)
newarr.append(dummy)
newarr=np.array(newarr)
sum1 = np.median(newarr)
if sum1<=0:
sum1 = sum1*(-1)
if sum1<err:
subsong=True
newarr=[]
if(min_sum>sum1):
min_sum=sum1
main_dir[fprint]=min_sum
check_song1(fingerprint1)
# print(main_dir)
main_dir = sorted(main_dir.items(),key = lambda x:x[1])
print(main_dir)
|
[
"numpy.fft.rfft",
"numpy.abs",
"matplotlib.pyplot.plot",
"numpy.median",
"numpy.floor",
"numpy.zeros",
"scipy.io.wavfile.read",
"numpy.shape",
"numpy.fft.fftfreq",
"pickle.load",
"numpy.array",
"numpy.linspace",
"numpy.round",
"os.listdir"
] |
[((4136, 4157), 'os.listdir', 'os.listdir', (['"""./songs"""'], {}), "('./songs')\n", (4146, 4157), False, 'import os\n'), ((4281, 4308), 'numpy.array', 'np.array', (['fingerprint1[20:]'], {}), '(fingerprint1[20:])\n', (4289, 4308), True, 'import numpy as np\n'), ((1324, 1343), 'numpy.fft.rfft', 'np.fft.rfft', (['frames'], {}), '(frames)\n', (1335, 1343), True, 'import numpy as np\n'), ((1418, 1432), 'numpy.shape', 'np.shape', (['spec'], {}), '(spec)\n', (1426, 1432), True, 'import numpy as np\n'), ((2174, 2193), 'scipy.io.wavfile.read', 'wav.read', (['audiopath'], {}), '(audiopath)\n', (2182, 2193), True, 'import scipy.io.wavfile as wav\n'), ((2344, 2357), 'numpy.shape', 'np.shape', (['ims'], {}), '(ims)\n', (2352, 2357), True, 'import numpy as np\n'), ((2605, 2622), 'numpy.median', 'np.median', (['ims[0]'], {}), '(ims[0])\n', (2614, 2622), True, 'import numpy as np\n'), ((1141, 1160), 'numpy.zeros', 'np.zeros', (['frameSize'], {}), '(frameSize)\n', (1149, 1160), True, 'import numpy as np\n'), ((1442, 1469), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'freqbins'], {}), '(0, 1, freqbins)\n', (1453, 1469), True, 'import numpy as np\n'), ((1533, 1548), 'numpy.round', 'np.round', (['scale'], {}), '(scale)\n', (1541, 1548), True, 'import numpy as np\n'), ((2661, 2678), 'numpy.median', 'np.median', (['ims[i]'], {}), '(ims[i])\n', (2670, 2678), True, 'import numpy as np\n'), ((2700, 2714), 'matplotlib.pyplot.plot', 'plt.plot', (['temp'], {}), '(temp)\n', (2708, 2714), True, 'from matplotlib import pyplot as plt\n'), ((3793, 3819), 'numpy.array', 'np.array', (['arrBig[i:i + l2]'], {}), '(arrBig[i:i + l2])\n', (3801, 3819), True, 'import numpy as np\n'), ((3941, 3957), 'numpy.array', 'np.array', (['newarr'], {}), '(newarr)\n', (3949, 3957), True, 'import numpy as np\n'), ((3967, 3984), 'numpy.median', 'np.median', (['newarr'], {}), '(newarr)\n', (3976, 3984), True, 'import numpy as np\n'), ((4421, 4437), 'pickle.load', 'pickle.load', (['inp'], {}), '(inp)\n', (4432, 4437), False, 'import pickle\n'), ((941, 973), 'numpy.floor', 'np.floor', (['(overlapFac * frameSize)'], {}), '(overlapFac * frameSize)\n', (949, 973), True, 'import numpy as np\n'), ((1821, 1859), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['(freqbins * 2)', '(1.0 / sr)'], {}), '(freqbins * 2, 1.0 / sr)\n', (1835, 1859), True, 'import numpy as np\n'), ((1009, 1034), 'numpy.floor', 'np.floor', (['(frameSize / 2.0)'], {}), '(frameSize / 2.0)\n', (1017, 1034), True, 'import numpy as np\n'), ((2301, 2314), 'numpy.abs', 'np.abs', (['sshow'], {}), '(sshow)\n', (2307, 2314), True, 'import numpy as np\n'), ((4678, 4704), 'numpy.array', 'np.array', (['arrBig[i:i + l2]'], {}), '(arrBig[i:i + l2])\n', (4686, 4704), True, 'import numpy as np\n'), ((4836, 4852), 'numpy.array', 'np.array', (['newarr'], {}), '(newarr)\n', (4844, 4852), True, 'import numpy as np\n'), ((4864, 4881), 'numpy.median', 'np.median', (['newarr'], {}), '(newarr)\n', (4873, 4881), True, 'import numpy as np\n')]
|
import loaders
import xarray as xr
import numpy as np
from loaders._utils import SAMPLE_DIM_NAME
import pytest
def test_multiple_unstacked_dims():
na, nb, nc, nd = 2, 3, 4, 5
ds = xr.Dataset(
data_vars={
"var1": xr.DataArray(
np.zeros([na, nb, nc, nd]), dims=["a", "b", "c", "d"],
),
"var2": xr.DataArray(np.zeros([na, nb, nc]), dims=["a", "b", "c"],),
}
)
unstacked_dims = ["c", "d"]
expected = xr.Dataset(
data_vars={
"var1": xr.DataArray(
np.zeros([na * nb, nc, nd]), dims=[SAMPLE_DIM_NAME, "c", "d"],
),
"var2": xr.DataArray(np.zeros([na * nb, nc]), dims=[SAMPLE_DIM_NAME, "c"],),
}
)
result = loaders.stack(ds=ds, unstacked_dims=unstacked_dims)
xr.testing.assert_identical(result.drop(result.coords.keys()), expected)
@pytest.fixture
def gridded_dataset(request):
num_nans, zdim, ydim, xdim = request.param
coords = {"z": range(zdim), "y": range(ydim), "x": range(xdim)}
# unique values for ease of set comparison in test
var = xr.DataArray(
[
[[(100 * k) + (10 * j) + i for i in range(10)] for j in range(10)]
for k in range(zdim)
],
dims=["z", "y", "x"],
coords=coords,
)
var = var.where(var >= num_nans) # assign nan values
return xr.Dataset({"var": var})
@pytest.mark.parametrize(
"gridded_dataset", [(0, 1, 10, 10), (0, 10, 10, 10)], indirect=True,
)
def test_stack_dims(gridded_dataset):
s_dim = SAMPLE_DIM_NAME
ds_train = loaders.stack(["z"], gridded_dataset)
assert set(ds_train.dims) == {s_dim, "z"}
assert len(ds_train["z"]) == len(gridded_dataset.z)
assert ds_train["var"].dims[0] == s_dim
|
[
"loaders.stack",
"pytest.mark.parametrize",
"numpy.zeros",
"xarray.Dataset"
] |
[((1424, 1521), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""gridded_dataset"""', '[(0, 1, 10, 10), (0, 10, 10, 10)]'], {'indirect': '(True)'}), "('gridded_dataset', [(0, 1, 10, 10), (0, 10, 10, 10)\n ], indirect=True)\n", (1447, 1521), False, 'import pytest\n'), ((764, 815), 'loaders.stack', 'loaders.stack', ([], {'ds': 'ds', 'unstacked_dims': 'unstacked_dims'}), '(ds=ds, unstacked_dims=unstacked_dims)\n', (777, 815), False, 'import loaders\n'), ((1396, 1420), 'xarray.Dataset', 'xr.Dataset', (["{'var': var}"], {}), "({'var': var})\n", (1406, 1420), True, 'import xarray as xr\n'), ((1605, 1642), 'loaders.stack', 'loaders.stack', (["['z']", 'gridded_dataset'], {}), "(['z'], gridded_dataset)\n", (1618, 1642), False, 'import loaders\n'), ((272, 298), 'numpy.zeros', 'np.zeros', (['[na, nb, nc, nd]'], {}), '([na, nb, nc, nd])\n', (280, 298), True, 'import numpy as np\n'), ((375, 397), 'numpy.zeros', 'np.zeros', (['[na, nb, nc]'], {}), '([na, nb, nc])\n', (383, 397), True, 'import numpy as np\n'), ((568, 595), 'numpy.zeros', 'np.zeros', (['[na * nb, nc, nd]'], {}), '([na * nb, nc, nd])\n', (576, 595), True, 'import numpy as np\n'), ((679, 702), 'numpy.zeros', 'np.zeros', (['[na * nb, nc]'], {}), '([na * nb, nc])\n', (687, 702), True, 'import numpy as np\n')]
|
# type: ignore
"""
A Tensor module on top of Numpy arrays.
TODO: Implement the reverse mode autodiff to compute gradients. It will have
to go backward through the computation graph.
"""
from __future__ import annotations
from typing import Union
import os
import pkgutil
import numpy as np
import pyopencl as cl
import pyopencl.array as clarray
import pyopencl.clmath as clmath
import pyopencl.clrandom as clrandom
import pyopencl.bitonic_sort as clbitonicsort
# Initialize the context
CONTEXT: cl.Context = cl.create_some_context(answers=[0, 1])
# Instantiate a queue
QUEUE: cl.CommandQueue = cl.CommandQueue(CONTEXT)
# OpenCL options
CLOPTS: str = "-cl-mad-enable -cl-fast-relaxed-math"
# Scalar type
Scalar = Union[float, int, np.float32]
def readcl(filename: str) -> str:
"""Read an OpenCL file and return it as a string."""
return pkgutil.get_data("miniml", f"opencl/{filename}").decode()
class Tensor:
"""A tensor class. Computations can be delegated to the GPU."""
def __init__(
self, data: Union[cl.array.Array, list, np.ndarray], gpu: bool = False
) -> None:
"""Initialize variables."""
self._gpu: bool = gpu
if isinstance(data, list):
self._data: np.ndarray = np.array(data, dtype=np.float32)
if self._gpu:
self._data = clarray.to_device(QUEUE, self._data)
elif isinstance(data, np.ndarray):
if data.dtype != np.float32:
# NOTE: The NumPy array has to be converted into a list first.
# Otherwise, the operations on cpu and gpu produce
# different results. This behavior can be caused by many
# reasons including OpenCL and even the operating system
# itself. Some research is needed to figure out cause and
# eliminate extra work for rebuilding the array.
self._data: np.ndarray = np.array(data.tolist(), np.float32)
else:
self._data: np.ndarray = data
if self._gpu:
self._data = clarray.to_device(QUEUE, self._data)
elif isinstance(data, cl.array.Array):
self._data: cl.array.Array = data
self._gpu: bool = True
else:
raise TypeError(
"Expected `list`, `np.ndarray`, or `pyopencl.array.Array` got "
f"`{type(data)}`"
)
@property
def data(self) -> Union[np.ndarray, cl.array.Array]:
"""The data inside of a tensor."""
return self._data
@data.setter
def data(self, data: Union[cl.array.Array, list, np.ndarray]) -> None:
"""Set the data inside of a tensor."""
if isinstance(data, list):
self._data: np.ndarray = np.array(data, dtype=np.float32)
if self._gpu:
self._data = clarray.to_device(QUEUE, self._data)
elif isinstance(data, np.ndarray):
if data.dtype != np.dtype("float32"):
self._data: np.ndarray = data.astype(np.float32)
else:
self._data: np.ndarray = data
if self._gpu:
self._data = clarray.to_device(QUEUE, self._data)
elif isinstance(data, cl.array.Array):
self._data: cl.array.Array = data
self._gpu: bool = True
else:
raise TypeError(
"Expected `list`, `np.ndarray`, or `pyopencl.array.Array` got "
f"`{type(data)}`"
)
def to_cpu(self) -> Tensor:
"""Load the data into CPU."""
if self._gpu:
self._data = self._data.get()
self._gpu = False
return self
def to_gpu(self) -> Tensor:
"""Load the data into GPU."""
if not self._gpu:
self._data = clarray.to_device(QUEUE, self._data)
self._gpu = True
return self
def to_numpy(self) -> np.ndarray:
"""Return a numpy ndarray."""
if self._gpu:
return self._data.get()
return self._data
@property
def gpu(self) -> bool:
"""Return the state of the GPU."""
return self._gpu
def __repr__(self) -> str:
"""A representation of a tensor."""
state: str = "GPU" if self._gpu else "CPU"
return f"{self._data}\n\nTensor[{state}]"
def __iter__(self) -> Union[np.ndarray, cl.array.Array]:
"""An iterator for tensors."""
for i in self._data:
yield i
def __len__(self) -> int:
"""Return a length of tensors."""
return len(self._data)
def __getitem__(self, idx: int) -> Union[np.ndarray, cl.array.Array]:
"""Return a length of tensors."""
return self._data[idx]
def __setitem__(
self, idx: int, item: Union[np.ndarray, cl.array.Array]
) -> None:
"""Return a length of tensors."""
self._data[idx] = item
def __add__(self, other: Union[Tensor, Scalar]) -> Tensor:
"""Add two tensors."""
if not isinstance(other, Tensor):
return Tensor(self._data + other, gpu=self._gpu)
return Tensor(self._data + other._data, gpu=self._gpu or other._gpu)
__radd__ = __add__
def __iadd__(self, other: Union[Tensor, Scalar]) -> Tensor:
"""Add two tensors in-place."""
if not isinstance(other, Tensor):
self._data += other
else:
self._data += other._data
return self
def __sub__(self, other: Union[Tensor, Scalar]) -> Tensor:
"""Subtract two tensors."""
if not isinstance(other, Tensor):
return Tensor(self._data - other, gpu=self._gpu)
return Tensor(self._data - other._data, gpu=self._gpu or other._gpu)
__rsub__ = __sub__
def __isub__(self, other: Union[Tensor, Scalar]) -> Tensor:
"""Subtract two tensors in-place."""
if not isinstance(other, Tensor):
self._data -= other
else:
self._data -= other._data
return self
def __mul__(self, other: Union[Tensor, Scalar]) -> Tensor:
"""Multiply two tensors."""
if not isinstance(other, Tensor):
return Tensor(self._data * other, gpu=self._gpu)
return Tensor(self._data * other._data, gpu=self._gpu or other._gpu)
__rmul__ = __mul__
def __imul__(self, other: Union[Tensor, Scalar]) -> Tensor:
"""Multiply two tensors in-place."""
if not isinstance(other, Tensor):
self._data *= other
else:
self._data *= other._data
return self
def __truediv__(self, other: Union[Tensor, Scalar]) -> Tensor:
"""Divide two tensors."""
if not isinstance(other, Tensor):
return Tensor(self._data / other, gpu=self._gpu)
return Tensor(self._data / other._data, gpu=self._gpu or other._gpu)
__rtruediv__ = __truediv__
def __itruediv__(self, other: Union[Tensor, Scalar]) -> Tensor:
"""Divide two tensors in-place."""
if not isinstance(other, Tensor):
self._data /= other
else:
self._data /= other._data
return self
def __lt__(self, other: Union[Tensor, Scalar]) -> Tensor:
"""Less than operation for a tensor and a tensor/scalar."""
if not isinstance(other, Tensor):
return Tensor(self._data < other, gpu=self._gpu)
return Tensor(self._data < other._data, gpu=self._gpu or other._gpu)
def __le__(self, other: Union[Tensor, Scalar]) -> Tensor:
"""Less than or equal operation for a tensor and a tensor/scalar."""
if not isinstance(other, Tensor):
return Tensor(self._data <= other, gpu=self._gpu)
return Tensor(self._data <= other._data, gpu=self._gpu or other._gpu)
def __eq__(self, other: Union[Tensor, Scalar]) -> Tensor:
"""Equal to operation for a tensor and a tensor/scalar."""
if not isinstance(other, Tensor):
return Tensor(self._data == other, gpu=self._gpu)
return Tensor(self._data == other._data, gpu=self._gpu or other._gpu)
def __ne__(self, other: Union[Tensor, Scalar]) -> Tensor:
"""Not equal to operation for a tensor and a tensor/scalar."""
if not isinstance(other, Tensor):
return Tensor(self._data != other, gpu=self._gpu)
return Tensor(self._data != other._data, gpu=self._gpu or other._gpu)
def __ge__(self, other: Union[Tensor, Scalar]) -> Tensor:
"""Greater than or equal operation for a tensor and a tensor/scalar."""
if not isinstance(other, Tensor):
return Tensor(self._data >= other, gpu=self._gpu)
return Tensor(self._data >= other._data, gpu=self._gpu or other._gpu)
def __gt__(self, other: Union[Tensor, Scalar]) -> Tensor:
"""Greater than operation for a tensor and a tensor/scalar."""
if not isinstance(other, Tensor):
return Tensor(self._data > other, gpu=self._gpu)
return Tensor(self._data > other._data, gpu=self._gpu or other._gpu)
def __neg__(self) -> Tensor:
"""Return a negated tensor."""
return Tensor(-self._data, gpu=self._gpu)
def all(self) -> bool:
"""Returns the true value if all values of a tensor are true."""
return self._data.all()
def any(self) -> bool:
"""Returns the true value if at least one value of a tensor is true."""
return self._data.any()
    def view(self, dtype: np.dtype) -> Tensor:
"""Returns the view of a tensor with the same data. If dtype is
different from current dtype, the actual bytes of memory will be
reinterpreted.
"""
return Tensor(self._data.view(dtype), gpu=self._gpu)
    def astype(self, dtype: np.dtype) -> Tensor:
"""Return a copy of self, cast to dtype."""
return Tensor(self._data.astype(dtype), gpu=self._gpu)
    def squeeze(self) -> Tensor:
"""Returns a view of the tensor with dimensions of length 1 removed."""
return Tensor(self._data.squeeze(), gpu=self._gpu)
def sort(self) -> None:
"""Sorts a tensor, uses the parallel bitonic sort when on GPU."""
if self._gpu:
sorter = clbitonicsort.BitonicSort(CONTEXT)
sorter(self._data)
else:
self._data.sort()
@property
def T(self) -> Tensor:
"""Returns a transpose of a tensor."""
return Tensor(self._data.T, gpu=self._gpu)
@property
def dtype(self) -> np.dtype:
"""The data type of a tensor."""
return self._data.dtype
@property
def flags(self) -> Union[cl.compyte.array.ArrayFlags, np.flagsobj]:
"""Return an object with attributes `c_contiguous`, `f_contiguous` and
`forc`, which may be used to query contiguity properties in analogy
to `numpy.ndarray.flags`.
"""
        return self._data.flags
@property
def ndim(self) -> int:
"""The dimensions of a tensor."""
return self._data.ndim
@property
def nbytes(self) -> int:
"""Return the number of bytes."""
return self._data.nbytes
@property
def shape(self) -> tuple[int, ...]:
"""The tuple of lengths of each dimension in the tensor."""
return self._data.shape
@property
def strides(self) -> tuple[int, ...]:
"""tuple of bytes to step in each dimension."""
self._data.strides
@property
def size(self) -> int:
"""The number of meaningful entries in the tensor."""
        return self._data.size
class Ops:
"""Tensor operations."""
@staticmethod
def dot(t1: Tensor, t2: Tensor, gpu=False) -> Tensor:
"""Returns a dot product (matrix multiplication) of two tensors."""
if gpu:
# Convert back to numpy ndarrays
t1 = t1.data.get().astype(np.float32)
t2 = t2.data.get().astype(np.float32)
t1_w = np.int32(t1.shape[1])
t1_h = np.int32(t1.shape[0])
t2_w = np.int32(t2.shape[1])
t2_h = np.int32(t2.shape[0])
rt_h = t1_h
rt_w = t2_w
rt = np.empty((rt_h, rt_w)).astype(np.float32)
# Mem flags
mf = cl.mem_flags
# Buffer variables
t1_buf = cl.Buffer(
CONTEXT, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=t1
)
t2_buf = cl.Buffer(
CONTEXT, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=t2
)
rt_buf = cl.Buffer(CONTEXT, mf.WRITE_ONLY, size=rt.nbytes)
# OpenCL program for computing a matrix multiply
prg = cl.Program(CONTEXT, readcl("matmul.cl")).build(
options=CLOPTS
)
# Perform the matrix multiplication and return the resulting tensor
prg.matmul(
QUEUE, rt.shape, None, t1_buf, t2_buf, rt_buf, t1_h, t2_w, t1_w
)
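            # copy the result buffer from the device back into the host array rt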
cl.enqueue_copy(QUEUE, rt, rt_buf)
return Tensor(rt, gpu=True)
return Tensor(np.dot(t1.data, t2.data))
@staticmethod
def vdot(m1: Tensor, m2: Tensor) -> Tensor:
"""Returns a dot product of two tensors."""
if m1.gpu or m2.gpu:
return Tensor(clarray.dot(m1.data, m2.data), gpu=True)
return Tensor(np.vdot(m1.data, m2.data))
@staticmethod
def flatten(t: Tensor) -> Tensor:
"""Returns flattened tensor containing the same data."""
return Tensor(t._data.ravel(), gpu=t.gpu)
@staticmethod
def fill(shape: tuple[int, ...], val: np.float32, gpu=False) -> Tensor:
"""Fill the tensor with scalar."""
if gpu:
return Tensor(
clarray.empty(QUEUE, shape, dtype=np.float32).fill(val),
gpu=True,
)
return Tensor(np.full(shape, val))
@staticmethod
def where(
cond: Tensor,
fst: Union[Tensor, Scalar],
snd: Union[Tensor, Scalar],
) -> Tensor:
"""Fill the tensor based on a condition."""
if cond.gpu:
if isinstance(fst, Tensor) and isinstance(snd, Tensor):
return Tensor(
clarray.if_positive(cond._data, fst._data, snd._data),
gpu=True,
)
shape: tuple[int, ...] = cond._data.shape
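            # if_positive works elementwise on arrays, so scalar operands are
            # first expanded into arrays of the condition's shape (fills below)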
if not isinstance(fst, Tensor) and isinstance(snd, Tensor):
snd = snd._data
fst = clarray.empty(QUEUE, shape, dtype=np.float32).fill(fst)
elif isinstance(fst, Tensor) and not isinstance(snd, Tensor):
fst = fst._data
snd = clarray.empty(QUEUE, shape, dtype=np.float32).fill(snd)
elif not isinstance(fst, Tensor) and not isinstance(snd, Tensor):
fst = clarray.empty(QUEUE, shape, dtype=np.float32).fill(fst)
snd = clarray.empty(QUEUE, shape, dtype=np.float32).fill(snd)
return Tensor(clarray.if_positive(cond._data, fst, snd), gpu=True)
if not isinstance(fst, Tensor) and isinstance(snd, Tensor):
return Tensor(np.where(cond._data, fst, snd._data))
if isinstance(fst, Tensor) and not isinstance(snd, Tensor):
return Tensor(np.where(cond._data, fst._data, snd))
if not isinstance(fst, Tensor) and not isinstance(snd, Tensor):
return Tensor(np.where(cond._data, fst, snd))
return Tensor(np.where(cond._data, fst._data, snd._data))
@staticmethod
def reshape(t: Tensor, shape: tuple) -> Tensor:
"""Returns a tensor containing the same data with a new shape."""
if t.gpu:
return Tensor(clarray.reshape(t._data, shape), gpu=True)
return Tensor(np.reshape(t._data, shape))
@staticmethod
def log(t: Tensor) -> Tensor:
"""Returns a natural logarithm of a tensor."""
if t.gpu:
return Tensor(clmath.log(t._data), gpu=True)
return Tensor(np.log(t._data))
@staticmethod
def tanh(t: Tensor) -> Tensor:
"""Returns a tanh of a tensor."""
if t.gpu:
return Tensor(clmath.tanh(t._data), gpu=True)
return Tensor(np.tanh(t._data))
@staticmethod
def exp(t: Tensor) -> Tensor:
"""Returns a natural exponent of a tensor."""
if t.gpu:
return Tensor(clmath.exp(t._data), gpu=True)
return Tensor(np.exp(t._data))
@staticmethod
def maximum(t: Tensor, uts: Union[Tensor, Scalar]) -> Tensor:
"""Returns the maximum of a tensor."""
if t.gpu:
if not isinstance(uts, Tensor):
ot: cl.array.Array = clarray.empty(
QUEUE, t.shape, dtype=np.float32
).fill(uts)
return Tensor(clarray.maximum(t._data, ot), gpu=True)
return Tensor(clarray.maximum(t._data, uts._data), gpu=True)
if not isinstance(uts, Tensor):
return Tensor(np.maximum(t._data, uts))
return Tensor(np.maximum(t._data, uts._data))
@staticmethod
def minimum(t: Tensor, uts: Union[Tensor, Scalar]) -> Tensor:
"""Returns the minimum of a tensor."""
if t.gpu:
if not isinstance(uts, Tensor):
ot: cl.array.Array = clarray.empty(
QUEUE, t.shape, dtype=np.float32
).fill(uts)
return Tensor(clarray.minimum(t._data, ot), gpu=True)
return Tensor(clarray.minimum(t._data, uts._data), gpu=True)
if not isinstance(uts, Tensor):
return Tensor(np.minimum(t._data, uts))
return Tensor(np.minimum(t._data, uts._data))
@staticmethod
def power(t: Tensor, exponent: Union[Tensor, Scalar]) -> Tensor:
"""Raise all elements of the tensor to the specified power."""
if not isinstance(exponent, Tensor):
return Tensor(t._data ** exponent, gpu=t.gpu)
return Tensor(t._data ** exponent._data, gpu=t.gpu or exponent.gpu)
@staticmethod
def square(t: Tensor) -> Tensor:
"""Return a square-valued tensor."""
return Tensor(t._data ** 2, gpu=t.gpu)
@staticmethod
def transpose(t: Tensor) -> Tensor:
"""Returns a transpose of a tensor."""
if t.gpu:
return Tensor(clarray.transpose(t._data), gpu=True)
return Tensor(np.transpose(t._data), gpu=t.gpu)
@staticmethod
def zeros(shape: tuple = (1, 1), gpu=False) -> Tensor:
"""Return a new tensor of given shape and type, filled with zeros."""
if gpu:
return Tensor(clarray.zeros(QUEUE, shape, np.float32), gpu=True)
return Tensor(np.zeros(shape, dtype=np.float32))
@staticmethod
def zeros_like(t: Tensor, gpu=False) -> Tensor:
"""Return a tensor of zeros with the same shape and type as a given
tensor.
"""
if gpu:
return Tensor(clarray.zeros_like(t._data), gpu=True)
return Tensor(np.zeros_like(t._data, dtype=np.float32))
class Random:
"""Random number generation for tensors."""
@staticmethod
def normal(
shape: Union[tuple[int, ...], int] = (1, 1), gpu=False
) -> Tensor:
"""Draw random samples from a normal (Gaussian) distribution."""
if gpu:
return Tensor(
clrandom.PhiloxGenerator(CONTEXT).normal(
cq=QUEUE, shape=shape, dtype=np.float32
),
gpu=True,
)
return Tensor(np.random.normal(size=shape).astype(np.float32))
@staticmethod
def rand(shape: Union[tuple[int, ...], int] = (1, 1), gpu=False) -> Tensor:
"""Returns a tensor of random values in a given shape."""
if gpu:
return Tensor(clrandom.rand(QUEUE, shape, np.float32), gpu=True)
if isinstance(shape, tuple):
return Tensor(np.random.rand(*shape).astype(np.float32))
return Tensor(np.random.rand(shape).astype(np.float32))
@staticmethod
def uniform(
shape: Union[tuple[int, ...], int] = (1, 1),
min: float = 0.0,
max: float = 1.0,
gpu=False,
) -> Tensor:
"""Draw samples from a uniform distribution."""
if gpu:
return Tensor(
clrandom.PhiloxGenerator(CONTEXT).uniform(
cq=QUEUE, shape=shape, dtype=np.float32, a=min, b=max
),
gpu=True,
)
return Tensor(
np.random.uniform(min, max, size=shape).astype(np.float32)
)
class Reduce:
"""Reduction operations on tensors."""
@staticmethod
def max(t: Tensor) -> np.float32:
"""The maximum of the values in a tensor."""
if t.gpu:
return clarray.max(t._data).get().flat[0]
return np.max(t._data)
@staticmethod
def min(t: Tensor) -> np.float32:
"""The minimum of the values in a tensor."""
if t.gpu:
return clarray.min(t._data).get().flat[0]
return np.min(t._data)
@staticmethod
def sum(t: Tensor) -> np.float32:
"""The sum of the values in a tensor."""
if t.gpu:
return clarray.sum(t._data).get().flat[0]
return np.sum(t._data)
@staticmethod
def mean(t: Tensor) -> np.float32:
"""The mean of the values in a tensor."""
if t.gpu:
return clarray.sum(t._data).get().flat[0] / t._data.size
return np.mean(t._data)
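# Minimal usage sketch, kept as comments: it assumes Tensor accepts an
# array-like in its constructor, which is not visible in this excerpt.
# a = Tensor([[1.0, 2.0], [3.0, 4.0]])
# b = Tensor([[5.0, 6.0], [7.0, 8.0]])
# c = Ops.dot(a, b)      # CPU matrix multiply -> Tensor
# total = Reduce.sum(c)  # sum of all entries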
|
[
"pyopencl.array.sum",
"numpy.sum",
"numpy.maximum",
"pyopencl.clmath.exp",
"pyopencl.enqueue_copy",
"pyopencl.array.transpose",
"numpy.empty",
"pyopencl.array.empty",
"pyopencl.array.minimum",
"pyopencl.Buffer",
"numpy.mean",
"numpy.exp",
"numpy.random.normal",
"pyopencl.array.reshape",
"pyopencl.array.zeros_like",
"pyopencl.clmath.log",
"numpy.full",
"numpy.zeros_like",
"pyopencl.clmath.tanh",
"numpy.transpose",
"pyopencl.CommandQueue",
"pyopencl.array.if_positive",
"pyopencl.array.max",
"pyopencl.bitonic_sort.BitonicSort",
"numpy.max",
"numpy.reshape",
"numpy.int32",
"pyopencl.array.maximum",
"pkgutil.get_data",
"numpy.minimum",
"numpy.tanh",
"pyopencl.create_some_context",
"numpy.min",
"pyopencl.array.min",
"numpy.dot",
"numpy.random.uniform",
"numpy.log",
"pyopencl.array.dot",
"pyopencl.clrandom.PhiloxGenerator",
"numpy.dtype",
"numpy.vdot",
"numpy.zeros",
"pyopencl.array.to_device",
"numpy.where",
"numpy.array",
"numpy.random.rand",
"pyopencl.clrandom.rand",
"pyopencl.array.zeros"
] |
[((521, 559), 'pyopencl.create_some_context', 'cl.create_some_context', ([], {'answers': '[0, 1]'}), '(answers=[0, 1])\n', (543, 559), True, 'import pyopencl as cl\n'), ((608, 632), 'pyopencl.CommandQueue', 'cl.CommandQueue', (['CONTEXT'], {}), '(CONTEXT)\n', (623, 632), True, 'import pyopencl as cl\n'), ((20996, 21011), 'numpy.max', 'np.max', (['t._data'], {}), '(t._data)\n', (21002, 21011), True, 'import numpy as np\n'), ((21211, 21226), 'numpy.min', 'np.min', (['t._data'], {}), '(t._data)\n', (21217, 21226), True, 'import numpy as np\n'), ((21422, 21437), 'numpy.sum', 'np.sum', (['t._data'], {}), '(t._data)\n', (21428, 21437), True, 'import numpy as np\n'), ((21650, 21666), 'numpy.mean', 'np.mean', (['t._data'], {}), '(t._data)\n', (21657, 21666), True, 'import numpy as np\n'), ((863, 911), 'pkgutil.get_data', 'pkgutil.get_data', (['"""miniml"""', 'f"""opencl/{filename}"""'], {}), "('miniml', f'opencl/{filename}')\n", (879, 911), False, 'import pkgutil\n'), ((1258, 1290), 'numpy.array', 'np.array', (['data'], {'dtype': 'np.float32'}), '(data, dtype=np.float32)\n', (1266, 1290), True, 'import numpy as np\n'), ((2820, 2852), 'numpy.array', 'np.array', (['data'], {'dtype': 'np.float32'}), '(data, dtype=np.float32)\n', (2828, 2852), True, 'import numpy as np\n'), ((3873, 3909), 'pyopencl.array.to_device', 'clarray.to_device', (['QUEUE', 'self._data'], {}), '(QUEUE, self._data)\n', (3890, 3909), True, 'import pyopencl.array as clarray\n'), ((10335, 10369), 'pyopencl.bitonic_sort.BitonicSort', 'clbitonicsort.BitonicSort', (['CONTEXT'], {}), '(CONTEXT)\n', (10360, 10369), True, 'import pyopencl.bitonic_sort as clbitonicsort\n'), ((12067, 12088), 'numpy.int32', 'np.int32', (['t1.shape[1]'], {}), '(t1.shape[1])\n', (12075, 12088), True, 'import numpy as np\n'), ((12108, 12129), 'numpy.int32', 'np.int32', (['t1.shape[0]'], {}), '(t1.shape[0])\n', (12116, 12129), True, 'import numpy as np\n'), ((12150, 12171), 'numpy.int32', 'np.int32', (['t2.shape[1]'], {}), '(t2.shape[1])\n', (12158, 12171), True, 'import numpy as np\n'), ((12191, 12212), 'numpy.int32', 'np.int32', (['t2.shape[0]'], {}), '(t2.shape[0])\n', (12199, 12212), True, 'import numpy as np\n'), ((12430, 12493), 'pyopencl.Buffer', 'cl.Buffer', (['CONTEXT', '(mf.READ_ONLY | mf.COPY_HOST_PTR)'], {'hostbuf': 't1'}), '(CONTEXT, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=t1)\n', (12439, 12493), True, 'import pyopencl as cl\n'), ((12545, 12608), 'pyopencl.Buffer', 'cl.Buffer', (['CONTEXT', '(mf.READ_ONLY | mf.COPY_HOST_PTR)'], {'hostbuf': 't2'}), '(CONTEXT, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=t2)\n', (12554, 12608), True, 'import pyopencl as cl\n'), ((12660, 12709), 'pyopencl.Buffer', 'cl.Buffer', (['CONTEXT', 'mf.WRITE_ONLY'], {'size': 'rt.nbytes'}), '(CONTEXT, mf.WRITE_ONLY, size=rt.nbytes)\n', (12669, 12709), True, 'import pyopencl as cl\n'), ((13094, 13128), 'pyopencl.enqueue_copy', 'cl.enqueue_copy', (['QUEUE', 'rt', 'rt_buf'], {}), '(QUEUE, rt, rt_buf)\n', (13109, 13128), True, 'import pyopencl as cl\n'), ((13192, 13216), 'numpy.dot', 'np.dot', (['t1.data', 't2.data'], {}), '(t1.data, t2.data)\n', (13198, 13216), True, 'import numpy as np\n'), ((13457, 13482), 'numpy.vdot', 'np.vdot', (['m1.data', 'm2.data'], {}), '(m1.data, m2.data)\n', (13464, 13482), True, 'import numpy as np\n'), ((13975, 13994), 'numpy.full', 'np.full', (['shape', 'val'], {}), '(shape, val)\n', (13982, 13994), True, 'import numpy as np\n'), ((15594, 15636), 'numpy.where', 'np.where', (['cond._data', 'fst._data', 'snd._data'], {}), '(cond._data, fst._data, 
snd._data)\n', (15602, 15636), True, 'import numpy as np\n'), ((15894, 15920), 'numpy.reshape', 'np.reshape', (['t._data', 'shape'], {}), '(t._data, shape)\n', (15904, 15920), True, 'import numpy as np\n'), ((16129, 16144), 'numpy.log', 'np.log', (['t._data'], {}), '(t._data)\n', (16135, 16144), True, 'import numpy as np\n'), ((16342, 16358), 'numpy.tanh', 'np.tanh', (['t._data'], {}), '(t._data)\n', (16349, 16358), True, 'import numpy as np\n'), ((16566, 16581), 'numpy.exp', 'np.exp', (['t._data'], {}), '(t._data)\n', (16572, 16581), True, 'import numpy as np\n'), ((17171, 17201), 'numpy.maximum', 'np.maximum', (['t._data', 'uts._data'], {}), '(t._data, uts._data)\n', (17181, 17201), True, 'import numpy as np\n'), ((17791, 17821), 'numpy.minimum', 'np.minimum', (['t._data', 'uts._data'], {}), '(t._data, uts._data)\n', (17801, 17821), True, 'import numpy as np\n'), ((18524, 18545), 'numpy.transpose', 'np.transpose', (['t._data'], {}), '(t._data)\n', (18536, 18545), True, 'import numpy as np\n'), ((18831, 18864), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'np.float32'}), '(shape, dtype=np.float32)\n', (18839, 18864), True, 'import numpy as np\n'), ((19146, 19186), 'numpy.zeros_like', 'np.zeros_like', (['t._data'], {'dtype': 'np.float32'}), '(t._data, dtype=np.float32)\n', (19159, 19186), True, 'import numpy as np\n'), ((1347, 1383), 'pyopencl.array.to_device', 'clarray.to_device', (['QUEUE', 'self._data'], {}), '(QUEUE, self._data)\n', (1364, 1383), True, 'import pyopencl.array as clarray\n'), ((2909, 2945), 'pyopencl.array.to_device', 'clarray.to_device', (['QUEUE', 'self._data'], {}), '(QUEUE, self._data)\n', (2926, 2945), True, 'import pyopencl.array as clarray\n'), ((13393, 13422), 'pyopencl.array.dot', 'clarray.dot', (['m1.data', 'm2.data'], {}), '(m1.data, m2.data)\n', (13404, 13422), True, 'import pyopencl.array as clarray\n'), ((15121, 15162), 'pyopencl.array.if_positive', 'clarray.if_positive', (['cond._data', 'fst', 'snd'], {}), '(cond._data, fst, snd)\n', (15140, 15162), True, 'import pyopencl.array as clarray\n'), ((15269, 15305), 'numpy.where', 'np.where', (['cond._data', 'fst', 'snd._data'], {}), '(cond._data, fst, snd._data)\n', (15277, 15305), True, 'import numpy as np\n'), ((15402, 15438), 'numpy.where', 'np.where', (['cond._data', 'fst._data', 'snd'], {}), '(cond._data, fst._data, snd)\n', (15410, 15438), True, 'import numpy as np\n'), ((15539, 15569), 'numpy.where', 'np.where', (['cond._data', 'fst', 'snd'], {}), '(cond._data, fst, snd)\n', (15547, 15569), True, 'import numpy as np\n'), ((15828, 15859), 'pyopencl.array.reshape', 'clarray.reshape', (['t._data', 'shape'], {}), '(t._data, shape)\n', (15843, 15859), True, 'import pyopencl.array as clarray\n'), ((16075, 16094), 'pyopencl.clmath.log', 'clmath.log', (['t._data'], {}), '(t._data)\n', (16085, 16094), True, 'import pyopencl.clmath as clmath\n'), ((16287, 16307), 'pyopencl.clmath.tanh', 'clmath.tanh', (['t._data'], {}), '(t._data)\n', (16298, 16307), True, 'import pyopencl.clmath as clmath\n'), ((16512, 16531), 'pyopencl.clmath.exp', 'clmath.exp', (['t._data'], {}), '(t._data)\n', (16522, 16531), True, 'import pyopencl.clmath as clmath\n'), ((17008, 17043), 'pyopencl.array.maximum', 'clarray.maximum', (['t._data', 'uts._data'], {}), '(t._data, uts._data)\n', (17023, 17043), True, 'import pyopencl.array as clarray\n'), ((17122, 17146), 'numpy.maximum', 'np.maximum', (['t._data', 'uts'], {}), '(t._data, uts)\n', (17132, 17146), True, 'import numpy as np\n'), ((17628, 17663), 'pyopencl.array.minimum', 
'clarray.minimum', (['t._data', 'uts._data'], {}), '(t._data, uts._data)\n', (17643, 17663), True, 'import pyopencl.array as clarray\n'), ((17742, 17766), 'numpy.minimum', 'np.minimum', (['t._data', 'uts'], {}), '(t._data, uts)\n', (17752, 17766), True, 'import numpy as np\n'), ((18463, 18489), 'pyopencl.array.transpose', 'clarray.transpose', (['t._data'], {}), '(t._data)\n', (18480, 18489), True, 'import pyopencl.array as clarray\n'), ((18757, 18796), 'pyopencl.array.zeros', 'clarray.zeros', (['QUEUE', 'shape', 'np.float32'], {}), '(QUEUE, shape, np.float32)\n', (18770, 18796), True, 'import pyopencl.array as clarray\n'), ((19084, 19111), 'pyopencl.array.zeros_like', 'clarray.zeros_like', (['t._data'], {}), '(t._data)\n', (19102, 19111), True, 'import pyopencl.array as clarray\n'), ((19941, 19980), 'pyopencl.clrandom.rand', 'clrandom.rand', (['QUEUE', 'shape', 'np.float32'], {}), '(QUEUE, shape, np.float32)\n', (19954, 19980), True, 'import pyopencl.clrandom as clrandom\n'), ((2127, 2163), 'pyopencl.array.to_device', 'clarray.to_device', (['QUEUE', 'self._data'], {}), '(QUEUE, self._data)\n', (2144, 2163), True, 'import pyopencl.array as clarray\n'), ((3019, 3038), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (3027, 3038), True, 'import numpy as np\n'), ((3225, 3261), 'pyopencl.array.to_device', 'clarray.to_device', (['QUEUE', 'self._data'], {}), '(QUEUE, self._data)\n', (3242, 3261), True, 'import pyopencl.array as clarray\n'), ((12280, 12302), 'numpy.empty', 'np.empty', (['(rt_h, rt_w)'], {}), '((rt_h, rt_w))\n', (12288, 12302), True, 'import numpy as np\n'), ((14334, 14387), 'pyopencl.array.if_positive', 'clarray.if_positive', (['cond._data', 'fst._data', 'snd._data'], {}), '(cond._data, fst._data, snd._data)\n', (14353, 14387), True, 'import pyopencl.array as clarray\n'), ((16941, 16969), 'pyopencl.array.maximum', 'clarray.maximum', (['t._data', 'ot'], {}), '(t._data, ot)\n', (16956, 16969), True, 'import pyopencl.array as clarray\n'), ((17561, 17589), 'pyopencl.array.minimum', 'clarray.minimum', (['t._data', 'ot'], {}), '(t._data, ot)\n', (17576, 17589), True, 'import pyopencl.array as clarray\n'), ((19684, 19712), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'shape'}), '(size=shape)\n', (19700, 19712), True, 'import numpy as np\n'), ((20122, 20143), 'numpy.random.rand', 'np.random.rand', (['shape'], {}), '(shape)\n', (20136, 20143), True, 'import numpy as np\n'), ((20669, 20708), 'numpy.random.uniform', 'np.random.uniform', (['min', 'max'], {'size': 'shape'}), '(min, max, size=shape)\n', (20686, 20708), True, 'import numpy as np\n'), ((13855, 13900), 'pyopencl.array.empty', 'clarray.empty', (['QUEUE', 'shape'], {'dtype': 'np.float32'}), '(QUEUE, shape, dtype=np.float32)\n', (13868, 13900), True, 'import pyopencl.array as clarray\n'), ((14618, 14663), 'pyopencl.array.empty', 'clarray.empty', (['QUEUE', 'shape'], {'dtype': 'np.float32'}), '(QUEUE, shape, dtype=np.float32)\n', (14631, 14663), True, 'import pyopencl.array as clarray\n'), ((16815, 16862), 'pyopencl.array.empty', 'clarray.empty', (['QUEUE', 't.shape'], {'dtype': 'np.float32'}), '(QUEUE, t.shape, dtype=np.float32)\n', (16828, 16862), True, 'import pyopencl.array as clarray\n'), ((17435, 17482), 'pyopencl.array.empty', 'clarray.empty', (['QUEUE', 't.shape'], {'dtype': 'np.float32'}), '(QUEUE, t.shape, dtype=np.float32)\n', (17448, 17482), True, 'import pyopencl.array as clarray\n'), ((19500, 19533), 'pyopencl.clrandom.PhiloxGenerator', 'clrandom.PhiloxGenerator', (['CONTEXT'], {}), 
'(CONTEXT)\n', (19524, 19533), True, 'import pyopencl.clrandom as clrandom\n'), ((20056, 20078), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (20070, 20078), True, 'import numpy as np\n'), ((20457, 20490), 'pyopencl.clrandom.PhiloxGenerator', 'clrandom.PhiloxGenerator', (['CONTEXT'], {}), '(CONTEXT)\n', (20481, 20490), True, 'import pyopencl.clrandom as clrandom\n'), ((14803, 14848), 'pyopencl.array.empty', 'clarray.empty', (['QUEUE', 'shape'], {'dtype': 'np.float32'}), '(QUEUE, shape, dtype=np.float32)\n', (14816, 14848), True, 'import pyopencl.array as clarray\n'), ((20945, 20965), 'pyopencl.array.max', 'clarray.max', (['t._data'], {}), '(t._data)\n', (20956, 20965), True, 'import pyopencl.array as clarray\n'), ((21160, 21180), 'pyopencl.array.min', 'clarray.min', (['t._data'], {}), '(t._data)\n', (21171, 21180), True, 'import pyopencl.array as clarray\n'), ((21371, 21391), 'pyopencl.array.sum', 'clarray.sum', (['t._data'], {}), '(t._data)\n', (21382, 21391), True, 'import pyopencl.array as clarray\n'), ((14960, 15005), 'pyopencl.array.empty', 'clarray.empty', (['QUEUE', 'shape'], {'dtype': 'np.float32'}), '(QUEUE, shape, dtype=np.float32)\n', (14973, 15005), True, 'import pyopencl.array as clarray\n'), ((15038, 15083), 'pyopencl.array.empty', 'clarray.empty', (['QUEUE', 'shape'], {'dtype': 'np.float32'}), '(QUEUE, shape, dtype=np.float32)\n', (15051, 15083), True, 'import pyopencl.array as clarray\n'), ((21584, 21604), 'pyopencl.array.sum', 'clarray.sum', (['t._data'], {}), '(t._data)\n', (21595, 21604), True, 'import pyopencl.array as clarray\n')]
|
# -*- coding: utf-8 -*-
"""SPARC4 spectral response tests.
This script tests the operation of the SPARC4 spectral response classes.
"""
import os
import numpy as np
import pandas as pd
import pytest
from AIS.SPARC4_Spectral_Response import (
Abstract_SPARC4_Spectral_Response,
Concrete_SPARC4_Spectral_Response_1,
Concrete_SPARC4_Spectral_Response_2,
Concrete_SPARC4_Spectral_Response_3,
Concrete_SPARC4_Spectral_Response_4,
)
wavelength_interval = range(350, 1150, 50)
n = len(wavelength_interval)
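# 16 sample wavelengths from 350 to 1100 in steps of 50 (presumably nm)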
specific_flux = np.ones((4, n))
ccd_transmitance_c1 = np.asarray(
pd.read_excel(os.path.join("SPARC4_Spectral_Response", "Channel 1", "ccd.xlsx"))
)[1:, 1]
ccd_transmitance_c1 = np.asarray([float(value) for value in ccd_transmitance_c1])
ccd_transmitance_c2 = np.asarray(
pd.read_excel(os.path.join("SPARC4_Spectral_Response", "Channel 2", "ccd.xlsx"))
)[1:, 1]
ccd_transmitance_c2 = np.asarray([float(value) for value in ccd_transmitance_c2])
ccd_transmitance_c3 = np.asarray(
pd.read_excel(os.path.join("SPARC4_Spectral_Response", "Channel 3", "ccd.xlsx"))
)[1:, 1]
ccd_transmitance_c3 = np.asarray([float(value) for value in ccd_transmitance_c3])
ccd_transmitance_c4 = np.asarray(
pd.read_excel(os.path.join("SPARC4_Spectral_Response", "Channel 4", "ccd.xlsx"))
)[1:, 1]
ccd_transmitance_c4 = np.asarray([float(value) for value in ccd_transmitance_c4])
# -------------------------------------------------------------------------------------------------------------
@pytest.fixture
def abs_s4_sr():
chc = Abstract_SPARC4_Spectral_Response()
chc.write_specific_flux(specific_flux, wavelength_interval)
return chc
@pytest.fixture
def c1_s4_sr():
chc = Concrete_SPARC4_Spectral_Response_1()
chc.write_specific_flux(specific_flux, wavelength_interval)
return chc
@pytest.fixture
def c2_s4_sr():
chc = Concrete_SPARC4_Spectral_Response_2()
chc.write_specific_flux(specific_flux, wavelength_interval)
return chc
@pytest.fixture
def c3_s4_sr():
chc = Concrete_SPARC4_Spectral_Response_3()
chc.write_specific_flux(specific_flux, wavelength_interval)
return chc
@pytest.fixture
def c4_s4_sr():
chc = Concrete_SPARC4_Spectral_Response_4()
chc.write_specific_flux(specific_flux, wavelength_interval)
return chc
# -------------------- Initialize the class -----------------------
def test_specific_flux_abs(abs_s4_sr):
vec = abs_s4_sr.get_specific_flux()
boolean_test = vec == specific_flux
assert boolean_test.all()
def test_specific_flux_c1(c1_s4_sr):
vec = c1_s4_sr.get_specific_flux()
boolean_test = vec == specific_flux
assert boolean_test.all()
# -------------------- Channel ID -----------------------
def test_channel_ID_abs(abs_s4_sr):
assert abs_s4_sr.get_channel_ID() == 0
def test_channel_ID_c1(c1_s4_sr):
assert c1_s4_sr.get_channel_ID() == 1
def test_channel_ID_c2(c2_s4_sr):
assert c2_s4_sr.get_channel_ID() == 2
def test_channel_ID_c3(c3_s4_sr):
assert c3_s4_sr.get_channel_ID() == 3
def test_channel_ID_c4(c4_s4_sr):
assert c4_s4_sr.get_channel_ID() == 4
# -------------------- Apply spectral response -----------------------
# def test_calibration_wheel(abs_s4_sr):
# abs_s4_sr.apply_calibration_wheel()
# vec = abs_s4_sr.get_specific_flux()
# boolean_test = vec == specific_flux
# assert boolean_test.all()
# def test_retarder(abs_s4_sr):
# abs_s4_sr.apply_retarder()
# vec = abs_s4_sr.get_specific_flux()
# boolean_test = vec == specific_flux
# assert boolean_test.all()
# def test_analyzer(abs_s4_sr):
# abs_s4_sr.apply_analyser()
# vec = abs_s4_sr.get_specific_flux()
# boolean_test = vec == specific_flux
# assert boolean_test.all()
# def test_collimator(abs_s4_sr):
# abs_s4_sr.apply_analyser()
# abs_s4_sr.apply_collimator()
# assert np.allclose(abs_s4_sr.specific_ordinary_ray, specific_flux[0, :])
# assert np.allclose(abs_s4_sr.specific_extra_ordinary_ray, specific_flux[0, :])
# def test_dichroic_abs(abs_s4_sr):
# abs_s4_sr.apply_analyser()
# abs_s4_sr.apply_dichroic()
# def test_dichroic_c1(c1_s4_sr):
# c1_s4_sr.apply_analyser()
# c1_s4_sr.apply_dichroic()
# def test_dichroic_c2(c2_s4_sr):
# c2_s4_sr.apply_analyser()
# c2_s4_sr.apply_dichroic()
# def test_dichroic_c3(c3_s4_sr):
# c3_s4_sr.apply_analyser()
# c3_s4_sr.apply_dichroic()
# def test_dichroic_c4(c4_s4_sr):
# c4_s4_sr.apply_analyser()
# c4_s4_sr.apply_dichroic()
# def test_camera_abs(abs_s4_sr):
# abs_s4_sr.apply_analyser()
# abs_s4_sr.apply_camera()
# assert np.allclose(abs_s4_sr.specific_ordinary_ray, specific_flux[0, :])
# assert np.allclose(abs_s4_sr.specific_extra_ordinary_ray, specific_flux[0, :])
# def test_camera_c1(c1_s4_sr):
# c1_s4_sr.apply_analyser()
# c1_s4_sr.apply_camera()
# assert np.allclose(c1_s4_sr.specific_ordinary_ray, specific_flux[0, :])
# assert np.allclose(c1_s4_sr.specific_extra_ordinary_ray, specific_flux[0, :])
# def test_camera_c2(c2_s4_sr):
# c2_s4_sr.apply_analyser()
# c2_s4_sr.apply_camera()
# assert np.allclose(c2_s4_sr.specific_ordinary_ray, specific_flux[0, :])
# assert np.allclose(c2_s4_sr.specific_extra_ordinary_ray, specific_flux[0, :])
# def test_camera_c3(c3_s4_sr):
# c3_s4_sr.apply_analyser()
# c3_s4_sr.apply_camera()
# assert np.allclose(c3_s4_sr.specific_ordinary_ray, specific_flux[0, :])
# assert np.allclose(c3_s4_sr.specific_extra_ordinary_ray, specific_flux[0, :])
# def test_camera_c4(c4_s4_sr):
# c4_s4_sr.apply_analyser()
# c4_s4_sr.apply_camera()
# assert np.allclose(c4_s4_sr.specific_ordinary_ray, specific_flux[0, :])
# assert np.allclose(c4_s4_sr.specific_extra_ordinary_ray, specific_flux[0, :])
# def test_ccd_abs(abs_s4_sr):
# abs_s4_sr.apply_analyser()
# abs_s4_sr.apply_ccd()
# assert np.allclose(abs_s4_sr.specific_ordinary_ray, specific_flux[0, :])
# assert np.allclose(abs_s4_sr.specific_extra_ordinary_ray, specific_flux[0, :])
# def test_ccd_c1(c1_s4_sr):
# new_specific_flux = specific_flux[0, :] * ccd_transmitance_c1 / 100
# c1_s4_sr.apply_analyser()
# c1_s4_sr.apply_ccd()
# assert np.allclose(c1_s4_sr.specific_ordinary_ray, new_specific_flux)
# assert np.allclose(c1_s4_sr.specific_extra_ordinary_ray, new_specific_flux)
# def test_ccd_c2(c2_s4_sr):
# new_specific_flux = specific_flux[0, :] * ccd_transmitance_c2 / 100
# c2_s4_sr.apply_analyser()
# c2_s4_sr.apply_ccd()
# assert np.allclose(c2_s4_sr.specific_ordinary_ray, new_specific_flux)
# assert np.allclose(c2_s4_sr.specific_extra_ordinary_ray, new_specific_flux)
# def test_ccd_c3(c3_s4_sr):
# new_specific_flux = specific_flux[0, :] * ccd_transmitance_c3 / 100
# c3_s4_sr.apply_analyser()
# c3_s4_sr.apply_ccd()
# assert np.allclose(c3_s4_sr.specific_ordinary_ray, new_specific_flux)
# assert np.allclose(c3_s4_sr.specific_extra_ordinary_ray, new_specific_flux)
# def test_ccd_c4(c4_s4_sr):
# new_specific_flux = specific_flux[0, :] * ccd_transmitance_c4 / 100
# c4_s4_sr.apply_analyser()
# c4_s4_sr.apply_ccd()
# assert np.allclose(c4_s4_sr.specific_ordinary_ray, new_specific_flux)
# assert np.allclose(c4_s4_sr.specific_extra_ordinary_ray, new_specific_flux)
# --------------------write specific_flux--------------------
def test_write_specific_flux():
specific_flux = np.asanyarray(
[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]
)
wavelength_interval = range(350, 1150, 50)
s4_sr = Abstract_SPARC4_Spectral_Response()
s4_sr.write_specific_flux(specific_flux, wavelength_interval)
boolean_test = s4_sr.specific_flux == specific_flux
assert boolean_test.all()
# ---------------------- get_specific_flux -----------------------------
def test_get_specific_flux(abs_s4_sr):
vec = abs_s4_sr.get_specific_flux()
boolean_test = vec.all() == specific_flux.all()
assert boolean_test.all()
# ----------------------- read_spreadsheet---------------------------
def test_read_spreadsheet_calibration_wheel(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "calibration_wheel.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_retarder(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "retarder.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_analyser_ordinary(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "analyser_ordinary.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_analyser_extra_ordinary(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "analyser_extra_ordinary.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_collimator(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "collimator.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_dichroic_1(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 0", "dichroic 1.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_dichroic_2(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 0", "dichroic 2.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_camera(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 0", "camera.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_ccd(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 0", "ccd.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_dichroic_1_1(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 1", "dichroic 1.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_dichroic_1_2(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 1", "dichroic 2.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_camera_1(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 1", "camera.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_ccd_1(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 1", "ccd.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_dichroic_2_1(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 2", "dichroic 1.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_dichroic_2_2(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 2", "dichroic 2.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_camera_2(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 2", "camera.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_ccd_2(abs_s4_sr):
file = "./SPARC4_Spectral_Response/Channel 2/ccd.xlsx"
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_dichroic_3_1(abs_s4_sr):
file = "./SPARC4_Spectral_Response/Channel 3/dichroic 2.xlsx"
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_dichroic_3_2(abs_s4_sr):
file = "./SPARC4_Spectral_Response/Channel 3/dichroic 2.xlsx"
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_camera_3(abs_s4_sr):
file = "./SPARC4_Spectral_Response/Channel 3/camera.xlsx"
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_ccd_3(abs_s4_sr):
file = "./SPARC4_Spectral_Response/Channel 3/ccd.xlsx"
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_dichroic_4_1(abs_s4_sr):
file = "./SPARC4_Spectral_Response/Channel 4/dichroic 1.xlsx"
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_dichroic_4_2(abs_s4_sr):
file = "./SPARC4_Spectral_Response/Channel 4/dichroic 2.xlsx"
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_camera_4(abs_s4_sr):
file = "./SPARC4_Spectral_Response/Channel 4/camera.xlsx"
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_ccd_4(abs_s4_sr):
file = "./SPARC4_Spectral_Response/Channel 4/ccd.xlsx"
abs_s4_sr._read_spreadsheet(file)
# ----------------------- miscellaneous ----------------------------
def test_multiply_matrices(abs_s4_sr):
a = np.ones((4, 4))
specific_flux = abs_s4_sr._multiply_matrices(a, a)
boolean_test = specific_flux == a
assert boolean_test.all()
def test_calculate_spline():
transmitance = np.ones((1, n))[0]
chc = Abstract_SPARC4_Spectral_Response()
chc.write_specific_flux(specific_flux, wavelength_interval)
new_transmitance = chc._calculate_spline(transmitance, wavelength_interval)
assert np.allclose(new_transmitance, transmitance)
# def test_get_specific_ordinary_ray(abs_s4_sr):
# abs_s4_sr.apply_analyser()
# ord_ray = abs_s4_sr.get_specific_ordinary_ray()
# assert np.allclose(ord_ray, specific_flux[0, :])
# def test_get_specific_extra_ordinary_ray(abs_s4_sr):
# abs_s4_sr.apply_analyser()
# eord_ray = abs_s4_sr.get_specific_extra_ordinary_ray()
# assert np.allclose(eord_ray, specific_flux[0, :])
|
[
"AIS.SPARC4_Spectral_Response.Concrete_SPARC4_Spectral_Response_3",
"AIS.SPARC4_Spectral_Response.Concrete_SPARC4_Spectral_Response_4",
"numpy.allclose",
"numpy.asanyarray",
"numpy.ones",
"AIS.SPARC4_Spectral_Response.Concrete_SPARC4_Spectral_Response_1",
"AIS.SPARC4_Spectral_Response.Concrete_SPARC4_Spectral_Response_2",
"os.path.join",
"AIS.SPARC4_Spectral_Response.Abstract_SPARC4_Spectral_Response"
] |
[((539, 554), 'numpy.ones', 'np.ones', (['(4, n)'], {}), '((4, n))\n', (546, 554), True, 'import numpy as np\n'), ((1556, 1591), 'AIS.SPARC4_Spectral_Response.Abstract_SPARC4_Spectral_Response', 'Abstract_SPARC4_Spectral_Response', ([], {}), '()\n', (1589, 1591), False, 'from AIS.SPARC4_Spectral_Response import Abstract_SPARC4_Spectral_Response, Concrete_SPARC4_Spectral_Response_1, Concrete_SPARC4_Spectral_Response_2, Concrete_SPARC4_Spectral_Response_3, Concrete_SPARC4_Spectral_Response_4\n'), ((1715, 1752), 'AIS.SPARC4_Spectral_Response.Concrete_SPARC4_Spectral_Response_1', 'Concrete_SPARC4_Spectral_Response_1', ([], {}), '()\n', (1750, 1752), False, 'from AIS.SPARC4_Spectral_Response import Abstract_SPARC4_Spectral_Response, Concrete_SPARC4_Spectral_Response_1, Concrete_SPARC4_Spectral_Response_2, Concrete_SPARC4_Spectral_Response_3, Concrete_SPARC4_Spectral_Response_4\n'), ((1876, 1913), 'AIS.SPARC4_Spectral_Response.Concrete_SPARC4_Spectral_Response_2', 'Concrete_SPARC4_Spectral_Response_2', ([], {}), '()\n', (1911, 1913), False, 'from AIS.SPARC4_Spectral_Response import Abstract_SPARC4_Spectral_Response, Concrete_SPARC4_Spectral_Response_1, Concrete_SPARC4_Spectral_Response_2, Concrete_SPARC4_Spectral_Response_3, Concrete_SPARC4_Spectral_Response_4\n'), ((2037, 2074), 'AIS.SPARC4_Spectral_Response.Concrete_SPARC4_Spectral_Response_3', 'Concrete_SPARC4_Spectral_Response_3', ([], {}), '()\n', (2072, 2074), False, 'from AIS.SPARC4_Spectral_Response import Abstract_SPARC4_Spectral_Response, Concrete_SPARC4_Spectral_Response_1, Concrete_SPARC4_Spectral_Response_2, Concrete_SPARC4_Spectral_Response_3, Concrete_SPARC4_Spectral_Response_4\n'), ((2198, 2235), 'AIS.SPARC4_Spectral_Response.Concrete_SPARC4_Spectral_Response_4', 'Concrete_SPARC4_Spectral_Response_4', ([], {}), '()\n', (2233, 2235), False, 'from AIS.SPARC4_Spectral_Response import Abstract_SPARC4_Spectral_Response, Concrete_SPARC4_Spectral_Response_1, Concrete_SPARC4_Spectral_Response_2, Concrete_SPARC4_Spectral_Response_3, Concrete_SPARC4_Spectral_Response_4\n'), ((7512, 7583), 'numpy.asanyarray', 'np.asanyarray', (['[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]'], {}), '([[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]])\n', (7525, 7583), True, 'import numpy as np\n'), ((7657, 7692), 'AIS.SPARC4_Spectral_Response.Abstract_SPARC4_Spectral_Response', 'Abstract_SPARC4_Spectral_Response', ([], {}), '()\n', (7690, 7692), False, 'from AIS.SPARC4_Spectral_Response import Abstract_SPARC4_Spectral_Response, Concrete_SPARC4_Spectral_Response_1, Concrete_SPARC4_Spectral_Response_2, Concrete_SPARC4_Spectral_Response_3, Concrete_SPARC4_Spectral_Response_4\n'), ((8224, 8290), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""calibration_wheel.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'calibration_wheel.xlsx')\n", (8236, 8290), False, 'import os\n'), ((8389, 8446), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""retarder.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'retarder.xlsx')\n", (8401, 8446), False, 'import os\n'), ((8554, 8620), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""analyser_ordinary.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'analyser_ordinary.xlsx')\n", (8566, 8620), False, 'import os\n'), ((8734, 8806), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""analyser_extra_ordinary.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'analyser_extra_ordinary.xlsx')\n", (8746, 8806), False, 'import os\n'), ((8907, 
8966), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""collimator.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'collimator.xlsx')\n", (8919, 8966), False, 'import os\n'), ((9067, 9139), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""Channel 0"""', '"""dichroic 1.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'Channel 0', 'dichroic 1.xlsx')\n", (9079, 9139), False, 'import os\n'), ((9240, 9312), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""Channel 0"""', '"""dichroic 2.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'Channel 0', 'dichroic 2.xlsx')\n", (9252, 9312), False, 'import os\n'), ((9409, 9477), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""Channel 0"""', '"""camera.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'Channel 0', 'camera.xlsx')\n", (9421, 9477), False, 'import os\n'), ((9571, 9636), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""Channel 0"""', '"""ccd.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'Channel 0', 'ccd.xlsx')\n", (9583, 9636), False, 'import os\n'), ((9739, 9811), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""Channel 1"""', '"""dichroic 1.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'Channel 1', 'dichroic 1.xlsx')\n", (9751, 9811), False, 'import os\n'), ((9914, 9986), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""Channel 1"""', '"""dichroic 2.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'Channel 1', 'dichroic 2.xlsx')\n", (9926, 9986), False, 'import os\n'), ((10085, 10153), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""Channel 1"""', '"""camera.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'Channel 1', 'camera.xlsx')\n", (10097, 10153), False, 'import os\n'), ((10249, 10314), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""Channel 1"""', '"""ccd.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'Channel 1', 'ccd.xlsx')\n", (10261, 10314), False, 'import os\n'), ((10417, 10489), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""Channel 2"""', '"""dichroic 1.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'Channel 2', 'dichroic 1.xlsx')\n", (10429, 10489), False, 'import os\n'), ((10592, 10664), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""Channel 2"""', '"""dichroic 2.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'Channel 2', 'dichroic 2.xlsx')\n", (10604, 10664), False, 'import os\n'), ((10763, 10831), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""Channel 2"""', '"""camera.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'Channel 2', 'camera.xlsx')\n", (10775, 10831), False, 'import os\n'), ((12344, 12359), 'numpy.ones', 'np.ones', (['(4, 4)'], {}), '((4, 4))\n', (12351, 12359), True, 'import numpy as np\n'), ((12562, 12597), 'AIS.SPARC4_Spectral_Response.Abstract_SPARC4_Spectral_Response', 'Abstract_SPARC4_Spectral_Response', ([], {}), '()\n', (12595, 12597), False, 'from AIS.SPARC4_Spectral_Response import Abstract_SPARC4_Spectral_Response, Concrete_SPARC4_Spectral_Response_1, Concrete_SPARC4_Spectral_Response_2, Concrete_SPARC4_Spectral_Response_3, Concrete_SPARC4_Spectral_Response_4\n'), ((12753, 12796), 'numpy.allclose', 'np.allclose', (['new_transmitance', 'transmitance'], {}), '(new_transmitance, transmitance)\n', (12764, 12796), True, 'import numpy as np\n'), ((12533, 12548), 'numpy.ones', 'np.ones', (['(1, n)'], {}), '((1, n))\n', (12540, 12548), True, 'import 
numpy as np\n'), ((608, 673), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""Channel 1"""', '"""ccd.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'Channel 1', 'ccd.xlsx')\n", (620, 673), False, 'import os\n'), ((819, 884), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""Channel 2"""', '"""ccd.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'Channel 2', 'ccd.xlsx')\n", (831, 884), False, 'import os\n'), ((1030, 1095), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""Channel 3"""', '"""ccd.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'Channel 3', 'ccd.xlsx')\n", (1042, 1095), False, 'import os\n'), ((1241, 1306), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""Channel 4"""', '"""ccd.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'Channel 4', 'ccd.xlsx')\n", (1253, 1306), False, 'import os\n')]
|
import numpy
import pandas
import requests
from bs4 import BeautifulSoup as bsoup
from time import sleep
from random import randint
# start and end of urls for imdb top 1000 movies site
URL_START = "https://www.imdb.com/search/title/?groups=top_1000&start="
URL_END = "&ref_=adv_nxt"
# data for each movie
titles = []
years = []
runtimes = []
ratings = []
metascores = []
votes = []
grosses = []
headers = {"Accept-Language": "en-US, en;q=0.5"}
pages = numpy.arange(1,1001,50)
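# each results page lists 50 titles, so start offsets run 1, 51, ..., 951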
for page in pages:
cur_page = requests.get(URL_START + str(page) + URL_END, headers = headers)
soup = bsoup(cur_page.text, "html.parser")
# find all divs containing data for each movie
movie_divs = soup.find_all('div', class_='lister-item mode-advanced')
for div in movie_divs:
name = div.h3.a.text
titles.append(name)
year = div.h3.find('span', class_='lister-item-year').text
years.append(year)
runtime = div.p.find('span', class_='runtime').text
runtimes.append(runtime)
rating = float(div.strong.text)
ratings.append(rating)
score = div.find('span', class_='metascore').text if div.find('span', class_='metascore') else '-'
metascores.append(score)
# nv contains the class for both the votes and gross (if it is present) <span> tags
nv = div.find_all('span', attrs={'name': 'nv'})
vote = nv[0].text
votes.append(vote)
gross = nv[1].text if len(nv) > 1 else '-'
grosses.append(gross)
    # slow down crawling of imdb site to avoid disrupting website activity
sleep(randint(2,8))
movies = pandas.DataFrame({
'movie': titles,
'year': years,
'runtime': runtimes,
'imdb': ratings,
'metascore': metascores,
'votes': votes,
'grossMillions': grosses,
})
# CLEANING DATA
# extract the digits from year (dropping the brackets) and cast string to int
movies['year'] = movies['year'].str.extract(r'(\d+)').astype(int)
# remove ' min' from runtime and cast string to int
movies['runtime'] = movies['runtime'].str.extract(r'(\d+)').astype(int)
# convert metascore to numeric and transform dashes into NaN values
movies['metascore'] = pandas.to_numeric(movies['metascore'], errors='coerce')
# remove commas from votes and cast string to int
movies['votes'] = movies['votes'].str.replace(',', '').astype(int)
# strip '$' and 'M' from the grossMillions strings
movies['grossMillions'] = movies['grossMillions'].map(lambda x: x.lstrip('$').rstrip('M'))
# convert grossMillions to numeric (float) and transform dashes into NaN values
movies['grossMillions'] = pandas.to_numeric(movies['grossMillions'], errors='coerce')
movies.to_csv('movies.csv')
|
[
"pandas.DataFrame",
"random.randint",
"numpy.arange",
"bs4.BeautifulSoup",
"pandas.to_numeric"
] |
[((456, 481), 'numpy.arange', 'numpy.arange', (['(1)', '(1001)', '(50)'], {}), '(1, 1001, 50)\n', (468, 481), False, 'import numpy\n'), ((1634, 1797), 'pandas.DataFrame', 'pandas.DataFrame', (["{'movie': titles, 'year': years, 'runtime': runtimes, 'imdb': ratings,\n 'metascore': metascores, 'votes': votes, 'grossMillions': grosses}"], {}), "({'movie': titles, 'year': years, 'runtime': runtimes,\n 'imdb': ratings, 'metascore': metascores, 'votes': votes,\n 'grossMillions': grosses})\n", (1650, 1797), False, 'import pandas\n'), ((2180, 2235), 'pandas.to_numeric', 'pandas.to_numeric', (["movies['metascore']"], {'errors': '"""coerce"""'}), "(movies['metascore'], errors='coerce')\n", (2197, 2235), False, 'import pandas\n'), ((2616, 2675), 'pandas.to_numeric', 'pandas.to_numeric', (["movies['grossMillions']"], {'errors': '"""coerce"""'}), "(movies['grossMillions'], errors='coerce')\n", (2633, 2675), False, 'import pandas\n'), ((591, 626), 'bs4.BeautifulSoup', 'bsoup', (['cur_page.text', '"""html.parser"""'], {}), "(cur_page.text, 'html.parser')\n", (596, 626), True, 'from bs4 import BeautifulSoup as bsoup\n'), ((1610, 1623), 'random.randint', 'randint', (['(2)', '(8)'], {}), '(2, 8)\n', (1617, 1623), False, 'from random import randint\n')]
|
#########################################################################
### Program clean tweets ###
### 1. spaCy POS tagging for relevant tweets (apple fruit vs iphone) ###
### 2. Sentiment analysis of tweets ###
### 3. Group tweets by date ###
### 4. Process tweets by removing URLs, hashtags, emoticons ###
### 5. Feature engineering ###
### 6. Tokenise, remove stopwords, lemmatise tweets ###
### 7. Join with prices, derive price features and target label ###
### Output 1 pickle per ticker ###
#########################################################################
""" Copyright 2017, <NAME>, All rights reserved. """
## Credit for NLP cleaning portion
import pandas as pd
import numpy as np
import json
import string
import ast
from datetime import timedelta
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk import word_tokenize
# nltk.download('stopwords')
# nltk.download('punkt')
# nltk.download('wordnet')
# nltk.download('averaged_perceptron_tagger')
stoplist = stopwords.words('english')
my_stopwords = "multiExclamation multiQuestion multiStop url atUser st rd nd th am pm" # my extra stopwords
stoplist = stoplist + my_stopwords.split()
lemmatizer = WordNetLemmatizer() # set lemmatizer
from techniques import *
import spacy
from spacy import displacy
import en_core_web_sm
nlp = en_core_web_sm.load()
from nltk.sentiment.vader import SentimentIntensityAnalyzer
analyser = SentimentIntensityAnalyzer()
# Remove 5 companies: CAT, DIS, DOW, TRV, WBA
ticker = ["MMM OR 3M", "AXP OR American Express", "AAPL OR Apple", "BA OR Boeing", \
"CVX OR Chevron", "CSCO OR Cisco", "KO OR Coca-Cola", "XOM OR Exxon Mobil", \
"GS OR Goldman Sachs", "HD OR Home Depot", "IBM", "INTC OR Intel", \
"JNJ OR Johnson & Johnson", "JPM OR JPMorgan Chase", "MCD OR McDonald's", \
"MRK OR Merck", "MSFT OR Microsoft", "NKE OR Nike", "PFE OR Pfizer", \
"PG OR Procter & Gamble", "UTX OR United Technologies", "UNH OR UnitedHealth", \
"VZ OR Verizon", "V OR Visa", "WMT OR Wal-Mart"]
ticker_symbol = ["MMM", "AXP", "AAPL", "BA", \
"CVX", "CSCO", "KO", "XOM", \
"GS", "HD", "IBM", "INTC", \
"JNJ", "JPM", "MCD", \
"MRK", "MSFT", "NKE", "PFE", \
"PG", "UTX", "UNH",
"VZ", "V", "WMT"]
########################################################################
### 1. spaCy POS tagging for relevant tweets (apple fruit vs iphone) ###
########################################################################
def spacy_pos(df, name):
'''
POS-tag each token and filter for texts with "ORG" label
Parameters
----------
df (pandas DataFrame)
name (string) ticker name
Returns
-------
the processed pandas DataFrame
'''
def find_org(text, name):
doc = nlp(text)
for ent in doc.ents:
# print(ent.text, ent.label_)
if (ent.text.lower()==name.lower()) & (ent.label_=='ORG'):
return True
return False
df['relevant'] = [find_org(text,name) for text in df['text']]
print("Before:", df.shape)
df = df[(df['relevant']==True)]
print("After:", df.shape)
return df
########################################################################
### 2. Sentiment analysis of tweets ###
### 3. Group tweets by date ###
########################################################################
def group_tweets_by_date(df, symbol, name):
'''
Aggregate all columns after grouping rows by dates.
Shift weekend tweets to following Monday.
Parameters
----------
df (pandas DataFrame)
symbol (string) ticker symbol eg. AAPL
name (string) ticker name eg. Apple
Returns
-------
the processed pandas DataFrame
'''
df_filter = df[["text", "hashtags", "likes", "replies", "parent_tweet_id", "timestamp"]]
df_filter.likes = df.likes.astype('int64')
df_filter.replies = df.replies.astype('int64')
# remove retweets
df_filter = df_filter[df_filter.parent_tweet_id.isnull()]
df_filter['hashtags'] = df_filter['hashtags'].apply(ast.literal_eval)
df_filter['hashtags'] = df_filter['hashtags'].apply(lambda x : ','.join(x))
df_filter['timestamp'] = pd.to_datetime(df_filter['timestamp'])
df_filter['day'] = df_filter['timestamp'].dt.dayofweek
df_filter['vader'] = [analyser.polarity_scores(tweet)['compound'] for tweet in df_filter['text']]
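    # VADER 'compound' is a normalised per-tweet sentiment score in [-1, 1]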
# carry forward weekend tweets to following Monday (1 or 2 days)
df_filter['stock_date'] = np.where(df_filter['day']>4,
df_filter['timestamp'] + pd.to_timedelta(7-df_filter['day'], unit='d'),
df_filter['timestamp']
)
# group tweets by dates
df_filter['stock_date'] = df_filter['stock_date'].dt.date
df_filter = df_filter.groupby(df_filter['stock_date']).agg({'text': lambda x: ','.join(x),
'hashtags': lambda x: ','.join(x),
'likes':'sum',
'replies': 'sum',
'vader': 'mean'
})
df_filter['hashtags'] = df_filter['hashtags'].apply(lambda hashtags: list(filter(None, hashtags.split(','))))
df_filter['text_removeCompany'] = df_filter.text.str.replace(symbol+' ','')
name = name.lower()
df_filter['text_removeCompany'] = df_filter.text_removeCompany.str.lower().str.replace(name+" ",'')
df_filter = df_filter.reset_index(drop=False)
return df_filter
########################################################################
### 6. Tokenise, remove stopwords, lemmatise tweets ###
########################################################################
def tokenize(text):
'''
Tokenise texts, remove stopwords, lemmatise word.
Parameters
----------
text (string)
Returns
-------
list of tokens (string)
'''
onlyOneSentenceTokens = [] # tokens of one sentence each time
tokens = word_tokenize(text)
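    # replaceNegations, replaceElongated and the other replace*/count* helpers
    # are presumably provided by the techniques module imported above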
tokens = replaceNegations(tokens)
translator = str.maketrans('', '', string.punctuation)
text = text.translate(translator) # Remove punctuation
tokens = nltk.word_tokenize(text)
for w in tokens:
if (w not in stoplist):
final_word = w.lower()
final_word = replaceElongated(final_word)
final_word = lemmatizer.lemmatize(final_word)
onlyOneSentenceTokens.append(final_word)
onlyOneSentence = " ".join(onlyOneSentenceTokens) # form again the sentence from the list of tokens
return onlyOneSentenceTokens
########################################################################
### 4. Process tweets by removing URLs, hashtags, emoticons ###
### 5. Feature engineering of numerical features ###
########################################################################
# A clean tweet should not contain URLs, hashtags (i.e. #happy) or mentions (i.e. @BarackObama)
def clean_dirty_tweets(text_series):
'''
Clean tweets before tokenisation.
Parameters
----------
text_series (pandas Series)
Returns
-------
the pandas DataFrame containing processed text
and other engineered features
'''
clean_tweets = []
for text in text_series:
totalEmoticons = 0
totalSlangs = 0
totalSlangsFound = []
totalElongated = 0
totalMultiExclamationMarks = 0
totalMultiQuestionMarks = 0
totalMultiStopMarks = 0
totalAllCaps = 0
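        # per-text counters; they become the engineered numeric columns in the output frame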
text = removeUnicode(text)
text = replaceURL(text)
text = replaceAtUser(text)
text = removeWholeHashtag(text)
temp_slangs, temp_slangsFound = countSlang(text)
totalSlangs += temp_slangs
for word in temp_slangsFound:
totalSlangsFound.append(word) # all the slangs found in all sentences
text = replaceSlang(text)
text = replaceContraction(text)
text = removeNumbers(text)
emoticons = countEmoticons(text)
totalEmoticons += emoticons
text = removeEmoticons(text)
totalAllCaps += countAllCaps(text)
totalMultiExclamationMarks += countMultiExclamationMarks(text)
totalMultiQuestionMarks += countMultiQuestionMarks(text)
totalMultiStopMarks += countMultiStopMarks(text)
text = replaceMultiExclamationMark(text)
text = replaceMultiQuestionMark(text)
text = replaceMultiStopMark(text)
totalElongated += countElongated(text)
tokenized_tweet = tokenize(text)
clean_tweets.append([tokenized_tweet, totalEmoticons, totalSlangs,
totalSlangsFound, totalElongated, totalMultiExclamationMarks,
totalMultiQuestionMarks, totalMultiStopMarks, totalAllCaps])
# form new dataframe
df_clean_tweets = pd.DataFrame(clean_tweets,columns=['tokenized_tweet', 'totalEmoticons', 'totalSlangs',
'totalSlangsFound', 'totalElongated', 'totalMultiExclamationMarks',
'totalMultiQuestionMarks', 'totalMultiStopMarks', 'totalAllCaps'])
return df_clean_tweets
# def spellcheck(tweet):
# tweet_spellchecked = []
# print(len(tweet))
# for word in tweet:
# if len(word)>1:
# word = spellCorrection(word) # Technique 12: correction of spelling errors
# tweet_spellchecked.append(word)
# return tweet_spellchecked
price_labels = pd.read_csv("../../Raw Data/Price/price_labels.csv")
for i in range(len(ticker_symbol)):
df = pd.read_csv('../Raw Data/Tweets/'+ticker_symbol[i]+'_tweets.csv')
print("Now cleaning:", ticker_symbol[i])
print("Check pos tag...")
if ticker_symbol[i] in ['JPM', "MMM", "KO", "JNJ", "PFE", "TRV", "V", "UNH"]:
df_filter = df
else:
df_filter = spacy_pos(df, ticker_name[i])
print("Group tweets by date...")
df_filter = group_tweets_by_date(df, ticker_symbol[i], ticker_name[i])
print("Number of records (weekdays):", df_filter.shape)
print("Process raw tweets...")
df_clean_tweets = clean_dirty_tweets(df_filter.text_removeCompany)
# # spell_check_col = [spellcheck(tweet) for tweet in df_clean_tweets['tokenized_tweet']]
# # print("spell check")
# # df_clean_tweets['tokenized_tweet_spellcheck'] = spell_check_col
# Join original df with df from tokenising + results
df_tweets_final = pd.concat([df_filter, df_clean_tweets], axis = 1)
####################################################################
### 7. Join with prices, derive price features and target label ###
####################################################################
price_labels_xticker = price_labels[price_labels['Ticker']==ticker_symbol[i]][['Date', "Adj Close"]]
print("Number of business days:", price_labels_xticker.shape)
price_labels_xticker.loc[:,'Date'] = pd.to_datetime(price_labels_xticker['Date']).dt.date
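    # hist_returns: trailing 1-day log10 return; returns5: forward 5-day log10 return;
    # label5: +1 when the forward 5-day return is non-negative, else -1 (target label)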
price_labels_xticker.loc[:,'hist_returns'] = np.log10(price_labels_xticker['Adj Close']/price_labels_xticker['Adj Close'].shift())
price_labels_xticker.loc[:,'returns5'] = np.log10(price_labels_xticker['Adj Close'].shift(-5)/price_labels_xticker['Adj Close'])
price_labels_xticker.loc[:,'label5'] = np.where(price_labels_xticker['returns5']>=0,1,-1)
joined_df = price_labels_xticker.join(df_tweets_final.set_index("stock_date"), on='Date', how='left')
print("Longest NaN period:", joined_df.text.isnull().astype(int).groupby(joined_df.text.notnull().astype(int).cumsum()).sum().max())
# joined_df = joined_df.drop(['Unnamed: 0', 'Unnamed: 0.1'], axis=1)
joined_df['Date'] = pd.to_datetime(joined_df['Date'])
joined_df['Year'] = joined_df.Date.dt.year
joined_df['Month'] = joined_df.Date.dt.month
joined_df['vader_standardise'] = (joined_df['vader']-joined_df['vader'].expanding().mean())/joined_df['vader'].expanding().std()
joined_df['vader3'] = joined_df['vader_standardise'].rolling(window=3, min_periods=2).sum()
joined_df.to_pickle("../../Processed Data/Tweets/"+ticker_symbol[i]+"_df.pkl")
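# A minimal, self-contained sketch of the forward-return labelling used in the loop above:
# log10 returns over a 5-day horizon, mapped to a +1/-1 label with np.where. The price list in
# the example comment is made up purely for illustration; pd/np are the pandas and numpy
# aliases this script already imports.
def example_forward_label(adj_close, horizon=5):
    prices = pd.Series(adj_close, dtype="float64")
    fwd_returns = np.log10(prices.shift(-horizon) / prices)
    labels = np.where(fwd_returns >= 0, 1, -1)
    return fwd_returns, labels
# e.g. example_forward_label([100, 101, 99, 102, 103, 105, 104, 101]) yields forward returns for the first three days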
|
[
"pandas.DataFrame",
"nltk.stem.WordNetLemmatizer",
"nltk.sentiment.vader.SentimentIntensityAnalyzer",
"pandas.read_csv",
"numpy.where",
"pandas.to_datetime",
"pandas.to_timedelta",
"nltk.corpus.stopwords.words",
"en_core_web_sm.load",
"pandas.concat",
"nltk.word_tokenize"
] |
[((1269, 1295), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (1284, 1295), False, 'from nltk.corpus import stopwords\n'), ((1460, 1479), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (1477, 1479), False, 'from nltk.stem import WordNetLemmatizer\n'), ((1592, 1613), 'en_core_web_sm.load', 'en_core_web_sm.load', ([], {}), '()\n', (1611, 1613), False, 'import en_core_web_sm\n'), ((1686, 1714), 'nltk.sentiment.vader.SentimentIntensityAnalyzer', 'SentimentIntensityAnalyzer', ([], {}), '()\n', (1712, 1714), False, 'from nltk.sentiment.vader import SentimentIntensityAnalyzer\n'), ((10368, 10420), 'pandas.read_csv', 'pd.read_csv', (['"""../../Raw Data/Price/price_labels.csv"""'], {}), "('../../Raw Data/Price/price_labels.csv')\n", (10379, 10420), True, 'import pandas as pd\n'), ((4735, 4773), 'pandas.to_datetime', 'pd.to_datetime', (["df_filter['timestamp']"], {}), "(df_filter['timestamp'])\n", (4749, 4773), True, 'import pandas as pd\n'), ((6792, 6811), 'nltk.word_tokenize', 'word_tokenize', (['text'], {}), '(text)\n', (6805, 6811), False, 'from nltk import word_tokenize\n'), ((6984, 7008), 'nltk.word_tokenize', 'nltk.word_tokenize', (['text'], {}), '(text)\n', (7002, 7008), False, 'import nltk\n'), ((9751, 9985), 'pandas.DataFrame', 'pd.DataFrame', (['clean_tweets'], {'columns': "['tokenized_tweet', 'totalEmoticons', 'totalSlangs', 'totalSlangsFound',\n 'totalElongated', 'totalMultiExclamationMarks',\n 'totalMultiQuestionMarks', 'totalMultiStopMarks', 'totalAllCaps']"}), "(clean_tweets, columns=['tokenized_tweet', 'totalEmoticons',\n 'totalSlangs', 'totalSlangsFound', 'totalElongated',\n 'totalMultiExclamationMarks', 'totalMultiQuestionMarks',\n 'totalMultiStopMarks', 'totalAllCaps'])\n", (9763, 9985), True, 'import pandas as pd\n'), ((10467, 10536), 'pandas.read_csv', 'pd.read_csv', (["('../Raw Data/Tweets/' + ticker_symbol[i] + '_tweets.csv')"], {}), "('../Raw Data/Tweets/' + ticker_symbol[i] + '_tweets.csv')\n", (10478, 10536), True, 'import pandas as pd\n'), ((11350, 11397), 'pandas.concat', 'pd.concat', (['[df_filter, df_clean_tweets]'], {'axis': '(1)'}), '([df_filter, df_clean_tweets], axis=1)\n', (11359, 11397), True, 'import pandas as pd\n'), ((12200, 12254), 'numpy.where', 'np.where', (["(price_labels_xticker['returns5'] >= 0)", '(1)', '(-1)'], {}), "(price_labels_xticker['returns5'] >= 0, 1, -1)\n", (12208, 12254), True, 'import numpy as np\n'), ((12596, 12629), 'pandas.to_datetime', 'pd.to_datetime', (["joined_df['Date']"], {}), "(joined_df['Date'])\n", (12610, 12629), True, 'import pandas as pd\n'), ((5131, 5178), 'pandas.to_timedelta', 'pd.to_timedelta', (["(7 - df_filter['day'])"], {'unit': '"""d"""'}), "(7 - df_filter['day'], unit='d')\n", (5146, 5178), True, 'import pandas as pd\n'), ((11836, 11880), 'pandas.to_datetime', 'pd.to_datetime', (["price_labels_xticker['Date']"], {}), "(price_labels_xticker['Date'])\n", (11850, 11880), True, 'import pandas as pd\n')]
|
# <Copyright 2022, Argo AI, LLC. Released under the MIT license.>
"""Generate MP4 videos with map entities rendered on top of sensor imagery, for all cameras, for a single log.
We use an inferred depth map from LiDAR to render only visible map entities (lanes and pedestrian crossings).
"""
import logging
import os
import sys
import time
from pathlib import Path
from typing import Final, List, Tuple
import click
import numpy as np
import av2.geometry.interpolate as interp_utils
import av2.rendering.video as video_utils
import av2.utils.io as io_utils
import av2.utils.raster as raster_utils
from av2.datasets.sensor.av2_sensor_dataloader import AV2SensorDataLoader
from av2.datasets.sensor.constants import RingCameras
from av2.map.map_api import ArgoverseStaticMap
from av2.rendering.color import BLUE_BGR
from av2.rendering.map import EgoViewMapRenderer
from av2.utils.typing import NDArrayByte
RING_CAMERA_FPS: Final[int] = 20
logger = logging.getLogger(__name__)
def generate_egoview_overlaid_map(
data_root: Path,
output_dir: Path,
log_id: str,
max_range_m: float,
use_depth_map_for_occlusion: bool,
dump_single_frames: bool,
cam_names: List[RingCameras],
) -> None:
"""Render the map from a particular camera's viewpoint for each camera frame.
Args:
data_root: path to where the AV2 logs live.
output_dir: path to directory where renderings will be saved.
log_id: unique ID for AV2 scenario/log.
max_range_m: maximum range of map entities from egovehicle to consider for rendering (by l-infinity norm).
use_depth_map_for_occlusion: whether to use an inferred depth map for rendering occluded elements.
dump_single_frames: Whether to save to disk individual RGB frames of the rendering, in addition to generating
the mp4 file.
cam_names: list of camera names. For each camera, its viewport will be used to render the map.
"""
loader = AV2SensorDataLoader(data_dir=data_root, labels_dir=data_root)
log_map_dirpath = data_root / log_id / "map"
avm = ArgoverseStaticMap.from_map_dir(log_map_dirpath, build_raster=True)
for _, cam_enum in enumerate(cam_names):
cam_name = cam_enum.value
pinhole_cam = loader.get_log_pinhole_camera(log_id, cam_name)
cam_im_fpaths = loader.get_ordered_log_cam_fpaths(log_id, cam_name)
num_cam_imgs = len(cam_im_fpaths)
video_list = []
for i, img_fpath in enumerate(cam_im_fpaths):
if i % 50 == 0:
logging.info(f"\tOn file {i}/{num_cam_imgs} of camera {cam_name} of {log_id}")
cam_timestamp_ns = int(img_fpath.stem)
city_SE3_ego = loader.get_city_SE3_ego(log_id, cam_timestamp_ns)
if city_SE3_ego is None:
logger.info("missing LiDAR pose")
continue
# load feather file path, e.g. '315978406032859416.feather"
lidar_fpath = loader.get_closest_lidar_fpath(log_id, cam_timestamp_ns)
if lidar_fpath is None:
# without depth map, can't do this accurately
continue
lidar_points = io_utils.read_lidar_sweep(lidar_fpath, attrib_spec="xyz")
lidar_timestamp_ns = int(lidar_fpath.stem)
if use_depth_map_for_occlusion:
depth_map = loader.get_depth_map_from_lidar(
lidar_points=lidar_points,
cam_name=cam_name,
log_id=log_id,
cam_timestamp_ns=cam_timestamp_ns,
lidar_timestamp_ns=lidar_timestamp_ns,
)
else:
depth_map = None
egoview_renderer = EgoViewMapRenderer(
depth_map=depth_map, city_SE3_ego=city_SE3_ego, pinhole_cam=pinhole_cam, avm=avm
)
frame_rgb = render_egoview(
output_dir=output_dir,
img_fpath=img_fpath,
egoview_renderer=egoview_renderer,
cam_timestamp_ns=cam_timestamp_ns,
log_id=log_id,
max_range_m=max_range_m,
dump_single_frames=dump_single_frames,
)
video_list.append(frame_rgb)
video: NDArrayByte = np.stack(video_list).astype(np.uint8)
video_output_dir = output_dir / "videos"
video_utils.write_video(
video=video,
dst=video_output_dir / f"{log_id}_{cam_name}.mp4",
fps=RING_CAMERA_FPS,
preset="medium",
)
def render_egoview(
output_dir: Path,
img_fpath: Path,
egoview_renderer: EgoViewMapRenderer,
cam_timestamp_ns: int,
log_id: str,
max_range_m: float,
dump_single_frames: bool,
) -> NDArrayByte:
"""Synthetically manipulate a vector map, render the map in the ego-view, and save rendering to disk.
Args:
output_dir: path to directory where renderings will be saved.
img_fpath: path to RGB image, from one of the ring or stereo cameras.
egoview_renderer: rendering engine for map elements in the ego-view.
cam_timestamp_ns: nanosecond camera timestamp when image was captured.
log_id: unique ID for AV2 scenario/log.
max_range_m: maximum range of map entities from egovehicle to consider for rendering (by l-infinity norm).
dump_single_frames: Whether to save to disk individual RGB frames of the rendering, in addition to generating
the mp4 file.
Returns:
array of shape (H,W,3) and type uint8 representing a RGB image.
"""
save_dir = output_dir / log_id
if dump_single_frames:
# we only create log-specific directories, if dumping individual frames.
save_dir.mkdir(exist_ok=True, parents=True)
img_fname = f"{egoview_renderer.pinhole_cam.cam_name}_{cam_timestamp_ns}_vectormap.jpg"
save_fpath = save_dir / img_fname
if save_fpath.exists():
logger.info("Rendered image already exists, skipping")
img: NDArrayByte = io_utils.read_img(save_fpath)
return img
start = time.time()
img_rgb: NDArrayByte = io_utils.read_img(img_fpath)
# to prevent washing out, can pass in black image, and get just mask back, or can overlay directly.
img_h, img_w, _ = img_rgb.shape
img_empty: NDArrayByte = np.full(
(img_h, img_w, 3), fill_value=128, dtype=np.uint8
) # pure white polylines will disappear @ 255
img_empty = render_egoview_with_occlusion_checks(
img_canvas=img_empty,
egoview_renderer=egoview_renderer,
max_range_m=max_range_m,
)
end = time.time()
duration = end - start
logger.info(f"Rendering single image took {duration:.2f} sec.")
frame_rgb = raster_utils.blend_images(img_rgb, img_empty, alpha=0.45)
if dump_single_frames:
io_utils.write_img(save_fpath, frame_rgb, channel_order="RGB")
return frame_rgb
def render_egoview_with_occlusion_checks(
img_canvas: NDArrayByte, egoview_renderer: EgoViewMapRenderer, max_range_m: float, line_width_px: int = 10
) -> NDArrayByte:
"""Render pedestrian crossings and lane segments in the ego-view.
Pedestrian crossings (crosswalks) will be rendered in blue, and lane markings will be colored according to their
marking color, or otherwise red, if markings are implicit.
Args:
img_canvas: array of shape (H,W,3) representing BGR canvas to rasterize map elements onto.
egoview_renderer: rendering engine for map elements in the ego-view.
max_range_m: maximum range of map entities from egovehicle to consider for rendering (by l-infinity norm).
line_width_px: thickness (in pixels) to use for rendering each polyline.
Returns:
array of shape (H,W,3) and type uint8 representing a RGB image.
"""
for ls in egoview_renderer.avm.get_scenario_lane_segments():
img_canvas = egoview_renderer.render_lane_boundary_egoview(img_canvas, ls, "right", line_width_px)
img_canvas = egoview_renderer.render_lane_boundary_egoview(img_canvas, ls, "left", line_width_px)
for pc in egoview_renderer.avm.get_scenario_ped_crossings():
EPS = 1e-5
crosswalk_color = BLUE_BGR
# render ped crossings (pc's)
xwalk_polygon = pc.polygon
# prevent duplicate first and last coords
xwalk_polygon[:-1] += EPS
N_INTERP_PTS = 100
# For pixel-perfect rendering, querying crosswalk boundary ground height at waypoints throughout
# the street is much more accurate than 3d linear interpolation using only the 4 annotated corners.
polygon_city_frame = interp_utils.interp_arc(t=N_INTERP_PTS, points=xwalk_polygon[:, :2])
polygon_city_frame = egoview_renderer.avm.append_height_to_2d_city_pt_cloud(points_xy=polygon_city_frame)
egoview_renderer.render_polyline_egoview(
polygon_city_frame,
img_canvas,
crosswalk_color,
thickness_px=line_width_px,
)
# convert BGR to RGB
img_rgb: NDArrayByte = img_canvas[:, :, ::-1]
return img_rgb
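# A minimal sketch (an illustration, not the library's implementation) of the occlusion test a
# depth map enables: a projected map point is kept only when its distance from the camera does
# not exceed the inferred depth at the pixel it falls on, plus a small tolerance. The function
# name and tolerance value below are assumptions for illustration only.
def example_occlusion_mask(uv, point_depths, depth_map, tolerance_m: float = 0.5):
    """Return a boolean mask over points; True means the point is treated as visible.

    Args:
        uv: (N,2) integer pixel coordinates (u=column, v=row) of projected map points.
        point_depths: (N,) distances of those points from the camera, in meters.
        depth_map: (H,W) per-pixel depth inferred from LiDAR, in meters.
        tolerance_m: slack that absorbs interpolation noise in the depth map.
    """
    u = np.clip(uv[:, 0], 0, depth_map.shape[1] - 1)
    v = np.clip(uv[:, 1], 0, depth_map.shape[0] - 1)
    return point_depths <= depth_map[v, u] + tolerance_m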
def parse_camera_enum_types(cam_names: Tuple[str, ...]) -> List[RingCameras]:
    """Convert a list of CLI camera-name strings to enums of type RingCameras, and validate each input.
Args:
cam_names: Tuple of camera names to use for rendering the map.
Returns:
List of camera enums to use for rendering the map.
Raises:
ValueError: If an invalid camera name is provided.
"""
valid_ring_cams = set([x.value for x in list(RingCameras)])
cam_enums: List[RingCameras] = []
for cam_name in list(cam_names):
if cam_name in valid_ring_cams:
cam_enums.append(RingCameras(cam_name))
else:
raise ValueError("Must provide _valid_ camera names!")
return cam_enums
@click.command(help="Generate map visualizations on ego-view imagery from the Argoverse 2 Sensor or TbV Datasets.")
@click.option(
"-d",
"--data-root",
required=True,
help="Path to local directory where the Argoverse 2 Sensor Dataset or TbV logs are stored.",
type=click.Path(exists=True),
)
@click.option(
"-o",
"--output-dir",
required=True,
help="Path to local directory where renderings will be saved.",
type=str,
)
@click.option(
"-l",
"--log-id",
default="00a6ffc1-6ce9-3bc3-a060-6006e9893a1a",
help="unique log identifier.",
type=str,
)
@click.option(
"-r",
"--max-range-m",
type=float,
default=100,
help="Maximum range of map entities from egovehicle to consider for rendering (by l-infinity norm).",
)
@click.option(
"-d",
"--use-depth-map-for_occlusion",
default=True,
help="Whether to use an inferred depth map for rendering occluded elements (defaults to True).",
type=bool,
)
@click.option(
"-s",
"--dump-single-frames",
default=False,
help="Whether to save to disk individual RGB frames of the rendering, in addition to generating the mp4 file"
"(defaults to False). Note: can quickly generate 100s of MBs, for 200 KB frames.",
type=bool,
)
@click.option(
"-c",
"--cam-names",
default=tuple(x.value for x in list(RingCameras)),
help="List of camera viewpoints to render the map from.",
multiple=True,
type=str,
)
def run_generate_egoview_overlaid_map(
data_root: "os.PathLike[str]",
output_dir: "os.PathLike[str]",
log_id: str,
max_range_m: float,
use_depth_map_for_occlusion: bool,
dump_single_frames: bool,
cam_names: Tuple[str, ...],
) -> None:
"""Click entry point for visualizing map entities rendered on top of sensor imagery."""
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
data_root = Path(data_root)
output_dir = Path(output_dir)
logger.info(
"data_root: %s, output_dir: %s, log_id: %s, max_range_m: %f, "
"use_depth_map_for_occlusion: %s, dump_single_frames %s",
data_root,
output_dir,
log_id,
max_range_m,
use_depth_map_for_occlusion,
dump_single_frames,
)
generate_egoview_overlaid_map(
data_root=data_root,
output_dir=output_dir,
log_id=log_id,
max_range_m=max_range_m,
use_depth_map_for_occlusion=use_depth_map_for_occlusion,
dump_single_frames=dump_single_frames,
cam_names=parse_camera_enum_types(cam_names),
)
if __name__ == "__main__":
run_generate_egoview_overlaid_map()
|
[
"av2.map.map_api.ArgoverseStaticMap.from_map_dir",
"av2.utils.io.read_img",
"av2.utils.io.write_img",
"av2.geometry.interpolate.interp_arc",
"click.option",
"av2.rendering.video.write_video",
"pathlib.Path",
"click.Path",
"numpy.full",
"click.command",
"av2.rendering.map.EgoViewMapRenderer",
"numpy.stack",
"av2.datasets.sensor.constants.RingCameras",
"logging.basicConfig",
"av2.utils.io.read_lidar_sweep",
"av2.datasets.sensor.av2_sensor_dataloader.AV2SensorDataLoader",
"time.time",
"logging.info",
"av2.utils.raster.blend_images",
"logging.getLogger"
] |
[((950, 977), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (967, 977), False, 'import logging\n'), ((9898, 10022), 'click.command', 'click.command', ([], {'help': '"""Generate map visualizations on ego-view imagery from the Argoverse 2 Sensor or TbV Datasets."""'}), "(help=\n 'Generate map visualizations on ego-view imagery from the Argoverse 2 Sensor or TbV Datasets.'\n )\n", (9911, 10022), False, 'import click\n'), ((10210, 10338), 'click.option', 'click.option', (['"""-o"""', '"""--output-dir"""'], {'required': '(True)', 'help': '"""Path to local directory where renderings will be saved."""', 'type': 'str'}), "('-o', '--output-dir', required=True, help=\n 'Path to local directory where renderings will be saved.', type=str)\n", (10222, 10338), False, 'import click\n'), ((10358, 10486), 'click.option', 'click.option', (['"""-l"""', '"""--log-id"""'], {'default': '"""00a6ffc1-6ce9-3bc3-a060-6006e9893a1a"""', 'help': '"""unique log identifier."""', 'type': 'str'}), "('-l', '--log-id', default=\n '00a6ffc1-6ce9-3bc3-a060-6006e9893a1a', help='unique log identifier.',\n type=str)\n", (10370, 10486), False, 'import click\n'), ((10502, 10674), 'click.option', 'click.option', (['"""-r"""', '"""--max-range-m"""'], {'type': 'float', 'default': '(100)', 'help': '"""Maximum range of map entities from egovehicle to consider for rendering (by l-infinity norm)."""'}), "('-r', '--max-range-m', type=float, default=100, help=\n 'Maximum range of map entities from egovehicle to consider for rendering (by l-infinity norm).'\n )\n", (10514, 10674), False, 'import click\n'), ((10689, 10872), 'click.option', 'click.option', (['"""-d"""', '"""--use-depth-map-for_occlusion"""'], {'default': '(True)', 'help': '"""Whether to use an inferred depth map for rendering occluded elements (defaults to True)."""', 'type': 'bool'}), "('-d', '--use-depth-map-for_occlusion', default=True, help=\n 'Whether to use an inferred depth map for rendering occluded elements (defaults to True).'\n , type=bool)\n", (10701, 10872), False, 'import click\n'), ((10887, 11155), 'click.option', 'click.option', (['"""-s"""', '"""--dump-single-frames"""'], {'default': '(False)', 'help': '"""Whether to save to disk individual RGB frames of the rendering, in addition to generating the mp4 file(defaults to False). Note: can quickly generate 100s of MBs, for 200 KB frames."""', 'type': 'bool'}), "('-s', '--dump-single-frames', default=False, help=\n 'Whether to save to disk individual RGB frames of the rendering, in addition to generating the mp4 file(defaults to False). 
Note: can quickly generate 100s of MBs, for 200 KB frames.'\n , type=bool)\n", (10899, 11155), False, 'import click\n'), ((1966, 2027), 'av2.datasets.sensor.av2_sensor_dataloader.AV2SensorDataLoader', 'AV2SensorDataLoader', ([], {'data_dir': 'data_root', 'labels_dir': 'data_root'}), '(data_dir=data_root, labels_dir=data_root)\n', (1985, 2027), False, 'from av2.datasets.sensor.av2_sensor_dataloader import AV2SensorDataLoader\n'), ((2088, 2155), 'av2.map.map_api.ArgoverseStaticMap.from_map_dir', 'ArgoverseStaticMap.from_map_dir', (['log_map_dirpath'], {'build_raster': '(True)'}), '(log_map_dirpath, build_raster=True)\n', (2119, 2155), False, 'from av2.map.map_api import ArgoverseStaticMap\n'), ((6123, 6134), 'time.time', 'time.time', ([], {}), '()\n', (6132, 6134), False, 'import time\n'), ((6163, 6191), 'av2.utils.io.read_img', 'io_utils.read_img', (['img_fpath'], {}), '(img_fpath)\n', (6180, 6191), True, 'import av2.utils.io as io_utils\n'), ((6362, 6420), 'numpy.full', 'np.full', (['(img_h, img_w, 3)'], {'fill_value': '(128)', 'dtype': 'np.uint8'}), '((img_h, img_w, 3), fill_value=128, dtype=np.uint8)\n', (6369, 6420), True, 'import numpy as np\n'), ((6657, 6668), 'time.time', 'time.time', ([], {}), '()\n', (6666, 6668), False, 'import time\n'), ((6781, 6838), 'av2.utils.raster.blend_images', 'raster_utils.blend_images', (['img_rgb', 'img_empty'], {'alpha': '(0.45)'}), '(img_rgb, img_empty, alpha=0.45)\n', (6806, 6838), True, 'import av2.utils.raster as raster_utils\n'), ((11731, 11789), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (11750, 11789), False, 'import logging\n'), ((11806, 11821), 'pathlib.Path', 'Path', (['data_root'], {}), '(data_root)\n', (11810, 11821), False, 'from pathlib import Path\n'), ((11839, 11855), 'pathlib.Path', 'Path', (['output_dir'], {}), '(output_dir)\n', (11843, 11855), False, 'from pathlib import Path\n'), ((4386, 4515), 'av2.rendering.video.write_video', 'video_utils.write_video', ([], {'video': 'video', 'dst': "(video_output_dir / f'{log_id}_{cam_name}.mp4')", 'fps': 'RING_CAMERA_FPS', 'preset': '"""medium"""'}), "(video=video, dst=video_output_dir /\n f'{log_id}_{cam_name}.mp4', fps=RING_CAMERA_FPS, preset='medium')\n", (4409, 4515), True, 'import av2.rendering.video as video_utils\n'), ((6061, 6090), 'av2.utils.io.read_img', 'io_utils.read_img', (['save_fpath'], {}), '(save_fpath)\n', (6078, 6090), True, 'import av2.utils.io as io_utils\n'), ((6875, 6937), 'av2.utils.io.write_img', 'io_utils.write_img', (['save_fpath', 'frame_rgb'], {'channel_order': '"""RGB"""'}), "(save_fpath, frame_rgb, channel_order='RGB')\n", (6893, 6937), True, 'import av2.utils.io as io_utils\n'), ((8685, 8753), 'av2.geometry.interpolate.interp_arc', 'interp_utils.interp_arc', ([], {'t': 'N_INTERP_PTS', 'points': 'xwalk_polygon[:, :2]'}), '(t=N_INTERP_PTS, points=xwalk_polygon[:, :2])\n', (8708, 8753), True, 'import av2.geometry.interpolate as interp_utils\n'), ((10182, 10205), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (10192, 10205), False, 'import click\n'), ((3175, 3232), 'av2.utils.io.read_lidar_sweep', 'io_utils.read_lidar_sweep', (['lidar_fpath'], {'attrib_spec': '"""xyz"""'}), "(lidar_fpath, attrib_spec='xyz')\n", (3200, 3232), True, 'import av2.utils.io as io_utils\n'), ((3730, 3834), 'av2.rendering.map.EgoViewMapRenderer', 'EgoViewMapRenderer', ([], {'depth_map': 'depth_map', 'city_SE3_ego': 'city_SE3_ego', 'pinhole_cam': 'pinhole_cam', 
'avm': 'avm'}), '(depth_map=depth_map, city_SE3_ego=city_SE3_ego,\n pinhole_cam=pinhole_cam, avm=avm)\n', (3748, 3834), False, 'from av2.rendering.map import EgoViewMapRenderer\n'), ((2548, 2626), 'logging.info', 'logging.info', (['f"""\tOn file {i}/{num_cam_imgs} of camera {cam_name} of {log_id}"""'], {}), "(f'\\tOn file {i}/{num_cam_imgs} of camera {cam_name} of {log_id}')\n", (2560, 2626), False, 'import logging\n'), ((4291, 4311), 'numpy.stack', 'np.stack', (['video_list'], {}), '(video_list)\n', (4299, 4311), True, 'import numpy as np\n'), ((9770, 9791), 'av2.datasets.sensor.constants.RingCameras', 'RingCameras', (['cam_name'], {}), '(cam_name)\n', (9781, 9791), False, 'from av2.datasets.sensor.constants import RingCameras\n')]
|
# -*- coding: utf-8 -*-
import os, sys
import shutil
import tissueloc as tl
from tissueloc.load_slide import load_slide_img, select_slide_level
import numpy as np
from skimage import io, color
import cv2
if __name__ == "__main__":
slide_dir = "../data/TestSlides/Malignant"
save_dir = "../data/TestSlides/MalignantTissue"
if os.path.exists(save_dir):
shutil.rmtree(save_dir)
os.makedirs(save_dir)
slide_list = [ele for ele in os.listdir(slide_dir) if "tiff" in ele]
for ind, ele in enumerate(slide_list):
slide_path = os.path.join(slide_dir, ele)
cnts, d_factor = tl.locate_tissue_cnts(slide_path, max_img_size=2048, smooth_sigma=13,
thresh_val=0.88,min_tissue_size=10000)
s_level, d_factor = select_slide_level(slide_path, max_size=2048)
slide_img = load_slide_img(slide_path, s_level)
slide_img = np.ascontiguousarray(slide_img, dtype=np.uint8)
cv2.drawContours(slide_img, cnts, -1, (0, 255, 0), 9)
io.imsave(os.path.join(save_dir, os.path.join(os.path.splitext(ele)[0]+'_cnt.png')), slide_img)
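# Illustrative follow-up (an assumption, not part of tissueloc): the contours returned above live
# at the downsampled level chosen by select_slide_level, so a scale factor such as d_factor can
# map them back to level-0 slide coordinates if full-resolution masks are needed.
def scale_cnts_to_level0(cnts, scale):
    return [np.asarray(cnt) * scale for cnt in cnts]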
|
[
"os.makedirs",
"tissueloc.load_slide.load_slide_img",
"numpy.ascontiguousarray",
"os.path.exists",
"cv2.drawContours",
"os.path.splitext",
"tissueloc.load_slide.select_slide_level",
"shutil.rmtree",
"os.path.join",
"os.listdir",
"tissueloc.locate_tissue_cnts"
] |
[((342, 366), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (356, 366), False, 'import os, sys\n'), ((404, 425), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (415, 425), False, 'import os, sys\n'), ((376, 399), 'shutil.rmtree', 'shutil.rmtree', (['save_dir'], {}), '(save_dir)\n', (389, 399), False, 'import shutil\n'), ((564, 592), 'os.path.join', 'os.path.join', (['slide_dir', 'ele'], {}), '(slide_dir, ele)\n', (576, 592), False, 'import os, sys\n'), ((618, 731), 'tissueloc.locate_tissue_cnts', 'tl.locate_tissue_cnts', (['slide_path'], {'max_img_size': '(2048)', 'smooth_sigma': '(13)', 'thresh_val': '(0.88)', 'min_tissue_size': '(10000)'}), '(slide_path, max_img_size=2048, smooth_sigma=13,\n thresh_val=0.88, min_tissue_size=10000)\n', (639, 731), True, 'import tissueloc as tl\n'), ((802, 847), 'tissueloc.load_slide.select_slide_level', 'select_slide_level', (['slide_path'], {'max_size': '(2048)'}), '(slide_path, max_size=2048)\n', (820, 847), False, 'from tissueloc.load_slide import load_slide_img, select_slide_level\n'), ((868, 903), 'tissueloc.load_slide.load_slide_img', 'load_slide_img', (['slide_path', 's_level'], {}), '(slide_path, s_level)\n', (882, 903), False, 'from tissueloc.load_slide import load_slide_img, select_slide_level\n'), ((924, 971), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['slide_img'], {'dtype': 'np.uint8'}), '(slide_img, dtype=np.uint8)\n', (944, 971), True, 'import numpy as np\n'), ((980, 1033), 'cv2.drawContours', 'cv2.drawContours', (['slide_img', 'cnts', '(-1)', '(0, 255, 0)', '(9)'], {}), '(slide_img, cnts, -1, (0, 255, 0), 9)\n', (996, 1033), False, 'import cv2\n'), ((460, 481), 'os.listdir', 'os.listdir', (['slide_dir'], {}), '(slide_dir)\n', (470, 481), False, 'import os, sys\n'), ((1088, 1109), 'os.path.splitext', 'os.path.splitext', (['ele'], {}), '(ele)\n', (1104, 1109), False, 'import os, sys\n')]
|
#! /usr/bin/env python
"""Run a YOLO_v2 style detection model on test images."""
import argparse
import colorsys
import imghdr
import os
import random
import numpy as np
from keras import backend as K
from keras.models import load_model
from PIL import Image, ImageDraw, ImageFont
from yad2k.models.keras_yolo import yolo_eval, yolo_head
import shutil
def _main(session, args_model_path, args_anchors_path, args_classes_path, args_test_path, args_output_path):
model_path = args_model_path
assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'
anchors_path = args_anchors_path
classes_path = args_classes_path
test_path = args_test_path
output_path = args_output_path
args_score_threshold = .3
args_iou_threshold = .5
if not os.path.exists(output_path):
print('Creating output path {}'.format(output_path))
os.mkdir(output_path)
# sess = K.get_session() # TODO: Remove dependence on Tensorflow session.
sess = session
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
anchors = np.array(anchors).reshape(-1, 2)
yolo_model = load_model(model_path)
# Verify model, anchors, and classes are compatible
num_classes = len(class_names)
num_anchors = len(anchors)
# TODO: Assumes dim ordering is channel last
model_output_channels = yolo_model.layers[-1].output_shape[-1]
assert model_output_channels == num_anchors * (num_classes + 5), \
'Mismatch between model and given anchor and class sizes. ' \
'Specify matching anchors and classes with --anchors_path and ' \
'--classes_path flags.'
print('{} model, anchors, and classes loaded.'.format(model_path))
# Check if model is fully convolutional, assuming channel last order.
model_image_size = yolo_model.layers[0].input_shape[1:3]
is_fixed_size = model_image_size != (None, None)
# Generate colors for drawing bounding boxes.
hsv_tuples = [(x / len(class_names), 1., 1.)
for x in range(len(class_names))]
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
colors))
random.seed(10101) # Fixed seed for consistent colors across runs.
random.shuffle(colors) # Shuffle colors to decorrelate adjacent classes.
random.seed(None) # Reset seed to default.
# Generate output tensor targets for filtered bounding boxes.
# TODO: Wrap these backend operations with Keras layers.
yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
input_image_shape = K.placeholder(shape=(2, ))
boxes, scores, classes = yolo_eval(
yolo_outputs,
input_image_shape,
score_threshold=args_score_threshold,
iou_threshold=args_iou_threshold)
for image_file in os.listdir(test_path):
# try:
# image_type = imghdr.what(os.path.join(test_path, image_file))
# if not image_type:
# continue
# except IsADirectoryError:
# continue
image = Image.open(os.path.join(test_path, image_file))
if is_fixed_size: # TODO: When resizing we can use minibatch input.
resized_image = image.resize(
tuple(reversed(model_image_size)), Image.BICUBIC)
image_data = np.array(resized_image, dtype='float32')
else:
# Due to skip connection + max pooling in YOLO_v2, inputs must have
# width and height as multiples of 32.
new_image_size = (image.width - (image.width % 32),
image.height - (image.height % 32))
resized_image = image.resize(new_image_size, Image.BICUBIC)
image_data = np.array(resized_image, dtype='float32')
print(image_data.shape)
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
out_boxes, out_scores, out_classes = sess.run(
[boxes, scores, classes],
feed_dict={
yolo_model.input: image_data,
input_image_shape: [image.size[1], image.size[0]],
K.learning_phase(): 0
})
print('Found {} boxes for {}'.format(len(out_boxes), image_file))
font = ImageFont.truetype(
font='font/FiraMono-Medium.otf',
size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 300
max_score = 0
for i, c in reversed(list(enumerate(out_classes))):
predicted_class = class_names[c]
box = out_boxes[i]
score = out_scores[i]
label = '{} {:.2f}'.format(predicted_class, score)
draw = ImageDraw.Draw(image)
label_size = draw.textsize(label, font)
top, left, bottom, right = box
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
print(label, (left, top), (right, bottom))
if top - label_size[1] >= 0:
text_origin = np.array([left, top - label_size[1]])
else:
text_origin = np.array([left, top + 1])
# # My kingdom for a good redistributable image drawing library.
# for i in range(thickness):
# draw.rectangle(
# [left + i, top + i, right - i, bottom - i],
# outline=colors[c])
# draw.rectangle(
# [tuple(text_origin), tuple(text_origin + label_size)],
# fill=colors[c])
# draw.text(text_origin, label, fill=(0, 0, 0), font=font)
# del draw
if predicted_class == 'dog':
if score > max_score:
if max_score > 0:
print('-' * 10)
border = 10
max_score = score
crop_box = left - border, top - border, right + border, bottom + border
cropped_img = image.crop(crop_box)
cropped_img.save(os.path.join(output_path, image_file), quality=90)
else:
shutil.copyfile(os.path.join(test_path, image_file), os.path.join(output_path, image_file))
# image.save(os.path.join(output_path, image_file), quality=90)
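# Small sketch of the sizing rule applied above for non-fixed-size models: because of the skip
# connection and max pooling in YOLO_v2, input width and height are snapped down to the nearest
# multiple of 32 before resizing.
def snap_to_multiple_of_32(width, height):
    return width - (width % 32), height - (height % 32)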
def _main_input():
model_path = 'model_data/yolo.h5'
anchors_path = 'model_data/yolo_anchors.txt'
classes_path = 'model_data/pascal_classes.txt'
# model_path = args_model_path
assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'
# anchors_path = args_anchors_path
# classes_path = args_classes_path
# test_path = args_test_path
# output_path = args_output_path
intput_path = 'D:/Udacity/MachineLearning(Advanced)/p6_graduation_project/input'
data_folders = ['data_train', 'data_val', 'data_test']
args_score_threshold = .3
args_iou_threshold = .5
count_max_dog = 0
count_no_dog = 0
count_no_object = 0
# if not os.path.exists(output_path):
# print('Creating output path {}'.format(output_path))
# os.mkdir(output_path)
sess = K.get_session() # TODO: Remove dependence on Tensorflow session.
# sess = session
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
anchors = np.array(anchors).reshape(-1, 2)
yolo_model = load_model(model_path)
# Verify model, anchors, and classes are compatible
num_classes = len(class_names)
num_anchors = len(anchors)
# TODO: Assumes dim ordering is channel last
model_output_channels = yolo_model.layers[-1].output_shape[-1]
assert model_output_channels == num_anchors * (num_classes + 5), \
'Mismatch between model and given anchor and class sizes. ' \
'Specify matching anchors and classes with --anchors_path and ' \
'--classes_path flags.'
print('{} model, anchors, and classes loaded.'.format(model_path))
# Check if model is fully convolutional, assuming channel last order.
model_image_size = yolo_model.layers[0].input_shape[1:3]
is_fixed_size = model_image_size != (None, None)
# Generate colors for drawing bounding boxes.
hsv_tuples = [(x / len(class_names), 1., 1.)
for x in range(len(class_names))]
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
colors))
random.seed(10101) # Fixed seed for consistent colors across runs.
random.shuffle(colors) # Shuffle colors to decorrelate adjacent classes.
random.seed(None) # Reset seed to default.
# Generate output tensor targets for filtered bounding boxes.
# TODO: Wrap these backend operations with Keras layers.
yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
input_image_shape = K.placeholder(shape=(2, ))
boxes, scores, classes = yolo_eval(
yolo_outputs,
input_image_shape,
score_threshold=args_score_threshold,
iou_threshold=args_iou_threshold)
for data_folder_name in data_folders:
data_folder = os.path.join(intput_path, data_folder_name)
output_folder = os.path.join(intput_path, 'yolo_' + data_folder_name)
if not os.path.exists(output_folder):
print('Create folders: %s' % output_folder)
os.makedirs(output_folder)
else:
print('Folder exists: %s' % output_folder)
for class_folder_name in os.listdir(data_folder):
test_path = os.path.join(data_folder, class_folder_name)
output_path = os.path.join(output_folder, class_folder_name)
if not os.path.exists(output_path):
print('Create folders: %s' % output_path)
os.makedirs(output_path)
else:
print('Folder exists: %s' % output_path)
for image_file in os.listdir(test_path):
# try:
# image_type = imghdr.what(os.path.join(test_path, image_file))
# if not image_type:
# continue
# except IsADirectoryError:
# continue
image = Image.open(os.path.join(test_path, image_file))
if is_fixed_size: # TODO: When resizing we can use minibatch input.
resized_image = image.resize(
tuple(reversed(model_image_size)), Image.BICUBIC)
image_data = np.array(resized_image, dtype='float32')
else:
# Due to skip connection + max pooling in YOLO_v2, inputs must have
# width and height as multiples of 32.
new_image_size = (image.width - (image.width % 32),
image.height - (image.height % 32))
resized_image = image.resize(new_image_size, Image.BICUBIC)
image_data = np.array(resized_image, dtype='float32')
print(image_data.shape)
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
try:
out_boxes, out_scores, out_classes = sess.run(
[boxes, scores, classes],
feed_dict={
yolo_model.input: image_data,
input_image_shape: [image.size[1], image.size[0]],
K.learning_phase(): 0
})
except Exception as ex:
print('Err: %s' % image_file)
print(ex)
shutil.copyfile(os.path.join(test_path, image_file), os.path.join(output_path, image_file))
continue
# print('Found {} boxes for {}'.format(len(out_boxes), image_file))
font = ImageFont.truetype(
font='font/FiraMono-Medium.otf',
size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 300
max_score = 0
if len(out_classes) > 0:
for i, c in reversed(list(enumerate(out_classes))):
predicted_class = class_names[c]
box = out_boxes[i]
score = out_scores[i]
label = '{} {:.2f}'.format(predicted_class, score)
draw = ImageDraw.Draw(image)
label_size = draw.textsize(label, font)
top, left, bottom, right = box
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
# print(label, (left, top), (right, bottom))
if top - label_size[1] >= 0:
text_origin = np.array([left, top - label_size[1]])
else:
text_origin = np.array([left, top + 1])
# # My kingdom for a good redistributable image drawing library.
# for i in range(thickness):
# draw.rectangle(
# [left + i, top + i, right - i, bottom - i],
# outline=colors[c])
# draw.rectangle(
# [tuple(text_origin), tuple(text_origin + label_size)],
# fill=colors[c])
# draw.text(text_origin, label, fill=(0, 0, 0), font=font)
# del draw
if predicted_class == 'dog':
if score > max_score:
if max_score > 0:
print('+' * 10)
count_max_dog += 1
border = 10
max_score = score
crop_box = left - border, top - border, right + border, bottom + border
cropped_img = image.crop(crop_box)
cropped_img.save(os.path.join(output_path, image_file), quality=90)
else:
count_no_dog += 1
print('-' * 10)
shutil.copyfile(os.path.join(test_path, image_file), os.path.join(output_path, image_file))
else:
count_no_object += 1
print('*' * 10)
shutil.copyfile(os.path.join(test_path, image_file), os.path.join(output_path, image_file))
print('%s %s %s' %(count_max_dog, count_no_dog, count_no_object))
# image.save(os.path.join(output_path, image_file), quality=90)
if __name__ == '__main__':
# sess = K.get_session() # TODO: Remove dependence on Tensorflow session.
    # Test on the sample images bundled with YOLO
model_path = 'model_data/yolo.h5'
anchors_path = 'model_data/yolo_anchors.txt'
classes_path = 'model_data/pascal_classes.txt'
# test_path = 'images'
# output_path = 'images/out'
# _main(model_path, anchors_path, classes_path, test_path, output_path)
    # Process the input data
_main_input()
    # # Process data_train
# test_path = 'D:/Udacity/MachineLearning(Advanced)/p6_graduation_project/input/data_train'
# output_path = 'D:/Udacity/MachineLearning(Advanced)/p6_graduation_project/input/yolo_data_train'
# for folder_name in os.listdir(test_path):
# in_path = os.path.join(test_path, folder_name)
# out_path = os.path.join(output_path, folder_name)
# if not os.path.exists(out_path):
# print('Create folder: %s' % out_path)
# os.makedirs(out_path)
# else:
# print('Folder exists: %s' % out_path)
# # _main(sess, model_path, anchors_path, classes_path, in_path, out_path)
    # # Process data_val
# test_path = 'D:/Udacity/MachineLearning(Advanced)/p6_graduation_project/input/data_val'
# output_path = 'D:/Udacity/MachineLearning(Advanced)/p6_graduation_project/input/yolo_data_val'
# for folder_name in os.listdir(test_path):
# in_path = os.path.join(test_path, folder_name)
# out_path = os.path.join(output_path, folder_name)
# if not os.path.exists(out_path):
# print('Create folder: %s' % out_path)
# os.makedirs(out_path)
# else:
# print('Folder exists: %s' % out_path)
# # _main(sess, model_path, anchors_path, classes_path, in_path, out_path)
    # # Process data_test
# test_path = 'D:/Udacity/MachineLearning(Advanced)/p6_graduation_project/input/data_test'
# output_path = 'D:/Udacity/MachineLearning(Advanced)/p6_graduation_project/input/yolo_data_test'
# for folder_name in os.listdir(test_path):
# in_path = os.path.join(test_path, folder_name)
# out_path = os.path.join(output_path, folder_name)
# if not os.path.exists(out_path):
# print('Create folder: %s' % out_path)
# os.makedirs(out_path)
# else:
# print('Folder exists: %s' % out_path)
# # _main(sess, model_path, anchors_path, classes_path, in_path, out_path)
# sess.close()
|
[
"keras.models.load_model",
"keras.backend.placeholder",
"os.mkdir",
"os.makedirs",
"colorsys.hsv_to_rgb",
"keras.backend.learning_phase",
"keras.backend.get_session",
"random.shuffle",
"numpy.floor",
"os.path.exists",
"numpy.expand_dims",
"yad2k.models.keras_yolo.yolo_eval",
"random.seed",
"numpy.array",
"PIL.ImageDraw.Draw",
"os.path.join",
"os.listdir"
] |
[((1317, 1339), 'keras.models.load_model', 'load_model', (['model_path'], {}), '(model_path)\n', (1327, 1339), False, 'from keras.models import load_model\n'), ((2427, 2445), 'random.seed', 'random.seed', (['(10101)'], {}), '(10101)\n', (2438, 2445), False, 'import random\n'), ((2499, 2521), 'random.shuffle', 'random.shuffle', (['colors'], {}), '(colors)\n', (2513, 2521), False, 'import random\n'), ((2577, 2594), 'random.seed', 'random.seed', (['None'], {}), '(None)\n', (2588, 2594), False, 'import random\n'), ((2848, 2873), 'keras.backend.placeholder', 'K.placeholder', ([], {'shape': '(2,)'}), '(shape=(2,))\n', (2861, 2873), True, 'from keras import backend as K\n'), ((2904, 3023), 'yad2k.models.keras_yolo.yolo_eval', 'yolo_eval', (['yolo_outputs', 'input_image_shape'], {'score_threshold': 'args_score_threshold', 'iou_threshold': 'args_iou_threshold'}), '(yolo_outputs, input_image_shape, score_threshold=\n args_score_threshold, iou_threshold=args_iou_threshold)\n', (2913, 3023), False, 'from yad2k.models.keras_yolo import yolo_eval, yolo_head\n'), ((3075, 3096), 'os.listdir', 'os.listdir', (['test_path'], {}), '(test_path)\n', (3085, 3096), False, 'import os\n'), ((7662, 7677), 'keras.backend.get_session', 'K.get_session', ([], {}), '()\n', (7675, 7677), True, 'from keras import backend as K\n'), ((8063, 8085), 'keras.models.load_model', 'load_model', (['model_path'], {}), '(model_path)\n', (8073, 8085), False, 'from keras.models import load_model\n'), ((9173, 9191), 'random.seed', 'random.seed', (['(10101)'], {}), '(10101)\n', (9184, 9191), False, 'import random\n'), ((9245, 9267), 'random.shuffle', 'random.shuffle', (['colors'], {}), '(colors)\n', (9259, 9267), False, 'import random\n'), ((9323, 9340), 'random.seed', 'random.seed', (['None'], {}), '(None)\n', (9334, 9340), False, 'import random\n'), ((9594, 9619), 'keras.backend.placeholder', 'K.placeholder', ([], {'shape': '(2,)'}), '(shape=(2,))\n', (9607, 9619), True, 'from keras import backend as K\n'), ((9650, 9769), 'yad2k.models.keras_yolo.yolo_eval', 'yolo_eval', (['yolo_outputs', 'input_image_shape'], {'score_threshold': 'args_score_threshold', 'iou_threshold': 'args_iou_threshold'}), '(yolo_outputs, input_image_shape, score_threshold=\n args_score_threshold, iou_threshold=args_iou_threshold)\n', (9659, 9769), False, 'from yad2k.models.keras_yolo import yolo_eval, yolo_head\n'), ((784, 811), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (798, 811), False, 'import os\n'), ((882, 903), 'os.mkdir', 'os.mkdir', (['output_path'], {}), '(output_path)\n', (890, 903), False, 'import os\n'), ((4122, 4151), 'numpy.expand_dims', 'np.expand_dims', (['image_data', '(0)'], {}), '(image_data, 0)\n', (4136, 4151), True, 'import numpy as np\n'), ((9868, 9911), 'os.path.join', 'os.path.join', (['intput_path', 'data_folder_name'], {}), '(intput_path, data_folder_name)\n', (9880, 9911), False, 'import os\n'), ((9936, 9989), 'os.path.join', 'os.path.join', (['intput_path', "('yolo_' + data_folder_name)"], {}), "(intput_path, 'yolo_' + data_folder_name)\n", (9948, 9989), False, 'import os\n'), ((10234, 10257), 'os.listdir', 'os.listdir', (['data_folder'], {}), '(data_folder)\n', (10244, 10257), False, 'import os\n'), ((3336, 3371), 'os.path.join', 'os.path.join', (['test_path', 'image_file'], {}), '(test_path, image_file)\n', (3348, 3371), False, 'import os\n'), ((3583, 3623), 'numpy.array', 'np.array', (['resized_image'], {'dtype': '"""float32"""'}), "(resized_image, dtype='float32')\n", (3591, 3623), True, 'import 
numpy as np\n'), ((3996, 4036), 'numpy.array', 'np.array', (['resized_image'], {'dtype': '"""float32"""'}), "(resized_image, dtype='float32')\n", (4004, 4036), True, 'import numpy as np\n'), ((5031, 5052), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image'], {}), '(image)\n', (5045, 5052), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((10005, 10034), 'os.path.exists', 'os.path.exists', (['output_folder'], {}), '(output_folder)\n', (10019, 10034), False, 'import os\n'), ((10104, 10130), 'os.makedirs', 'os.makedirs', (['output_folder'], {}), '(output_folder)\n', (10115, 10130), False, 'import os\n'), ((10283, 10327), 'os.path.join', 'os.path.join', (['data_folder', 'class_folder_name'], {}), '(data_folder, class_folder_name)\n', (10295, 10327), False, 'import os\n'), ((10354, 10400), 'os.path.join', 'os.path.join', (['output_folder', 'class_folder_name'], {}), '(output_folder, class_folder_name)\n', (10366, 10400), False, 'import os\n'), ((10654, 10675), 'os.listdir', 'os.listdir', (['test_path'], {}), '(test_path)\n', (10664, 10675), False, 'import os\n'), ((1266, 1283), 'numpy.array', 'np.array', (['anchors'], {}), '(anchors)\n', (1274, 1283), True, 'import numpy as np\n'), ((2270, 2293), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['*x'], {}), '(*x)\n', (2289, 2293), False, 'import colorsys\n'), ((5560, 5597), 'numpy.array', 'np.array', (['[left, top - label_size[1]]'], {}), '([left, top - label_size[1]])\n', (5568, 5597), True, 'import numpy as np\n'), ((5646, 5671), 'numpy.array', 'np.array', (['[left, top + 1]'], {}), '([left, top + 1])\n', (5654, 5671), True, 'import numpy as np\n'), ((8012, 8029), 'numpy.array', 'np.array', (['anchors'], {}), '(anchors)\n', (8020, 8029), True, 'import numpy as np\n'), ((9016, 9039), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['*x'], {}), '(*x)\n', (9035, 9039), False, 'import colorsys\n'), ((10420, 10447), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (10434, 10447), False, 'import os\n'), ((10523, 10547), 'os.makedirs', 'os.makedirs', (['output_path'], {}), '(output_path)\n', (10534, 10547), False, 'import os\n'), ((11867, 11896), 'numpy.expand_dims', 'np.expand_dims', (['image_data', '(0)'], {}), '(image_data, 0)\n', (11881, 11896), True, 'import numpy as np\n'), ((4423, 4441), 'keras.backend.learning_phase', 'K.learning_phase', ([], {}), '()\n', (4439, 4441), True, 'from keras import backend as K\n'), ((6676, 6711), 'os.path.join', 'os.path.join', (['test_path', 'image_file'], {}), '(test_path, image_file)\n', (6688, 6711), False, 'import os\n'), ((6713, 6750), 'os.path.join', 'os.path.join', (['output_path', 'image_file'], {}), '(output_path, image_file)\n', (6725, 6750), False, 'import os\n'), ((10971, 11006), 'os.path.join', 'os.path.join', (['test_path', 'image_file'], {}), '(test_path, image_file)\n', (10983, 11006), False, 'import os\n'), ((11250, 11290), 'numpy.array', 'np.array', (['resized_image'], {'dtype': '"""float32"""'}), "(resized_image, dtype='float32')\n", (11258, 11290), True, 'import numpy as np\n'), ((11717, 11757), 'numpy.array', 'np.array', (['resized_image'], {'dtype': '"""float32"""'}), "(resized_image, dtype='float32')\n", (11725, 11757), True, 'import numpy as np\n'), ((4632, 4668), 'numpy.floor', 'np.floor', (['(0.03 * image.size[1] + 0.5)'], {}), '(0.03 * image.size[1] + 0.5)\n', (4640, 4668), True, 'import numpy as np\n'), ((5174, 5193), 'numpy.floor', 'np.floor', (['(top + 0.5)'], {}), '(top + 0.5)\n', (5182, 5193), True, 'import numpy as np\n'), ((5237, 5257), 
'numpy.floor', 'np.floor', (['(left + 0.5)'], {}), '(left + 0.5)\n', (5245, 5257), True, 'import numpy as np\n'), ((5315, 5337), 'numpy.floor', 'np.floor', (['(bottom + 0.5)'], {}), '(bottom + 0.5)\n', (5323, 5337), True, 'import numpy as np\n'), ((5394, 5415), 'numpy.floor', 'np.floor', (['(right + 0.5)'], {}), '(right + 0.5)\n', (5402, 5415), True, 'import numpy as np\n'), ((6575, 6612), 'os.path.join', 'os.path.join', (['output_path', 'image_file'], {}), '(output_path, image_file)\n', (6587, 6612), False, 'import os\n'), ((13351, 13372), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image'], {}), '(image)\n', (13365, 13372), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((15738, 15773), 'os.path.join', 'os.path.join', (['test_path', 'image_file'], {}), '(test_path, image_file)\n', (15750, 15773), False, 'import os\n'), ((15775, 15812), 'os.path.join', 'os.path.join', (['output_path', 'image_file'], {}), '(output_path, image_file)\n', (15787, 15812), False, 'import os\n'), ((12466, 12501), 'os.path.join', 'os.path.join', (['test_path', 'image_file'], {}), '(test_path, image_file)\n', (12478, 12501), False, 'import os\n'), ((12503, 12540), 'os.path.join', 'os.path.join', (['output_path', 'image_file'], {}), '(output_path, image_file)\n', (12515, 12540), False, 'import os\n'), ((13990, 14027), 'numpy.array', 'np.array', (['[left, top - label_size[1]]'], {}), '([left, top - label_size[1]])\n', (13998, 14027), True, 'import numpy as np\n'), ((14100, 14125), 'numpy.array', 'np.array', (['[left, top + 1]'], {}), '([left, top + 1])\n', (14108, 14125), True, 'import numpy as np\n'), ((12261, 12279), 'keras.backend.learning_phase', 'K.learning_phase', ([], {}), '()\n', (12277, 12279), True, 'from keras import backend as K\n'), ((12799, 12835), 'numpy.floor', 'np.floor', (['(0.03 * image.size[1] + 0.5)'], {}), '(0.03 * image.size[1] + 0.5)\n', (12807, 12835), True, 'import numpy as np\n'), ((15527, 15562), 'os.path.join', 'os.path.join', (['test_path', 'image_file'], {}), '(test_path, image_file)\n', (15539, 15562), False, 'import os\n'), ((15564, 15601), 'os.path.join', 'os.path.join', (['output_path', 'image_file'], {}), '(output_path, image_file)\n', (15576, 15601), False, 'import os\n'), ((13530, 13549), 'numpy.floor', 'np.floor', (['(top + 0.5)'], {}), '(top + 0.5)\n', (13538, 13549), True, 'import numpy as np\n'), ((13605, 13625), 'numpy.floor', 'np.floor', (['(left + 0.5)'], {}), '(left + 0.5)\n', (13613, 13625), True, 'import numpy as np\n'), ((13695, 13717), 'numpy.floor', 'np.floor', (['(bottom + 0.5)'], {}), '(bottom + 0.5)\n', (13703, 13717), True, 'import numpy as np\n'), ((13786, 13807), 'numpy.floor', 'np.floor', (['(right + 0.5)'], {}), '(right + 0.5)\n', (13794, 13807), True, 'import numpy as np\n'), ((15312, 15349), 'os.path.join', 'os.path.join', (['output_path', 'image_file'], {}), '(output_path, image_file)\n', (15324, 15349), False, 'import os\n')]
|
import random
from collections import deque
import numpy as np
from keras.layers import Dense
from keras.models import Sequential
from keras.optimizers import Adam
class DQNAgent:
def __init__(self, state_size,
action_size,
memory_size,
hidden_layers_number,
hidden_layers_size,
learning_rate=0.001,
gamma=0.95,
sample_batch_size=32,
exploration_rate=1.0,
exploration_min=0.01,
exploration_decay=0.995):
assert hidden_layers_number > 0
self.state_size = state_size
self.action_size = action_size
self.memory = deque(maxlen=memory_size)
self.learning_rate = learning_rate
self.gamma = gamma
self.sample_batch_size = sample_batch_size
self.exploration_rate = exploration_rate
self.exploration_min = exploration_min
self.exploration_decay = exploration_decay
self.model = self._build_model(hidden_layers_number, hidden_layers_size)
self.target_model = self._build_model(hidden_layers_number, hidden_layers_size)
def _build_model(self, hidden_layers_number, hidden_layers_size):
model = Sequential()
model.add(Dense(hidden_layers_size, activation='relu', input_dim=self.state_size))
for i in range(hidden_layers_number - 1):
model.add(Dense(hidden_layers_size, activation='relu'))
model.add(Dense(self.action_size, activation='linear'))
model.compile(optimizer=Adam(lr=self.learning_rate), loss='mse')
return model
def remember(self, state, action, reward, done, next_state):
self.memory.append((state, action, reward, done, next_state))
def sync_weights(self):
self.target_model.set_weights(self.model.get_weights())
def train(self):
""" Double DQN """
if len(self.memory) < self.sample_batch_size:
return
batch = random.sample(self.memory, self.sample_batch_size)
states, actions, rewards, dones, next_states = unpack_batch(batch)
        # Double DQN: the online model selects the best next action; the target model evaluates it
        next_state_values_model_indexes = np.argmax(self.model.predict(next_states), axis=1)
next_state_values_target_model = self.target_model.predict(next_states)
next_state_values = np.zeros(len(states))
for i, index in enumerate(next_state_values_model_indexes):
next_state_values[i] = next_state_values_target_model[i, index]
        # zero out next-state values for terminal transitions; only the immediate reward contributes in that case
next_state_values *= 1 - dones
targets = next_state_values * self.gamma + rewards
        # To compute the loss only on the action actually taken in each transition, predict all action values
        # for the states, keep the model's own predictions for every other action (their error is then zero),
        # and replace the taken action's value with the Bellman target computed above.
expected_state_action_values = self.model.predict(states)
for i in range(len(expected_state_action_values)):
expected_state_action_values[i, actions[i]] = targets[i]
self.model.fit(states, expected_state_action_values, epochs=1, verbose=0, batch_size=1)
if self.exploration_rate > self.exploration_min:
self.exploration_rate *= self.exploration_decay
def act(self, state, test_mode=False):
if (np.random.rand() <= self.exploration_rate) & (not test_mode):
return random.randrange(self.action_size)
act_values = self.model.predict(np.array(state).reshape((1, self.state_size)))
return np.argmax(act_values[0])
def unpack_batch(batch):
states, actions, rewards, dones, next_states = [], [], [], [], []
for state, action, reward, done, next_state in batch:
state = np.array(state, copy=False)
states.append(state)
actions.append(action)
rewards.append(reward)
dones.append(done)
if next_state is None:
next_states.append(state) # the result will be masked anyway
else:
next_states.append(np.array(next_state, copy=False))
return np.array(states, copy=False), np.array(actions), np.array(rewards, dtype=np.float32), \
np.array(dones, dtype=np.uint8), np.array(next_states, copy=False)
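# A toy, self-contained illustration of the Double DQN target assembled in DQNAgent.train():
# the online network selects the best next action, the target network evaluates it, and
# terminal transitions keep only the immediate reward. All numbers below are made up.
def example_double_dqn_targets(gamma=0.95):
    q_online_next = np.array([[1.0, 3.0], [2.0, 0.5]])  # online net on next states
    q_target_next = np.array([[0.8, 2.5], [1.5, 0.4]])  # target net on next states
    rewards = np.array([1.0, -1.0])
    dones = np.array([0, 1], dtype=np.uint8)
    best_actions = np.argmax(q_online_next, axis=1)  # actions 1 and 0
    next_values = q_target_next[np.arange(len(rewards)), best_actions] * (1 - dones)
    return rewards + gamma * next_values  # -> [1.0 + 0.95 * 2.5, -1.0]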
|
[
"numpy.argmax",
"random.sample",
"keras.optimizers.Adam",
"keras.layers.Dense",
"numpy.array",
"random.randrange",
"numpy.random.rand",
"keras.models.Sequential",
"collections.deque"
] |
[((721, 746), 'collections.deque', 'deque', ([], {'maxlen': 'memory_size'}), '(maxlen=memory_size)\n', (726, 746), False, 'from collections import deque\n'), ((1271, 1283), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1281, 1283), False, 'from keras.models import Sequential\n'), ((2019, 2069), 'random.sample', 'random.sample', (['self.memory', 'self.sample_batch_size'], {}), '(self.memory, self.sample_batch_size)\n', (2032, 2069), False, 'import random\n'), ((3739, 3763), 'numpy.argmax', 'np.argmax', (['act_values[0]'], {}), '(act_values[0])\n', (3748, 3763), True, 'import numpy as np\n'), ((3935, 3962), 'numpy.array', 'np.array', (['state'], {'copy': '(False)'}), '(state, copy=False)\n', (3943, 3962), True, 'import numpy as np\n'), ((4276, 4304), 'numpy.array', 'np.array', (['states'], {'copy': '(False)'}), '(states, copy=False)\n', (4284, 4304), True, 'import numpy as np\n'), ((4306, 4323), 'numpy.array', 'np.array', (['actions'], {}), '(actions)\n', (4314, 4323), True, 'import numpy as np\n'), ((4325, 4360), 'numpy.array', 'np.array', (['rewards'], {'dtype': 'np.float32'}), '(rewards, dtype=np.float32)\n', (4333, 4360), True, 'import numpy as np\n'), ((4375, 4406), 'numpy.array', 'np.array', (['dones'], {'dtype': 'np.uint8'}), '(dones, dtype=np.uint8)\n', (4383, 4406), True, 'import numpy as np\n'), ((4408, 4441), 'numpy.array', 'np.array', (['next_states'], {'copy': '(False)'}), '(next_states, copy=False)\n', (4416, 4441), True, 'import numpy as np\n'), ((1302, 1373), 'keras.layers.Dense', 'Dense', (['hidden_layers_size'], {'activation': '"""relu"""', 'input_dim': 'self.state_size'}), "(hidden_layers_size, activation='relu', input_dim=self.state_size)\n", (1307, 1373), False, 'from keras.layers import Dense\n'), ((1511, 1555), 'keras.layers.Dense', 'Dense', (['self.action_size'], {'activation': '"""linear"""'}), "(self.action_size, activation='linear')\n", (1516, 1555), False, 'from keras.layers import Dense\n'), ((3602, 3636), 'random.randrange', 'random.randrange', (['self.action_size'], {}), '(self.action_size)\n', (3618, 3636), False, 'import random\n'), ((1447, 1491), 'keras.layers.Dense', 'Dense', (['hidden_layers_size'], {'activation': '"""relu"""'}), "(hidden_layers_size, activation='relu')\n", (1452, 1491), False, 'from keras.layers import Dense\n'), ((1589, 1616), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'self.learning_rate'}), '(lr=self.learning_rate)\n', (1593, 1616), False, 'from keras.optimizers import Adam\n'), ((3521, 3537), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3535, 3537), True, 'import numpy as np\n'), ((4231, 4263), 'numpy.array', 'np.array', (['next_state'], {'copy': '(False)'}), '(next_state, copy=False)\n', (4239, 4263), True, 'import numpy as np\n'), ((3677, 3692), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (3685, 3692), True, 'import numpy as np\n')]
|
import os
import json
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
# t_stat, p_val = stats.ttest_ind(sample1, sample2, equal_var=False)
test_result_dir = "utils/testresults"
all_results = {}
aggregate_terms = [
"count", "valid", "missing", "distinct", "sum", "mean", "average",
"variance", "variancep", "stdev", "stdevp", "stderr", "median", "q1", "q3",
"ci0", "ci1", "min", "max", "argmin", "argmax"
]
file_paths = [
"/vizmodeluninat5.json", "/vizmodeluninat10.json",
"/vizmodeluninat15.json", "/vizmodeluninat20.json", "/vizmodeluni5.json",
"/vizmodeluni10.json", "/vizmodeluni15.json", "/vizmodeluni20.json",
"/vizmodelbi5.json", "/vizmodelbi10.json", "/vizmodelbi15.json",
"/vizmodelbi20.json"
]
def analyze_test_suite(test_dataset_directory):
# for subdir, dirs, files in os.walk(test_dataset_directory):
# for file in files:
# filepath = subdir + os.sep + file
# if filepath.endswith(
# "json") and not filepath.endswith("lsit.json"):
for filepath in file_paths:
filepath = test_result_dir + filepath
# data = json.load(open(filepath))
# print(filepath)
analyze_data(filepath)
def is_valid_aggregate(agg_val):
if (agg_val not in aggregate_terms):
# print("issh", agg_val)
return False
else:
return True
def computer_anova():
print("anova")
def analyze_data(filepath):
data = json.load(open(filepath))
beam_width = data["beamwidth"]
valid_json_array = []
valid_vega_array = []
phantom_count_array = []
x = list(range(0, 100))
for row in data["data"]:
valid_json_count = row["validjsoncount"] / beam_width
valid_json_array.append(valid_json_count)
valid_vega_count = row["validvegacount"]
vs_array = row["vegaspecarray"]
# mark specs with incorrect aggregation value as invalid vega
for vs_row in vs_array:
if ("aggregate" in vs_row["encoding"]["y"]):
if not is_valid_aggregate(
vs_row["encoding"]["y"]["aggregate"]):
valid_vega_count -= 1
else:
if ("aggregate" in vs_row["encoding"]["x"]):
if not is_valid_aggregate(
vs_row["encoding"]["x"]["aggregate"]):
valid_vega_count -= 1
# print(valid_vega_count, row["validjsoncount"])
valid_vegap_count = valid_vega_count
valid_vega_count = valid_vega_count / beam_width
valid_vega_array.append(valid_vega_count)
if (valid_vega_count == 0):
phantom_count = 0
else:
phantom_count = row["phantomcount"] / valid_vegap_count
phantom_count_array.append(phantom_count)
# print("Count", row["phantomcount"], valid_vegap_count)
# print(x, valid_json_array)
# plt.plot(x, valid_json_array)
# plt.plot(x, valid_vega_array)
# plt.plot(x, phantom_count_array)
# plt.show()
print(
filepath.split("vizmodel")[1], "Json:",
round(np.mean(valid_json_array), 3), "Vega",
round(np.mean(valid_vega_array), 3), "Mean % Phantom",
round(np.mean(phantom_count_array), 3))
result = {"json:": valid_json_array, "vega": valid_vega_array}
analyze_test_suite(test_result_dir)
# data = json.load(open("utils/testresults/vizmodelbi15.json"))
# print(len(data["data"]))
# analyze_data("utils/testresults/vizmodeluninat15.json")
|
[
"numpy.mean"
] |
[((3168, 3193), 'numpy.mean', 'np.mean', (['valid_json_array'], {}), '(valid_json_array)\n', (3175, 3193), True, 'import numpy as np\n'), ((3221, 3246), 'numpy.mean', 'np.mean', (['valid_vega_array'], {}), '(valid_vega_array)\n', (3228, 3246), True, 'import numpy as np\n'), ((3284, 3312), 'numpy.mean', 'np.mean', (['phantom_count_array'], {}), '(phantom_count_array)\n', (3291, 3312), True, 'import numpy as np\n')]
|
import numpy as np
import torch
from PIL import Image
import torchvision.transforms as T
from infer import Inference
from utils.nms import nms
torch.set_grad_enabled(False)
def class_agnostic_nms(boxes, scores, iou=0.5):
if len(boxes) > 1:
boxes, scores = nms(np.array(boxes), np.array(scores), iou)
return list(boxes), list(scores)
else:
return boxes, scores
def generate_image_crops(img, num_crops=8):
"""
    Note: num_crops must be greater than 2 and a multiple of 2
"""
assert num_crops > 2
assert num_crops % 2 == 0
# Get the image width and height
img_w, img_h = img.size
crops = []
coordinates = []
crops.append(img)
coordinates.append((0, 0, img_w, img_h))
crop_chunks_x = int(num_crops / 2)
crop_chunks_y = int(num_crops / crop_chunks_x)
x_inc = int(img_w / crop_chunks_y)
y_inc = int(img_h / crop_chunks_y)
x_space = np.linspace(0, img_w - x_inc, crop_chunks_y)
    y_space = np.linspace(0, img_h - y_inc, int(num_crops / crop_chunks_y))
    if num_crops > 1:
        for x in x_space:
            for y in y_space:
x1, y1 = x, y
x2, y2 = x1 + x_inc, y1 + y_inc
crops.append((img.crop((x1, y1, x2, y2))).resize((img_w, img_h)))
coordinates.append((x1, y1, x2, y2))
return crops, coordinates, (img_w, img_h)
def scale_boxes(boxes, coordinates, img_dims):
x1, y1, x2, y2 = coordinates
img_w, img_h = img_dims
w = x2 - x1
h = y2 - y1
for b in boxes:
b[0], b[1], b[2], b[3] = int((b[0] / img_w) * w) + x1, int((b[1] / img_h) * h) + y1, \
int((b[2] / img_w) * w) + x1, int((b[3] / img_h) * h) + y1
return boxes
class ModulatedDetection(Inference):
"""
    This class supports inference with both MDETR & MDef-DETR models.
"""
def __init__(self, model, confidence_thresh=0.0):
Inference.__init__(self, model)
self.conf_thresh = confidence_thresh
self.transform = T.Compose([
T.Resize(800),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
@staticmethod
def box_cxcywh_to_xyxy(x):
x_c, y_c, w, h = x.unbind(1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
(x_c + 0.5 * w), (y_c + 0.5 * h)]
return torch.stack(b, dim=1)
def rescale_bboxes(self, out_bbox, size):
img_w, img_h = size
b = self.box_cxcywh_to_xyxy(out_bbox)
b = b * torch.tensor([img_w, img_h, img_w, img_h], dtype=torch.float32)
return b
def infer_image(self, image_path, **kwargs):
caption = kwargs["caption"]
# Read the image
im = Image.open(image_path)
imq = np.array(im)
if len(imq.shape) != 3:
im = im.convert('RGB')
img = self.transform(im).unsqueeze(0).cuda()
# propagate through the models
memory_cache = self.model(img, [caption], encode_and_save=True)
outputs = self.model(img, [caption], encode_and_save=False, memory_cache=memory_cache)
# keep only predictions with self.conf_thresh+ confidence
probas = 1 - outputs['pred_logits'].softmax(-1)[0, :, -1].cpu()
keep = (probas > self.conf_thresh).cpu()
# convert boxes from [0; 1] to image scales
bboxes_scaled = self.rescale_bboxes(outputs['pred_boxes'].cpu()[0, keep], im.size)
kept_probs = probas[keep]
# Convert outputs to the required format
bboxes = list(bboxes_scaled.numpy())
probs = list(kept_probs.numpy())
boxes, scores = [], []
for b, conf in zip(bboxes, probs):
boxes.append([int(b[0]), int(b[1]), int(b[2]), int(b[3])])
scores.append(conf)
# Read image, perform inference, parse results, append the predicted boxes to detections
return boxes, scores
def infer_image_multi_crop(self, image_path, **kwargs):
caption = kwargs["caption"]
# Read the image
im = Image.open(image_path)
crops, coordinates, img_dims = generate_image_crops(im)
imgs = [self.transform(crop).unsqueeze(0).cuda() for crop in crops]
imgs = torch.cat(imgs)
# propagate through the models
memory_cache = self.model(imgs, [caption for i in range(imgs.shape[0])], encode_and_save=True)
outputs = self.model(imgs, [caption], encode_and_save=False, memory_cache=memory_cache)
all_boxes = []
all_scores = []
for i in range(len(crops)):
# keep only predictions with self.conf_thresh+ confidence
probas = 1 - outputs['pred_logits'].softmax(-1)[i, :, -1].cpu()
keep = (probas > self.conf_thresh).cpu()
# convert boxes from [0; 1] to image scales
bboxes_scaled = self.rescale_bboxes(outputs['pred_boxes'].cpu()[i, keep], im.size)
kept_probs = probas[keep]
# Convert outputs to the required format
bboxes = list(bboxes_scaled.numpy())
probs = list(kept_probs.numpy())
boxes, scores = [], []
for b, conf in zip(bboxes, probs):
boxes.append([int(b[0]), int(b[1]), int(b[2]), int(b[3])])
scores.append(conf)
# Read image, perform inference, parse results, append the predicted boxes to detections
boxes = scale_boxes(boxes, coordinates[i], img_dims)
all_boxes += boxes
all_scores += scores
        all_boxes, all_scores = class_agnostic_nms(all_boxes, all_scores)
return all_boxes, all_scores
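# --- Illustrative usage sketch (added; not part of the original file) ---
# The crop helpers above can be exercised on a blank PIL image; the detector
# itself needs a trained MDETR / MDef-DETR checkpoint, so that part is only
# sketched in comments and `load_model` is a hypothetical placeholder.
if __name__ == "__main__":
    demo_img = Image.new("RGB", (640, 480))
    crops, coords, dims = generate_image_crops(demo_img, num_crops=8)
    print(len(crops), "crops of size", dims)  # 1 full image + 8 tiles
    # Map a box found inside the second crop back to full-image coordinates.
    print(scale_boxes([[10, 10, 50, 50]], coords[1], dims))
    # model = load_model("mdef_detr_checkpoint.pth")  # hypothetical loader
    # detector = ModulatedDetection(model, confidence_thresh=0.3)
    # boxes, scores = detector.infer_image("image.jpg", caption="all objects")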
|
[
"torch.stack",
"torchvision.transforms.Normalize",
"torch.cat",
"PIL.Image.open",
"torchvision.transforms.ToTensor",
"numpy.array",
"numpy.linspace",
"torch.set_grad_enabled",
"torch.tensor",
"infer.Inference.__init__",
"torchvision.transforms.Resize"
] |
[((144, 173), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (166, 173), False, 'import torch\n'), ((926, 970), 'numpy.linspace', 'np.linspace', (['(0)', '(img_w - x_inc)', 'crop_chunks_y'], {}), '(0, img_w - x_inc, crop_chunks_y)\n', (937, 970), True, 'import numpy as np\n'), ((1941, 1972), 'infer.Inference.__init__', 'Inference.__init__', (['self', 'model'], {}), '(self, model)\n', (1959, 1972), False, 'from infer import Inference\n'), ((2385, 2406), 'torch.stack', 'torch.stack', (['b'], {'dim': '(1)'}), '(b, dim=1)\n', (2396, 2406), False, 'import torch\n'), ((2749, 2771), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (2759, 2771), False, 'from PIL import Image\n'), ((2786, 2798), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (2794, 2798), True, 'import numpy as np\n'), ((4062, 4084), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (4072, 4084), False, 'from PIL import Image\n'), ((4240, 4255), 'torch.cat', 'torch.cat', (['imgs'], {}), '(imgs)\n', (4249, 4255), False, 'import torch\n'), ((275, 290), 'numpy.array', 'np.array', (['boxes'], {}), '(boxes)\n', (283, 290), True, 'import numpy as np\n'), ((292, 308), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (300, 308), True, 'import numpy as np\n'), ((2544, 2607), 'torch.tensor', 'torch.tensor', (['[img_w, img_h, img_w, img_h]'], {'dtype': 'torch.float32'}), '([img_w, img_h, img_w, img_h], dtype=torch.float32)\n', (2556, 2607), False, 'import torch\n'), ((2067, 2080), 'torchvision.transforms.Resize', 'T.Resize', (['(800)'], {}), '(800)\n', (2075, 2080), True, 'import torchvision.transforms as T\n'), ((2094, 2106), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (2104, 2106), True, 'import torchvision.transforms as T\n'), ((2120, 2177), 'torchvision.transforms.Normalize', 'T.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (2131, 2177), True, 'import torchvision.transforms as T\n')]
|
# -*- coding: utf-8 -*-
import hashlib
import logging
import os
import tempfile
import time
import cv2
import numpy as np
WIDTH_HEIGHT_LIMIT = 1600 # in pixel
def resize_large_image(image_data):
    img_array = np.frombuffer(image_data, dtype=np.uint8)
image = cv2.imdecode(img_array, 1)
height, width = image.shape[:2]
logging.info("Height: {}, Width: {}".format(height, width))
if height > width and height > WIDTH_HEIGHT_LIMIT:
ratio = float(WIDTH_HEIGHT_LIMIT) / float(height)
new_width = int((width * ratio) + 0.5)
return cv2.resize(
image,
(new_width, WIDTH_HEIGHT_LIMIT),
interpolation=cv2.INTER_AREA
)
elif width > WIDTH_HEIGHT_LIMIT:
ratio = float(WIDTH_HEIGHT_LIMIT) / float(width)
new_height = int((height * ratio) + 0.5)
return cv2.resize(
image,
(WIDTH_HEIGHT_LIMIT, new_height),
interpolation=cv2.INTER_AREA
)
else:
return image
def resize_faces(image_files, width=96, height=96):
for image_file in image_files:
image = cv2.imread(image_file)
resized_image = cv2.resize(
image,
(width, height),
interpolation=cv2.INTER_AREA
)
cv2.imwrite(image_file, resized_image)
def cleanup_image_cache(image_dir, expire=3600): # Expire in 1 hour
now = time.time()
for f in os.listdir(image_dir):
f = os.path.join(image_dir, f)
if os.stat(f).st_mtime < now - expire:
if os.path.isfile(f):
os.remove(f)
def sha256_checksum(filename, block_size=65536):
sha256 = hashlib.sha256()
with open(filename, 'rb') as f:
for block in iter(lambda: f.read(block_size), b''):
sha256.update(block)
return sha256.hexdigest()
def get_hex_value(r, g, b):
def clamp(x):
return max(0, min(x, 255))
return "#{0:02x}{1:02x}{2:02x}".format(clamp(r), clamp(g), clamp(b))
def get_resized_face_temp_file(face_dict, cv2_img):
width, height = 96, 96
pos = face_dict['pos']
crop_img = cv2_img[pos.y:pos.y+pos.height, pos.x:pos.x+pos.width]
resized_img = cv2.resize(
crop_img,
(width, height),
interpolation=cv2.INTER_AREA
)
resized_path = None
with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as temp_ff:
resized_path = temp_ff.name
cv2.imwrite(temp_ff.name, resized_img)
return resized_path
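# --- Illustrative usage sketch (added; not part of the original module) ---
# Exercises the pure helpers above; the temporary file only exists to give
# sha256_checksum() something to hash.
if __name__ == '__main__':
    print(get_hex_value(300, 128, -5))  # channels clamp to [0, 255] -> "#ff8000"
    with tempfile.NamedTemporaryFile(delete=False, suffix='.txt') as tmp:
        tmp.write(b"hello")
        tmp_path = tmp.name
    print(sha256_checksum(tmp_path))
    os.remove(tmp_path)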
|
[
"tempfile.NamedTemporaryFile",
"os.remove",
"os.stat",
"cv2.imwrite",
"cv2.imdecode",
"time.time",
"hashlib.sha256",
"cv2.imread",
"numpy.fromstring",
"os.path.isfile",
"os.path.join",
"os.listdir",
"cv2.resize"
] |
[((216, 257), 'numpy.fromstring', 'np.fromstring', (['image_data'], {'dtype': 'np.uint8'}), '(image_data, dtype=np.uint8)\n', (229, 257), True, 'import numpy as np\n'), ((270, 296), 'cv2.imdecode', 'cv2.imdecode', (['img_array', '(1)'], {}), '(img_array, 1)\n', (282, 296), False, 'import cv2\n'), ((1407, 1418), 'time.time', 'time.time', ([], {}), '()\n', (1416, 1418), False, 'import time\n'), ((1432, 1453), 'os.listdir', 'os.listdir', (['image_dir'], {}), '(image_dir)\n', (1442, 1453), False, 'import os\n'), ((1668, 1684), 'hashlib.sha256', 'hashlib.sha256', ([], {}), '()\n', (1682, 1684), False, 'import hashlib\n'), ((2197, 2264), 'cv2.resize', 'cv2.resize', (['crop_img', '(width, height)'], {'interpolation': 'cv2.INTER_AREA'}), '(crop_img, (width, height), interpolation=cv2.INTER_AREA)\n', (2207, 2264), False, 'import cv2\n'), ((572, 657), 'cv2.resize', 'cv2.resize', (['image', '(new_width, WIDTH_HEIGHT_LIMIT)'], {'interpolation': 'cv2.INTER_AREA'}), '(image, (new_width, WIDTH_HEIGHT_LIMIT), interpolation=cv2.INTER_AREA\n )\n', (582, 657), False, 'import cv2\n'), ((1121, 1143), 'cv2.imread', 'cv2.imread', (['image_file'], {}), '(image_file)\n', (1131, 1143), False, 'import cv2\n'), ((1168, 1232), 'cv2.resize', 'cv2.resize', (['image', '(width, height)'], {'interpolation': 'cv2.INTER_AREA'}), '(image, (width, height), interpolation=cv2.INTER_AREA)\n', (1178, 1232), False, 'import cv2\n'), ((1287, 1325), 'cv2.imwrite', 'cv2.imwrite', (['image_file', 'resized_image'], {}), '(image_file, resized_image)\n', (1298, 1325), False, 'import cv2\n'), ((1467, 1493), 'os.path.join', 'os.path.join', (['image_dir', 'f'], {}), '(image_dir, f)\n', (1479, 1493), False, 'import os\n'), ((2328, 2384), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)', 'suffix': '""".jpg"""'}), "(delete=False, suffix='.jpg')\n", (2355, 2384), False, 'import tempfile\n'), ((2441, 2479), 'cv2.imwrite', 'cv2.imwrite', (['temp_ff.name', 'resized_img'], {}), '(temp_ff.name, resized_img)\n', (2452, 2479), False, 'import cv2\n'), ((857, 943), 'cv2.resize', 'cv2.resize', (['image', '(WIDTH_HEIGHT_LIMIT, new_height)'], {'interpolation': 'cv2.INTER_AREA'}), '(image, (WIDTH_HEIGHT_LIMIT, new_height), interpolation=cv2.\n INTER_AREA)\n', (867, 943), False, 'import cv2\n'), ((1556, 1573), 'os.path.isfile', 'os.path.isfile', (['f'], {}), '(f)\n', (1570, 1573), False, 'import os\n'), ((1505, 1515), 'os.stat', 'os.stat', (['f'], {}), '(f)\n', (1512, 1515), False, 'import os\n'), ((1591, 1603), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (1600, 1603), False, 'import os\n')]
|
import importlib
from hydroDL.master import basins
from hydroDL.app import waterQuality, wqLinear
from hydroDL import kPath
from hydroDL.model import trainTS
from hydroDL.data import gageII, usgs
from hydroDL.post import axplot, figplot
import torch
import os
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# test
outName = 'Silica64-Y8090-00955-opt1'
wqData = waterQuality.DataModelWQ('Silica64')
code = '00955'
trainset = 'Y8090'
testset = 'Y0010'
# trainset = 'Y0010'
# testset = 'Y8090'
optT = trainset
master = basins.loadMaster(outName)
# seq test
siteNoLst = wqData.info['siteNo'].unique().tolist()
basins.testModelSeq(outName, siteNoLst, wqData=wqData)
ns = len(siteNoLst)
# calculate error from sequence
rmseMat = np.ndarray([ns, 2])
corrMat = np.ndarray([ns, 2])
for k, siteNo in enumerate(siteNoLst):
print(k, siteNo)
dfPred, dfObs = basins.loadSeq(outName, siteNo)
rmseLSTM, corrLSTM = waterQuality.calErrSeq(dfPred[code], dfObs[code])
rmseMat[k, :] = rmseLSTM
corrMat[k, :] = corrLSTM
# time series map
dfCrd = gageII.readData(
varLst=['LAT_GAGE', 'LNG_GAGE'], siteNoLst=siteNoLst)
lat = dfCrd['LAT_GAGE'].values
lon = dfCrd['LNG_GAGE'].values
codePdf = usgs.codePdf
def funcMap():
figM, axM = plt.subplots(2, 1, figsize=(8, 6))
axplot.mapPoint(axM[0], lat, lon, corrMat[:, 0]-corrMat[:, 1], s=12)
axplot.mapPoint(axM[1], lat, lon, corrMat[:, 1], s=12)
figP, axP = plt.subplots(1, 1, figsize=(8, 6))
return figM, axM, figP, axP, lon, lat
def funcPoint(iP, axP):
siteNo = siteNoLst[iP]
dfP1, dfObs = basins.loadSeq(outName, siteNo)
rmse1, corr1 = waterQuality.calErrSeq(dfP1[code], dfObs[code])
t = dfObs.index.values
tBar = np.datetime64('2000-01-01')
axplot.plotTS(axP, t, [dfP1[code], dfObs[code]], tBar=tBar,
legLst=['LSTM', 'obs'], styLst='-*', cLst='br')
tStr = '{}, rmse [{:.2f} {:.2f}], corr [{:.2f} {:.2f}]'.format(
siteNo, rmse1[0], rmse1[1], corr1[0], corr1[1])
axP.set_title(tStr)
importlib.reload(figplot)
figM, figP = figplot.clickMap(funcMap, funcPoint)
for ax in figP.axes:
ax.set_xlim(np.datetime64('2010-01-01'), np.datetime64('2015-01-01'))
figP.canvas.draw()
for ax in figP.axes:
ax.set_xlim(np.datetime64('1990-01-01'), np.datetime64('1995-01-01'))
figP.canvas.draw()
for ax in figP.axes:
ax.set_xlim(np.datetime64('1980-01-01'), np.datetime64('2020-01-01'))
figP.canvas.draw()
for ax in figP.axes:
ax.set_ylim(5, 30)
figP.canvas.draw()
|
[
"hydroDL.post.axplot.plotTS",
"hydroDL.app.waterQuality.calErrSeq",
"hydroDL.post.figplot.clickMap",
"numpy.datetime64",
"matplotlib.pyplot.subplots",
"hydroDL.app.waterQuality.DataModelWQ",
"hydroDL.master.basins.loadSeq",
"importlib.reload",
"hydroDL.master.basins.loadMaster",
"hydroDL.post.axplot.mapPoint",
"hydroDL.data.gageII.readData",
"numpy.ndarray",
"hydroDL.master.basins.testModelSeq"
] |
[((400, 436), 'hydroDL.app.waterQuality.DataModelWQ', 'waterQuality.DataModelWQ', (['"""Silica64"""'], {}), "('Silica64')\n", (424, 436), False, 'from hydroDL.app import waterQuality, wqLinear\n'), ((555, 581), 'hydroDL.master.basins.loadMaster', 'basins.loadMaster', (['outName'], {}), '(outName)\n', (572, 581), False, 'from hydroDL.master import basins\n'), ((646, 700), 'hydroDL.master.basins.testModelSeq', 'basins.testModelSeq', (['outName', 'siteNoLst'], {'wqData': 'wqData'}), '(outName, siteNoLst, wqData=wqData)\n', (665, 700), False, 'from hydroDL.master import basins\n'), ((763, 782), 'numpy.ndarray', 'np.ndarray', (['[ns, 2]'], {}), '([ns, 2])\n', (773, 782), True, 'import numpy as np\n'), ((793, 812), 'numpy.ndarray', 'np.ndarray', (['[ns, 2]'], {}), '([ns, 2])\n', (803, 812), True, 'import numpy as np\n'), ((1085, 1154), 'hydroDL.data.gageII.readData', 'gageII.readData', ([], {'varLst': "['LAT_GAGE', 'LNG_GAGE']", 'siteNoLst': 'siteNoLst'}), "(varLst=['LAT_GAGE', 'LNG_GAGE'], siteNoLst=siteNoLst)\n", (1100, 1154), False, 'from hydroDL.data import gageII, usgs\n'), ((2054, 2079), 'importlib.reload', 'importlib.reload', (['figplot'], {}), '(figplot)\n', (2070, 2079), False, 'import importlib\n'), ((2093, 2129), 'hydroDL.post.figplot.clickMap', 'figplot.clickMap', (['funcMap', 'funcPoint'], {}), '(funcMap, funcPoint)\n', (2109, 2129), False, 'from hydroDL.post import axplot, figplot\n'), ((893, 924), 'hydroDL.master.basins.loadSeq', 'basins.loadSeq', (['outName', 'siteNo'], {}), '(outName, siteNo)\n', (907, 924), False, 'from hydroDL.master import basins\n'), ((950, 999), 'hydroDL.app.waterQuality.calErrSeq', 'waterQuality.calErrSeq', (['dfPred[code]', 'dfObs[code]'], {}), '(dfPred[code], dfObs[code])\n', (972, 999), False, 'from hydroDL.app import waterQuality, wqLinear\n'), ((1278, 1312), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(8, 6)'}), '(2, 1, figsize=(8, 6))\n', (1290, 1312), True, 'import matplotlib.pyplot as plt\n'), ((1317, 1387), 'hydroDL.post.axplot.mapPoint', 'axplot.mapPoint', (['axM[0]', 'lat', 'lon', '(corrMat[:, 0] - corrMat[:, 1])'], {'s': '(12)'}), '(axM[0], lat, lon, corrMat[:, 0] - corrMat[:, 1], s=12)\n', (1332, 1387), False, 'from hydroDL.post import axplot, figplot\n'), ((1390, 1444), 'hydroDL.post.axplot.mapPoint', 'axplot.mapPoint', (['axM[1]', 'lat', 'lon', 'corrMat[:, 1]'], {'s': '(12)'}), '(axM[1], lat, lon, corrMat[:, 1], s=12)\n', (1405, 1444), False, 'from hydroDL.post import axplot, figplot\n'), ((1461, 1495), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(8, 6)'}), '(1, 1, figsize=(8, 6))\n', (1473, 1495), True, 'import matplotlib.pyplot as plt\n'), ((1609, 1640), 'hydroDL.master.basins.loadSeq', 'basins.loadSeq', (['outName', 'siteNo'], {}), '(outName, siteNo)\n', (1623, 1640), False, 'from hydroDL.master import basins\n'), ((1660, 1707), 'hydroDL.app.waterQuality.calErrSeq', 'waterQuality.calErrSeq', (['dfP1[code]', 'dfObs[code]'], {}), '(dfP1[code], dfObs[code])\n', (1682, 1707), False, 'from hydroDL.app import waterQuality, wqLinear\n'), ((1746, 1773), 'numpy.datetime64', 'np.datetime64', (['"""2000-01-01"""'], {}), "('2000-01-01')\n", (1759, 1773), True, 'import numpy as np\n'), ((1778, 1889), 'hydroDL.post.axplot.plotTS', 'axplot.plotTS', (['axP', 't', '[dfP1[code], dfObs[code]]'], {'tBar': 'tBar', 'legLst': "['LSTM', 'obs']", 'styLst': '"""-*"""', 'cLst': '"""br"""'}), "(axP, t, [dfP1[code], dfObs[code]], tBar=tBar, legLst=['LSTM',\n 'obs'], styLst='-*', cLst='br')\n", (1791, 
1889), False, 'from hydroDL.post import axplot, figplot\n'), ((2168, 2195), 'numpy.datetime64', 'np.datetime64', (['"""2010-01-01"""'], {}), "('2010-01-01')\n", (2181, 2195), True, 'import numpy as np\n'), ((2197, 2224), 'numpy.datetime64', 'np.datetime64', (['"""2015-01-01"""'], {}), "('2015-01-01')\n", (2210, 2224), True, 'import numpy as np\n'), ((2283, 2310), 'numpy.datetime64', 'np.datetime64', (['"""1990-01-01"""'], {}), "('1990-01-01')\n", (2296, 2310), True, 'import numpy as np\n'), ((2312, 2339), 'numpy.datetime64', 'np.datetime64', (['"""1995-01-01"""'], {}), "('1995-01-01')\n", (2325, 2339), True, 'import numpy as np\n'), ((2398, 2425), 'numpy.datetime64', 'np.datetime64', (['"""1980-01-01"""'], {}), "('1980-01-01')\n", (2411, 2425), True, 'import numpy as np\n'), ((2427, 2454), 'numpy.datetime64', 'np.datetime64', (['"""2020-01-01"""'], {}), "('2020-01-01')\n", (2440, 2454), True, 'import numpy as np\n')]
|
import vne
from vne.constants import nfeat, nsensors
from vne.model import simpleModel, init_weights_simple
from vne.persist import save_model, load_model
import numpy as np
import torch
import os
def encode_save(sig=np.random.random([1, nfeat, nsensors]), name='simpleModel_ini', dir_path="../src/vne/models"):
"""
This function will create a model, test it, then save a persistent version (a file)
Parameters
    ----------
sig:
a numpy array with shape (nsamples, nfeatures, nsensors). in general a single neural signal may contain multiple channels.
the multi-channel nature of the neural activations is a feature of the vne.
        typically shaped with size (1,1,S) where S is the number of sensors
the vne will map all the different channels to a single scalar encoded signal.
name:
string with the filename to save the model under
    dir_path:
        the local directory to save the model
Returns
--------
model:
A copy of the encoder model generated by the function
"""
model = simpleModel().eval()
model.apply(init_weights_simple)
sig = torch.tensor(sig.astype(np.float32)).to('cpu')
enc = model(sig)
print("signal={}".format(sig))
print("encoded={}".format(enc))
# save the model
model.apply(vne.init_weights_simple)
save_model(encoder=model, name=name, dir_path=dir_path)
return model
def encode_load(sig=np.random.random([1, nfeat, nsensors]), name="simpleModel_ini", dir_path="../src/vne/models"):
"""
This function will load a saved model, test it, then save a persistent version (a file)
Parameters
----------
sig:
a numpy array with shape (nsamples, nfeatures, nsensors). in general a single neural signal may contain multiple channels.
the multi-channel nature of the neural activations is a feature of the vne.
typically shaped with size (1,1,S) where S is number os sensors
the vne will map all the different channels to a single scalar encoded signal.
name:
the filename of the saved model
dir_path:
the directory path to the folder containing the file with the saved model
"""
# load the saved model
model = load_model(name, dir_path)
# do some stuff
# save the model
model.apply(vne.init_weights_simple)
return model
# Function to Convert to ONNX
def Convert_ONNX(model=None, name="simpleModel_ini", dir_path="../src/vne/models"):
if model is None:
model = encode_load()
# set the model to inference mode (making sure)
model.eval()
name = os.path.join(dir_path, name)
# Let's create a dummy input tensor
dummy_input = torch.randn(1, nfeat, nsensors, requires_grad=True)
# Export the model
torch.onnx.export(model, # model being run
dummy_input, # model input (or a tuple for multiple inputs)
name + ".onnx", # where to save the model
export_params=True, # store the trained parameter weights inside the model file
opset_version=9, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names=['sensorData'], # the model's input names
output_names=['modelOutput'], # the model's output names
                      dynamic_axes={'sensorData': {0: 'batch_size'},    # variable length axes (keys must match the input/output names above)
                                    'modelOutput': {0: 'batch_size'}})
print(" ")
print('Model has been converted to ONNX')
if __name__ == '__main__':
# load persistent model from file
model_name = 'simpleModel_ini-Trivial19'
model = encode_save(name=model_name)
print("saved model")
# use the model
sig = np.random.random([1, nfeat, nsensors])
sig = torch.tensor(sig.astype(np.float32)).to('cpu')
enc = model(sig)
print("signal={}".format(sig))
print("encoded={}".format(enc))
print("ran model")
Convert_ONNX(model, name=model_name)
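    # --- Illustrative check (added; not part of the original script) ---
    # One way to confirm the export is to reload it with onnxruntime; that is an
    # extra dependency not used elsewhere here, so the sketch stays commented out.
    # import onnxruntime as ort
    # sess = ort.InferenceSession(os.path.join("../src/vne/models", model_name + ".onnx"))
    # onnx_out = sess.run(None, {"sensorData": sig.cpu().numpy()})
    # print("onnxruntime output={}".format(onnx_out))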
|
[
"vne.persist.save_model",
"torch.onnx.export",
"vne.model.simpleModel",
"torch.randn",
"vne.persist.load_model",
"numpy.random.random",
"os.path.join"
] |
[((219, 257), 'numpy.random.random', 'np.random.random', (['[1, nfeat, nsensors]'], {}), '([1, nfeat, nsensors])\n', (235, 257), True, 'import numpy as np\n'), ((1351, 1406), 'vne.persist.save_model', 'save_model', ([], {'encoder': 'model', 'name': 'name', 'dir_path': 'dir_path'}), '(encoder=model, name=name, dir_path=dir_path)\n', (1361, 1406), False, 'from vne.persist import save_model, load_model\n'), ((1446, 1484), 'numpy.random.random', 'np.random.random', (['[1, nfeat, nsensors]'], {}), '([1, nfeat, nsensors])\n', (1462, 1484), True, 'import numpy as np\n'), ((2249, 2275), 'vne.persist.load_model', 'load_model', (['name', 'dir_path'], {}), '(name, dir_path)\n', (2259, 2275), False, 'from vne.persist import save_model, load_model\n'), ((2626, 2654), 'os.path.join', 'os.path.join', (['dir_path', 'name'], {}), '(dir_path, name)\n', (2638, 2654), False, 'import os\n'), ((2713, 2764), 'torch.randn', 'torch.randn', (['(1)', 'nfeat', 'nsensors'], {'requires_grad': '(True)'}), '(1, nfeat, nsensors, requires_grad=True)\n', (2724, 2764), False, 'import torch\n'), ((2793, 3065), 'torch.onnx.export', 'torch.onnx.export', (['model', 'dummy_input', "(name + '.onnx')"], {'export_params': '(True)', 'opset_version': '(9)', 'do_constant_folding': '(True)', 'input_names': "['sensorData']", 'output_names': "['modelOutput']", 'dynamic_axes': "{'modelInput': {(0): 'batch_size'}, 'modelOutput': {(0): 'batch_size'}}"}), "(model, dummy_input, name + '.onnx', export_params=True,\n opset_version=9, do_constant_folding=True, input_names=['sensorData'],\n output_names=['modelOutput'], dynamic_axes={'modelInput': {(0):\n 'batch_size'}, 'modelOutput': {(0): 'batch_size'}})\n", (2810, 3065), False, 'import torch\n'), ((3865, 3903), 'numpy.random.random', 'np.random.random', (['[1, nfeat, nsensors]'], {}), '([1, nfeat, nsensors])\n', (3881, 3903), True, 'import numpy as np\n'), ((1077, 1090), 'vne.model.simpleModel', 'simpleModel', ([], {}), '()\n', (1088, 1090), False, 'from vne.model import simpleModel, init_weights_simple\n')]
|
import os
import argparse
import configargparse
import time
import glob
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import functional as VF
from torch.nn import functional as F
import cv2
from PIL import Image
import model
import data
from eval.kitti_depth_eval_utils import *
from eval.depth_eval_utils import *
from data import create_dataset
import opts
class DirDataset(Dataset):
def __init__(self, dir, height, width, crop=None):
'''
crop: (top, left, height, width)
'''
        self.filenames = glob.glob(os.path.join(dir, "*.jpg"))
self.height = height
self.width = width
self.crop = crop
def __getitem__(self, idx):
img = Image.open(self.filenames[idx])
if img.size[0] != self.width or img.size[1] != self.height:
img = img.resize((self.width, self.height), resample=Image.LANCZOS)
else:
print("No resize required")
if self.crop is not None:
img = img.crop(
(self.crop[1], self.crop[0], self.crop[1] + self.crop[3], self.crop[0] + self.crop[2]))
img = VF.to_tensor(img)
return {'path': self.filenames[idx], 'img': img}
def __len__(self):
return len(self.filenames)
if __name__ == '__main__':
args = opts.parse_args()
args.seq_len = 1
args.workers = 0
checkpoint = torch.load(args.checkpoint)
os.makedirs(args.output_dir, exist_ok=True)
model = checkpoint['model']
model.to(args.device)
model.eval()
dataset = DirDataset(args.input_dir, args.height, args.width)
dataloader = DataLoader(dataset, batch_size=12,
shuffle=False, num_workers=args.workers)
for i, batch in enumerate(dataloader):
with torch.no_grad():
fnames, imgs = batch['path'], batch['img']
imgs = imgs.to(args.device)
imgs_normalized = VF.normalize(
imgs, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
depths, _, _ = model.depth_net(imgs_normalized)
depths = depths[0].cpu().numpy()
depths = np.squeeze(depths, 1)
            assert len(depths) == len(fnames)
vmin = np.min(1 / depths)
vmax = np.percentile(1 / depths, 95)
disps = 1 / depths
disps_rgb = convert_util.gray_to_rgb_np(
disps, cmap='magma', lb=vmin, ub=vmax)
for j in range(len(fnames)):
filename = fnames[j].split('/')[-1]
outname_noext = os.path.join(args.output_dir, filename[:-4])
if args.output_type == 'png':
cv2.imwrite(
outname_noext + '_pred.png',
255 * disps_rgb[j])
else:
np.savez(outname_noext + '.npz', disps_rgb[j])
|
[
"torchvision.transforms.functional.normalize",
"model.to",
"os.makedirs",
"torch.utils.data.DataLoader",
"torchvision.transforms.functional.to_tensor",
"os.path.join",
"torch.load",
"cv2.imwrite",
"numpy.savez",
"numpy.percentile",
"numpy.min",
"model.eval",
"numpy.squeeze",
"model.depth_net",
"torch.no_grad",
"opts.parse_args"
] |
[((1346, 1363), 'opts.parse_args', 'opts.parse_args', ([], {}), '()\n', (1361, 1363), False, 'import opts\n'), ((1425, 1452), 'torch.load', 'torch.load', (['args.checkpoint'], {}), '(args.checkpoint)\n', (1435, 1452), False, 'import torch\n'), ((1458, 1501), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {'exist_ok': '(True)'}), '(args.output_dir, exist_ok=True)\n', (1469, 1501), False, 'import os\n'), ((1539, 1560), 'model.to', 'model.to', (['args.device'], {}), '(args.device)\n', (1547, 1560), False, 'import model\n'), ((1565, 1577), 'model.eval', 'model.eval', ([], {}), '()\n', (1575, 1577), False, 'import model\n'), ((1662, 1737), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(12)', 'shuffle': '(False)', 'num_workers': 'args.workers'}), '(dataset, batch_size=12, shuffle=False, num_workers=args.workers)\n', (1672, 1737), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((1170, 1187), 'torchvision.transforms.functional.to_tensor', 'VF.to_tensor', (['img'], {}), '(img)\n', (1182, 1187), True, 'from torchvision.transforms import functional as VF\n'), ((586, 623), 'os.path.join', 'os.path.join', (['args.input_dir', '"""*.jpg"""'], {}), "(args.input_dir, '*.jpg')\n", (598, 623), False, 'import os\n'), ((1823, 1838), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1836, 1838), False, 'import torch\n'), ((1965, 2038), 'torchvision.transforms.functional.normalize', 'VF.normalize', (['imgs'], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(imgs, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (1977, 2038), True, 'from torchvision.transforms import functional as VF\n'), ((2084, 2116), 'model.depth_net', 'model.depth_net', (['imgs_normalized'], {}), '(imgs_normalized)\n', (2099, 2116), False, 'import model\n'), ((2183, 2204), 'numpy.squeeze', 'np.squeeze', (['depths', '(1)'], {}), '(depths, 1)\n', (2193, 2204), True, 'import numpy as np\n'), ((2270, 2288), 'numpy.min', 'np.min', (['(1 / depths)'], {}), '(1 / depths)\n', (2276, 2288), True, 'import numpy as np\n'), ((2308, 2337), 'numpy.percentile', 'np.percentile', (['(1 / depths)', '(95)'], {}), '(1 / depths, 95)\n', (2321, 2337), True, 'import numpy as np\n'), ((2604, 2648), 'os.path.join', 'os.path.join', (['args.output_dir', 'filename[:-4]'], {}), '(args.output_dir, filename[:-4])\n', (2616, 2648), False, 'import os\n'), ((2715, 2775), 'cv2.imwrite', 'cv2.imwrite', (["(outname_noext + '_pred.png')", '(255 * disps_rgb[j])'], {}), "(outname_noext + '_pred.png', 255 * disps_rgb[j])\n", (2726, 2775), False, 'import cv2\n'), ((2867, 2913), 'numpy.savez', 'np.savez', (["(outname_noext + '.npz')", 'disps_rgb[j]'], {}), "(outname_noext + '.npz', disps_rgb[j])\n", (2875, 2913), True, 'import numpy as np\n')]
|
"""Armijo rule."""
import numpy as np
from optimus.types import LRMethod, Function
class Armijo(LRMethod):
"""Armijo method for finding Learning Rate.
This method successively reduces the learning rate until it finds the
resulting change to be as good as a linear approximation of the function.
"""
def __init__(
self,
initial_lr: float,
tolerance: float,
decrease_factor: float,
max_iters: int = 10,
):
self.initial_lr = initial_lr
self.tolerance = tolerance
self.decrease_factor = decrease_factor
self.max_iters = max_iters
def __call__(
self,
parameters: np.ndarray,
function_value: float,
gradient: np.ndarray,
direction: np.ndarray,
step: int,
objective_function: Function,
) -> float:
lr = self.initial_lr
def new_value(lr):
return function_value - objective_function(parameters - lr * direction)
def desired_value(lr):
return self.tolerance * lr * np.dot(gradient, direction)
        # Cap the backtracking at max_iters so the search cannot loop forever.
        for _ in range(self.max_iters):
            if new_value(lr) >= desired_value(lr):
                break
            lr *= self.decrease_factor
        return lr
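# --- Illustrative usage sketch (added; not part of the original module) ---
# Backtracking on the quadratic f(x) = ||x||^2, whose gradient is 2x, using the
# gradient itself as the descent direction; all constants are arbitrary.
if __name__ == "__main__":
    def quadratic(x: np.ndarray) -> float:
        return float(np.dot(x, x))

    x0 = np.array([3.0, -4.0])
    grad = 2.0 * x0
    armijo = Armijo(initial_lr=1.0, tolerance=0.1, decrease_factor=0.5)
    lr = armijo(
        parameters=x0,
        function_value=quadratic(x0),
        gradient=grad,
        direction=grad,
        step=0,
        objective_function=quadratic,
    )
    print("accepted learning rate:", lr)  # 0.5 for these numbers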
|
[
"numpy.dot"
] |
[((1071, 1098), 'numpy.dot', 'np.dot', (['gradient', 'direction'], {}), '(gradient, direction)\n', (1077, 1098), True, 'import numpy as np\n')]
|
from astropy.cosmology.funcs import z_at_value
import numpy as np
from pyHalo.single_realization import SingleHalo
import numpy.testing as npt
import numpy as np
from pyHalo.Halos.HaloModels.ULDM import ULDMFieldHalo, ULDMSubhalo
from pyHalo.Halos.lens_cosmo import LensCosmo
from pyHalo.Cosmology.cosmology import Cosmology
from lenstronomy.LensModel.Profiles.cnfw import CNFW
from lenstronomy.LensModel.Profiles.nfw import NFW
from lenstronomy.LensModel.Profiles.uldm import Uldm
import pytest
class TestULDMHalo(object):
def setup(self):
mass = 1e9
x = 0.5
y = 1.
r3d = np.sqrt(1 + 0.5 ** 2 + 70**2)
self.r3d = r3d
self.z = 0.25
sub_flag = True
mdef = 'ULDM'
self.H0 = 70
self.omega_baryon = 0.03
self.omega_DM = 0.25
self.sigma8 = 0.82
curvature = 'flat'
self.ns = 0.9608
cosmo_params = {'H0': self.H0, 'Om0': self.omega_baryon + self.omega_DM, 'Ob0': self.omega_baryon,
'sigma8': self.sigma8, 'ns': self.ns, 'curvature': curvature}
self._dm, self._bar = self.omega_DM, self.omega_baryon
cosmo = Cosmology(cosmo_kwargs=cosmo_params)
self.lens_cosmo = LensCosmo(self.z, 2., cosmo)
profile_args = {'RocheNorm': 1.2, 'RocheNu': 2/3,
'evaluate_mc_at_zlens': False,
'log_mc': None, 'c_scale': 60.,
'c_power': -0.17, 'c_scatter': False,
'mc_model': 'diemer19', 'LOS_truncation_factor': 40,
'c_scatter_dex': 0.1, 'mc_mdef': '200c',
'log10_m_uldm':-22, 'uldm_plaw':1/3}
self.subhalo = ULDMSubhalo(mass, x, y, r3d, mdef, self.z,
sub_flag, self.lens_cosmo,
profile_args, unique_tag=np.random.rand())
self.fieldhalo = ULDMFieldHalo(mass, x, y, r3d, mdef, self.z,
sub_flag, self.lens_cosmo,
profile_args, unique_tag=np.random.rand())
def test_lenstronomy_ID(self):
ID = self.fieldhalo.lenstronomy_ID
npt.assert_string_equal(ID[0], 'CNFW')
npt.assert_string_equal(ID[1], 'ULDM')
ID = self.subhalo.lenstronomy_ID
npt.assert_string_equal(ID[0], 'CNFW')
npt.assert_string_equal(ID[1], 'ULDM')
def test_redshift_eval(self):
z_subhalo = self.subhalo.z_eval
z_field = self.fieldhalo.z_eval
npt.assert_equal(z_field, self.z)
# because the concentration is evaluated at infall, and z_infall > z
npt.assert_equal(True, z_subhalo > z_field)
def test_profile_load(self):
# test cored composite profile
profile_args = {'log10_m_uldm': -22, 'uldm_plaw': 1/3, 'scale_nfw':False}
single_halo = SingleHalo(1e8, 0.5, 0.5, 'ULDM', 0.5, 0.5, 1.5, None, True, profile_args, None)
lens_model_list, redshift_array, kwargs_lens, numerical_interp = single_halo.\
lensing_quantities(add_mass_sheet_correction=False)
npt.assert_string_equal(lens_model_list[1], 'ULDM')
npt.assert_string_equal(lens_model_list[0], 'CNFW')
npt.assert_equal(True, len(kwargs_lens)==2)
npt.assert_equal(True, len(redshift_array)==2)
def test_profile_normalization(self):
"""
Test that the mass enclosed within r200 of the composite profile is correct
and check that the ULDM core density is correct.
"""
profile_args = {'log10_m_uldm': -21, 'uldm_plaw': 1/3, 'scale_nfw':True}
mass = 1e10
zl = 0.5
zs = 1.5
single_halo = SingleHalo(mass, 0.5, 0.5, 'ULDM', zl, zl, zs, None, True, profile_args, None)
_, _, kwargs_lens, _ = single_halo.lensing_quantities(add_mass_sheet_correction=False)
Rs_angle, _ = single_halo.halos[0].lens_cosmo.nfw_physical2angle(mass, single_halo.halos[0].c, zl)
sigma_crit = single_halo.halos[0].lens_cosmo.sigmacrit
r200 = single_halo.halos[0].c * Rs_angle
cnfw_kwargs, uldm_kwargs = kwargs_lens
M_nfw = CNFW().mass_3d_lens(r200, cnfw_kwargs['Rs'], cnfw_kwargs['alpha_Rs']*sigma_crit, cnfw_kwargs['r_core'])
M_uldm = Uldm().mass_3d_lens(r200, uldm_kwargs['kappa_0']*sigma_crit, uldm_kwargs['theta_c'])
npt.assert_almost_equal((M_uldm+M_nfw)/mass,1,decimal=2) # less than 1% error
_,theta_c,kappa_0 = single_halo.halos[0].profile_args
rho0 = Uldm().density_lens(0,uldm_kwargs['kappa_0'],
uldm_kwargs['theta_c'])
rhos = CNFW().density_lens(0,cnfw_kwargs['Rs'],
cnfw_kwargs['alpha_Rs'],
cnfw_kwargs['r_core'])
rho_goal = Uldm().density_lens(0,kappa_0,theta_c)
npt.assert_array_less(np.array([1-(rho0+rhos)/rho_goal]),np.array([0.02])) # less than 2% error
if __name__ == '__main__':
pytest.main()
|
[
"pyHalo.Cosmology.cosmology.Cosmology",
"lenstronomy.LensModel.Profiles.uldm.Uldm",
"lenstronomy.LensModel.Profiles.cnfw.CNFW",
"numpy.testing.assert_almost_equal",
"pyHalo.single_realization.SingleHalo",
"pytest.main",
"numpy.testing.assert_string_equal",
"numpy.array",
"numpy.testing.assert_equal",
"numpy.random.rand",
"pyHalo.Halos.lens_cosmo.LensCosmo",
"numpy.sqrt"
] |
[((5016, 5029), 'pytest.main', 'pytest.main', ([], {}), '()\n', (5027, 5029), False, 'import pytest\n'), ((613, 644), 'numpy.sqrt', 'np.sqrt', (['(1 + 0.5 ** 2 + 70 ** 2)'], {}), '(1 + 0.5 ** 2 + 70 ** 2)\n', (620, 644), True, 'import numpy as np\n'), ((1168, 1204), 'pyHalo.Cosmology.cosmology.Cosmology', 'Cosmology', ([], {'cosmo_kwargs': 'cosmo_params'}), '(cosmo_kwargs=cosmo_params)\n', (1177, 1204), False, 'from pyHalo.Cosmology.cosmology import Cosmology\n'), ((1231, 1260), 'pyHalo.Halos.lens_cosmo.LensCosmo', 'LensCosmo', (['self.z', '(2.0)', 'cosmo'], {}), '(self.z, 2.0, cosmo)\n', (1240, 1260), False, 'from pyHalo.Halos.lens_cosmo import LensCosmo\n'), ((2208, 2246), 'numpy.testing.assert_string_equal', 'npt.assert_string_equal', (['ID[0]', '"""CNFW"""'], {}), "(ID[0], 'CNFW')\n", (2231, 2246), True, 'import numpy.testing as npt\n'), ((2255, 2293), 'numpy.testing.assert_string_equal', 'npt.assert_string_equal', (['ID[1]', '"""ULDM"""'], {}), "(ID[1], 'ULDM')\n", (2278, 2293), True, 'import numpy.testing as npt\n'), ((2344, 2382), 'numpy.testing.assert_string_equal', 'npt.assert_string_equal', (['ID[0]', '"""CNFW"""'], {}), "(ID[0], 'CNFW')\n", (2367, 2382), True, 'import numpy.testing as npt\n'), ((2391, 2429), 'numpy.testing.assert_string_equal', 'npt.assert_string_equal', (['ID[1]', '"""ULDM"""'], {}), "(ID[1], 'ULDM')\n", (2414, 2429), True, 'import numpy.testing as npt\n'), ((2554, 2587), 'numpy.testing.assert_equal', 'npt.assert_equal', (['z_field', 'self.z'], {}), '(z_field, self.z)\n', (2570, 2587), True, 'import numpy.testing as npt\n'), ((2673, 2716), 'numpy.testing.assert_equal', 'npt.assert_equal', (['(True)', '(z_subhalo > z_field)'], {}), '(True, z_subhalo > z_field)\n', (2689, 2716), True, 'import numpy.testing as npt\n'), ((2897, 2989), 'pyHalo.single_realization.SingleHalo', 'SingleHalo', (['(100000000.0)', '(0.5)', '(0.5)', '"""ULDM"""', '(0.5)', '(0.5)', '(1.5)', 'None', '(True)', 'profile_args', 'None'], {}), "(100000000.0, 0.5, 0.5, 'ULDM', 0.5, 0.5, 1.5, None, True,\n profile_args, None)\n", (2907, 2989), False, 'from pyHalo.single_realization import SingleHalo\n'), ((3137, 3188), 'numpy.testing.assert_string_equal', 'npt.assert_string_equal', (['lens_model_list[1]', '"""ULDM"""'], {}), "(lens_model_list[1], 'ULDM')\n", (3160, 3188), True, 'import numpy.testing as npt\n'), ((3197, 3248), 'numpy.testing.assert_string_equal', 'npt.assert_string_equal', (['lens_model_list[0]', '"""CNFW"""'], {}), "(lens_model_list[0], 'CNFW')\n", (3220, 3248), True, 'import numpy.testing as npt\n'), ((3722, 3800), 'pyHalo.single_realization.SingleHalo', 'SingleHalo', (['mass', '(0.5)', '(0.5)', '"""ULDM"""', 'zl', 'zl', 'zs', 'None', '(True)', 'profile_args', 'None'], {}), "(mass, 0.5, 0.5, 'ULDM', zl, zl, zs, None, True, profile_args, None)\n", (3732, 3800), False, 'from pyHalo.single_realization import SingleHalo\n'), ((4392, 4454), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['((M_uldm + M_nfw) / mass)', '(1)'], {'decimal': '(2)'}), '((M_uldm + M_nfw) / mass, 1, decimal=2)\n', (4415, 4454), True, 'import numpy.testing as npt\n'), ((4911, 4951), 'numpy.array', 'np.array', (['[1 - (rho0 + rhos) / rho_goal]'], {}), '([1 - (rho0 + rhos) / rho_goal])\n', (4919, 4951), True, 'import numpy as np\n'), ((4946, 4962), 'numpy.array', 'np.array', (['[0.02]'], {}), '([0.02])\n', (4954, 4962), True, 'import numpy as np\n'), ((1884, 1900), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1898, 1900), True, 'import numpy as np\n'), ((2102, 2118), 
'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2116, 2118), True, 'import numpy as np\n'), ((4178, 4184), 'lenstronomy.LensModel.Profiles.cnfw.CNFW', 'CNFW', ([], {}), '()\n', (4182, 4184), False, 'from lenstronomy.LensModel.Profiles.cnfw import CNFW\n'), ((4299, 4305), 'lenstronomy.LensModel.Profiles.uldm.Uldm', 'Uldm', ([], {}), '()\n', (4303, 4305), False, 'from lenstronomy.LensModel.Profiles.uldm import Uldm\n'), ((4547, 4553), 'lenstronomy.LensModel.Profiles.uldm.Uldm', 'Uldm', ([], {}), '()\n', (4551, 4553), False, 'from lenstronomy.LensModel.Profiles.uldm import Uldm\n'), ((4668, 4674), 'lenstronomy.LensModel.Profiles.cnfw.CNFW', 'CNFW', ([], {}), '()\n', (4672, 4674), False, 'from lenstronomy.LensModel.Profiles.cnfw import CNFW\n'), ((4842, 4848), 'lenstronomy.LensModel.Profiles.uldm.Uldm', 'Uldm', ([], {}), '()\n', (4846, 4848), False, 'from lenstronomy.LensModel.Profiles.uldm import Uldm\n')]
|
from generator import Generator
import numpy as np, random
np.set_printoptions(precision=4, suppress=True, linewidth=132)
from tensorflow import keras
from tensorflow.keras.layers import LSTM, Dense, Input
from tensorflow.keras import Model
from tensorflow.keras.optimizers import Adagrad
def create_net(nwords, batch_size, hidden=100):
inp = Input((None, nwords), batch_size=batch_size)
r1 = LSTM(hidden, return_sequences=True, stateful=True)(inp)
#r2 = LSTM(hidden, return_sequences=True)(r1)
probs = Dense(nwords, activation="softmax")(r1)
model = Model(inp, probs)
model.compile(optimizer=Adagrad(learning_rate=0.01), loss="categorical_crossentropy")
return model
def generate_from_model(model, g, length, batch_size):
#print("------- generate ----------")
model.reset_states()
nwords = g.NWords
rows = []
row = [random.randint(0, nwords-1) for _ in range(batch_size)] # [w]
rows.append(row)
for t in range(length-1):
x = np.array([g.vectorize(xi) for xi in row])
y = model.predict(x[:,None,:])[:,0,:] # y: [mb, w], t=0
pvec = y**3
pvec = pvec/np.sum(pvec, axis=-1, keepdims=True) # -> [mb, w]
row = [np.random.choice(nwords, p=p) for p in pvec]
rows.append(row)
rows = np.array(rows) # [t,mb]
return rows.transpose((1,0))
def generate_batch(g, length, batch_size):
#print("generate_batch(%s, %s)..." % (length, batch_size))
sequences = np.array([g.generate(length+1, as_vectors=True) for _ in range(batch_size)])
#print("sequences:", sequences.shape)
x = sequences[:,:-1,:]
y_ = sequences[:,1:,:]
return x, y_
def train(model, g, length, batch_size):
valid_ma = 0.0
steps = 0
for iteration in range(100000):
#print
x, y_ = generate_batch(g, length, batch_size)
loss = model.train_on_batch(x, y_)
if iteration and iteration % 50 == 0:
generated = generate_from_model(model, g, length, batch_size)[0]
#print(type(generated), generated.shape, generated)
valid_length = g.validate(generated)
valid_ma += 0.1*(valid_length-valid_ma)
if iteration % 100 == 0:
print(generated[:valid_length], "*", generated[valid_length:], " valid length:", valid_length)
print("Batches:", iteration, " steps:", iteration*length*batch_size, " loss/step:", loss/x.shape[1],
" moving average:", valid_ma)
if __name__ == '__main__':
nwords = 10
length = 50
distance = 5
r = 2
batch_size = 5
g = Generator(nwords, distance, r)
model = create_net(nwords, batch_size)
train(model, g, length, batch_size)
|
[
"numpy.set_printoptions",
"numpy.sum",
"random.randint",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.Model",
"numpy.array",
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.LSTM",
"numpy.random.choice",
"generator.Generator",
"tensorflow.keras.optimizers.Adagrad"
] |
[((60, 122), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(4)', 'suppress': '(True)', 'linewidth': '(132)'}), '(precision=4, suppress=True, linewidth=132)\n', (79, 122), True, 'import numpy as np, random\n'), ((350, 394), 'tensorflow.keras.layers.Input', 'Input', (['(None, nwords)'], {'batch_size': 'batch_size'}), '((None, nwords), batch_size=batch_size)\n', (355, 394), False, 'from tensorflow.keras.layers import LSTM, Dense, Input\n'), ((574, 591), 'tensorflow.keras.Model', 'Model', (['inp', 'probs'], {}), '(inp, probs)\n', (579, 591), False, 'from tensorflow.keras import Model\n'), ((1337, 1351), 'numpy.array', 'np.array', (['rows'], {}), '(rows)\n', (1345, 1351), True, 'import numpy as np, random\n'), ((2733, 2763), 'generator.Generator', 'Generator', (['nwords', 'distance', 'r'], {}), '(nwords, distance, r)\n', (2742, 2763), False, 'from generator import Generator\n'), ((404, 454), 'tensorflow.keras.layers.LSTM', 'LSTM', (['hidden'], {'return_sequences': '(True)', 'stateful': '(True)'}), '(hidden, return_sequences=True, stateful=True)\n', (408, 454), False, 'from tensorflow.keras.layers import LSTM, Dense, Input\n'), ((522, 557), 'tensorflow.keras.layers.Dense', 'Dense', (['nwords'], {'activation': '"""softmax"""'}), "(nwords, activation='softmax')\n", (527, 557), False, 'from tensorflow.keras.layers import LSTM, Dense, Input\n'), ((874, 903), 'random.randint', 'random.randint', (['(0)', '(nwords - 1)'], {}), '(0, nwords - 1)\n', (888, 903), False, 'import numpy as np, random\n'), ((620, 647), 'tensorflow.keras.optimizers.Adagrad', 'Adagrad', ([], {'learning_rate': '(0.01)'}), '(learning_rate=0.01)\n', (627, 647), False, 'from tensorflow.keras.optimizers import Adagrad\n'), ((1166, 1202), 'numpy.sum', 'np.sum', (['pvec'], {'axis': '(-1)', 'keepdims': '(True)'}), '(pvec, axis=-1, keepdims=True)\n', (1172, 1202), True, 'import numpy as np, random\n'), ((1247, 1276), 'numpy.random.choice', 'np.random.choice', (['nwords'], {'p': 'p'}), '(nwords, p=p)\n', (1263, 1276), True, 'import numpy as np, random\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2
from scipy.ndimage import gaussian_filter, median_filter
from matplotlib.animation import FuncAnimation
from perlin_noise import PerlinNoise
##########################################################################################
# FUNCTIONS FOR RENDERING I.E. GO FROM GEOMETRY TO FINAL IMAGES
##########################################################################################
# function: z_disk_props
# function: sarc_list_in_slice_fcn
# function: return_x_y_z_mat
# function: point_in_cyl
# function: binary_box
# function: slice_to_matrix
# function: matrix_gaussian_blur_fcn
# function: matrix_median_blur_fcn
# function: random_val
# function: cloud_image
# function: matrix_to_image
# function: add_perlin_noise
# function: save_img_stills
# function: still_to_avi
# function: ground_truth_movie
##########################################################################################
##########################################################################################
def z_disk_props( sarc_list, is_normal_radius, is_normal_height, avg_radius, avg_height, parameter_radius, parameter_height):
"""Create z disk properties, z disks are modeled as cylinders with radius R and height H. Once cylinder per sarcomere (s1)."""
radius_list = []
height_list = []
for kk in range(0,len(sarc_list)):
if is_normal_radius:
rad = avg_radius + np.random.normal(0,parameter_radius)
else:
rad = avg_radius + (np.random.random(1)[0] - .5) * parameter_radius * 2.0
if is_normal_height:
hei = avg_height + np.random.normal(0,parameter_height)
else:
hei = avg_height + (np.random.random(1)[0] - .5) * parameter_height * 2.0
radius_list.append(rad)
height_list.append(hei)
return radius_list, height_list
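# Example (added for illustration): two sarcomeres, each given as a pair of
# (x, y, z) endpoints; radii are drawn as 0.25 + N(0, 0.05) and heights as
# 0.1 + N(0, 0.02) because the "is_normal_*" flags are True.
#   sarcs = [[(0, 0, 0), (1, 0, 0)], [(1, 0, 0), (2, 0, 0)]]
#   radii, heights = z_disk_props(sarcs, True, True, 0.25, 0.1, 0.05, 0.02)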
##########################################################################################
def sarc_list_in_slice_fcn(sarc_list, radius_list, height_list, z_lower, z_upper):
"""Check to see if sarcomere is within a slice in the z dimension."""
sarc_list_in_slice = []
radius_list_in_slice = []
height_list_in_slice = []
num_sarc = len(sarc_list)
for kk in range(0,num_sarc):
z = 0.5*( sarc_list[kk][0][2] + sarc_list[kk][1][2] )
if z > z_lower and z < z_upper:
sarc_list_in_slice.append(sarc_list[kk])
radius_list_in_slice.append(radius_list[kk])
height_list_in_slice.append(height_list[kk])
return sarc_list_in_slice, radius_list_in_slice, height_list_in_slice
##########################################################################################
def return_x_y_z_mat(matrix, x_lower, x_upper, y_lower, y_upper, z_lower, z_upper):
"""Helper function that returns the X, Y, and Z coordinates of a matrix."""
matrix_X = np.zeros(matrix.shape)
matrix_Y = np.zeros(matrix.shape)
matrix_Z = np.zeros(matrix.shape)
num_x = matrix.shape[0]
num_y = matrix.shape[1]
num_z = matrix.shape[2]
for ii in range(0,num_x):
for jj in range(0,num_y):
for kk in range(0,num_z):
matrix_X[ii,jj,kk] = ii / num_x * (x_upper - x_lower) + x_lower
matrix_Y[ii,jj,kk] = jj / num_y * (y_upper - y_lower) + y_lower
matrix_Z[ii,jj,kk] = kk / num_z * (z_upper - z_lower) + z_lower
return matrix_X, matrix_Y, matrix_Z
##########################################################################################
def point_in_cyl(pt_x,pt_y,pt_z,cyl_p1,cyl_p2,cyl_rad):
"""Helper function that returns 1 if a point is inside a cylinder, 0 otherwise."""
q = np.asarray([pt_x,pt_y,pt_z])
p1 = np.asarray([cyl_p1[0],cyl_p1[1],cyl_p1[2]])
p2 = np.asarray([cyl_p2[0],cyl_p2[1],cyl_p2[2]])
check_1 = np.dot(q-p1,p2-p1)
check_2 = np.dot(q-p2,p2-p1)
if check_1 >=0 and check_2 <= 0:
rad = np.linalg.norm(np.cross( q-p1, p2-p1 )) / np.linalg.norm(p2-p1)
if rad <= cyl_rad:
return 1
else:
return 0
else:
return 0
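# Example (added for illustration): a unit-length cylinder of radius 0.2 along
# the z axis contains (0, 0, 0.5) but not (0.5, 0, 0.5).
#   point_in_cyl(0.0, 0.0, 0.5, (0, 0, 0), (0, 0, 1), 0.2)  -> 1
#   point_in_cyl(0.5, 0.0, 0.5, (0, 0, 0), (0, 0, 1), 0.2)  -> 0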
##########################################################################################
def binary_box(matrix_X,matrix_Y,matrix_Z,cyl_p1,cyl_p2,cyl_rad):
"""Helper function that returns a binary matrix if the point is inside the cylinder."""
num_x = matrix_X.shape[0]
num_y = matrix_Y.shape[1]
num_z = matrix_Z.shape[2]
bin_box = np.zeros((num_x,num_y,num_z))
for ii in range(0,num_x):
for jj in range(0,num_y):
for kk in range(0,num_z):
x = matrix_X[ii,jj,kk]
y = matrix_Y[ii,jj,kk]
z = matrix_Z[ii,jj,kk]
bin_box[ii,jj,kk] = point_in_cyl(x,y,z,cyl_p1,cyl_p2,cyl_rad)
return bin_box
##########################################################################################
def slice_to_matrix(sarc_list,dim_x,dim_y,dim_z,x_lower,x_upper,y_lower,y_upper,z_lower,z_upper, mean_rad, mean_hei, bound_x, bound_y, bound_z, val):
"""Create a 3D matrix where each sarcomere is represented as voxels."""
matrix = np.zeros((dim_x,dim_y,dim_z))
matrix_X, matrix_Y, matrix_Z = return_x_y_z_mat(matrix, x_lower, x_upper, y_lower, y_upper, z_lower, z_upper)
# for each, only add s1 (adding s2 would be redundant)
num_sarc = len(sarc_list)
for kk in range(0,num_sarc):
s1 = sarc_list[kk][0]
s1 = np.asarray([s1[0],s1[1],s1[2]])
s2 = sarc_list[kk][1]
s2 = np.asarray([s2[0],s2[1],s2[2]])
vec = (s2 - s1) / np.linalg.norm(s2-s1)
rad = mean_rad[kk]
hei = mean_hei[kk]
p1 = s1 + vec * hei/2.0
p2 = s1 - vec * hei/2.0
cent_x = int((s1[0] - x_lower)/(x_upper-x_lower) * dim_x)
cent_y = int((s1[1] - y_lower)/(y_upper-y_lower) * dim_y)
cent_z = int((s1[2] - z_lower)/(z_upper-z_lower) * dim_z)
lower_x = np.max([cent_x - bound_x, 0])
upper_x = np.min([cent_x + bound_x, dim_x-1])
lower_y = np.max([cent_y - bound_y, 0])
upper_y = np.min([cent_y + bound_y, dim_y-1])
lower_z = np.max([cent_z - bound_z, 0])
upper_z = np.min([cent_z + bound_z, dim_z-1])
mm_x = matrix_X[lower_x:upper_x,lower_y:upper_y,lower_z:upper_z]
mm_y = matrix_Y[lower_x:upper_x,lower_y:upper_y,lower_z:upper_z]
mm_z = matrix_Z[lower_x:upper_x,lower_y:upper_y,lower_z:upper_z]
bin_box = binary_box(mm_x,mm_y,mm_z,p1,p2,rad)
matrix[lower_x:upper_x,lower_y:upper_y,lower_z:upper_z] += bin_box*val
if kk == num_sarc - 1:
s1 = sarc_list[kk][0]
s1 = np.asarray([s1[0],s1[1],s1[2]])
s2 = sarc_list[kk][1]
s2 = np.asarray([s2[0],s2[1],s2[2]])
vec = (s2 - s1) / np.linalg.norm(s2-s1)
rad = mean_rad[kk]
hei = mean_hei[kk]
p1 = s2 + vec * hei/2.0
p2 = s2 - vec * hei/2.0
cent_x = int((s1[0] - x_lower)/(x_upper-x_lower) * dim_x)
cent_y = int((s1[1] - y_lower)/(y_upper-y_lower) * dim_y)
cent_z = int((s1[2] - z_lower)/(z_upper-z_lower) * dim_z)
lower_x = np.max([cent_x - bound_x, 0])
upper_x = np.min([cent_x + bound_x, dim_x-1])
lower_y = np.max([cent_y - bound_y, 0])
upper_y = np.min([cent_y + bound_y, dim_y-1])
lower_z = np.max([cent_z - bound_z, 0])
upper_z = np.min([cent_z + bound_z, dim_z-1])
mm_x = matrix_X[lower_x:upper_x,lower_y:upper_y,lower_z:upper_z]
mm_y = matrix_Y[lower_x:upper_x,lower_y:upper_y,lower_z:upper_z]
mm_z = matrix_Z[lower_x:upper_x,lower_y:upper_y,lower_z:upper_z]
bin_box = binary_box(mm_x,mm_y,mm_z,p1,p2,rad)
matrix[lower_x:upper_x,lower_y:upper_y,lower_z:upper_z] += bin_box*val
return matrix
##########################################################################################
def matrix_gaussian_blur_fcn(matrix,sig):
"""Function to apply gaussian blur to the matrix that represents sarcomeres as voxels."""
matrix_blur = gaussian_filter(matrix, sigma=sig)
return matrix_blur
##########################################################################################
def matrix_median_blur_fcn(matrix,size):
"""Function to apply median blur to the matrix that represents sarcomeres as voxels."""
	matrix_blur = median_filter(matrix, size=size)
return matrix_blur
##########################################################################################
def random_val(matrix,mean,std):
"""Function to apply normally distributed random noise to the matrix that represents sarcomeres as voxels."""
mat = np.random.normal(mean,std,matrix.shape)
matrix += mat
return matrix
##########################################################################################
# Add an elliptical 'cloud' of extra intensity: every voxel whose (x, y) index
# falls inside the ellipse with semi-axes a, b centered at (x0, y0) gets
# val*10 added, in every z slice.
def cloud_image(a,b,x0,y0,matrix,val):
for ii in range(0,matrix.shape[0]):
for jj in range(0,matrix.shape[1]):
for kk in range(0,matrix.shape[2]):
if ((ii-x0)/a)**2.0 + ((jj - y0)/b)**2.0 < 1:
matrix[ii,jj,kk] += val*10
return matrix
##########################################################################################
def matrix_to_image(matrix,slice_lower,slice_upper):
"""Convert the 3D matrix into a projected 2D image matrix."""
matrix = matrix[:,:,slice_lower:slice_upper]
image = np.sum(matrix,axis=2)
return image
##########################################################################################
def add_perlin_noise(image,octaves,mag_ratio):
"""Add Perlin noise to the image."""
noise = PerlinNoise(octaves,seed=777)
pix0 = image.shape[0]; pix1 = image.shape[1]
pic = [[noise([i/pix0, j/pix1]) for j in range(pix0)] for i in range(pix1)]
# make perlin noise from range 0-1
pic = (pic - np.min(pic)) / (np.max(pic) - np.min(pic))
max_image = np.max(image)
image_with_noise = image + pic * max_image * mag_ratio
return image_with_noise
##########################################################################################
def save_img_stills(image_list,folder_name):
"""Save image stills with correct matplotlib settings."""
folder_name_render = folder_name + '/render'
if not os.path.exists(folder_name_render):
os.makedirs(folder_name_render)
num_images = len(image_list)
for step in range(0,num_images):
image = image_list[step]
plt.figure()
plt.imshow(image)
plt.axis('off')
ax = plt.gca()
ax.set_xticks([]); ax.set_yticks([])
if step < 10:
plt.savefig(folder_name_render + '/frame_00%i.png'%(step),bbox_inches = 'tight',transparent=True,pad_inches = 0)
elif step < 100:
plt.savefig(folder_name_render + '/frame_0%i.png'%(step),bbox_inches = 'tight',transparent=True,pad_inches = 0)
else:
plt.savefig(folder_name_render + '/frame_%i.png'%(step),bbox_inches = 'tight',transparent=True,pad_inches = 0)
plt.close()
return
##########################################################################################
def still_to_avi(folder_name,num_frames,is_GT):
"""Convert still images to an avi."""
folder_name_render = folder_name + '/render'
if is_GT == True:
video_name = folder_name + '/ground_truth_movie/GT_' + folder_name + '.avi'
else:
video_name = folder_name + '/' + folder_name + '.avi'
img_list = []
for kk in range(0,num_frames):
if kk < 10:
fname = 'frame_00%i.png'%(kk)
elif kk < 100:
fname = 'frame_0%i.png'%(kk)
else:
fname = 'frame_%i.png'%(kk)
img_list.append(fname)
images = [img for img in img_list]
if is_GT == True:
frame = cv2.imread(os.path.join(folder_name + '/ground_truth_movie', images[0]))
else:
frame = cv2.imread(os.path.join(folder_name + '/render', images[0]))
height, width, layers = frame.shape
video = cv2.VideoWriter(video_name, 0, 30, (width,height))
for image in images:
if is_GT == True:
video.write(cv2.imread(os.path.join(folder_name + '/ground_truth_movie', image)))
else:
video.write(cv2.imread(os.path.join(folder_name + '/render', image)))
cv2.destroyAllWindows()
video.release()
return
##########################################################################################
def ground_truth_movie(folder_name,num_frames,img_list,sarc_array_normalized, x_pos_array, y_pos_array,x_lower,x_upper,y_lower,y_upper,dim_x,dim_y):
"""Make the ground truth movie from the geometry."""
folder_name_GT = folder_name + '/ground_truth_movie'
if not os.path.exists(folder_name_GT):
os.makedirs(folder_name_GT)
all_normalized = sarc_array_normalized
color_matrix = np.zeros(all_normalized.shape)
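	# Map each normalized sarcomere value to a colour weight in [0, 1]:
	# values below -0.2 clamp to 0 (red), above 0.2 clamp to 1 (blue),
	# linear in between (of*2.5 + 0.5).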
for kk in range(0,all_normalized.shape[0]):
for jj in range(0,all_normalized.shape[1]):
of = all_normalized[kk,jj]
if of < -.2:
color_matrix[kk,jj] = 0
elif of > .2:
color_matrix[kk,jj] = 1
else:
color_matrix[kk,jj] = of*2.5 + .5
for t in range(0,num_frames):
img = img_list[t]
plt.figure()
plt.imshow(img)
for kk in range(0,all_normalized.shape[0]):
col = (1 - color_matrix[kk,t], 0, color_matrix[kk,t])
yy = (y_pos_array[kk,t] - y_lower)/(y_upper-y_lower)*dim_y
xx = (x_pos_array[kk,t] - x_lower)/(x_upper-x_lower)*dim_x
plt.plot(yy,xx,'.',c=col)
ax = plt.gca()
ax.set_xticks([]); ax.set_yticks([])
plt.axis('off')
if t < 10:
plt.savefig(folder_name_GT + '/frame_00%i.png'%(t),bbox_inches = 'tight',transparent=True,pad_inches = 0)
elif t < 100:
plt.savefig(folder_name_GT + '/frame_0%i.png'%(t),bbox_inches = 'tight',transparent=True,pad_inches = 0)
else:
plt.savefig(folder_name_GT + '/frame_%i.png'%(t),bbox_inches = 'tight',transparent=True,pad_inches = 0)
plt.close()
return
|
[
"perlin_noise.PerlinNoise",
"numpy.sum",
"matplotlib.pyplot.figure",
"numpy.linalg.norm",
"numpy.random.normal",
"matplotlib.pyplot.gca",
"cv2.VideoWriter",
"scipy.ndimage.median_filter",
"os.path.join",
"scipy.ndimage.gaussian_filter",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.close",
"os.path.exists",
"numpy.max",
"cv2.destroyAllWindows",
"numpy.asarray",
"numpy.cross",
"numpy.min",
"numpy.dot",
"os.makedirs",
"matplotlib.pyplot.plot",
"numpy.zeros",
"matplotlib.pyplot.axis",
"numpy.random.random",
"matplotlib.pyplot.savefig"
] |
[((2777, 2799), 'numpy.zeros', 'np.zeros', (['matrix.shape'], {}), '(matrix.shape)\n', (2785, 2799), True, 'import numpy as np\n'), ((2812, 2834), 'numpy.zeros', 'np.zeros', (['matrix.shape'], {}), '(matrix.shape)\n', (2820, 2834), True, 'import numpy as np\n'), ((2847, 2869), 'numpy.zeros', 'np.zeros', (['matrix.shape'], {}), '(matrix.shape)\n', (2855, 2869), True, 'import numpy as np\n'), ((3512, 3542), 'numpy.asarray', 'np.asarray', (['[pt_x, pt_y, pt_z]'], {}), '([pt_x, pt_y, pt_z])\n', (3522, 3542), True, 'import numpy as np\n'), ((3547, 3592), 'numpy.asarray', 'np.asarray', (['[cyl_p1[0], cyl_p1[1], cyl_p1[2]]'], {}), '([cyl_p1[0], cyl_p1[1], cyl_p1[2]])\n', (3557, 3592), True, 'import numpy as np\n'), ((3597, 3642), 'numpy.asarray', 'np.asarray', (['[cyl_p2[0], cyl_p2[1], cyl_p2[2]]'], {}), '([cyl_p2[0], cyl_p2[1], cyl_p2[2]])\n', (3607, 3642), True, 'import numpy as np\n'), ((3652, 3675), 'numpy.dot', 'np.dot', (['(q - p1)', '(p2 - p1)'], {}), '(q - p1, p2 - p1)\n', (3658, 3675), True, 'import numpy as np\n'), ((3682, 3705), 'numpy.dot', 'np.dot', (['(q - p2)', '(p2 - p1)'], {}), '(q - p2, p2 - p1)\n', (3688, 3705), True, 'import numpy as np\n'), ((4221, 4252), 'numpy.zeros', 'np.zeros', (['(num_x, num_y, num_z)'], {}), '((num_x, num_y, num_z))\n', (4229, 4252), True, 'import numpy as np\n'), ((4829, 4860), 'numpy.zeros', 'np.zeros', (['(dim_x, dim_y, dim_z)'], {}), '((dim_x, dim_y, dim_z))\n', (4837, 4860), True, 'import numpy as np\n'), ((7496, 7530), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['matrix'], {'sigma': 'sig'}), '(matrix, sigma=sig)\n', (7511, 7530), False, 'from scipy.ndimage import gaussian_filter, median_filter\n'), ((7789, 7826), 'scipy.ndimage.median_filter', 'median_filter', (['matrix_blur'], {'size': 'size'}), '(matrix_blur, size=size)\n', (7802, 7826), False, 'from scipy.ndimage import gaussian_filter, median_filter\n'), ((8091, 8132), 'numpy.random.normal', 'np.random.normal', (['mean', 'std', 'matrix.shape'], {}), '(mean, std, matrix.shape)\n', (8107, 8132), True, 'import numpy as np\n'), ((8774, 8796), 'numpy.sum', 'np.sum', (['matrix'], {'axis': '(2)'}), '(matrix, axis=2)\n', (8780, 8796), True, 'import numpy as np\n'), ((8996, 9026), 'perlin_noise.PerlinNoise', 'PerlinNoise', (['octaves'], {'seed': '(777)'}), '(octaves, seed=777)\n', (9007, 9026), False, 'from perlin_noise import PerlinNoise\n'), ((9255, 9268), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (9261, 9268), True, 'import numpy as np\n'), ((11149, 11200), 'cv2.VideoWriter', 'cv2.VideoWriter', (['video_name', '(0)', '(30)', '(width, height)'], {}), '(video_name, 0, 30, (width, height))\n', (11164, 11200), False, 'import cv2\n'), ((11412, 11435), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (11433, 11435), False, 'import cv2\n'), ((11937, 11967), 'numpy.zeros', 'np.zeros', (['all_normalized.shape'], {}), '(all_normalized.shape)\n', (11945, 11967), True, 'import numpy as np\n'), ((5114, 5147), 'numpy.asarray', 'np.asarray', (['[s1[0], s1[1], s1[2]]'], {}), '([s1[0], s1[1], s1[2]])\n', (5124, 5147), True, 'import numpy as np\n'), ((5177, 5210), 'numpy.asarray', 'np.asarray', (['[s2[0], s2[1], s2[2]]'], {}), '([s2[0], s2[1], s2[2]])\n', (5187, 5210), True, 'import numpy as np\n'), ((5546, 5575), 'numpy.max', 'np.max', (['[cent_x - bound_x, 0]'], {}), '([cent_x - bound_x, 0])\n', (5552, 5575), True, 'import numpy as np\n'), ((5588, 5625), 'numpy.min', 'np.min', (['[cent_x + bound_x, dim_x - 1]'], {}), '([cent_x + bound_x, dim_x - 1])\n', (5594, 5625), True, 
'import numpy as np\n'), ((5636, 5665), 'numpy.max', 'np.max', (['[cent_y - bound_y, 0]'], {}), '([cent_y - bound_y, 0])\n', (5642, 5665), True, 'import numpy as np\n'), ((5678, 5715), 'numpy.min', 'np.min', (['[cent_y + bound_y, dim_y - 1]'], {}), '([cent_y + bound_y, dim_y - 1])\n', (5684, 5715), True, 'import numpy as np\n'), ((5726, 5755), 'numpy.max', 'np.max', (['[cent_z - bound_z, 0]'], {}), '([cent_z - bound_z, 0])\n', (5732, 5755), True, 'import numpy as np\n'), ((5768, 5805), 'numpy.min', 'np.min', (['[cent_z + bound_z, dim_z - 1]'], {}), '([cent_z + bound_z, dim_z - 1])\n', (5774, 5805), True, 'import numpy as np\n'), ((9602, 9636), 'os.path.exists', 'os.path.exists', (['folder_name_render'], {}), '(folder_name_render)\n', (9616, 9636), False, 'import os\n'), ((9640, 9671), 'os.makedirs', 'os.makedirs', (['folder_name_render'], {}), '(folder_name_render)\n', (9651, 9671), False, 'import os\n'), ((9765, 9777), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9775, 9777), True, 'import matplotlib.pyplot as plt\n'), ((9780, 9797), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (9790, 9797), True, 'import matplotlib.pyplot as plt\n'), ((9800, 9815), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (9808, 9815), True, 'import matplotlib.pyplot as plt\n'), ((9823, 9832), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9830, 9832), True, 'import matplotlib.pyplot as plt\n'), ((10262, 10273), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10271, 10273), True, 'import matplotlib.pyplot as plt\n'), ((11819, 11849), 'os.path.exists', 'os.path.exists', (['folder_name_GT'], {}), '(folder_name_GT)\n', (11833, 11849), False, 'import os\n'), ((11853, 11880), 'os.makedirs', 'os.makedirs', (['folder_name_GT'], {}), '(folder_name_GT)\n', (11864, 11880), False, 'import os\n'), ((12283, 12295), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12293, 12295), True, 'import matplotlib.pyplot as plt\n'), ((12298, 12313), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (12308, 12313), True, 'import matplotlib.pyplot as plt\n'), ((12581, 12590), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (12588, 12590), True, 'import matplotlib.pyplot as plt\n'), ((12632, 12647), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (12640, 12647), True, 'import matplotlib.pyplot as plt\n'), ((13011, 13022), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13020, 13022), True, 'import matplotlib.pyplot as plt\n'), ((3785, 3808), 'numpy.linalg.norm', 'np.linalg.norm', (['(p2 - p1)'], {}), '(p2 - p1)\n', (3799, 3808), True, 'import numpy as np\n'), ((5229, 5252), 'numpy.linalg.norm', 'np.linalg.norm', (['(s2 - s1)'], {}), '(s2 - s1)\n', (5243, 5252), True, 'import numpy as np\n'), ((6196, 6229), 'numpy.asarray', 'np.asarray', (['[s1[0], s1[1], s1[2]]'], {}), '([s1[0], s1[1], s1[2]])\n', (6206, 6229), True, 'import numpy as np\n'), ((6261, 6294), 'numpy.asarray', 'np.asarray', (['[s2[0], s2[1], s2[2]]'], {}), '([s2[0], s2[1], s2[2]])\n', (6271, 6294), True, 'import numpy as np\n'), ((6639, 6668), 'numpy.max', 'np.max', (['[cent_x - bound_x, 0]'], {}), '([cent_x - bound_x, 0])\n', (6645, 6668), True, 'import numpy as np\n'), ((6682, 6719), 'numpy.min', 'np.min', (['[cent_x + bound_x, dim_x - 1]'], {}), '([cent_x + bound_x, dim_x - 1])\n', (6688, 6719), True, 'import numpy as np\n'), ((6731, 6760), 'numpy.max', 'np.max', (['[cent_y - bound_y, 0]'], {}), '([cent_y - bound_y, 
0])\n', (6737, 6760), True, 'import numpy as np\n'), ((6774, 6811), 'numpy.min', 'np.min', (['[cent_y + bound_y, dim_y - 1]'], {}), '([cent_y + bound_y, dim_y - 1])\n', (6780, 6811), True, 'import numpy as np\n'), ((6823, 6852), 'numpy.max', 'np.max', (['[cent_z - bound_z, 0]'], {}), '([cent_z - bound_z, 0])\n', (6829, 6852), True, 'import numpy as np\n'), ((6866, 6903), 'numpy.min', 'np.min', (['[cent_z + bound_z, dim_z - 1]'], {}), '([cent_z + bound_z, dim_z - 1])\n', (6872, 6903), True, 'import numpy as np\n'), ((9199, 9210), 'numpy.min', 'np.min', (['pic'], {}), '(pic)\n', (9205, 9210), True, 'import numpy as np\n'), ((9215, 9226), 'numpy.max', 'np.max', (['pic'], {}), '(pic)\n', (9221, 9226), True, 'import numpy as np\n'), ((9229, 9240), 'numpy.min', 'np.min', (['pic'], {}), '(pic)\n', (9235, 9240), True, 'import numpy as np\n'), ((9891, 10007), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(folder_name_render + '/frame_00%i.png' % step)"], {'bbox_inches': '"""tight"""', 'transparent': '(True)', 'pad_inches': '(0)'}), "(folder_name_render + '/frame_00%i.png' % step, bbox_inches=\n 'tight', transparent=True, pad_inches=0)\n", (9902, 10007), True, 'import matplotlib.pyplot as plt\n'), ((10959, 11019), 'os.path.join', 'os.path.join', (["(folder_name + '/ground_truth_movie')", 'images[0]'], {}), "(folder_name + '/ground_truth_movie', images[0])\n", (10971, 11019), False, 'import os\n'), ((11049, 11097), 'os.path.join', 'os.path.join', (["(folder_name + '/render')", 'images[0]'], {}), "(folder_name + '/render', images[0])\n", (11061, 11097), False, 'import os\n'), ((12544, 12572), 'matplotlib.pyplot.plot', 'plt.plot', (['yy', 'xx', '"""."""'], {'c': 'col'}), "(yy, xx, '.', c=col)\n", (12552, 12572), True, 'import matplotlib.pyplot as plt\n'), ((12664, 12772), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(folder_name_GT + '/frame_00%i.png' % t)"], {'bbox_inches': '"""tight"""', 'transparent': '(True)', 'pad_inches': '(0)'}), "(folder_name_GT + '/frame_00%i.png' % t, bbox_inches='tight',\n transparent=True, pad_inches=0)\n", (12675, 12772), True, 'import matplotlib.pyplot as plt\n'), ((1440, 1477), 'numpy.random.normal', 'np.random.normal', (['(0)', 'parameter_radius'], {}), '(0, parameter_radius)\n', (1456, 1477), True, 'import numpy as np\n'), ((1607, 1644), 'numpy.random.normal', 'np.random.normal', (['(0)', 'parameter_height'], {}), '(0, parameter_height)\n', (1623, 1644), True, 'import numpy as np\n'), ((3758, 3783), 'numpy.cross', 'np.cross', (['(q - p1)', '(p2 - p1)'], {}), '(q - p1, p2 - p1)\n', (3766, 3783), True, 'import numpy as np\n'), ((6314, 6337), 'numpy.linalg.norm', 'np.linalg.norm', (['(s2 - s1)'], {}), '(s2 - s1)\n', (6328, 6337), True, 'import numpy as np\n'), ((10026, 10141), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(folder_name_render + '/frame_0%i.png' % step)"], {'bbox_inches': '"""tight"""', 'transparent': '(True)', 'pad_inches': '(0)'}), "(folder_name_render + '/frame_0%i.png' % step, bbox_inches=\n 'tight', transparent=True, pad_inches=0)\n", (10037, 10141), True, 'import matplotlib.pyplot as plt\n'), ((10149, 10263), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(folder_name_render + '/frame_%i.png' % step)"], {'bbox_inches': '"""tight"""', 'transparent': '(True)', 'pad_inches': '(0)'}), "(folder_name_render + '/frame_%i.png' % step, bbox_inches=\n 'tight', transparent=True, pad_inches=0)\n", (10160, 10263), True, 'import matplotlib.pyplot as plt\n'), ((12789, 12896), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(folder_name_GT + '/frame_0%i.png' % 
t)"], {'bbox_inches': '"""tight"""', 'transparent': '(True)', 'pad_inches': '(0)'}), "(folder_name_GT + '/frame_0%i.png' % t, bbox_inches='tight',\n transparent=True, pad_inches=0)\n", (12800, 12896), True, 'import matplotlib.pyplot as plt\n'), ((12905, 13011), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(folder_name_GT + '/frame_%i.png' % t)"], {'bbox_inches': '"""tight"""', 'transparent': '(True)', 'pad_inches': '(0)'}), "(folder_name_GT + '/frame_%i.png' % t, bbox_inches='tight',\n transparent=True, pad_inches=0)\n", (12916, 13011), True, 'import matplotlib.pyplot as plt\n'), ((11269, 11325), 'os.path.join', 'os.path.join', (["(folder_name + '/ground_truth_movie')", 'image'], {}), "(folder_name + '/ground_truth_movie', image)\n", (11281, 11325), False, 'import os\n'), ((11362, 11406), 'os.path.join', 'os.path.join', (["(folder_name + '/render')", 'image'], {}), "(folder_name + '/render', image)\n", (11374, 11406), False, 'import os\n'), ((1508, 1527), 'numpy.random.random', 'np.random.random', (['(1)'], {}), '(1)\n', (1524, 1527), True, 'import numpy as np\n'), ((1675, 1694), 'numpy.random.random', 'np.random.random', (['(1)'], {}), '(1)\n', (1691, 1694), True, 'import numpy as np\n')]
|
_url = 'https://raw.githubusercontent.com/mikkokotila/version-controlled-data/master/data/polymod_social_contact_data.csv'
class Polymod:
def __init__(self, data=None):
import pandas as _pd
self.data = _pd.read_csv(_url)
def country_data(self, country_code='fi'):
        # collect the country indicator columns so they can be dropped before returning
cols = []
for col in self.data.columns:
if 'country_' in col:
cols.append(col)
        return self.data[self.data['country_' + country_code] == 1].drop(cols, axis=1)
def _build_population(self,
population_size,
age_distribution=[15, 65, 20]):
'''Returns a population expressed as a 1d array where
each record is a member of the population.'''
import numpy as np
self.population = np.random.choice([1, 2, 3], size=population_size, p=np.array(age_distribution) / 100)
def _build_contacts(self, data, probabilities=False):
'''Returns participant level daily contact record
in absolute values or probabilities.'''
temp = data.copy(deep=True)
temp = temp.groupby('participant_id').sum()
cols = ['contact_home',
'contact_work',
'contact_school',
'contact_transport',
'contact_leisure',
'contact_other']
temp = temp[cols]
if probabilities:
temp['contact_total'] = temp.sum(axis=1)
for col in cols:
temp[col] = temp[col] / temp['contact_total']
return temp.dropna()
def _build_age_groups(self, country_code):
country_data = self.country_data(country_code)
country_data['0-14'] = country_data.participant_age.between(0, 14).astype(int)
country_data['15-64'] = country_data.participant_age.between(15, 64).astype(int)
        country_data['65-100'] = country_data.participant_age.between(65, 100).astype(int)
self.age_young = country_data[country_data.participant_age.between(0, 14)]
self.age_adult = country_data[country_data.participant_age.between(15, 64)]
        self.age_elderly = country_data[country_data.participant_age.between(65, 100)]
def raw_daily_contacts(self, country_code='fi', probabilities=False):
self._build_age_groups(country_code)
if probabilities:
young = self._build_contacts(self.age_young, True).values
adult = self._build_contacts(self.age_adult, True).values
elderly = self._build_contacts(self.age_elderly, True).values
else:
young = self._build_contacts(self.age_young).values
adult = self._build_contacts(self.age_adult).values
elderly = self._build_contacts(self.age_elderly).values
return young, adult, elderly
def total_daily_contacts(self,
population_size=1000,
country_code='fi',
multiplier=1,
age_distribution=[15, 65, 20],
restrictions=[0,0,0,0,0,0]):
import random
import numpy as np
restrictions = np.array(restrictions)
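        # one restriction factor per contact category (home, work, school,
        # transport, leisure, other); each category's contacts are scaled by
        # (1 - restriction) below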
self._build_age_groups(country_code)
self._build_population(population_size=population_size, age_distribution=age_distribution)
out = []
young = (self.population == 1).sum() * multiplier
adult = (self.population == 2).sum() * multiplier
elderly = (self.population == 3).sum() * multiplier
young_picks = self._build_contacts(self.age_young).values * (1 - restrictions)
adult_picks = self._build_contacts(self.age_adult).values * (1 - restrictions)
elderly_picks = self._build_contacts(self.age_elderly).values * (1 - restrictions)
out = random.choices(young_picks.tolist(), k=young)
out += random.choices(adult_picks.tolist(), k=adult)
out += random.choices(elderly_picks.tolist(), k=elderly)
return [int(i) for i in np.array(out).sum(0)]
p = Polymod()
|
[
"pandas.read_csv",
"numpy.array"
] |
[((246, 264), 'pandas.read_csv', '_pd.read_csv', (['_url'], {}), '(_url)\n', (258, 264), True, 'import pandas as _pd\n'), ((3351, 3373), 'numpy.array', 'np.array', (['restrictions'], {}), '(restrictions)\n', (3359, 3373), True, 'import numpy as np\n'), ((953, 979), 'numpy.array', 'np.array', (['age_distribution'], {}), '(age_distribution)\n', (961, 979), True, 'import numpy as np\n'), ((4212, 4225), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (4220, 4225), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# @Time    : 2019/1/8 8:20 PM
# @Author : yidxue
from __future__ import division
import numpy
import datetime
import pandas as pd
def get_all_file_path(path):
"""
    Iterate over a directory and return the paths of the files in its top level.
"""
import os
file_name_list = os.listdir(path)
return [path + os.sep + file_name for file_name in file_name_list]
def read_file(path_ls):
"""
    Read the CSV files and build a dict of {cluster: {timestamp: rtts}}.
"""
map = {}
for file_path in path_ls:
with open(file_path, mode="r") as in_file:
for i, line in enumerate(in_file):
                if not (line.strip() == "" or line.startswith('clusterid')):
data = line.strip().split(",")
cluster = data[0]
timestamp = data[1]
rtts = float(data[7])
if cluster in map.keys():
map[cluster][timestamp] = rtts
else:
cluster_map = {timestamp: rtts}
map[cluster] = cluster_map
return map
def write_file(file_path, context_ls, method='a'):
"""
    Write data to a file, one item per line.
    :param file_path: path of the output file
    :param method: 'a' appends (the default); 'wb' overwrites or creates the file
    :param context_ls: list of strings to write
"""
with open(file_path, method) as fo:
for text in context_ls:
fo.write(text + "\n")
    # no explicit close is needed: the 'with' block closes the file automatically
def calculate_std(dps, moving_average):
variance = 0
flag_list = moving_average.isnull()
count = 0
for index in range(len(dps)):
if flag_list[index]:
count += 1
continue
variance += (dps[index] - moving_average[index]) ** 2
variance /= (len(dps) - count)
return numpy.sqrt(variance)
day = '2018-12-24'
# 1. Collect the input file paths
path = '/Users/cisco/Downloads/abnormal_value_2018lastweek/abnormal_value_{day}.csv'
path_ls = get_all_file_path(path.format(day=day))
# 2. Read the data
data_dict = read_file(path_ls)
# 3. Sort each cluster's records by timestamp
DESC = False
# dict comprehension: each value is the cluster's (timestamp, rtts) pairs sorted by time
data_sort = {
cluster: sorted(data_dict[cluster].items(), key=lambda d: datetime.datetime.strptime(d[0], '%Y-%m-%d %H:%M:%S'),
reverse=DESC)
for cluster in data_dict.keys()}
cluster = {}
for key in data_sort.keys():
cluster[key] = pd.Series({item[0]: item[1] for item in data_sort[key]})
# 4. Anomaly detection
for key in cluster.keys():
dps = pd.Series(cluster[key])
ewma_line = dps.ewm(span=4).mean()
ewma_std = calculate_std(dps, ewma_line)
result = []
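    # flag a point as anomalous (label 1) when it falls outside the
    # EWMA +/- one standard deviation band, otherwise label it 0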
for index in ewma_line.index:
if not (ewma_line[index] - ewma_std <= dps[index] <= ewma_line[index] + ewma_std):
result.append(key + "," + index + "," + str(dps[index]) + ",1")
else:
result.append(key + "," + index + "," + str(dps[index]) + ",0")
    # Save the results
write_file('/Users/cisco/Desktop/{day}.csv'.format(day=day), result)
|
[
"pandas.Series",
"datetime.datetime.strptime",
"os.listdir",
"numpy.sqrt"
] |
[((262, 278), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (272, 278), False, 'import os\n'), ((1673, 1693), 'numpy.sqrt', 'numpy.sqrt', (['variance'], {}), '(variance)\n', (1683, 1693), False, 'import numpy\n'), ((2234, 2290), 'pandas.Series', 'pd.Series', (['{item[0]: item[1] for item in data_sort[key]}'], {}), '({item[0]: item[1] for item in data_sort[key]})\n', (2243, 2290), True, 'import pandas as pd\n'), ((2339, 2362), 'pandas.Series', 'pd.Series', (['cluster[key]'], {}), '(cluster[key])\n', (2348, 2362), True, 'import pandas as pd\n'), ((2046, 2099), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['d[0]', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(d[0], '%Y-%m-%d %H:%M:%S')\n", (2072, 2099), False, 'import datetime\n')]
|
"""
This is a sample stub of loadgen with multiple processes support.
Each process sets its affinity by a proc list.
Loadgen is the producer: it calls issue_queries(), which gets queries from
loadgen and puts the query ids/sample indices into an input queue.
Each Consumer (process)'s run() reads the input queue, calls model_predict() to
get the inference result, and puts the result into an output queue.
A standalone thread's response_loadgen() reads the output queue and responds to
loadgen with the inference result.
Server and Offline scenario PerformanceOnly modes are verified.
Each model needs to implement the functions below:
model_predict()
load_query_samples()
unload_query_samples()
For model_predict(), how data is returned to loadgen is model specific: the
loadgen CPP API requires a data pointer and length, and it saves the data to
mlperf_log_accuracy.json, which is used to generate the accuracy number offline.
"""
import multiprocessing
import threading
import subprocess
import time
import os
import sys
import argparse
import array
import logging
import numpy as np
import mlperf_loadgen as lg
from collections import defaultdict
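
# Illustrative sketch only (toy names, not part of the benchmark code): the
# smallest form of the producer/consumer flow described in the module
# docstring. A worker drains an input queue until a None sentinel arrives and
# pushes results to an output queue, mirroring Consumer.run() below.
def _toy_consumer_loop(in_q, out_q):
    while True:
        item = in_q.get()
        if item is None:          # None is the shutdown signal
            in_q.task_done()
            break
        out_q.put(item * 2)       # stand-in for model_predict()
        in_q.task_done()
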
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("MXNet-BERT")
num_cpus = 28
num_ins = 2
NANO_SEC = 1e9
MILLI_SEC = 1000
in_queue_cnt = 0
out_queue_cnt = 0
bs_step = 8
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--scenario", choices=["Offline", "Server"], default="Offline", help="Scenario")
parser.add_argument("--batching", choices=["Fixed", "Dynamic", "Adaptive"], default="Adaptive", help="Batching method")
parser.add_argument("--batch-size", default=1, type=int, help="batch_size")
parser.add_argument("--num-instance", default=2, type=int, help="number of instance")
parser.add_argument("--num-phy-cpus", default=28, type=int, help="number of physical cpus")
parser.add_argument("--vocab", default='converted_from_tf_to_mxnet/tf.vocab',
type=str, help="vocab file path")
parser.add_argument("--params", default='converted_from_tf_to_mxnet/tf_fp32.params',
type=str, help="FP32 params path")
parser.add_argument("--quantized_model_prefix",
default='converted_from_tf_to_mxnet/quantized_models/model_bert_squad_quantized_customize',
type=str, help="quantized model prefix")
parser.add_argument("--accuracy", action="store_true", help="enable accuracy pass")
parser.add_argument("--quantized", action="store_true", help="use quantized model")
parser.add_argument("--mlperf-conf", default="mlperf.conf", help="mlperf rules config")
parser.add_argument("--user-conf", default="user.conf", help="user rules config")
parser.add_argument("--perf-count", default=None, help="perf count")
parser.add_argument("--profile", action="store_true", help="whether enable profiler")
parser.add_argument("--warmup", action="store_true", help="whether do warmup")
parser.add_argument("--perf_calibrate", action="store_true", help="whether do performance calibration")
args = parser.parse_args()
return args
scenario_map = {
"Offline": lg.TestScenario.Offline,
"Server": lg.TestScenario.Server,
}
def load_query_samples(sample_list):
# This is model specific place holder
pass
def unload_query_samples(sample_list):
# This is model specific place holder
pass
def block_until(counter, num_ins, t=1):
while counter.value < num_ins:
time.sleep(t)
batches = None
def load_perf_prof():
global batches
global throughputs
# load performance profile map for offline scenario
if os.path.exists("prof.py"):
from prof import prof_map
from prof import prof_bs_step
else:
prof_map = {}
prof_bs_step = 1
return
longest_seq = 0
for k, v in sorted(prof_map.items()):
if k > longest_seq:
longest_seq = k
batches = [0.0] * (longest_seq+1)
throughputs = [0.0] * (longest_seq+1)
for k, v in sorted(prof_map.items()):
max_throughput = 0.0
max_bs = 0
for i in range(1, len(v)):
current_bs = i * prof_bs_step
if current_bs/v[i] > max_throughput:
max_throughput = current_bs/v[i]
max_bs = current_bs
batches[k] = max_bs
throughputs[k] = max_throughput
def get_best_bs(seq_len):
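    # Walk the profiled table starting at seq_len: skip lengths with no
    # measurement, then return the (padded seq_len, batch size, throughput)
    # triple with the highest measured sequences/second up to length 384.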
global batches
    if batches is None:
load_perf_prof()
global throughputs
while batches[seq_len] == 0:
seq_len += 1
best_seq_len = seq_len
best_bs = batches[seq_len]
best_throughput = throughputs[seq_len]
seq_len += 1
while seq_len < 385:
if throughputs[seq_len] > best_throughput:
best_seq_len = seq_len
best_bs = batches[seq_len]
best_throughput = throughputs[seq_len]
seq_len += 1
return best_seq_len, best_bs, best_throughput
class Consumer(multiprocessing.Process):
def __init__(self, task_queue, result_queue, lock, init_counter, calibrate_counter, proc_idx, world_size, args, max_pad_len=384):
multiprocessing.Process.__init__(self)
global num_ins
self.task_queue = task_queue
self.result_queue = result_queue
self.lock = lock
self.init_counter = init_counter
self.calibrate_counter = calibrate_counter
self.proc_idx = proc_idx
self.world_size = world_size
self.args = args
self.affinity = range(round(proc_idx * num_cpus / num_ins),
round((proc_idx + 1) * num_cpus / num_ins))
self.start_core_idx = proc_idx * num_cpus // num_ins
self.end_core_idx = (proc_idx + 1) * num_cpus // num_ins - 1
self.length_list = {}
self.length_time_list = {}
self.max_pad_len = max_pad_len
def warmup(self, model, data_set, context, scenario):
if self.proc_idx == 0:
print ('Start warmup...')
data_size = len(data_set.eval_features)
count = 0
import mxnet as mx
for start in range(0, data_size):
inputs_list = []
token_types_list = []
valid_length_list = []
eval_feature = data_set.eval_features[start]
_, inputs, token_types, valid_length, _, _ = eval_feature
if len(inputs) in self.length_list:
continue
self.length_list[len(inputs)] = True
max_throughput = 0.0
best_bs = 0
if scenario == 'Offline':
# only support warmup of adaptive batching
best_len, best_bs, _ = get_best_bs(len(inputs))
if best_len in self.length_list:
continue
self.length_list[best_len] = True
inputs += [0] * (best_len - len(inputs))
token_types += [0] * (best_len - len(token_types))
for i in range(best_bs):
inputs_list.append(inputs)
token_types_list.append(token_types)
valid_length_list.append(valid_length)
if self.proc_idx == 0:
print ("warmup seqlen {} batchsize {}".format(best_len, best_bs))
else:
inputs_list.append(inputs)
token_types_list.append(token_types)
valid_length_list.append(valid_length)
inputs_nd = mx.nd.array(inputs_list).as_in_context(context)
token_types_nd = mx.nd.array(token_types_list).as_in_context(context)
valid_length_nd = mx.nd.array(valid_length_list).as_in_context(context).astype('float32')
# warm up primitive once
out = model.net(inputs_nd, token_types_nd, valid_length_nd)
out_np = out.asnumpy()
count += 1
if count % 10 == 0 and self.proc_idx == 0:
print ('Warmup {} samples'.format(count))
if self.proc_idx == 0:
print ('Warmup done')
def calibrate(self, model, data_set, context):
if self.proc_idx == 0:
print ('Start calibration...')
data_size = len(data_set.eval_features)
count = 0
global bs_step
import mxnet as mx
for start in range(0, data_size):
inputs_list = []
token_types_list = []
valid_length_list = []
eval_feature = data_set.eval_features[start]
_, inputs, token_types, valid_length, _, _ = eval_feature
cur_len = len(inputs)
if cur_len in self.length_list:
continue
self.length_list[cur_len] = True
if count % self.world_size != self.proc_idx:
count += 1
continue
count += 1
length_time_list = []
length_time_list.append(0)
max_throughput = 0.0
best_bs = 0
max_len = len(inputs)
while True:
for i in range(bs_step):
inputs_list.append(inputs)
token_types_list.append(token_types)
valid_length_list.append(valid_length)
inputs_nd = mx.nd.array(inputs_list).as_in_context(context)
token_types_nd = mx.nd.array(token_types_list).as_in_context(context)
valid_length_nd = mx.nd.array(valid_length_list).as_in_context(context).astype('float32')
# warm up primitive once
out = model.net(inputs_nd, token_types_nd, valid_length_nd)
out_np = out.asnumpy()
# measure time for the batch
t0 = time.time()
for i in range(8):
out = model.net(inputs_nd, token_types_nd, valid_length_nd)
out_np = out.asnumpy()
t1 = time.time()
duration = (t1 - t0)/8.0
throughput = len(inputs_list)/duration
if throughput > max_throughput:
max_throughput = throughput
best_bs = len(inputs_list)
if len(inputs_list) >= 256:
print ("{} - Best efficiency for seq len {} is BS {} with seq/s {:.5}".format(
self.proc_idx, max_len, best_bs, max_throughput))
break
#print ("{} - Best efficiency for seq len {} is BS {} with seq/s {:.5}, current BS {} seq/s {:.5}\r".format(
# self.proc_idx, max_len, best_bs, max_throughput, len(inputs_list), throughput), end='')
length_time_list.append(duration)
self.length_time_list[cur_len] = length_time_list
with open('prof_new.py', 'a') as f:
for k, v in sorted(self.length_time_list.items()):
print (' {} : {},'.format(k, v), file=f)
# keep the processor hot until all instance done calibration
print ('Calibrate almost done, keep instance hot')
self.lock.acquire()
self.calibrate_counter.value += 1
self.lock.release()
while self.calibrate_counter.value < 2 * self.world_size:
out = model.net(inputs_nd, token_types_nd, valid_length_nd)
out_np = out.asnumpy()
print ('Calibrate done')
def run(self):
global batching
#os.sched_setaffinity(self.pid, self.affinity)
cmd = "taskset -p -c %d-%d %d" % (self.start_core_idx, self.end_core_idx, self.pid)
print (cmd)
os.system(cmd)
import mxnet as mx
ctx = mx.cpu()
#from numexpr.utils import set_num_threads
#set_num_threads(28)
os.environ['OMP_NUM_THREADS'] = '{}'.format(self.end_core_idx-self.start_core_idx+1)
model = BERTModel(mx.cpu(), self.args.vocab, self.args.params,
self.args.quantized, self.args.quantized_model_prefix)
data_set = BERTDataSet(self.args.vocab, self.args.perf_count)
self.lock.acquire()
self.calibrate_counter.value += 1
self.lock.release()
block_until(self.calibrate_counter, self.world_size)
if self.args.perf_calibrate:
self.calibrate(model, data_set, ctx)
return
self.lock.acquire()
self.calibrate_counter.value += 1
self.lock.release()
if self.args.warmup:
self.warmup(model, data_set, ctx, self.args.scenario)
self.lock.acquire()
self.init_counter.value += 1
self.lock.release()
#affinity = os.sched_getaffinity(self.pid)
#print('Process', self.pid, 'affinity proc list:', affinity)
cur_step = 0
start_step = 384
end_step = -1
from utils import profile
while True:
next_task = self.task_queue.get() #(self.proc_idx)
if next_task is None:
# None means shutdown
log.info('Exiting {}-pid:{}, cur_step={}'.format(self.name, self.pid, cur_step))
self.task_queue.task_done()
if self.args.profile and self.proc_idx==0:
if end_step == -1:
end_step = cur_step
profile(cur_step, start_step, end_step, profile_name='profile_{}.json'.format(self.pid), early_exit=False)
break
query_id_list = next_task.query_id_list
sample_index_list = next_task.sample_index_list
batch_size = len(sample_index_list)
#print ('pid-{}, query_id_list: {}, sample_index_list: {}'.format(self.pid, query_id_list, sample_index_list))
inputs_list = []
token_types_list = []
valid_length_list = []
for sample_index in sample_index_list:
eval_feature = data_set.eval_features[sample_index]
_, inputs, token_types, valid_length, _, _ = eval_feature
inputs_list.append(inputs)
token_types_list.append(token_types)
valid_length_list.append(valid_length)
if len(inputs_list) > 1:
max_len = max([len(inp) for inp in inputs_list])
new_max_len, bs, best_throughput = get_best_bs(max_len)
if bs == len(inputs_list):
max_len = new_max_len
#for i in range(len(inputs_list)):
# inputs_list[i] += [0] * (max_len - len(inputs_list[i]))
# token_types_list[i] += [0] * (max_len - len(token_types_list[i]))
else:
max_len = self.max_pad_len #len(inputs_list[0]) #self.max_pad_len #len(inputs_list)
for i in range(len(inputs_list)):
inputs_list[i] += [0] * (max_len - len(inputs_list[i]))
token_types_list[i] += [0] * (max_len - len(token_types_list[i]))
inputs = mx.nd.array(inputs_list).as_in_context(ctx)
token_types = mx.nd.array(token_types_list).as_in_context(ctx)
valid_length = mx.nd.array(valid_length_list).as_in_context(ctx).astype('float32')
if self.args.profile and self.proc_idx==0:
profile(cur_step, start_step, end_step, profile_name='profile_{}.json'.format(self.pid), early_exit=False)
cur_step += 1
#t0 = time.time()
out = model.net(inputs, token_types, valid_length)
out_np = out.asnumpy()
#t1 = time.time()
#if self.proc_idx == 0:
# cur_throughput = len(inputs_list)/(t1-t0)
# if best_throughput != 0:
# throughput_diff = (cur_throughput - best_throughput) / best_throughput
# print ('inference seq len = {} BS = {} throughput = {:.5f} ({:.3f}%)'.format(max_len, len(inputs_list), cur_throughput, throughput_diff*100))
# else:
# print ('inference seq len = {} BS = {} throughput = {:.5f})'.format(max_len, len(inputs_list), cur_throughput))
result = Output(query_id_list, out_np)
self.result_queue.put(result)
#print('consumer-{}: output.shape={}, query_id={}'.format(self.pid, out_np.shape, query_id_list[0]))
self.task_queue.task_done()
class Input(object):
def __init__(self, id_list, index_list, sample_length_list):
assert isinstance(id_list, list)
assert isinstance(index_list, list)
assert isinstance(sample_length_list, list)
assert len(id_list) == len(index_list)
self.query_id_list = id_list
self.sample_index_list = index_list
self.sample_length_list = sample_length_list
class Output(object):
def __init__(self, query_id_list, result):
self.query_id_list = query_id_list
self.result = result
class InQueue():
def __init__(self, in_queue, batch_size, data_set):
from preprocessing_utils import max_seq_length
self.in_queue = in_queue
self.batch_size = batch_size
self.query_id_list = []
self.sample_index_list = []
self.sample_length_list = []
self.index = 0
self.data_set = data_set
self.max_seq_len = max_seq_length
def put(self, query_samples):
global in_queue_cnt
##TODO, debug
idx = [q.index for q in query_samples]
query_id = [q.id for q in query_samples]
query_len = len(query_samples)
num_samples = len(query_samples)
def idx_len(e):
idx = e.index
feature = self.data_set.eval_features[idx]
_, inputs, _, _, _, _ = feature
return len(inputs)
if num_samples == 1:
if self.batch_size == 1:
in_queue_cnt += 1
self.in_queue.put(Input([query_samples[0].id],
[query_samples[0].index],
[idx_len(query_samples[0])]))
else:
self.index += 1
if self.index < self.batch_size:
self.query_id_list.append(query_samples[0].id)
self.sample_index_list.append(query_samples[0].index)
self.sample_length_list.append(idx_len(query_samples[0]))
else:
self.query_id_list.append(query_samples[0].id)
self.sample_index_list.append(query_samples[0].index)
self.sample_length_list.append(idx_len(query_samples[0]))
self.in_queue.put(Input(self.query_id_list, self.sample_index_list, self.sample_length_list))
in_queue_cnt += self.batch_size
self.index = 0
self.query_id_list = []
self.sample_index_list = []
self.sample_length_list = []
else:
query_samples.sort(key=idx_len, reverse=True)
def enqueue_batch(cur_batch_size, base_index=0):
global in_queue_cnt
id_list = []
index_list = []
length_list = []
for i in range(cur_batch_size):
id_list.append(query_samples[base_index + i].id)
index_list.append(query_samples[base_index + i].index)
length_list.append(idx_len(query_samples[base_index + i]))
self.in_queue.put(Input(id_list, index_list, length_list))
in_queue_cnt += cur_batch_size
global batching
true_total_len = 0
total_len = 0
for i in range(num_samples):
true_total_len += idx_len(query_samples[i])
if batching == 'Dynamic':
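                # Dynamic batching: samples arrive sorted longest-first, so a batch's
                # padded volume is (length of its first sample) * (number of samples);
                # grow each batch until that volume reaches batch_size * max_seq_len,
                # splitting at whichever count lands closer to the budget.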
batch_seq_len = self.batch_size * self.max_seq_len
base_index = 0
num_batches = 0
while base_index < num_samples:
base_len = idx_len(query_samples[base_index])
for i in range(base_index, num_samples):
current_len = base_len * (i-base_index+1)
if i+1 < num_samples:
next_len = base_len * (i+1-base_index+1)
if next_len > batch_seq_len:
if next_len - batch_seq_len > batch_seq_len - current_len:
next_index = i+1
else:
next_index = i+2
break
else:
next_index = i+1
break
total_len += base_len * (next_index-base_index)
enqueue_batch(next_index-base_index, base_index)
num_batches += 1
#print('pid-{2}: enqueue bs={0} and input volume {1}...'
# .format(next_index-base_index, current_len, os.getpid()))
base_index = next_index
print('pid-{1}: enqueued {0} batches, pad ratio = {2}%'
.format(num_batches, os.getpid(), (total_len-true_total_len)*100/true_total_len))
elif batching == 'Adaptive':
batch_seq_len = self.batch_size * self.max_seq_len
base_index = 0
num_batches = 0
while base_index < num_samples:
base_len = idx_len(query_samples[base_index])
best_len, best_bs, _ = get_best_bs(base_len)
next_index = base_index + best_bs
if next_index > num_samples:
next_index = num_samples
total_len += base_len * (next_index-base_index)
enqueue_batch(next_index-base_index, base_index)
num_batches += 1
#print('pid-{2}: enqueue bs={0} and input volume {1}...'
# .format(next_index-base_index, current_len, os.getpid()))
base_index = next_index
print('pid-{1}: enqueued {0} batches, pad ratio = {2}%'
.format(num_batches, os.getpid(), (total_len-true_total_len)*100/true_total_len))
else:
num_batch = num_samples // self.batch_size
remaining_batch = num_samples % self.batch_size
## TODO, remove
print('pid-{3}: split the datasets into {0} batches with bs={1} and remaining {2}...'
.format(num_batch, self.batch_size, remaining_batch, os.getpid()))
for b in range(num_batch):
base_index = b * self.batch_size
enqueue_batch(self.batch_size, base_index)
if remaining_batch > 0:
base_index = num_batch * self.batch_size
enqueue_batch(remaining_batch, base_index)
#print ('in_queue_cnt=', in_queue_cnt)
class InQueueServer():
def __init__(self, in_queue, batch_sizes, data_set, expected_total_queries):
from preprocessing_utils import max_seq_length
self.in_queues = in_queue
self.batch_sizes = batch_sizes
self.query_id_lists = defaultdict(list)
self.sample_index_lists = defaultdict(list)
self.indexes = defaultdict(int)
self.sample_length_lists = defaultdict(list)
self.data_set = data_set
self.max_seq_len = max_seq_length
self.num_buckets = len(in_queue)
self.cutoffs = sorted(list(batch_sizes.keys()))
self.expected_total_queries = expected_total_queries
        self.batch_sizes = defaultdict(int, batch_sizes)
def getQueryBucket(self, query_len):
end = 0
while end < self.num_buckets and query_len > self.cutoffs[end]:
end += 1
return self.cutoffs[end]
def getQuerySampleLength(self, query ):
idx = query.index
return len( self.data_set.eval_features[idx][1] ) # input sequence is the 2nd attribute per ex.
def put(self, query_samples):
global in_queue_cnt
global queries_so_far # Track no. of queries received from loadgen
##TODO, debug
idx = [q.index for q in query_samples]
query_id = [q.id for q in query_samples]
query_len = len(query_samples)
        num_samples = len(query_samples)
        queries_so_far += num_samples
if num_samples == 1:
# Use length of the query sample to determine the queue it should be put
q_length = self.getQuerySampleLength( query_samples[0] )
bucket = self.getQueryBucket( q_length )
if self.batch_sizes[bucket] == 1:
in_queue_cnt += 1
                self.in_queues[bucket].put(Input([query_samples[0].id], [query_samples[0].index], [q_length]))
else:
self.indexes[bucket] += 1
if self.indexes[bucket] < self.batch_sizes[bucket]:
self.query_id_lists[bucket].append(query_samples[0].id)
self.sample_index_lists[bucket].append(query_samples[0].index)
                    self.sample_length_lists[bucket].append(q_length)
else:
self.query_id_lists[bucket].append(query_samples[0].id)
self.sample_index_lists[bucket].append(query_samples[0].index)
self.sample_length_lists[bucket].append(q_length)
self.in_queues[bucket].put(Input(self.query_id_lists[bucket], self.sample_index_lists[bucket], self.sample_length_lists[bucket]))
in_queue_cnt += self.batch_sizes[bucket]
self.indexes[bucket] = 0
self.query_id_lists[bucket] = []
self.sample_index_lists[bucket] = []
self.sample_length_lists[bucket] = []
if queries_so_far == self.expected_total_queries:
for bucket in self.in_queues:
query_id_list = self.query_id_lists[bucket]
sample_index_list = self.sample_index_lists[bucket]
sample_length_list = self.sample_length_lists[bucket]
for j, q_id in enumerate(query_id_list):
s_idx = sample_index_list[j]
s_len = sample_length_list[j]
self.in_queues[bucket].put(Input([q_id], [s_idx], [s_len]))
in_queue_cnt += 1
def flush_queries():
pass
def process_latencies(latencies_ns):
# It's called by loadgen to show us the recorded latencies
log.info("Average latency (ms) per query:")
log.info(np.mean(latencies_ns)/1000000.0)
log.info("Median latency (ms): ")
log.info(np.percentile(latencies_ns, 50)/1000000.0)
log.info("90 percentile latency (ms): ")
log.info(np.percentile(latencies_ns, 90)/1000000.0)
def response_loadgen(out_queue):
global out_queue_cnt
while True:
next_task = out_queue.get()
if next_task is None:
# None means shutdown
log.info('Exiting response thread')
break
query_id_list = next_task.query_id_list
result = next_task.result
batch_size = len(query_id_list)
        result = result.reshape(batch_size, -1, 2)
out_list = np.split(result, batch_size, axis=0)
#responses = []
for i, o in enumerate(out_list):
response_array = array.array("B", np.array(o).astype(np.float32).tobytes())
bi = response_array.buffer_info()
#responses.append(lg.QuerySampleResponse(query_id_list[i], bi[0], bi[1]))
responses = [lg.QuerySampleResponse(query_id_list[i], bi[0], bi[1])]
out_queue_cnt += 1
#print('Response loadgen ({}), query_id {}, out_queue_cnt {}'.format(os.getpid(), query_id_list[i], out_queue_cnt))
lg.QuerySamplesComplete(responses)
#lg.QuerySamplesComplete(responses)
class BERTModel():
def __init__(self, ctx, mx_vocab, params, quantized, quantized_model_prefix):
import gluonnlp as nlp
from utils import BertForQA
import mxnet as mx
if quantized:
log.info('Loading quantized MXNet model...')
self.net = mx.gluon.SymbolBlock.imports('{}-symbol.json'.format(quantized_model_prefix),
['data0', 'data1', 'data2'],
'{}-0000.params'.format(quantized_model_prefix))
self.net.hybridize(static_alloc=True, static_shape=True)
else:
log.info('Loading MXNet model...')
with open(mx_vocab, 'r') as f:
vocab = nlp.vocab.BERTVocab.from_json(f.read())
bert, vocab = nlp.model.get_model(
name='bert_24_1024_16',
dataset_name=None,
vocab=vocab,
pretrained=False,
ctx=ctx,
use_pooler=False,
use_decoder=False,
use_classifier=False)
self.net = BertForQA(bert=bert)
nlp.utils.load_parameters(self.net, params, ctx=ctx, cast_dtype=True)
self.net.hybridize(static_alloc=True)
class BERTDataSet():
def __init__(self, mx_vocab, perf_count):
import gluonnlp as nlp
from preprocessing_utils import preprocess_dataset, max_seq_length, max_query_length, doc_stride
from gluonnlp.data import SQuAD
eval_features = []
with open(mx_vocab, 'r') as f:
vocab = nlp.vocab.BERTVocab.from_json(f.read())
log.info("Creating tokenizer...")
tokenizer = nlp.data.BERTTokenizer(vocab=vocab, lower=True)
round_to = None
log.info("Reading examples...")
dev_path = os.path.join(os.getcwd(), 'build/data')
dev_data = SQuAD('dev', version='1.1', root=dev_path)
dev_data_transform = preprocess_dataset(tokenizer,
dev_data,
max_seq_length=max_seq_length,
doc_stride=doc_stride,
max_query_length=max_query_length,
input_features=True)
self.eval_features = dev_data_transform
self.count = len(self.eval_features)
self.perf_count = perf_count if perf_count is not None else self.count
class MultiprocessShapeBasedQueue(object):
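    # In its current form this is a thin wrapper around a single
    # multiprocessing.JoinableQueue; the shape-aware per-instance dispatch is
    # kept below as commented-out code.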
def __init__(self):
global num_ins
self._jq = multiprocessing.JoinableQueue()
self._instances_queue = [multiprocessing.Queue() for _ in range(num_ins)]
self._manager = multiprocessing.Manager()
self.shape_in_instance = self._manager.dict()
self.finish_status = self._manager.dict()
def get(self, instance_id=0):
return self._jq.get()
# with multiprocessing.Lock():
# if self._instances_queue[instance_id].empty():
# while True:
# item = self._jq.get()
# if item != None:
# sample_length = item.sample_length_list[0]
# batch_size = len(item.sample_index_list)
# key = (batch_size, sample_length)
# if key in self.shape_in_instance.keys():
# if self.shape_in_instance[key] == instance_id:
# return item
# else:
# target_instance = self.shape_in_instance[key]
# if target_instance in self.finish_status.keys():
# # target instance already finished execution - get item
# del shape_in_instance[key]
# return item
# else:
# self._instances_queue[target_instance].put(item)
# # reapeat while loop - get new item and check if it's suitable for instance
# else:
# # mark shape with current instance
# self.shape_in_instance[key] = instance_id
# return item
# else:
# self.finish_status[instance_id] = True
# return item # return None
# else:
# item = self._instances_queue[instance_id].get()
# return item
def put(self, obj, block=True, timeout=None):
return self._jq.put(obj, block, timeout)
##print("end put")
def task_done(self):
#print("task_done")
return self._jq.task_done()
#print("end task_done")
def join(self):
#print("join")
return self._jq.join()
#print("end join")
def main():
global num_ins
global num_cpus
global in_queue_cnt
global out_queue_cnt
global batching
global queries_so_far
global Latencies
queries_so_far = 0
args = get_args()
log.info(args)
scenario = args.scenario
accuracy_mode = args.accuracy
perf_count = args.perf_count
batch_size = args.batch_size
num_ins = args.num_instance
num_cpus = args.num_phy_cpus
batching = args.batching
# Read Loadgen and workload config parameters
settings = lg.TestSettings()
settings.scenario = scenario_map[scenario]
settings.FromConfig(args.mlperf_conf, "bert", scenario)
settings.FromConfig(args.user_conf, "bert", scenario)
settings.mode = lg.TestMode.AccuracyOnly if accuracy_mode else lg.TestMode.PerformanceOnly
# Establish communication queues
lock = multiprocessing.Lock()
init_counter = multiprocessing.Value("i", 0)
calibrate_counter = multiprocessing.Value("i", 0)
out_queue = multiprocessing.Queue()
# Create consumers
consumers = []
if scenario == "Server":
from parse_server_config import configParser
buckets = configParser( "machine_conf.json")
cutoffs = list(buckets.keys())
batch_sizes = {}
in_queue = {j: multiprocessing.JoinableQueue() for j in buckets}
proc_idx = 0
num_cpus = 0
total_ins = 0
for cutoff in list(buckets.keys()):
batch_sizes[ cutoff ] = buckets[ cutoff ]["batch_size"]
num_ins = buckets[ cutoff ]["instances"]
cpus_per_instance = buckets[ cutoff ]["cpus_per_instance"]
num_cpus = num_ins * cpus_per_instance
total_ins += num_ins
for j in range(num_ins):
consumer = Consumer( in_queue[ cutoff ], out_queue, lock, init_counter, calibrate_counter, proc_idx, num_ins, args, cutoff)
consumer.start_core_idx = proc_idx
consumer.end_core_idx = proc_idx + cpus_per_instance - 1
consumers.append(consumer)
proc_idx = consumer.end_core_idx + 1
num_ins = total_ins
else:
total_ins = num_ins
in_queue = MultiprocessShapeBasedQueue()
consumers = [Consumer(in_queue, out_queue, lock, init_counter, calibrate_counter, i, num_ins, args)
for i in range(num_ins)]
for c in consumers:
c.start()
# Dataset object used by constructQSL
data_set = BERTDataSet(args.vocab, args.perf_count)
if scenario=="Server":
issue_queue = InQueueServer(in_queue, batch_sizes, data_set, settings.min_query_count)
else:
issue_queue = InQueue(in_queue, batch_size, data_set)
# Wait until all sub-processors are ready
block_until(init_counter, total_ins, 2)
# Start response thread
response_worker = threading.Thread(
target=response_loadgen, args=(out_queue,))
response_worker.daemon = True
response_worker.start()
def issue_queries(query_samples):
# It's called by loadgen to send query to SUT
issue_queue.put(query_samples)
sut = lg.ConstructSUT(
issue_queries, flush_queries, process_latencies)
qsl = lg.ConstructQSL(
data_set.count, data_set.perf_count, load_query_samples, unload_query_samples)
log_path = "build/logs"
if not os.path.exists(log_path):
os.makedirs(log_path)
log_output_settings = lg.LogOutputSettings()
log_output_settings.outdir = log_path
log_output_settings.copy_summary_to_stdout = True
log_settings = lg.LogSettings()
log_settings.log_output = log_output_settings
lg.StartTestWithLogSettings(sut, qsl, settings, log_settings)
# Wait until outQueue done
while out_queue_cnt < in_queue_cnt:
time.sleep(0.2)
if scenario == "Server":
for i in in_queue:
in_queue[i].join()
            for j in range(buckets[ i ]["instances"]):
in_queue[i].put(None)
else:
for i in range(num_ins):
in_queue.put(None)
for c in consumers:
c.join()
out_queue.put(None)
if accuracy_mode:
cmd = "python accuracy-squad.py --log_file={}/mlperf_log_accuracy.json".format(log_path)
subprocess.check_call(cmd, shell=True)
lg.DestroyQSL(qsl)
lg.DestroySUT(sut)
if __name__ == '__main__':
main()
|
[
"argparse.ArgumentParser",
"multiprocessing.Lock",
"mlperf_loadgen.TestSettings",
"multiprocessing.Value",
"collections.defaultdict",
"numpy.mean",
"multiprocessing.Queue",
"multiprocessing.Process.__init__",
"subprocess.check_call",
"os.path.exists",
"gluonnlp.data.SQuAD",
"mlperf_loadgen.LogOutputSettings",
"gluonnlp.data.BERTTokenizer",
"multiprocessing.JoinableQueue",
"mlperf_loadgen.QuerySampleResponse",
"threading.Thread",
"mlperf_loadgen.StartTestWithLogSettings",
"mlperf_loadgen.DestroySUT",
"os.system",
"time.sleep",
"parse_server_config.configParser",
"numpy.percentile",
"mlperf_loadgen.QuerySamplesComplete",
"mlperf_loadgen.ConstructSUT",
"mlperf_loadgen.LogSettings",
"mxnet.cpu",
"preprocessing_utils.preprocess_dataset",
"mxnet.nd.array",
"gluonnlp.utils.load_parameters",
"os.getpid",
"os.makedirs",
"logging.basicConfig",
"mlperf_loadgen.DestroyQSL",
"multiprocessing.Manager",
"gluonnlp.model.get_model",
"os.getcwd",
"mlperf_loadgen.ConstructQSL",
"utils.BertForQA",
"numpy.split",
"time.time",
"numpy.array",
"prof.prof_map.items",
"logging.getLogger"
] |
[((1116, 1155), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (1135, 1155), False, 'import logging\n'), ((1162, 1193), 'logging.getLogger', 'logging.getLogger', (['"""MXNet-BERT"""'], {}), "('MXNet-BERT')\n", (1179, 1193), False, 'import logging\n'), ((1333, 1358), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1356, 1358), False, 'import argparse\n'), ((3653, 3678), 'os.path.exists', 'os.path.exists', (['"""prof.py"""'], {}), "('prof.py')\n", (3667, 3678), False, 'import os\n'), ((33593, 33610), 'mlperf_loadgen.TestSettings', 'lg.TestSettings', ([], {}), '()\n', (33608, 33610), True, 'import mlperf_loadgen as lg\n'), ((33920, 33942), 'multiprocessing.Lock', 'multiprocessing.Lock', ([], {}), '()\n', (33940, 33942), False, 'import multiprocessing\n'), ((33962, 33991), 'multiprocessing.Value', 'multiprocessing.Value', (['"""i"""', '(0)'], {}), "('i', 0)\n", (33983, 33991), False, 'import multiprocessing\n'), ((34016, 34045), 'multiprocessing.Value', 'multiprocessing.Value', (['"""i"""', '(0)'], {}), "('i', 0)\n", (34037, 34045), False, 'import multiprocessing\n'), ((34062, 34085), 'multiprocessing.Queue', 'multiprocessing.Queue', ([], {}), '()\n', (34083, 34085), False, 'import multiprocessing\n'), ((35934, 35994), 'threading.Thread', 'threading.Thread', ([], {'target': 'response_loadgen', 'args': '(out_queue,)'}), '(target=response_loadgen, args=(out_queue,))\n', (35950, 35994), False, 'import threading\n'), ((36209, 36273), 'mlperf_loadgen.ConstructSUT', 'lg.ConstructSUT', (['issue_queries', 'flush_queries', 'process_latencies'], {}), '(issue_queries, flush_queries, process_latencies)\n', (36224, 36273), True, 'import mlperf_loadgen as lg\n'), ((36293, 36391), 'mlperf_loadgen.ConstructQSL', 'lg.ConstructQSL', (['data_set.count', 'data_set.perf_count', 'load_query_samples', 'unload_query_samples'], {}), '(data_set.count, data_set.perf_count, load_query_samples,\n unload_query_samples)\n', (36308, 36391), True, 'import mlperf_loadgen as lg\n'), ((36519, 36541), 'mlperf_loadgen.LogOutputSettings', 'lg.LogOutputSettings', ([], {}), '()\n', (36539, 36541), True, 'import mlperf_loadgen as lg\n'), ((36657, 36673), 'mlperf_loadgen.LogSettings', 'lg.LogSettings', ([], {}), '()\n', (36671, 36673), True, 'import mlperf_loadgen as lg\n'), ((36729, 36790), 'mlperf_loadgen.StartTestWithLogSettings', 'lg.StartTestWithLogSettings', (['sut', 'qsl', 'settings', 'log_settings'], {}), '(sut, qsl, settings, log_settings)\n', (36756, 36790), True, 'import mlperf_loadgen as lg\n'), ((37390, 37408), 'mlperf_loadgen.DestroyQSL', 'lg.DestroyQSL', (['qsl'], {}), '(qsl)\n', (37403, 37408), True, 'import mlperf_loadgen as lg\n'), ((37413, 37431), 'mlperf_loadgen.DestroySUT', 'lg.DestroySUT', (['sut'], {}), '(sut)\n', (37426, 37431), True, 'import mlperf_loadgen as lg\n'), ((3495, 3508), 'time.sleep', 'time.sleep', (['t'], {}), '(t)\n', (3505, 3508), False, 'import time\n'), ((3867, 3883), 'prof.prof_map.items', 'prof_map.items', ([], {}), '()\n', (3881, 3883), False, 'from prof import prof_map\n'), ((4045, 4061), 'prof.prof_map.items', 'prof_map.items', ([], {}), '()\n', (4059, 4061), False, 'from prof import prof_map\n'), ((5137, 5175), 'multiprocessing.Process.__init__', 'multiprocessing.Process.__init__', (['self'], {}), '(self)\n', (5169, 5175), False, 'import multiprocessing\n'), ((11582, 11596), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (11591, 11596), False, 'import os\n'), ((11638, 11646), 'mxnet.cpu', 'mx.cpu', ([], 
{}), '()\n', (11644, 11646), True, 'import mxnet as mx\n'), ((23323, 23340), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (23334, 23340), False, 'from collections import defaultdict\n'), ((23375, 23392), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (23386, 23392), False, 'from collections import defaultdict\n'), ((23416, 23432), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (23427, 23432), False, 'from collections import defaultdict\n'), ((23468, 23485), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (23479, 23485), False, 'from collections import defaultdict\n'), ((23749, 23765), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (23760, 23765), False, 'from collections import defaultdict\n'), ((27350, 27386), 'numpy.split', 'np.split', (['result', 'batch_size'], {'axis': '(0)'}), '(result, batch_size, axis=0)\n', (27358, 27386), True, 'import numpy as np\n'), ((29727, 29774), 'gluonnlp.data.BERTTokenizer', 'nlp.data.BERTTokenizer', ([], {'vocab': 'vocab', 'lower': '(True)'}), '(vocab=vocab, lower=True)\n', (29749, 29774), True, 'import gluonnlp as nlp\n'), ((29918, 29960), 'gluonnlp.data.SQuAD', 'SQuAD', (['"""dev"""'], {'version': '"""1.1"""', 'root': 'dev_path'}), "('dev', version='1.1', root=dev_path)\n", (29923, 29960), False, 'from gluonnlp.data import SQuAD\n'), ((29990, 30147), 'preprocessing_utils.preprocess_dataset', 'preprocess_dataset', (['tokenizer', 'dev_data'], {'max_seq_length': 'max_seq_length', 'doc_stride': 'doc_stride', 'max_query_length': 'max_query_length', 'input_features': '(True)'}), '(tokenizer, dev_data, max_seq_length=max_seq_length,\n doc_stride=doc_stride, max_query_length=max_query_length,\n input_features=True)\n', (30008, 30147), False, 'from preprocessing_utils import preprocess_dataset, max_seq_length, max_query_length, doc_stride\n'), ((30664, 30695), 'multiprocessing.JoinableQueue', 'multiprocessing.JoinableQueue', ([], {}), '()\n', (30693, 30695), False, 'import multiprocessing\n'), ((30802, 30827), 'multiprocessing.Manager', 'multiprocessing.Manager', ([], {}), '()\n', (30825, 30827), False, 'import multiprocessing\n'), ((34230, 34263), 'parse_server_config.configParser', 'configParser', (['"""machine_conf.json"""'], {}), "('machine_conf.json')\n", (34242, 34263), False, 'from parse_server_config import configParser\n'), ((36437, 36461), 'os.path.exists', 'os.path.exists', (['log_path'], {}), '(log_path)\n', (36451, 36461), False, 'import os\n'), ((36471, 36492), 'os.makedirs', 'os.makedirs', (['log_path'], {}), '(log_path)\n', (36482, 36492), False, 'import os\n'), ((36871, 36886), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (36881, 36886), False, 'import time\n'), ((37346, 37384), 'subprocess.check_call', 'subprocess.check_call', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (37367, 37384), False, 'import subprocess\n'), ((11847, 11855), 'mxnet.cpu', 'mx.cpu', ([], {}), '()\n', (11853, 11855), True, 'import mxnet as mx\n'), ((26695, 26716), 'numpy.mean', 'np.mean', (['latencies_ns'], {}), '(latencies_ns)\n', (26702, 26716), True, 'import numpy as np\n'), ((26779, 26810), 'numpy.percentile', 'np.percentile', (['latencies_ns', '(50)'], {}), '(latencies_ns, 50)\n', (26792, 26810), True, 'import numpy as np\n'), ((26880, 26911), 'numpy.percentile', 'np.percentile', (['latencies_ns', '(90)'], {}), '(latencies_ns, 90)\n', (26893, 26911), True, 'import numpy as np\n'), ((27924, 27958), 'mlperf_loadgen.QuerySamplesComplete', 
'lg.QuerySamplesComplete', (['responses'], {}), '(responses)\n', (27947, 27958), True, 'import mlperf_loadgen as lg\n'), ((28826, 28995), 'gluonnlp.model.get_model', 'nlp.model.get_model', ([], {'name': '"""bert_24_1024_16"""', 'dataset_name': 'None', 'vocab': 'vocab', 'pretrained': '(False)', 'ctx': 'ctx', 'use_pooler': '(False)', 'use_decoder': '(False)', 'use_classifier': '(False)'}), "(name='bert_24_1024_16', dataset_name=None, vocab=vocab,\n pretrained=False, ctx=ctx, use_pooler=False, use_decoder=False,\n use_classifier=False)\n", (28845, 28995), True, 'import gluonnlp as nlp\n'), ((29140, 29160), 'utils.BertForQA', 'BertForQA', ([], {'bert': 'bert'}), '(bert=bert)\n', (29149, 29160), False, 'from utils import BertForQA\n'), ((29173, 29242), 'gluonnlp.utils.load_parameters', 'nlp.utils.load_parameters', (['self.net', 'params'], {'ctx': 'ctx', 'cast_dtype': '(True)'}), '(self.net, params, ctx=ctx, cast_dtype=True)\n', (29198, 29242), True, 'import gluonnlp as nlp\n'), ((29872, 29883), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (29881, 29883), False, 'import os\n'), ((30729, 30752), 'multiprocessing.Queue', 'multiprocessing.Queue', ([], {}), '()\n', (30750, 30752), False, 'import multiprocessing\n'), ((34353, 34384), 'multiprocessing.JoinableQueue', 'multiprocessing.JoinableQueue', ([], {}), '()\n', (34382, 34384), False, 'import multiprocessing\n'), ((9725, 9736), 'time.time', 'time.time', ([], {}), '()\n', (9734, 9736), False, 'import time\n'), ((9916, 9927), 'time.time', 'time.time', ([], {}), '()\n', (9925, 9927), False, 'import time\n'), ((27697, 27751), 'mlperf_loadgen.QuerySampleResponse', 'lg.QuerySampleResponse', (['query_id_list[i]', 'bi[0]', 'bi[1]'], {}), '(query_id_list[i], bi[0], bi[1])\n', (27719, 27751), True, 'import mlperf_loadgen as lg\n'), ((7471, 7495), 'mxnet.nd.array', 'mx.nd.array', (['inputs_list'], {}), '(inputs_list)\n', (7482, 7495), True, 'import mxnet as mx\n'), ((7548, 7577), 'mxnet.nd.array', 'mx.nd.array', (['token_types_list'], {}), '(token_types_list)\n', (7559, 7577), True, 'import mxnet as mx\n'), ((14950, 14974), 'mxnet.nd.array', 'mx.nd.array', (['inputs_list'], {}), '(inputs_list)\n', (14961, 14974), True, 'import mxnet as mx\n'), ((15020, 15049), 'mxnet.nd.array', 'mx.nd.array', (['token_types_list'], {}), '(token_types_list)\n', (15031, 15049), True, 'import mxnet as mx\n'), ((9261, 9285), 'mxnet.nd.array', 'mx.nd.array', (['inputs_list'], {}), '(inputs_list)\n', (9272, 9285), True, 'import mxnet as mx\n'), ((9342, 9371), 'mxnet.nd.array', 'mx.nd.array', (['token_types_list'], {}), '(token_types_list)\n', (9353, 9371), True, 'import mxnet as mx\n'), ((21210, 21221), 'os.getpid', 'os.getpid', ([], {}), '()\n', (21219, 21221), False, 'import os\n'), ((7631, 7661), 'mxnet.nd.array', 'mx.nd.array', (['valid_length_list'], {}), '(valid_length_list)\n', (7642, 7661), True, 'import mxnet as mx\n'), ((15096, 15126), 'mxnet.nd.array', 'mx.nd.array', (['valid_length_list'], {}), '(valid_length_list)\n', (15107, 15126), True, 'import mxnet as mx\n'), ((22264, 22275), 'os.getpid', 'os.getpid', ([], {}), '()\n', (22273, 22275), False, 'import os\n'), ((22673, 22684), 'os.getpid', 'os.getpid', ([], {}), '()\n', (22682, 22684), False, 'import os\n'), ((9429, 9459), 'mxnet.nd.array', 'mx.nd.array', (['valid_length_list'], {}), '(valid_length_list)\n', (9440, 9459), True, 'import mxnet as mx\n'), ((27498, 27509), 'numpy.array', 'np.array', (['o'], {}), '(o)\n', (27506, 27509), True, 'import numpy as np\n')]
|
# Author: <NAME> (<EMAIL>)
# Center for Machine Perception, Czech Technical University in Prague
"""Evaluation script for the BOP Challenge 2019."""
import os
import time
import argparse
import subprocess
import numpy as np
from bop_toolkit_lib import config
from bop_toolkit_lib import inout
from bop_toolkit_lib import misc
# PARAMETERS (some can be overwritten by the command line arguments below).
################################################################################
p = {
# Errors to calculate.
'errors': [
{
'n_top': -1,
'type': 'vsd',
'vsd_deltas': {
'hb': 15,
'icbin': 15,
'icmi': 15,
'itodd': 5,
'lm': 15,
'lmo': 15,
'ruapc': 15,
'tless': 15,
'tudl': 15,
'tyol': 15,
},
'vsd_taus': list(np.arange(0.05, 0.51, 0.05)),
'correct_th': [[th] for th in np.arange(0.05, 0.51, 0.05)]
},
{
'n_top': -1,
'type': 'mssd',
'correct_th': [[th] for th in np.arange(0.05, 0.51, 0.05)]
},
{
'n_top': -1,
'type': 'mspd',
'correct_th': [[th] for th in np.arange(5, 51, 5)]
},
],
# Minimum visible surface fraction of a valid GT pose.
'visib_gt_min': 0.1,
# See misc.get_symmetry_transformations().
'max_sym_disc_step': 0.01,
# Type of the renderer (used for the VSD pose error function).
'renderer_type': 'python', # Options: 'cpp', 'python'.
# Names of files with results for which to calculate the errors (assumed to be
# stored in folder config.eval_path). See docs/bop_challenge_2019.md for a
# description of the format. Example results can be found at:
# http://ptak.felk.cvut.cz/6DB/public/bop_sample_results/bop_challenge_2019/
'result_filenames': [
'/home_local/sund_ma/src/foreign_packages/bop/bop_results/bop_challenge_2019/hodan-iros15_lm-test.csv',
],
# File with a list of estimation targets to consider. The file is assumed to
# be stored in the dataset folder.
'targets_filename': 'test_targets_bop19.json',
}
################################################################################
# Command line arguments.
# ------------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--visib_gt_min', default=p['visib_gt_min'])
parser.add_argument('--max_sym_disc_step', default=p['max_sym_disc_step'])
parser.add_argument('--renderer_type', default=p['renderer_type'])
parser.add_argument('--result_filenames',
default=','.join(p['result_filenames']),
help='Comma-separated names of files with results.')
parser.add_argument('--targets_filename', default=p['targets_filename'])
args = parser.parse_args()
p['visib_gt_min'] = float(args.visib_gt_min)
p['max_sym_disc_step'] = float(args.max_sym_disc_step)
p['renderer_type'] = str(args.renderer_type)
p['result_filenames'] = args.result_filenames.split(',')
p['targets_filename'] = str(args.targets_filename)
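# Example invocation (the script path below is an assumption; the result file
# name reuses the default listed above):
#   python scripts/eval_bop19.py --renderer_type=python \
#     --result_filenames=hodan-iros15_lm-test.csv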
# Evaluation.
# ------------------------------------------------------------------------------
for result_filename in p['result_filenames']:
misc.log('===========')
misc.log('EVALUATING: {}'.format(result_filename))
misc.log('===========')
time_start = time.time()
aur = {}
for error in p['errors']:
# Calculate error of the pose estimates.
calc_errors_cmd = [
'python',
os.path.join('scripts', 'eval_calc_errors.py'),
'--n_top={}'.format(error['n_top']),
'--error_type={}'.format(error['type']),
'--result_filenames={}'.format(result_filename),
'--renderer_type={}'.format(p['renderer_type']),
'--targets_filename={}'.format(p['targets_filename']),
'--max_sym_disc_step={}'.format(p['max_sym_disc_step']),
'--skip_missing=1',
]
if error['type'] == 'vsd':
vsd_deltas_str = \
','.join(['{}:{}'.format(k, v) for k, v in error['vsd_deltas'].items()])
calc_errors_cmd += [
'--vsd_deltas={}'.format(vsd_deltas_str),
'--vsd_taus={}'.format(','.join(map(str, error['vsd_taus'])))
]
misc.log('Running: ' + ' '.join(calc_errors_cmd))
if subprocess.call(calc_errors_cmd) != 0:
      raise RuntimeError('Calculation of pose errors failed.')
# Name of the result and the dataset.
result_name = os.path.splitext(os.path.basename(result_filename))[0]
dataset = str(result_name.split('_')[1].split('-')[0])
# Paths (rel. to config.eval_path) to folders with calculated pose errors.
# For VSD, there is one path for each setting of tau. For the other pose
# error functions, there is only one path.
error_dir_paths = {}
if error['type'] == 'vsd':
for vsd_tau in error['vsd_taus']:
error_sign = misc.get_error_signature(
error['type'], error['n_top'], vsd_delta=error['vsd_deltas'][dataset],
vsd_tau=vsd_tau)
error_dir_paths[error_sign] = os.path.join(result_name, error_sign)
else:
error_sign = misc.get_error_signature(error['type'], error['n_top'])
error_dir_paths[error_sign] = os.path.join(result_name, error_sign)
# Recall scores for all settings of the threshold of correctness (and also
# of the misalignment tolerance tau in the case of VSD).
recalls = []
# Calculate performance scores.
for error_sign, error_dir_path in error_dir_paths.items():
for correct_th in error['correct_th']:
calc_scores_cmd = [
'python',
os.path.join('scripts', 'eval_calc_scores.py'),
'--error_dir_paths={}'.format(error_dir_path),
'--targets_filename={}'.format(p['targets_filename']),
'--visib_gt_min={}'.format(p['visib_gt_min'])
]
calc_scores_cmd += ['--correct_th_{}={}'.format(
error['type'], ','.join(map(str, correct_th)))]
misc.log('Running: ' + ' '.join(calc_scores_cmd))
if subprocess.call(calc_scores_cmd) != 0:
raise RuntimeError('Calculation of scores failed.')
# Path to file with calculated scores.
score_sign = misc.get_score_signature(correct_th, p['visib_gt_min'])
scores_filename = 'scores_{}.json'.format(score_sign)
scores_path = os.path.join(
config.eval_path, result_name, error_sign, scores_filename)
# Load the scores.
misc.log('Loading calculated scores from: {}'.format(scores_path))
scores = inout.load_json(scores_path)
recalls.append(scores['total_recall'])
    # Average recall (area under the recall surface over the thresholds):
aur[error['type']] = np.mean(recalls)
misc.log('Recall scores: {}'.format(' '.join(map(str, recalls))))
time_total = time.time() - time_start
misc.log('Evaluation of {} took {}s.'.format(result_filename, time_total))
# output final scores
err_types = [e['type'] for e in p['errors']]
for err_type in err_types:
misc.log('#### {} #### area under recall surface: {}'.format(err_type,
aur[err_type]))
if set(['vsd', 'mssd', 'mspd']).issubset(err_types):
test_set = os.path.basename(result_filename)
mean_error = np.mean([aur[err_type] for err_type in err_types])
misc.log('Average BOP score on {}: {}'.format(test_set, mean_error))
misc.log('Done.')
|
[
"argparse.ArgumentParser",
"os.path.basename",
"bop_toolkit_lib.misc.log",
"time.time",
"bop_toolkit_lib.misc.get_score_signature",
"numpy.mean",
"subprocess.call",
"numpy.arange",
"bop_toolkit_lib.inout.load_json",
"os.path.join",
"bop_toolkit_lib.misc.get_error_signature"
] |
[((2255, 2280), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2278, 2280), False, 'import argparse\n'), ((7234, 7251), 'bop_toolkit_lib.misc.log', 'misc.log', (['"""Done."""'], {}), "('Done.')\n", (7242, 7251), False, 'from bop_toolkit_lib import misc\n'), ((3163, 3186), 'bop_toolkit_lib.misc.log', 'misc.log', (['"""==========="""'], {}), "('===========')\n", (3171, 3186), False, 'from bop_toolkit_lib import misc\n'), ((3242, 3265), 'bop_toolkit_lib.misc.log', 'misc.log', (['"""==========="""'], {}), "('===========')\n", (3250, 3265), False, 'from bop_toolkit_lib import misc\n'), ((3282, 3293), 'time.time', 'time.time', ([], {}), '()\n', (3291, 3293), False, 'import time\n'), ((6579, 6595), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (6586, 6595), True, 'import numpy as np\n'), ((6682, 6693), 'time.time', 'time.time', ([], {}), '()\n', (6691, 6693), False, 'import time\n'), ((7058, 7091), 'os.path.basename', 'os.path.basename', (['result_filename'], {}), '(result_filename)\n', (7074, 7091), False, 'import os\n'), ((7109, 7159), 'numpy.mean', 'np.mean', (['[aur[err_type] for err_type in err_types]'], {}), '([aur[err_type] for err_type in err_types])\n', (7116, 7159), True, 'import numpy as np\n'), ((3426, 3472), 'os.path.join', 'os.path.join', (['"""scripts"""', '"""eval_calc_errors.py"""'], {}), "('scripts', 'eval_calc_errors.py')\n", (3438, 3472), False, 'import os\n'), ((4184, 4216), 'subprocess.call', 'subprocess.call', (['calc_errors_cmd'], {}), '(calc_errors_cmd)\n', (4199, 4216), False, 'import subprocess\n'), ((5013, 5068), 'bop_toolkit_lib.misc.get_error_signature', 'misc.get_error_signature', (["error['type']", "error['n_top']"], {}), "(error['type'], error['n_top'])\n", (5037, 5068), False, 'from bop_toolkit_lib import misc\n'), ((5105, 5142), 'os.path.join', 'os.path.join', (['result_name', 'error_sign'], {}), '(result_name, error_sign)\n', (5117, 5142), False, 'import os\n'), ((831, 858), 'numpy.arange', 'np.arange', (['(0.05)', '(0.51)', '(0.05)'], {}), '(0.05, 0.51, 0.05)\n', (840, 858), True, 'import numpy as np\n'), ((4356, 4389), 'os.path.basename', 'os.path.basename', (['result_filename'], {}), '(result_filename)\n', (4372, 4389), False, 'import os\n'), ((4774, 4891), 'bop_toolkit_lib.misc.get_error_signature', 'misc.get_error_signature', (["error['type']", "error['n_top']"], {'vsd_delta': "error['vsd_deltas'][dataset]", 'vsd_tau': 'vsd_tau'}), "(error['type'], error['n_top'], vsd_delta=error[\n 'vsd_deltas'][dataset], vsd_tau=vsd_tau)\n", (4798, 4891), False, 'from bop_toolkit_lib import misc\n'), ((4946, 4983), 'os.path.join', 'os.path.join', (['result_name', 'error_sign'], {}), '(result_name, error_sign)\n', (4958, 4983), False, 'import os\n'), ((6097, 6152), 'bop_toolkit_lib.misc.get_score_signature', 'misc.get_score_signature', (['correct_th', "p['visib_gt_min']"], {}), "(correct_th, p['visib_gt_min'])\n", (6121, 6152), False, 'from bop_toolkit_lib import misc\n'), ((6238, 6310), 'os.path.join', 'os.path.join', (['config.eval_path', 'result_name', 'error_sign', 'scores_filename'], {}), '(config.eval_path, result_name, error_sign, scores_filename)\n', (6250, 6310), False, 'import os\n'), ((6442, 6470), 'bop_toolkit_lib.inout.load_json', 'inout.load_json', (['scores_path'], {}), '(scores_path)\n', (6457, 6470), False, 'from bop_toolkit_lib import inout\n'), ((897, 924), 'numpy.arange', 'np.arange', (['(0.05)', '(0.51)', '(0.05)'], {}), '(0.05, 0.51, 0.05)\n', (906, 924), True, 'import numpy as np\n'), ((1016, 1043), 
'numpy.arange', 'np.arange', (['(0.05)', '(0.51)', '(0.05)'], {}), '(0.05, 0.51, 0.05)\n', (1025, 1043), True, 'import numpy as np\n'), ((1135, 1154), 'numpy.arange', 'np.arange', (['(5)', '(51)', '(5)'], {}), '(5, 51, 5)\n', (1144, 1154), True, 'import numpy as np\n'), ((5505, 5551), 'os.path.join', 'os.path.join', (['"""scripts"""', '"""eval_calc_scores.py"""'], {}), "('scripts', 'eval_calc_scores.py')\n", (5517, 5551), False, 'import os\n'), ((5927, 5959), 'subprocess.call', 'subprocess.call', (['calc_scores_cmd'], {}), '(calc_scores_cmd)\n', (5942, 5959), False, 'import subprocess\n')]
|
# ---
# jupyter:
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# %% [markdown]
# # The bike rides dataset
#
# In this notebook, we will present the "Bike Ride" dataset. This dataset is
# located in the directory `datasets` in a comma separated values (CSV) format.
#
# We open this dataset using pandas.
# %%
import pandas as pd
cycling = pd.read_csv("../datasets/bike_rides.csv")
cycling.head()
# %% [markdown]
# The first column `timestamp` contains information regarding the time and
# date of a record, while the other columns contain the numerical values of
# specific measurements. Let's check the data types of the columns in more
# detail.
# %%
cycling.info()
# %% [markdown]
# Indeed, the CSV format stores data as text. Pandas tries to infer numerical
# types by default, which is why all features but `timestamp` are encoded as
# floating point values. However, we see that `timestamp` is stored as an
# `object` column, meaning that the data in this column are stored as `str`
# rather than a specialized `datetime` data type.
#
# In fact, one needs to set an option so that pandas is directed to infer this
# data type when opening the file. In addition, we want to use `timestamp` as
# an index. Thus, we can reopen the file with some extra arguments to help
# pandas read our CSV file properly.
# %%
cycling = pd.read_csv("../datasets/bike_rides.csv", index_col=0,
parse_dates=True)
cycling.index.name = ""
cycling.head()
# %%
cycling.info()
# %% [markdown]
# By asking pandas to parse the dates, we obtain a `DatetimeIndex` that is
# really handy when filtering data based on dates.
#
# We can now have a look at the data stored in our dataframe. It will help us
# to frame the data science problem that we try to solve.
#
# The records correspond to information derived from GPS recordings of a
# cyclist (`speed`, `acceleration`, `slope`) and some extra information
# acquired from other sensors: `heart-rate`, which corresponds to the number
# of beats per minute of the cyclist's heart, `cadence`, which is the rate at
# which the cyclist is turning the pedals, and `power`, which corresponds to
# the work required by the cyclist to move forward.
#
# Power might be a slightly abstract quantity, so let's give a more intuitive
# explanation.
#
# Let's take the example of a soup blender used to blend vegetables. The
# engine of this blender develops an instantaneous power of ~300 Watts to
# blend the vegetables. Here, our cyclist plays the role of the blender's
# engine (with the difference that an average cyclist develops an
# instantaneous power of around ~150 Watts), and blending the vegetables
# corresponds to moving the cyclist's bike forward.
#
# Professional cyclists use power to calibrate their training and to track the
# energy spent during a ride. For instance, riding at a higher power requires
# more energy, and thus you need to provide resources to create this energy.
# With humans, this resource is food. For our soup blender, this resource can
# be uranium, petrol, natural gas, coal, etc. Our body serves as a power plant
# that transforms these resources into energy.
#
# The issue with measuring power is the cost of the sensor: a cycling power
# meter. The cost of such a sensor varies from $400 to $1,000. Thus, our data
# science problem is easy to state: can we predict the instantaneous cyclist
# power from other (cheaper) sensors?
# %%
target_name = "power"
data, target = cycling.drop(columns=target_name), cycling[target_name]
# %% [markdown]
# We can have a first look at the target distribution.
# %%
import matplotlib.pyplot as plt
target.plot.hist(bins=50, edgecolor="black")
plt.xlabel("Power (W)")
# %% [markdown]
# We see a peak at 0 Watts; it corresponds to whenever our cyclist does not
# pedal (descent, stopped). On average, this cyclist delivers a power of
# around ~200 Watts. We also see a long tail from ~300 Watts to ~400 Watts.
# You can think of this range of data as corresponding to the effort a cyclist
# trains to reproduce in order to break away in the final kilometers of a
# cycling race. However, this is costly for the human body and no one can
# cruise at this power output.
#
# Now, let's have a look at the data.
# %%
data.head()
# %% [markdown]
# We can first have a closer look to the index of the dataframe.
# %%
data.index
# %% [markdown]
# We see that records are acquired every second.
# %%
data.index.min(), data.index.max()
# %% [markdown]
# The starting date is August 18, 2020 and the ending date is
# September 13, 2020. However, it is obvious that our cyclist did not ride
# every second between these dates. Indeed, only a couple of dates should be
# present in the dataframe, corresponding to the number of cycling rides.
# %%
data.index.normalize().nunique()
# %% [markdown]
# Indeed, we have only four different dates corresponding to four rides. Let's
# extract only the first ride of August 18, 2020.
# %%
date_first_ride = "2020-08-18"
cycling_ride = cycling.loc[date_first_ride]
data_ride, target_ride = data.loc[date_first_ride], target.loc[date_first_ride]
# %%
data_ride.plot()
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
_ = plt.title("Sensor values for different cyclist measurements")
# %% [markdown]
# Since the unit and range of each measurement (feature) is different, it is
# rather difficult to interpret the plot. Also, the high temporal resolution
# makes it difficult to make any observation. We could resample the data to
# get a smoother visualization.
# %%
data_ride.resample("60S").mean().plot()
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
_ = plt.title("Sensor values for different cyclist measurements")
# %% [markdown]
# We can check the range of the different features:
# %%
axs = data_ride.hist(figsize=(10, 12), bins=50, edgecolor="black", grid=False)
# add the units to the plots
units = ["beats per minute", "rotations per minute", "meters per second",
"meters per second squared", "%"]
for unit, ax in zip(units, axs.ravel()):
ax.set_xlabel(unit)
plt.subplots_adjust(hspace=0.6)
# %% [markdown]
# From these plots, we can see some interesting information: the cyclist
# spends some time without pedaling. These samples should be associated with
# a null power. We also see that the slope has large extrema.
#
# Let's make a pair plot on a subset of data samples to see if we can confirm
# some of these intuitions.
# %%
import numpy as np
rng = np.random.RandomState(0)
indices = rng.choice(np.arange(cycling_ride.shape[0]), size=500, replace=False)
# %%
subset = cycling_ride.iloc[indices].copy()
# Quantize the target and keep the midpoint for each interval
subset["power"] = pd.qcut(subset["power"], 6, retbins=False)
subset["power"] = subset["power"].apply(lambda x: x.mid)
# %%
import seaborn as sns
_ = sns.pairplot(data=subset, hue="power", palette="viridis")
# %% [markdown]
# Indeed, we see that a low cadence is associated with low power. We can also
# see a link between a higher slope / higher heart-rate and higher power: a
# cyclist needs to develop more energy to go uphill, which enforces a stronger
# physiological stimulus on the body. We can confirm this intuition by looking
# at the interaction between the slope and the speed: a lower speed with a
# higher slope is usually associated with higher power.
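# %% [markdown]
# As a quick sanity check, we can look at this interaction directly by
# plotting the speed against the slope and coloring the points by the
# quantized power (assuming the columns are indeed named `speed` and `slope`).
# %%
_ = sns.scatterplot(data=subset, x="speed", y="slope", hue="power",
                    palette="viridis")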
|
[
"matplotlib.pyplot.title",
"pandas.read_csv",
"matplotlib.pyplot.legend",
"numpy.random.RandomState",
"numpy.arange",
"seaborn.pairplot",
"matplotlib.pyplot.subplots_adjust",
"pandas.qcut",
"matplotlib.pyplot.xlabel"
] |
[((367, 408), 'pandas.read_csv', 'pd.read_csv', (['"""../datasets/bike_rides.csv"""'], {}), "('../datasets/bike_rides.csv')\n", (378, 408), True, 'import pandas as pd\n'), ((1387, 1459), 'pandas.read_csv', 'pd.read_csv', (['"""../datasets/bike_rides.csv"""'], {'index_col': '(0)', 'parse_dates': '(True)'}), "('../datasets/bike_rides.csv', index_col=0, parse_dates=True)\n", (1398, 1459), True, 'import pandas as pd\n'), ((3717, 3740), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Power (W)"""'], {}), "('Power (W)')\n", (3727, 3740), True, 'import matplotlib.pyplot as plt\n'), ((5180, 5234), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.05, 1)', 'loc': '"""upper left"""'}), "(bbox_to_anchor=(1.05, 1), loc='upper left')\n", (5190, 5234), True, 'import matplotlib.pyplot as plt\n'), ((5239, 5300), 'matplotlib.pyplot.title', 'plt.title', (['"""Sensor values for different cyclist measurements"""'], {}), "('Sensor values for different cyclist measurements')\n", (5248, 5300), True, 'import matplotlib.pyplot as plt\n'), ((5625, 5679), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.05, 1)', 'loc': '"""upper left"""'}), "(bbox_to_anchor=(1.05, 1), loc='upper left')\n", (5635, 5679), True, 'import matplotlib.pyplot as plt\n'), ((5684, 5745), 'matplotlib.pyplot.title', 'plt.title', (['"""Sensor values for different cyclist measurements"""'], {}), "('Sensor values for different cyclist measurements')\n", (5693, 5745), True, 'import matplotlib.pyplot as plt\n'), ((6111, 6142), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.6)'}), '(hspace=0.6)\n', (6130, 6142), True, 'import matplotlib.pyplot as plt\n'), ((6516, 6540), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (6537, 6540), True, 'import numpy as np\n'), ((6750, 6792), 'pandas.qcut', 'pd.qcut', (["subset['power']", '(6)'], {'retbins': '(False)'}), "(subset['power'], 6, retbins=False)\n", (6757, 6792), True, 'import pandas as pd\n'), ((6883, 6940), 'seaborn.pairplot', 'sns.pairplot', ([], {'data': 'subset', 'hue': '"""power"""', 'palette': '"""viridis"""'}), "(data=subset, hue='power', palette='viridis')\n", (6895, 6940), True, 'import seaborn as sns\n'), ((6562, 6594), 'numpy.arange', 'np.arange', (['cycling_ride.shape[0]'], {}), '(cycling_ride.shape[0])\n', (6571, 6594), True, 'import numpy as np\n')]
|
import logging
import random
import numpy as np
# imports for deformed slice
from skimage.draw import line
from scipy.ndimage.measurements import label
from scipy.ndimage.interpolation import map_coordinates
from scipy.ndimage.morphology import binary_dilation
from gunpowder.batch_request import BatchRequest
from gunpowder.coordinate import Coordinate
from .batch_filter import BatchFilter
logger = logging.getLogger(__name__)
class DefectAugment(BatchFilter):
'''Augment intensity arrays section-wise with artifacts like missing
sections, low-contrast sections, by blending in artifacts drawn from a
separate source, or by deforming a section.
Args:
intensities (:class:`ArrayKey`):
The key of the array of intensities to modify.
prob_missing(``float``):
prob_low_contrast(``float``):
prob_artifact(``float``):
prob_deform(``float``):
Probabilities of having a missing section, low-contrast section, an
artifact (see param ``artifact_source``) or a deformed slice. The
sum should not exceed 1. Values in missing sections will be set to
0.
contrast_scale (``float``, optional):
By how much to scale the intensities for a low-contrast section,
used if ``prob_low_contrast`` > 0.
artifact_source (class:`BatchProvider`, optional):
A gunpowder batch provider that delivers intensities (via
:class:`ArrayKey` ``artifacts``) and an alpha mask (via
:class:`ArrayKey` ``artifacts_mask``), used if ``prob_artifact`` > 0.
artifacts(:class:`ArrayKey`, optional):
The key to query ``artifact_source`` for to get the intensities
of the artifacts.
artifacts_mask(:class:`ArrayKey`, optional):
The key to query ``artifact_source`` for to get the alpha mask
of the artifacts to blend them with ``intensities``.
deformation_strength (``int``, optional):
Strength of the slice deformation in voxels, used if
``prob_deform`` > 0. The deformation models a fold by shifting the
section contents towards a randomly oriented line in the section.
The line itself will be drawn with a value of 0.
axis (``int``, optional):
Along which axis sections are cut.
'''
def __init__(
self,
intensities,
prob_missing=0.05,
prob_low_contrast=0.05,
prob_artifact=0.0,
prob_deform=0.0,
contrast_scale=0.1,
artifact_source=None,
artifacts=None,
artifacts_mask=None,
deformation_strength=20,
axis=0):
self.intensities = intensities
self.prob_missing = prob_missing
self.prob_low_contrast = prob_low_contrast
self.prob_artifact = prob_artifact
self.prob_deform = prob_deform
self.contrast_scale = contrast_scale
self.artifact_source = artifact_source
self.artifacts = artifacts
self.artifacts_mask = artifacts_mask
self.deformation_strength = deformation_strength
self.axis = axis
def setup(self):
if self.artifact_source is not None:
self.artifact_source.setup()
def teardown(self):
if self.artifact_source is not None:
self.artifact_source.teardown()
# send roi request to data-source upstream
def prepare(self, request):
random.seed(request.random_seed)
deps = BatchRequest()
# we prepare the augmentations, by determining which slices
# will be augmented by which method
# If one of the slices is augmented with 'deform',
# we prepare these trafos already
# and request a bigger roi from upstream
prob_missing_threshold = self.prob_missing
prob_low_contrast_threshold = prob_missing_threshold + self.prob_low_contrast
prob_artifact_threshold = prob_low_contrast_threshold + self.prob_artifact
prob_deform_slice = prob_artifact_threshold + self.prob_deform
spec = request[self.intensities].copy()
roi = spec.roi
logger.debug("downstream request ROI is %s" % roi)
raw_voxel_size = self.spec[self.intensities].voxel_size
# store the mapping slice to augmentation type in a dict
self.slice_to_augmentation = {}
# store the transformations for deform slice
self.deform_slice_transformations = {}
for c in range((roi / raw_voxel_size).get_shape()[self.axis]):
r = random.random()
if r < prob_missing_threshold:
logger.debug("Zero-out " + str(c))
self.slice_to_augmentation[c] = 'zero_out'
elif r < prob_low_contrast_threshold:
logger.debug("Lower contrast " + str(c))
self.slice_to_augmentation[c] = 'lower_contrast'
elif r < prob_artifact_threshold:
logger.debug("Add artifact " + str(c))
self.slice_to_augmentation[c] = 'artifact'
elif r < prob_deform_slice:
logger.debug("Add deformed slice " + str(c))
self.slice_to_augmentation[c] = 'deformed_slice'
# get the shape of a single slice
slice_shape = (roi / raw_voxel_size).get_shape()
slice_shape = slice_shape[:self.axis] + slice_shape[self.axis+1:]
self.deform_slice_transformations[c] = self.__prepare_deform_slice(slice_shape)
# prepare transformation and
# request bigger upstream roi for deformed slice
if 'deformed_slice' in self.slice_to_augmentation.values():
# create roi sufficiently large to feed deformation
logger.debug("before growth: %s" % spec.roi)
growth = Coordinate(
tuple(0 if d == self.axis else raw_voxel_size[d] * self.deformation_strength
for d in range(spec.roi.dims()))
)
logger.debug("growing request by %s" % str(growth))
source_roi = roi.grow(growth, growth)
            # update request ROI to get all voxels necessary to perform
            # the transformation
spec.roi = source_roi
logger.debug("upstream request roi is %s" % spec.roi)
        deps[self.intensities] = spec
        return deps
def process(self, batch, request):
assert batch.get_total_roi().dims() == 3, "defectaugment works on 3d batches only"
raw = batch.arrays[self.intensities]
raw_voxel_size = self.spec[self.intensities].voxel_size
for c, augmentation_type in self.slice_to_augmentation.items():
section_selector = tuple(
slice(None if d != self.axis else c, None if d != self.axis else c+1)
for d in range(raw.spec.roi.dims())
)
if augmentation_type == 'zero_out':
raw.data[section_selector] = 0
            elif augmentation_type == 'lower_contrast':
section = raw.data[section_selector]
mean = section.mean()
section -= mean
section *= self.contrast_scale
section += mean
raw.data[section_selector] = section
elif augmentation_type == 'artifact':
section = raw.data[section_selector]
alpha_voxel_size = self.artifact_source.spec[self.artifacts_mask].voxel_size
assert raw_voxel_size == alpha_voxel_size, ("Can only alpha blend RAW with "
"ALPHA_MASK if both have the same "
"voxel size")
artifact_request = BatchRequest()
artifact_request.add(self.artifacts, Coordinate(section.shape) * raw_voxel_size, voxel_size=raw_voxel_size)
artifact_request.add(self.artifacts_mask, Coordinate(section.shape) * alpha_voxel_size, voxel_size=raw_voxel_size)
logger.debug("Requesting artifact batch %s", artifact_request)
artifact_batch = self.artifact_source.request_batch(artifact_request)
artifact_alpha = artifact_batch.arrays[self.artifacts_mask].data
artifact_raw = artifact_batch.arrays[self.artifacts].data
assert artifact_alpha.dtype == np.float32
assert artifact_alpha.min() >= 0.0
assert artifact_alpha.max() <= 1.0
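                # alpha-blend the artifact into the section:
                # raw * (1 - alpha) + artifact * alpha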
raw.data[section_selector] = section*(1.0 - artifact_alpha) + artifact_raw*artifact_alpha
elif augmentation_type == 'deformed_slice':
section = raw.data[section_selector].squeeze()
                # use cubic interpolation (order 3) if the spec is interpolatable, else order 0 (nearest)
interpolation = 3 if self.spec[self.intensities].interpolatable else 0
# load the deformation fields that were prepared for this slice
flow_x, flow_y, line_mask = self.deform_slice_transformations[c]
# apply the deformation fields
shape = section.shape
section = map_coordinates(
section, (flow_y, flow_x), mode='constant', order=interpolation
).reshape(shape)
# things can get smaller than 0 at the boundary, so we clip
section = np.clip(section, 0., 1.)
# zero-out data below the line mask
section[line_mask] = 0.
raw.data[section_selector] = section
# in case we needed to change the ROI due to a deformation augment,
# restore original ROI and crop the array data
if 'deformed_slice' in self.slice_to_augmentation.values():
old_roi = request[self.intensities].roi
logger.debug("resetting roi to %s" % old_roi)
crop = tuple(
slice(None) if d == self.axis else slice(self.deformation_strength, -self.deformation_strength)
for d in range(raw.spec.roi.dims())
)
raw.data = raw.data[crop]
raw.spec.roi = old_roi
def __prepare_deform_slice(self, slice_shape):
# grow slice shape by 2 x deformation strength
grow_by = 2 * self.deformation_strength
shape = (slice_shape[0] + grow_by, slice_shape[1] + grow_by)
# randomly choose fixed x or fixed y with p = 1/2
fixed_x = random.random() < .5
if fixed_x:
x0, y0 = 0, np.random.randint(1, shape[1] - 2)
x1, y1 = shape[0] - 1, np.random.randint(1, shape[1] - 2)
else:
x0, y0 = np.random.randint(1, shape[0] - 2), 0
x1, y1 = np.random.randint(1, shape[0] - 2), shape[1] - 1
## generate the mask of the line that should be blacked out
line_mask = np.zeros(shape, dtype='bool')
rr, cc = line(x0, y0, x1, y1)
line_mask[rr, cc] = 1
# generate vectorfield pointing towards the line to compress the image
# first we get the unit vector representing the line
line_vector = np.array([x1 - x0, y1 - y0], dtype='float32')
line_vector /= np.linalg.norm(line_vector)
# next, we generate the normal to the line
normal_vector = np.zeros_like(line_vector)
normal_vector[0] = - line_vector[1]
normal_vector[1] = line_vector[0]
# make meshgrid
x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))
# generate the vector field
flow_x, flow_y = np.zeros(shape), np.zeros(shape)
# find the 2 components where coordinates are bigger / smaller than the line
# to apply normal vector in the correct direction
components, n_components = label(np.logical_not(line_mask).view('uint8'))
assert n_components == 2, "%i" % n_components
neg_val = components[0, 0] if fixed_x else components[-1, -1]
pos_val = components[-1, -1] if fixed_x else components[0, 0]
flow_x[components == pos_val] = self.deformation_strength * normal_vector[1]
flow_y[components == pos_val] = self.deformation_strength * normal_vector[0]
flow_x[components == neg_val] = - self.deformation_strength * normal_vector[1]
flow_y[components == neg_val] = - self.deformation_strength * normal_vector[0]
# generate the flow fields
flow_x, flow_y = (x + flow_x).reshape(-1, 1), (y + flow_y).reshape(-1, 1)
# dilate the line mask
line_mask = binary_dilation(line_mask, iterations=10)
return flow_x, flow_y, line_mask
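# A minimal usage sketch (keys, probabilities and ROI sizes below are
# illustrative assumptions, not part of this module): DefectAugment is meant
# to be chained into a gunpowder pipeline after a provider of the raw
# intensities.
#
#   import gunpowder as gp
#
#   raw = gp.ArrayKey('RAW')
#   pipeline = source + DefectAugment(raw, prob_missing=0.05, prob_deform=0.03)
#
#   request = gp.BatchRequest()
#   request[raw] = gp.ArraySpec(roi=gp.Roi((0, 0, 0), (40, 400, 400)))
#
#   with gp.build(pipeline):
#       batch = pipeline.request_batch(request)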
|
[
"numpy.zeros_like",
"gunpowder.batch_request.BatchRequest",
"scipy.ndimage.morphology.binary_dilation",
"numpy.logical_not",
"numpy.zeros",
"skimage.draw.line",
"numpy.clip",
"scipy.ndimage.interpolation.map_coordinates",
"random.random",
"gunpowder.coordinate.Coordinate",
"numpy.random.randint",
"random.seed",
"numpy.array",
"numpy.linalg.norm",
"numpy.arange",
"logging.getLogger"
] |
[((404, 431), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (421, 431), False, 'import logging\n'), ((3550, 3582), 'random.seed', 'random.seed', (['request.random_seed'], {}), '(request.random_seed)\n', (3561, 3582), False, 'import random\n'), ((3598, 3612), 'gunpowder.batch_request.BatchRequest', 'BatchRequest', ([], {}), '()\n', (3610, 3612), False, 'from gunpowder.batch_request import BatchRequest\n'), ((10997, 11026), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': '"""bool"""'}), "(shape, dtype='bool')\n", (11005, 11026), True, 'import numpy as np\n'), ((11044, 11064), 'skimage.draw.line', 'line', (['x0', 'y0', 'x1', 'y1'], {}), '(x0, y0, x1, y1)\n', (11048, 11064), False, 'from skimage.draw import line\n'), ((11258, 11303), 'numpy.array', 'np.array', (['[x1 - x0, y1 - y0]'], {'dtype': '"""float32"""'}), "([x1 - x0, y1 - y0], dtype='float32')\n", (11266, 11303), True, 'import numpy as np\n'), ((11327, 11354), 'numpy.linalg.norm', 'np.linalg.norm', (['line_vector'], {}), '(line_vector)\n', (11341, 11354), True, 'import numpy as np\n'), ((11430, 11456), 'numpy.zeros_like', 'np.zeros_like', (['line_vector'], {}), '(line_vector)\n', (11443, 11456), True, 'import numpy as np\n'), ((12666, 12707), 'scipy.ndimage.morphology.binary_dilation', 'binary_dilation', (['line_mask'], {'iterations': '(10)'}), '(line_mask, iterations=10)\n', (12681, 12707), False, 'from scipy.ndimage.morphology import binary_dilation\n'), ((4656, 4671), 'random.random', 'random.random', ([], {}), '()\n', (4669, 4671), False, 'import random\n'), ((10595, 10610), 'random.random', 'random.random', ([], {}), '()\n', (10608, 10610), False, 'import random\n'), ((11595, 11614), 'numpy.arange', 'np.arange', (['shape[1]'], {}), '(shape[1])\n', (11604, 11614), True, 'import numpy as np\n'), ((11616, 11635), 'numpy.arange', 'np.arange', (['shape[0]'], {}), '(shape[0])\n', (11625, 11635), True, 'import numpy as np\n'), ((11698, 11713), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (11706, 11713), True, 'import numpy as np\n'), ((11715, 11730), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (11723, 11730), True, 'import numpy as np\n'), ((10660, 10694), 'numpy.random.randint', 'np.random.randint', (['(1)', '(shape[1] - 2)'], {}), '(1, shape[1] - 2)\n', (10677, 10694), True, 'import numpy as np\n'), ((10730, 10764), 'numpy.random.randint', 'np.random.randint', (['(1)', '(shape[1] - 2)'], {}), '(1, shape[1] - 2)\n', (10747, 10764), True, 'import numpy as np\n'), ((10800, 10834), 'numpy.random.randint', 'np.random.randint', (['(1)', '(shape[0] - 2)'], {}), '(1, shape[0] - 2)\n', (10817, 10834), True, 'import numpy as np\n'), ((10859, 10893), 'numpy.random.randint', 'np.random.randint', (['(1)', '(shape[0] - 2)'], {}), '(1, shape[0] - 2)\n', (10876, 10893), True, 'import numpy as np\n'), ((11916, 11941), 'numpy.logical_not', 'np.logical_not', (['line_mask'], {}), '(line_mask)\n', (11930, 11941), True, 'import numpy as np\n'), ((7868, 7882), 'gunpowder.batch_request.BatchRequest', 'BatchRequest', ([], {}), '()\n', (7880, 7882), False, 'from gunpowder.batch_request import BatchRequest\n'), ((9534, 9560), 'numpy.clip', 'np.clip', (['section', '(0.0)', '(1.0)'], {}), '(section, 0.0, 1.0)\n', (9541, 9560), True, 'import numpy as np\n'), ((7936, 7961), 'gunpowder.coordinate.Coordinate', 'Coordinate', (['section.shape'], {}), '(section.shape)\n', (7946, 7961), False, 'from gunpowder.coordinate import Coordinate\n'), ((8065, 8090), 'gunpowder.coordinate.Coordinate', 'Coordinate', 
(['section.shape'], {}), '(section.shape)\n', (8075, 8090), False, 'from gunpowder.coordinate import Coordinate\n'), ((9297, 9382), 'scipy.ndimage.interpolation.map_coordinates', 'map_coordinates', (['section', '(flow_y, flow_x)'], {'mode': '"""constant"""', 'order': 'interpolation'}), "(section, (flow_y, flow_x), mode='constant', order=interpolation\n )\n", (9312, 9382), False, 'from scipy.ndimage.interpolation import map_coordinates\n')]
|
import numpy as np
import scipy.ndimage as ndimage
import matplotlib.pyplot as plt
from skimage import data, color, img_as_float
from tkinter import *
from PIL import Image
from graph_cut import GraphCut
from graph_cut_gui import GraphCutGui
class GraphCutController:
def __init__(self):
self.__init_view()
def __init_view(self):
root = Tk()
root.geometry("700x500")
self._view = GraphCutGui(self, root)
root.mainloop()
# TODO: TASK 2.1
def __get_color_histogram(self, image, seed, hist_res):
"""
        Compute a color histogram based on selected points from an image
        :param image: color image
        :param seed: Nx2 matrix containing the positions of pixels which will be used to compute the color histogram
        :param hist_res: resolution of the histogram
:return hist: color histogram
"""
seed_r_values = image[seed[:, 1], seed[:, 0], 0]
seed_g_values = image[seed[:, 1], seed[:, 0], 1]
seed_b_values = image[seed[:, 1], seed[:, 0], 2]
data = np.transpose(np.vstack((seed_r_values, seed_g_values, seed_b_values)))
histogram, _ = np.histogramdd(data, hist_res, range=[(0, 255), (0, 255), (0, 255)])
# w = 2*int(truncate*sigma + 0.5) + 1
# sigma = 0.65 is taken from MATLAB default, truncate = 4 in scipy default which results in w = 7
smoothed_histogram = ndimage.gaussian_filter(histogram, 0.85)
normalized_smoothed_histogram = smoothed_histogram / np.sum(smoothed_histogram.ravel())
return normalized_smoothed_histogram
# TODO: TASK 2.2
# Hint: Set K very high using numpy's inf parameter
def __get_unaries(self, image, lambda_param, hist_fg, hist_bg, seed_fg, seed_bg):
"""
:param image: color image as a numpy array
        :param lambda_param: lambda as set by the user
:param hist_fg: foreground color histogram
:param hist_bg: background color histogram
:param seed_fg: pixels marked as foreground by the user
:param seed_bg: pixels marked as background by the user
        :return: unaries : Nx2 numpy array containing the unary cost for every pixel in I (N = number of pixels in I)
"""
print("Calcuating unaries...")
hist_step = 255.0 / 32.0
image_rows = np.size(image, 0)
image_cols = np.size(image, 1)
unaries = np.empty((image_rows, image_cols, 2))
for i in range(0, image_rows):
for j in range(0, image_cols):
pixel = image[i, j, :]
pixel_bins = np.floor(pixel / hist_step).astype(int)
pixel_bins[pixel_bins == 32] = 31
cost_fg = -np.log(hist_fg[pixel_bins[0], pixel_bins[1], pixel_bins[2]] + 1e-10)
cost_bg = -np.log(hist_bg[pixel_bins[0], pixel_bins[1], pixel_bins[2]] + 1e-10)
unaries[i, j, 1] = lambda_param * cost_bg
unaries[i, j, 0] = lambda_param * cost_fg
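        # Hard constraints: seed pixels marked by the user get an infinite
        # cost for the opposite label, so the min-cut can never relabel them.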
for j, i in seed_fg:
unaries[i, j, 1] = np.inf
unaries[i, j, 0] = 0
for j, i in seed_bg:
unaries[i, j, 1] = 0
unaries[i, j, 0] = np.inf
unariesN = np.reshape(unaries, (-1, 2))
return unariesN
# TASK 2.3
def __get_pairwise(self, image, sigma):
"""
Get pairwise terms for each pairs of pixels on image
:param image: color image as a numpy array
:param sigma: ad-hoc cost function parameter
:return: pairwise : ivj (triplet or coo) formatted list of lists containing the pairwise costs for image
"""
def get_neighbours(i, j, image_rows, image_cols):
neighbours = np.array([[i - 1, j - 1], # upper left
[i - 1, j], # upper
[i - 1, j + 1], # upper right
[i, j + 1], # right
[i + 1, j + 1], # lower right
[i + 1, j], # lower
[i + 1, j - 1], # lower left
[i, j - 1]]) # left
is_boundary_1 = 0 <= neighbours[:, 0]
is_boundary_2 = image_rows > neighbours[:, 0]
is_boundary_3 = 0 <= neighbours[:, 1]
is_boundary_4 = image_cols > neighbours[:, 1]
valid = np.logical_and(np.logical_and(is_boundary_1, is_boundary_2), np.logical_and(is_boundary_3, is_boundary_4))
return neighbours[valid, :]
print("Calcuating pairwises...")
image_rows = np.size(image, 0)
image_cols = np.size(image, 1)
pairwise = []
for i in range(0, image_rows):
for j in range(0, image_cols):
current_coordinates = np.array([i, j])
current_index = i * image_cols + j
current_pixel = image[i, j].astype(float)
neighbour_coordinates = get_neighbours(i, j, image_rows, image_cols)
neighbour_indices = neighbour_coordinates[:, 0] * image_cols + neighbour_coordinates[:, 1]
neighbour_pixels = image[neighbour_coordinates[:, 0], neighbour_coordinates[:, 1]].astype(float)
pixel_differences = np.subtract(neighbour_pixels, current_pixel)
pixel_distances = np.linalg.norm(pixel_differences, axis=1)
spatial_differences = current_coordinates - neighbour_coordinates
spatial_differences = np.linalg.norm(spatial_differences, axis=1)
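                # ad-hoc boundary cost: exp(-||I_p - I_q||^2 / (2 * sigma^2)) / dist(p, q),
                # i.e. cutting between similar, nearby pixels is expensive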
neighbour_costs = np.divide(np.exp(-np.square(pixel_distances) / (2 * np.square(sigma))),
spatial_differences)
for k in range(0, np.size(neighbour_indices.ravel())):
neighbour_index = neighbour_indices[k]
cost = neighbour_costs[k]
pairwise.append([current_index, neighbour_index, 0, cost, 0, 0])
if current_index%1000 == 0:
print(current_index, '/', image_rows*image_cols)
pairwise = np.asarray(pairwise)
return pairwise
# TODO TASK 2.4 get segmented image to the view
def __get_segmented_image(self, image, labels, background=None):
"""
Return a segmented image, as well as an image with new background
:param image: color image as a numpy array
        :param labels: labels as a numpy array
:param background: color image as a numpy array
:return image_segmented: image as a numpy array with red foreground, blue background
:return image_with_background: image as a numpy array with changed background if any (None if not)
"""
image_rows = np.size(image, 0)
image_cols = np.size(image, 1)
not_labels = np.logical_not(labels)
mask = np.zeros((image_rows, image_cols, 3), dtype=np.uint8)
mask[not_labels, :] = np.array([255, 0, 0], dtype=np.uint8)
mask[labels, :] = np.array([0, 0, 255])
image_PIL = Image.fromarray(image)
mask_PIL = Image.fromarray(mask)
result_PIL = Image.blend(image_PIL, mask_PIL, 0.6)
segmented_image = np.array(result_PIL)
if background is not None:
mask = np.zeros((image_rows, image_cols), dtype=np.bool)
mask[np.logical_not(labels)] = np.bool(1)
result = np.copy(background[0:image_rows, 0:image_cols, :])
result.setflags(write=1)
result[not_labels, 0:3] = image[not_labels, 0:3]
segmented_image_with_background = result
else:
segmented_image_with_background = None
return segmented_image, segmented_image_with_background
def segment_image(self, image, seed_fg, seed_bg, lambda_value, background=None):
image_array = np.asarray(image)
background_array = None
if background:
background = background.convert("RGB")
background_array = np.asarray(background)
seed_fg = np.array(seed_fg)
seed_bg = np.array(seed_bg)
height, width = np.shape(image_array)[0:2]
num_pixels = height * width
# TASK 2.1 - get the color histogram for the unaries
hist_res = 32
cost_fg = self.__get_color_histogram(image_array, seed_fg, hist_res)
cost_bg = self.__get_color_histogram(image_array, seed_bg, hist_res)
# TASK 2.2-2.3 - set the unaries and the pairwise terms
unaries = self.__get_unaries(image_array, lambda_value, cost_fg, cost_bg, seed_fg, seed_bg)
pairwise = self.__get_pairwise(image_array, sigma=5)
# TODO: TASK 2.4 - perform graph cut
g = GraphCut(num_pixels, pairwise.__len__())
g.set_unary(unaries)
g.set_pairwise(pairwise)
g.minimize()
labels = g.get_labeling()
labels = np.reshape(labels, (height, width))
# plt.imshow(labels)
# plt.show()
# TODO TASK 2.4 get segmented image to the view
segmented_image, segmented_image_with_background = self.__get_segmented_image(image_array, labels, background_array)
# transform image array to an rgb image
segmented_image = Image.fromarray(segmented_image, 'RGB')
self._view.set_canvas_image(segmented_image)
if segmented_image_with_background is not None:
segmented_image_with_background = Image.fromarray(segmented_image_with_background, 'RGB')
plt.imshow(segmented_image_with_background)
plt.show()
|
[
"numpy.empty",
"numpy.floor",
"numpy.histogramdd",
"numpy.shape",
"numpy.linalg.norm",
"PIL.Image.blend",
"numpy.copy",
"scipy.ndimage.gaussian_filter",
"matplotlib.pyplot.imshow",
"numpy.logical_not",
"numpy.reshape",
"numpy.bool",
"numpy.size",
"matplotlib.pyplot.show",
"numpy.asarray",
"numpy.square",
"numpy.vstack",
"graph_cut_gui.GraphCutGui",
"numpy.subtract",
"numpy.logical_and",
"numpy.log",
"numpy.zeros",
"numpy.array",
"PIL.Image.fromarray"
] |
[((425, 448), 'graph_cut_gui.GraphCutGui', 'GraphCutGui', (['self', 'root'], {}), '(self, root)\n', (436, 448), False, 'from graph_cut_gui import GraphCutGui\n'), ((1178, 1246), 'numpy.histogramdd', 'np.histogramdd', (['data', 'hist_res'], {'range': '[(0, 255), (0, 255), (0, 255)]'}), '(data, hist_res, range=[(0, 255), (0, 255), (0, 255)])\n', (1192, 1246), True, 'import numpy as np\n'), ((1429, 1469), 'scipy.ndimage.gaussian_filter', 'ndimage.gaussian_filter', (['histogram', '(0.85)'], {}), '(histogram, 0.85)\n', (1452, 1469), True, 'import scipy.ndimage as ndimage\n'), ((2348, 2365), 'numpy.size', 'np.size', (['image', '(0)'], {}), '(image, 0)\n', (2355, 2365), True, 'import numpy as np\n'), ((2387, 2404), 'numpy.size', 'np.size', (['image', '(1)'], {}), '(image, 1)\n', (2394, 2404), True, 'import numpy as np\n'), ((2424, 2461), 'numpy.empty', 'np.empty', (['(image_rows, image_cols, 2)'], {}), '((image_rows, image_cols, 2))\n', (2432, 2461), True, 'import numpy as np\n'), ((3234, 3262), 'numpy.reshape', 'np.reshape', (['unaries', '(-1, 2)'], {}), '(unaries, (-1, 2))\n', (3244, 3262), True, 'import numpy as np\n'), ((4644, 4661), 'numpy.size', 'np.size', (['image', '(0)'], {}), '(image, 0)\n', (4651, 4661), True, 'import numpy as np\n'), ((4683, 4700), 'numpy.size', 'np.size', (['image', '(1)'], {}), '(image, 1)\n', (4690, 4700), True, 'import numpy as np\n'), ((6166, 6186), 'numpy.asarray', 'np.asarray', (['pairwise'], {}), '(pairwise)\n', (6176, 6186), True, 'import numpy as np\n'), ((6804, 6821), 'numpy.size', 'np.size', (['image', '(0)'], {}), '(image, 0)\n', (6811, 6821), True, 'import numpy as np\n'), ((6843, 6860), 'numpy.size', 'np.size', (['image', '(1)'], {}), '(image, 1)\n', (6850, 6860), True, 'import numpy as np\n'), ((6883, 6905), 'numpy.logical_not', 'np.logical_not', (['labels'], {}), '(labels)\n', (6897, 6905), True, 'import numpy as np\n'), ((6921, 6974), 'numpy.zeros', 'np.zeros', (['(image_rows, image_cols, 3)'], {'dtype': 'np.uint8'}), '((image_rows, image_cols, 3), dtype=np.uint8)\n', (6929, 6974), True, 'import numpy as np\n'), ((7005, 7042), 'numpy.array', 'np.array', (['[255, 0, 0]'], {'dtype': 'np.uint8'}), '([255, 0, 0], dtype=np.uint8)\n', (7013, 7042), True, 'import numpy as np\n'), ((7069, 7090), 'numpy.array', 'np.array', (['[0, 0, 255]'], {}), '([0, 0, 255])\n', (7077, 7090), True, 'import numpy as np\n'), ((7112, 7134), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (7127, 7134), False, 'from PIL import Image\n'), ((7154, 7175), 'PIL.Image.fromarray', 'Image.fromarray', (['mask'], {}), '(mask)\n', (7169, 7175), False, 'from PIL import Image\n'), ((7197, 7234), 'PIL.Image.blend', 'Image.blend', (['image_PIL', 'mask_PIL', '(0.6)'], {}), '(image_PIL, mask_PIL, 0.6)\n', (7208, 7234), False, 'from PIL import Image\n'), ((7262, 7282), 'numpy.array', 'np.array', (['result_PIL'], {}), '(result_PIL)\n', (7270, 7282), True, 'import numpy as np\n'), ((7903, 7920), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (7913, 7920), True, 'import numpy as np\n'), ((8099, 8116), 'numpy.array', 'np.array', (['seed_fg'], {}), '(seed_fg)\n', (8107, 8116), True, 'import numpy as np\n'), ((8135, 8152), 'numpy.array', 'np.array', (['seed_bg'], {}), '(seed_bg)\n', (8143, 8152), True, 'import numpy as np\n'), ((8938, 8973), 'numpy.reshape', 'np.reshape', (['labels', '(height, width)'], {}), '(labels, (height, width))\n', (8948, 8973), True, 'import numpy as np\n'), ((9281, 9320), 'PIL.Image.fromarray', 'Image.fromarray', (['segmented_image', 
'"""RGB"""'], {}), "(segmented_image, 'RGB')\n", (9296, 9320), False, 'from PIL import Image\n'), ((1097, 1153), 'numpy.vstack', 'np.vstack', (['(seed_r_values, seed_g_values, seed_b_values)'], {}), '((seed_r_values, seed_g_values, seed_b_values))\n', (1106, 1153), True, 'import numpy as np\n'), ((3733, 3859), 'numpy.array', 'np.array', (['[[i - 1, j - 1], [i - 1, j], [i - 1, j + 1], [i, j + 1], [i + 1, j + 1], [i +\n 1, j], [i + 1, j - 1], [i, j - 1]]'], {}), '([[i - 1, j - 1], [i - 1, j], [i - 1, j + 1], [i, j + 1], [i + 1, j +\n 1], [i + 1, j], [i + 1, j - 1], [i, j - 1]])\n', (3741, 3859), True, 'import numpy as np\n'), ((7338, 7387), 'numpy.zeros', 'np.zeros', (['(image_rows, image_cols)'], {'dtype': 'np.bool'}), '((image_rows, image_cols), dtype=np.bool)\n', (7346, 7387), True, 'import numpy as np\n'), ((7431, 7441), 'numpy.bool', 'np.bool', (['(1)'], {}), '(1)\n', (7438, 7441), True, 'import numpy as np\n'), ((7463, 7513), 'numpy.copy', 'np.copy', (['background[0:image_rows, 0:image_cols, :]'], {}), '(background[0:image_rows, 0:image_cols, :])\n', (7470, 7513), True, 'import numpy as np\n'), ((8058, 8080), 'numpy.asarray', 'np.asarray', (['background'], {}), '(background)\n', (8068, 8080), True, 'import numpy as np\n'), ((8177, 8198), 'numpy.shape', 'np.shape', (['image_array'], {}), '(image_array)\n', (8185, 8198), True, 'import numpy as np\n'), ((9476, 9531), 'PIL.Image.fromarray', 'Image.fromarray', (['segmented_image_with_background', '"""RGB"""'], {}), "(segmented_image_with_background, 'RGB')\n", (9491, 9531), False, 'from PIL import Image\n'), ((9544, 9587), 'matplotlib.pyplot.imshow', 'plt.imshow', (['segmented_image_with_background'], {}), '(segmented_image_with_background)\n', (9554, 9587), True, 'import matplotlib.pyplot as plt\n'), ((9600, 9610), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9608, 9610), True, 'import matplotlib.pyplot as plt\n'), ((4447, 4491), 'numpy.logical_and', 'np.logical_and', (['is_boundary_1', 'is_boundary_2'], {}), '(is_boundary_1, is_boundary_2)\n', (4461, 4491), True, 'import numpy as np\n'), ((4493, 4537), 'numpy.logical_and', 'np.logical_and', (['is_boundary_3', 'is_boundary_4'], {}), '(is_boundary_3, is_boundary_4)\n', (4507, 4537), True, 'import numpy as np\n'), ((4844, 4860), 'numpy.array', 'np.array', (['[i, j]'], {}), '([i, j])\n', (4852, 4860), True, 'import numpy as np\n'), ((5312, 5356), 'numpy.subtract', 'np.subtract', (['neighbour_pixels', 'current_pixel'], {}), '(neighbour_pixels, current_pixel)\n', (5323, 5356), True, 'import numpy as np\n'), ((5391, 5432), 'numpy.linalg.norm', 'np.linalg.norm', (['pixel_differences'], {'axis': '(1)'}), '(pixel_differences, axis=1)\n', (5405, 5432), True, 'import numpy as np\n'), ((5553, 5596), 'numpy.linalg.norm', 'np.linalg.norm', (['spatial_differences'], {'axis': '(1)'}), '(spatial_differences, axis=1)\n', (5567, 5596), True, 'import numpy as np\n'), ((7405, 7427), 'numpy.logical_not', 'np.logical_not', (['labels'], {}), '(labels)\n', (7419, 7427), True, 'import numpy as np\n'), ((2731, 2799), 'numpy.log', 'np.log', (['(hist_fg[pixel_bins[0], pixel_bins[1], pixel_bins[2]] + 1e-10)'], {}), '(hist_fg[pixel_bins[0], pixel_bins[1], pixel_bins[2]] + 1e-10)\n', (2737, 2799), True, 'import numpy as np\n'), ((2827, 2895), 'numpy.log', 'np.log', (['(hist_bg[pixel_bins[0], pixel_bins[1], pixel_bins[2]] + 1e-10)'], {}), '(hist_bg[pixel_bins[0], pixel_bins[1], pixel_bins[2]] + 1e-10)\n', (2833, 2895), True, 'import numpy as np\n'), ((2613, 2640), 'numpy.floor', 'np.floor', (['(pixel / 
hist_step)'], {}), '(pixel / hist_step)\n', (2621, 2640), True, 'import numpy as np\n'), ((5650, 5676), 'numpy.square', 'np.square', (['pixel_distances'], {}), '(pixel_distances)\n', (5659, 5676), True, 'import numpy as np\n'), ((5684, 5700), 'numpy.square', 'np.square', (['sigma'], {}), '(sigma)\n', (5693, 5700), True, 'import numpy as np\n')]
|
# superpixels.py Performs SLIC algorithm #
# Authors: <NAME>, <NAME>, <NAME>, <NAME>
# import the necessary packages
from skimage.segmentation import slic
from skimage.segmentation import mark_boundaries
from skimage.util import img_as_float
from skimage.util import img_as_ubyte
from skimage import data, io, segmentation, color
from skimage.color import rgb2gray
from skimage.future import graph
import matplotlib.pyplot as plt
import argparse
import numpy as np
import cv2
import os
# Weighting Functions based on Color Intensities
def _weight_mean_color(graph, src, dst, n):
diff = graph.nodes[dst]['mean color'] - graph.nodes[n]['mean color']
diff = np.linalg.norm(diff)
return {'weight': diff}
def merge_mean_color(graph, src, dst):
graph.nodes[dst]['total color'] += graph.nodes[src]['total color']
graph.nodes[dst]['pixel count'] += graph.nodes[src]['pixel count']
graph.nodes[dst]['mean color'] = (graph.nodes[dst]['total color'] /
graph.nodes[dst]['pixel count'])
# Grayscales and segments the image, then merges superpixels by mean color
def segmentImage(sourcePath,destPath):
image_gray = rgb2gray(io.imread(sourcePath))
image = io.imread(sourcePath)
#gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
image_gray = np.dstack([image_gray, image_gray, image_gray])
image_gray = img_as_float(image_gray)
# load the image and convert it to a floating point data type
# loop over the number of segments
#for numSegments in (100,200,300):
numSegments = 10000
# apply SLIC and extract (approximately) the supplied number
# of segments
segments = slic(image_gray, n_segments = numSegments, sigma = 5)
g = graph.rag_mean_color(image,segments)
labels2 = graph.merge_hierarchical(segments, g, thresh=35, rag_copy=False,
in_place_merge=True,
merge_func=merge_mean_color,
weight_func=_weight_mean_color)
out = color.label2rgb(labels2, image, kind='avg', bg_label=0)
#out = segmentation.mark_boundaries(out, labels2, color=(0,0,0))
# saves segmented image
# fig = plt.figure("Superpixels -- %d segments" % (numSegments))
# ax = fig.add_subplot(1, 1, 1)
# ax.imshow(mark_boundaries(image,segments,color=(0,0,0)))
# Saving Image
io.imsave(destPath,img_as_ubyte(out))
#Prints Every Single Segment
# for (i, segVal) in enumerate(np.unique(segments)):
# print("[x] inspecting segment %d" % (i))
# mask = np.zeros(image.shape[:2], dtype = "uint8")
# mask[segments == segVal] = 255
# # show the masked region
# cv2.imshow("Mask", mask)
# cv2.imshow("Applied", cv2.bitwise_and(image, image, mask = mask))
# cv2.waitKey(1)
#plt.axis("off")
# show the plots
#plt.show()
# Segments
def segmentFolder(source,dest):
# Loops through directory for images
for file in os.listdir(source):
isPicture = file.endswith(".jpg") or file.endswith(".png") or file.endswith(".JPG") or file.endswith(".PNG")
if isPicture == True:
segmentImage(source + file, dest + file)
|
[
"numpy.dstack",
"skimage.color.label2rgb",
"skimage.util.img_as_ubyte",
"skimage.future.graph.merge_hierarchical",
"numpy.linalg.norm",
"skimage.future.graph.rag_mean_color",
"skimage.segmentation.slic",
"skimage.util.img_as_float",
"os.listdir",
"skimage.io.imread"
] |
[((668, 688), 'numpy.linalg.norm', 'np.linalg.norm', (['diff'], {}), '(diff)\n', (682, 688), True, 'import numpy as np\n'), ((1187, 1208), 'skimage.io.imread', 'io.imread', (['sourcePath'], {}), '(sourcePath)\n', (1196, 1208), False, 'from skimage import data, io, segmentation, color\n'), ((1282, 1329), 'numpy.dstack', 'np.dstack', (['[image_gray, image_gray, image_gray]'], {}), '([image_gray, image_gray, image_gray])\n', (1291, 1329), True, 'import numpy as np\n'), ((1347, 1371), 'skimage.util.img_as_float', 'img_as_float', (['image_gray'], {}), '(image_gray)\n', (1359, 1371), False, 'from skimage.util import img_as_float\n'), ((1639, 1688), 'skimage.segmentation.slic', 'slic', (['image_gray'], {'n_segments': 'numSegments', 'sigma': '(5)'}), '(image_gray, n_segments=numSegments, sigma=5)\n', (1643, 1688), False, 'from skimage.segmentation import slic\n'), ((1701, 1738), 'skimage.future.graph.rag_mean_color', 'graph.rag_mean_color', (['image', 'segments'], {}), '(image, segments)\n', (1721, 1738), False, 'from skimage.future import graph\n'), ((1752, 1907), 'skimage.future.graph.merge_hierarchical', 'graph.merge_hierarchical', (['segments', 'g'], {'thresh': '(35)', 'rag_copy': '(False)', 'in_place_merge': '(True)', 'merge_func': 'merge_mean_color', 'weight_func': '_weight_mean_color'}), '(segments, g, thresh=35, rag_copy=False,\n in_place_merge=True, merge_func=merge_mean_color, weight_func=\n _weight_mean_color)\n', (1776, 1907), False, 'from skimage.future import graph\n'), ((2017, 2072), 'skimage.color.label2rgb', 'color.label2rgb', (['labels2', 'image'], {'kind': '"""avg"""', 'bg_label': '(0)'}), "(labels2, image, kind='avg', bg_label=0)\n", (2032, 2072), False, 'from skimage import data, io, segmentation, color\n'), ((2974, 2992), 'os.listdir', 'os.listdir', (['source'], {}), '(source)\n', (2984, 2992), False, 'import os\n'), ((1152, 1173), 'skimage.io.imread', 'io.imread', (['sourcePath'], {}), '(sourcePath)\n', (1161, 1173), False, 'from skimage import data, io, segmentation, color\n'), ((2387, 2404), 'skimage.util.img_as_ubyte', 'img_as_ubyte', (['out'], {}), '(out)\n', (2399, 2404), False, 'from skimage.util import img_as_ubyte\n')]
|
from src.slu.datareader import domain_set, y1_set, y2_set
from preprocess.gen_embeddings_for_slu import domain2slot
import torch
import torch.nn as nn
import os
from tqdm import tqdm
import numpy as np
import logging
logger = logging.getLogger()
from src.conll2002_metrics import *
class SLUTrainer(object):
def __init__(self, params, binary_slu_tagger, slotname_predictor, sent_repre_generator=None):
self.params = params
self.binary_slu_tagger = binary_slu_tagger
self.slotname_predictor = slotname_predictor
self.lr = params.lr
self.use_label_encoder = params.tr
self.num_domain = params.num_domain
if self.use_label_encoder:
self.sent_repre_generator = sent_repre_generator
self.loss_fn_mse = nn.MSELoss()
model_parameters = [
{"params": self.binary_slu_tagger.parameters()},
{"params": self.slotname_predictor.parameters()},
{"params": self.sent_repre_generator.parameters()}
]
else:
model_parameters = [
{"params": self.binary_slu_tagger.parameters()},
{"params": self.slotname_predictor.parameters()}
]
# Adam optimizer
self.optimizer = torch.optim.Adam(model_parameters, lr=self.lr)
self.loss_fn = nn.CrossEntropyLoss()
self.early_stop = params.early_stop
self.no_improvement_num = 0
self.best_f1 = 0
self.stop_training_flag = False
def train_step(self, X, lengths, y_bin, y_final, y_dm, templates=None, tem_lengths=None, epoch=None):
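        # One training step: (1) CRF loss for the binary B/I/O tagger, (2) cross-entropy
        # loss for the slot-name predictor, and (3) optional template-regularization MSE
        # losses when the label encoder is enabled; gradients are accumulated with
        # retain_graph=True and applied by one optimizer.step() at the end.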
# print(X)
# print(lengths)
# print(y_bin)
# print(y_final)
# print(y_dm)
# print('-'*20)
self.binary_slu_tagger.train()
self.slotname_predictor.train()
if self.use_label_encoder:
self.sent_repre_generator.train()
bin_preds, lstm_hiddens = self.binary_slu_tagger(X, lengths)
# print(y_bin)
# y_bin_ = [i for i in y_bin]
# mx_len = max(lengths)
# # print(mx_len)
# for i in range(len(y_bin_)):
# while len(y_bin_[i]) < mx_len.item():
# y_bin_[i].append(0)
# y_bin_ = torch.tensor(y_bin_,device='cuda:0')
# print(y_bin_)
# print(bin_preds.size())
# loss_func = nn.CrossEntropyLoss(reduction='mean')
# t1 = bin_preds.view(-1, 3)
# t2 = y_bin_.view(-1)
# loss = loss_func(t1, t2)
# print(loss)
## optimize binary_slu_tagger
loss_bin = self.binary_slu_tagger.crf_loss(bin_preds, lengths, y_bin)
self.optimizer.zero_grad()
loss_bin.backward(retain_graph=True)
# self.optimizer.step()
# print(loss_bin)
## optimize slotname_predictor
pred_slotname_list, gold_slotname_list = self.slotname_predictor(y_dm, lstm_hiddens, binary_golds=y_bin, final_golds=y_final)
# for i in pred_slotname_list:
# print(i)
# print('-'*20)
# for i in gold_slotname_list:
# print(i)
# print('-'*20)
# print('-'*20)
# print(pred_slotname_list)
# print('-'*30)
# print(gold_slotname_list)
# return 1,0
# '''
# loss_slotname = torch.tensor(0)
# loss_slotname = loss_slotname.cuda()
with torch.autograd.set_detect_anomaly(True):
for pred_slotname_each_sample, gold_slotname_each_sample in zip(pred_slotname_list, gold_slotname_list):
assert pred_slotname_each_sample.size()[0] == gold_slotname_each_sample.size()[0]
# loss_slotname = loss_slotname + self.loss_fn(pred_slotname_each_sample, gold_slotname_each_sample.cuda())
loss_slotname = self.loss_fn(pred_slotname_each_sample, gold_slotname_each_sample.cuda())
# self.optimizer.zero_grad()
loss_slotname.backward(retain_graph=True)
# self.optimizer.step()
# loss = loss_bin + loss_slotname
# self.optimizer.zero_grad()
# # loss_slotname = loss_temp
# loss.backward()
# print(temp)
# self.optimizer.zero_grad()
# loss_slotname = temp
# self.optimizer.step()
if self.use_label_encoder:
templates_repre, input_repre = self.sent_repre_generator(templates, tem_lengths, lstm_hiddens, lengths)
input_repre = input_repre.detach()
template0_loss = self.loss_fn_mse(templates_repre[:, 0, :], input_repre)
template1_loss = -1 * self.loss_fn_mse(templates_repre[:, 1, :], input_repre)
template2_loss = -1 * self.loss_fn_mse(templates_repre[:, 2, :], input_repre)
input_repre.requires_grad = True
# self.optimizer.zero_grad()
template0_loss.backward(retain_graph=True)
template1_loss.backward(retain_graph=True)
template2_loss.backward(retain_graph=True)
# self.optimizer.step()
if epoch > 3:
templates_repre = templates_repre.detach()
input_loss0 = self.loss_fn_mse(input_repre, templates_repre[:, 0, :])
input_loss1 = -1 * self.loss_fn_mse(input_repre, templates_repre[:, 1, :])
input_loss2 = -1 * self.loss_fn_mse(input_repre, templates_repre[:, 2, :])
templates_repre.requires_grad = True
# self.optimizer.zero_grad()
input_loss0.backward(retain_graph=True)
input_loss1.backward(retain_graph=True)
input_loss2.backward(retain_graph=True)
self.optimizer.step()
if self.use_label_encoder:
return loss_bin.item(), loss_slotname.item(), template0_loss.item(), template1_loss.item()
else:
self.optimizer.step()
return loss_bin.item(), loss_slotname.item()
# '''
def evaluate(self, dataloader, istestset=False):
self.binary_slu_tagger.eval()
self.slotname_predictor.eval()
binary_preds, binary_golds = [], []
final_preds, final_golds = [], []
pbar = tqdm(enumerate(dataloader), total=len(dataloader))
for i, (X, lengths, y_bin, y_final, y_dm) in pbar:
binary_golds.extend(y_bin)
final_golds.extend(y_final)
X, lengths = X.cuda(), lengths.cuda()
bin_preds_batch, lstm_hiddens = self.binary_slu_tagger(X, lengths)
bin_preds_batch = self.binary_slu_tagger.crf_decode(bin_preds_batch, lengths)
binary_preds.extend(bin_preds_batch)
slotname_preds_batch = self.slotname_predictor(y_dm, lstm_hiddens, binary_preditions=bin_preds_batch, binary_golds=None, final_golds=None)
final_preds_batch = self.combine_binary_and_slotname_preds(y_dm, bin_preds_batch, slotname_preds_batch)
final_preds.extend(final_preds_batch)
# binary predictions
binary_preds = np.concatenate(binary_preds, axis=0)
binary_preds = list(binary_preds)
binary_golds = np.concatenate(binary_golds, axis=0)
binary_golds = list(binary_golds)
# final predictions
final_preds = np.concatenate(final_preds, axis=0)
final_preds = list(final_preds)
final_golds = np.concatenate(final_golds, axis=0)
final_golds = list(final_golds)
bin_lines, final_lines = [], []
for bin_pred, bin_gold, final_pred, final_gold in zip(binary_preds, binary_golds, final_preds, final_golds):
bin_slot_pred = y1_set[bin_pred]
bin_slot_gold = y1_set[bin_gold]
final_slot_pred = y2_set[final_pred]
final_slot_gold = y2_set[final_gold]
bin_lines.append("w" + " " + bin_slot_pred + " " + bin_slot_gold)
final_lines.append("w" + " " + final_slot_pred + " " + final_slot_gold)
bin_result = conll2002_measure(bin_lines)
bin_f1 = bin_result["fb1"]
final_result = conll2002_measure(final_lines)
final_f1 = final_result["fb1"]
if istestset == False: # dev set
if final_f1 > self.best_f1:
self.best_f1 = final_f1
self.no_improvement_num = 0
logger.info("Found better model!!")
self.save_model()
else:
self.no_improvement_num += 1
logger.info("No better model found (%d/%d)" % (self.no_improvement_num, self.early_stop))
if self.no_improvement_num >= self.early_stop:
self.stop_training_flag = True
return bin_f1, final_f1, self.stop_training_flag
def combine_binary_and_slotname_preds(self, dm_id_batch, binary_preds_batch, slotname_preds_batch):
"""
Input:
dm_id_batch: (bsz)
binary_preds: (bsz, seq_len)
slotname_preds: (bsz, num_slotname, slot_num)
Output:
final_preds: (bsz, seq_len)
"""
final_preds = []
for i in range(len(dm_id_batch)):
dm_id = dm_id_batch[i]
binary_preds = binary_preds_batch[i]
slotname_preds = slotname_preds_batch[i]
slot_list_based_dm = domain2slot[domain_set[dm_id]]
i = -1
final_preds_each = []
for bin_pred in binary_preds:
# values of bin_pred are 0 (O), or 1(B) or 2(I)
if bin_pred.item() == 0:
final_preds_each.append(0)
elif bin_pred.item() == 1:
i += 1
pred_slot_id = torch.argmax(slotname_preds[i])
slotname = "B-" + slot_list_based_dm[pred_slot_id]
final_preds_each.append(y2_set.index(slotname))
elif bin_pred.item() == 2:
if i == -1:
final_preds_each.append(0)
else:
pred_slot_id = torch.argmax(slotname_preds[i])
slotname = "I-" + slot_list_based_dm[pred_slot_id]
if slotname not in y2_set:
final_preds_each.append(0)
else:
final_preds_each.append(y2_set.index(slotname))
assert len(final_preds_each) == len(binary_preds)
final_preds.append(final_preds_each)
return final_preds
def save_model(self):
"""
save the best model
"""
saved_path = os.path.join(self.params.dump_path, "best_model.pth")
torch.save({
"binary_slu_tagger": self.binary_slu_tagger,
"slotname_predictor": self.slotname_predictor
}, saved_path)
logger.info("Best model has been saved to %s" % saved_path)
|
[
"torch.nn.MSELoss",
"torch.argmax",
"torch.nn.CrossEntropyLoss",
"logging.getLogger",
"torch.save",
"src.slu.datareader.y2_set.index",
"torch.optim.Adam",
"torch.autograd.set_detect_anomaly",
"os.path.join",
"numpy.concatenate"
] |
[((228, 247), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (245, 247), False, 'import logging\n'), ((1284, 1330), 'torch.optim.Adam', 'torch.optim.Adam', (['model_parameters'], {'lr': 'self.lr'}), '(model_parameters, lr=self.lr)\n', (1300, 1330), False, 'import torch\n'), ((1363, 1384), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1382, 1384), True, 'import torch.nn as nn\n'), ((7420, 7456), 'numpy.concatenate', 'np.concatenate', (['binary_preds'], {'axis': '(0)'}), '(binary_preds, axis=0)\n', (7434, 7456), True, 'import numpy as np\n'), ((7522, 7558), 'numpy.concatenate', 'np.concatenate', (['binary_golds'], {'axis': '(0)'}), '(binary_golds, axis=0)\n', (7536, 7558), True, 'import numpy as np\n'), ((7652, 7687), 'numpy.concatenate', 'np.concatenate', (['final_preds'], {'axis': '(0)'}), '(final_preds, axis=0)\n', (7666, 7687), True, 'import numpy as np\n'), ((7750, 7785), 'numpy.concatenate', 'np.concatenate', (['final_golds'], {'axis': '(0)'}), '(final_golds, axis=0)\n', (7764, 7785), True, 'import numpy as np\n'), ((11087, 11140), 'os.path.join', 'os.path.join', (['self.params.dump_path', '"""best_model.pth"""'], {}), "(self.params.dump_path, 'best_model.pth')\n", (11099, 11140), False, 'import os\n'), ((11149, 11269), 'torch.save', 'torch.save', (["{'binary_slu_tagger': self.binary_slu_tagger, 'slotname_predictor': self.\n slotname_predictor}", 'saved_path'], {}), "({'binary_slu_tagger': self.binary_slu_tagger,\n 'slotname_predictor': self.slotname_predictor}, saved_path)\n", (11159, 11269), False, 'import torch\n'), ((785, 797), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (795, 797), True, 'import torch.nn as nn\n'), ((3533, 3572), 'torch.autograd.set_detect_anomaly', 'torch.autograd.set_detect_anomaly', (['(True)'], {}), '(True)\n', (3566, 3572), False, 'import torch\n'), ((10146, 10177), 'torch.argmax', 'torch.argmax', (['slotname_preds[i]'], {}), '(slotname_preds[i])\n', (10158, 10177), False, 'import torch\n'), ((10293, 10315), 'src.slu.datareader.y2_set.index', 'y2_set.index', (['slotname'], {}), '(slotname)\n', (10305, 10315), False, 'from src.slu.datareader import domain_set, y1_set, y2_set\n'), ((10508, 10539), 'torch.argmax', 'torch.argmax', (['slotname_preds[i]'], {}), '(slotname_preds[i])\n', (10520, 10539), False, 'import torch\n'), ((10803, 10825), 'src.slu.datareader.y2_set.index', 'y2_set.index', (['slotname'], {}), '(slotname)\n', (10815, 10825), False, 'from src.slu.datareader import domain_set, y1_set, y2_set\n')]
|
import random
import numpy as np
import tensorflow as tf
class Reproducibility:
"""
    Singleton class for ensuring reproducibility.
    You indicate the seed once and every execution is the same. The server initializes this class and the clients
    only call it to get a seed.
    The server initializes it with Reproducibility(seed) before all executions.
    To get a seed, a client calls Reproducibility.get_instance().set_seed(ID).
    It is important to know that reproducibility only works if you run the experiment on CPU. Many GPU ops,
    such as convolutions, are not deterministic and do not replicate.
# Arguments:
seed: the main seed for server
# Properties:
seed:
return server seed
seeds:
return all seeds
"""
__instance = None
@staticmethod
def get_instance():
"""
Static access method.
# Returns:
instance: Singleton instance class
"""
if Reproducibility.__instance is None:
Reproducibility()
return Reproducibility.__instance
def __init__(self, seed=None):
"""
Virtually private constructor.
"""
if Reproducibility.__instance is not None:
raise Exception("This class is a singleton")
else:
self.__seed = seed
self.__seeds = {'server': self.__seed}
Reproducibility.__instance = self
if self.__seed is not None:
self.set_seed('server')
def set_seed(self, id):
"""
Set server and clients seed
# Arguments:
id: 'server' in server node and ID in client node
"""
if id not in self.__seeds.keys():
self.__seeds[id] = np.random.randint(2**32-1)
np.random.seed(self.__seeds[id])
random.seed(self.__seeds[id])
tf.random.set_seed(self.__seeds[id])
@property
def seed(self):
return self.__seed
@property
def seeds(self):
return self.__seeds
def delete_instance(self):
"""
Remove the singleton instance. Not recommended for normal use. This method is necessary for tests.
"""
if Reproducibility.__instance is not None:
del self.__seed
del self.__seeds
Reproducibility.__instance = None
|
[
"tensorflow.random.set_seed",
"random.seed",
"numpy.random.randint",
"numpy.random.seed"
] |
[((1806, 1838), 'numpy.random.seed', 'np.random.seed', (['self.__seeds[id]'], {}), '(self.__seeds[id])\n', (1820, 1838), True, 'import numpy as np\n'), ((1847, 1876), 'random.seed', 'random.seed', (['self.__seeds[id]'], {}), '(self.__seeds[id])\n', (1858, 1876), False, 'import random\n'), ((1885, 1921), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['self.__seeds[id]'], {}), '(self.__seeds[id])\n', (1903, 1921), True, 'import tensorflow as tf\n'), ((1771, 1801), 'numpy.random.randint', 'np.random.randint', (['(2 ** 32 - 1)'], {}), '(2 ** 32 - 1)\n', (1788, 1801), True, 'import numpy as np\n')]
|
import os
import numpy as np
import json
import torch
from .utils import skeleton
class SkeletonDataset(torch.utils.data.Dataset):
""" Feeder for skeleton-based action recognition
Arguments:
data_path: the path to data folder
random_choose: If true, randomly choose a portion of the input sequence
        random_move: If true, randomly perform affine transformation
window_size: The length of the output sequence
        repeat: number of times to repeat the dataset
data_subscripts: subscript expression of einsum operation.
In the default case, the shape of output data is `(channel, vertex, frames, person)`.
To permute the shape to `(channel, frames, vertex, person)`,
set `data_subscripts` to 'cvfm->cfvm'.
"""
def __init__(self,
data_dir,
random_choose=False,
random_move=False,
window_size=-1,
num_track=1,
data_subscripts=None,
                 repeat=1,
                 normalization=False):
self.data_dir = data_dir
self.random_choose = random_choose
self.random_move = random_move
self.window_size = window_size
self.num_track = num_track
        self.data_subscripts = data_subscripts
        # whether to normalize keypoint coordinates in __getitem__ (assumed default: off)
        self.normalization = normalization
self.files = [
os.path.join(self.data_dir, f) for f in os.listdir(self.data_dir)
] * repeat
def __len__(self):
return len(self.files)
def __getitem__(self, index):
with open(self.files[index]) as f:
data = json.load(f)
resolution = data['info']['resolution']
category_id = data['category_id']
annotations = data['annotations']
num_frame = data['info']['num_frame']
num_keypoints = data['info']['num_keypoints']
channel = data['info']['keypoint_channels']
num_channel = len(channel)
# get data
data = np.zeros(
(num_channel, num_keypoints, num_frame, self.num_track),
dtype=np.float32)
for a in annotations:
person_id = a['id'] if a['person_id'] is None else a['person_id']
frame_index = a['frame_index']
if person_id < self.num_track and frame_index < num_frame:
data[:, :, frame_index, person_id] = np.array(
a['keypoints']).transpose()
# normalization
if self.normalization:
for i, c in enumerate(channel):
if c == 'x':
data[i] = data[i] / resolution[0] - 0.5
if c == 'y':
data[i] = data[i] / resolution[1] - 0.5
if c == 'score' or c == 'visibility':
mask = (data[i] == 0)
for j in range(num_channel):
if c != j:
data[j][mask] = 0
# permute
if self.data_subscripts is not None:
data = np.einsum(self.data_subscripts, data)
# augmentation
if self.random_choose:
data = skeleton.random_choose(data, self.window_size)
elif self.window_size > 0:
data = skeleton.auto_pading(data, self.window_size)
if self.random_move:
data = skeleton.random_move(data)
return data, category_id
|
[
"json.load",
"numpy.zeros",
"numpy.einsum",
"numpy.array",
"os.path.join",
"os.listdir"
] |
[((1924, 2012), 'numpy.zeros', 'np.zeros', (['(num_channel, num_keypoints, num_frame, self.num_track)'], {'dtype': 'np.float32'}), '((num_channel, num_keypoints, num_frame, self.num_track), dtype=np.\n float32)\n', (1932, 2012), True, 'import numpy as np\n'), ((1556, 1568), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1565, 1568), False, 'import json\n'), ((2954, 2991), 'numpy.einsum', 'np.einsum', (['self.data_subscripts', 'data'], {}), '(self.data_subscripts, data)\n', (2963, 2991), True, 'import numpy as np\n'), ((1318, 1348), 'os.path.join', 'os.path.join', (['self.data_dir', 'f'], {}), '(self.data_dir, f)\n', (1330, 1348), False, 'import os\n'), ((1358, 1383), 'os.listdir', 'os.listdir', (['self.data_dir'], {}), '(self.data_dir)\n', (1368, 1383), False, 'import os\n'), ((2309, 2333), 'numpy.array', 'np.array', (["a['keypoints']"], {}), "(a['keypoints'])\n", (2317, 2333), True, 'import numpy as np\n')]
|
import sys
import uncertainty_rfr
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
import pandas.api.types as ptypes
sys.path.append("../")
df_test = pd.read_csv('./xiaofeng_lasso/unittest_dummy.csv', nrows=5)
X_test, y_test = uncertainty_rfr.descriptors_outputs(df_test, d_start=5,
o=0)
def test_uncertainty_rfr_qfr():
'''
Test function for uncertainty_rfr_qfr. Checks values in actual are 0 when
true_y = False, and that the output df has the correct number of rows.
'''
df_test = pd.read_csv('./xiaofeng_lasso/unittest_dummy.csv')
X = df_test.iloc[range(3)]
err_df_test = \
uncertainty_rfr.uncertainty_rfr_qfr(df_test, X[X.columns[5:]],
Y='none', true_y=False, o=0,
d_start=5)
assert err_df_test['actual'][0] == err_df_test['actual'][1], \
'with true_y = False, all values in "actual" should be equal (0.0)'
assert len(err_df_test) == len(X), \
'length of predicting df should equal length of output df'
def test_descriptors_outputs():
'''
Test function for descriptors_outputs. Checks the shape of X, and checks
that the correct type of value (numeric) is in the columns.
'''
X_test, y_test = uncertainty_rfr.descriptors_outputs(df_test, d_start=5,
o=0)
assert X_test.shape[1] == 5, \
'array shape is incorrect. should be ({}, 7), got ({}, {})'\
.format(X_test.shape[0], X_test.shape[0], X_test.shape[1])
assert all(ptypes.is_numeric_dtype(X_test[col]) for col in
list(X_test[X_test.columns[:]])), \
'data type in columns is of incorrect type, must be numeric'
assert ptypes.is_numeric_dtype(y_test), \
'data type in columns is of incorrect type, must be numeric'
def test_traintest():
'''
    Test function for traintest. Checks that the lengths of X_train and
y_train are the same.
'''
train_idx_test = np.array([0, 1, 2])
test_idx_test = np.array([3, 4])
X_train_test, y_train_test = \
uncertainty_rfr.traintest(X_test, y_test, train_idx_test,
test_idx_test)
assert X_train_test.shape[0] == y_train_test.shape[0], \
'X_train and y_train datapoints do not have the same num of values'
def test_predict_append():
'''
Test function for predict_append. Checks that the func appends one value
at a time, and that the output is a list.
'''
df_test2 = df_test[df_test.columns[:7]]
X_test, y_test = uncertainty_rfr.descriptors_outputs(df_test2, d_start=5,
o=0)
clf_test = RandomForestRegressor(random_state=130)
clf_test.fit(X_test, y_test)
N_arr_test = np.array([[3.98069889, 0.38048415],
[-0.78001682, 0.20058657]])
n_test = 0
preds_test = []
preds_test = uncertainty_rfr.predict_append(clf_test, N_arr_test, n_test,
preds_test)
assert len(preds_test) == 1, \
'preds_test needs to be length 1. Got {}'.format(len(preds_test))
assert isinstance(preds_test, list), \
'preds_test needs to be a list, got {}'.format(type(preds_test))
def test_dft_points():
'''
    Test function for dft_points. Checks that when true_y = True, the output
    array is equal to Y_test, and when true_y = False the output array is the
    same length as N_arr_test.
'''
Y_test = [3, 5]
N_arr_test = np.array([[3.98069889, 0.38048415],
[-0.78001682, 0.20058657]])
Y_arr_test = uncertainty_rfr.dft_points(True, Y_test, N_arr_test)
Y_arr_test2 = uncertainty_rfr.dft_points(False, Y_test, N_arr_test)
assert Y_arr_test[0] == Y_test[0], \
'Y_arr_test got unexpected result. Expected np.array([3,5]), got{}'.\
format(Y_arr_test)
assert len(Y_arr_test2) == N_arr_test.shape[0], \
'length of Y_arr_test2 should be equal to the number of rows of \
N_arr_test. Got Y_arr: {}, N_arr {}'.\
format(len(Y_arr_test2), N_arr_test.shape[0])
def test_uncert_table():
'''
Test function for uncert_table. Checks that the columns in the df are in
    the correct place, that the output dataframe has the correct length,
    and that the last three columns in the output df are numeric.
'''
N_test = df_test[df_test.columns[5:]].iloc[[0, 1]]
X = df_test.iloc[[0, 1]]
Y_arr_test = np.array([3, 5])
pred_desc_test = pd.DataFrame(data={'mean': [1, 2], 'std': [3, 4]}).T
err_df = uncertainty_rfr.uncert_table(N_test, X, 1, 2, 3, 4,
Y_arr_test, pred_desc_test)
assert err_df.columns[0] == 'Type', \
'first column got unexpected value {}, should be Type'.\
format(err_df.columns[0])
assert len(err_df) == len(X), \
'arrays must all be the same length'
assert all(ptypes.is_numeric_dtype(err_df[col]) for col in
list(err_df[err_df.columns[4:]])), \
'columns "true val", "mean", and "std" are of wrong type, should be\
numeric values.'
def test_uncertainty_rfr_cv():
'''
    Test function for uncertainty_rfr_cv. Checks that the prediction df has
as many rows as folds in cv. In the output df it checks that "true val"
values are 0 when true_y = False, and checks that values in "AB" are of
type string.
'''
X = df_test.iloc[[0, 1]]
Y = 'none'
d_start, x_start = 5, 5
o = 0
folds_test = 2
pred_df_test, err_df_test = \
uncertainty_rfr.uncertainty_rfr_cv(df_test, X, Y, o, d_start, x_start,
folds=folds_test)
assert pred_df_test.shape[0] == folds_test, \
'Number of row in pred_df_test array should equal number of folds, \
expected {}, got {}'.format(folds_test, pred_df_test.shape[0])
assert err_df_test[err_df_test.columns[4]][0] == 0.0, \
'Expected 0.0 in "true val" with true_y set to false, instead got a \
different val'
assert isinstance(err_df_test['AB'][1], str), \
'Expected string in column "AB", got {}'.format(type(
err_df_test['AB'][1]))
def test_largest_uncertainty():
'''
    Test function for largest_uncertainty. Checks that the length of the
    df is equal to the num of values it was asked to return, and that the
    output idx is a list.
'''
df = pd.DataFrame(data={'err_int': [1, 2, 3], 'std_dev': [4, 5, 6]})
num_vals = 2
larg, idx = uncertainty_rfr.largest_uncertainty(df, num_vals, 'std_dev')
assert len(larg) == num_vals, \
'number of rows in the output df should equal the number of values\
the func called to return'
assert isinstance(idx, list), \
'expected idx to be list, got {}'.format(type(idx))
|
[
"sys.path.append",
"pandas.DataFrame",
"uncertainty_rfr.traintest",
"pandas.read_csv",
"uncertainty_rfr.predict_append",
"uncertainty_rfr.descriptors_outputs",
"sklearn.ensemble.RandomForestRegressor",
"uncertainty_rfr.uncertainty_rfr_cv",
"uncertainty_rfr.largest_uncertainty",
"numpy.array",
"uncertainty_rfr.dft_points",
"pandas.api.types.is_numeric_dtype",
"uncertainty_rfr.uncert_table",
"uncertainty_rfr.uncertainty_rfr_qfr"
] |
[((159, 181), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (174, 181), False, 'import sys\n'), ((194, 253), 'pandas.read_csv', 'pd.read_csv', (['"""./xiaofeng_lasso/unittest_dummy.csv"""'], {'nrows': '(5)'}), "('./xiaofeng_lasso/unittest_dummy.csv', nrows=5)\n", (205, 253), True, 'import pandas as pd\n'), ((271, 331), 'uncertainty_rfr.descriptors_outputs', 'uncertainty_rfr.descriptors_outputs', (['df_test'], {'d_start': '(5)', 'o': '(0)'}), '(df_test, d_start=5, o=0)\n', (306, 331), False, 'import uncertainty_rfr\n'), ((602, 652), 'pandas.read_csv', 'pd.read_csv', (['"""./xiaofeng_lasso/unittest_dummy.csv"""'], {}), "('./xiaofeng_lasso/unittest_dummy.csv')\n", (613, 652), True, 'import pandas as pd\n'), ((713, 819), 'uncertainty_rfr.uncertainty_rfr_qfr', 'uncertainty_rfr.uncertainty_rfr_qfr', (['df_test', 'X[X.columns[5:]]'], {'Y': '"""none"""', 'true_y': '(False)', 'o': '(0)', 'd_start': '(5)'}), "(df_test, X[X.columns[5:]], Y='none',\n true_y=False, o=0, d_start=5)\n", (748, 819), False, 'import uncertainty_rfr\n'), ((1368, 1428), 'uncertainty_rfr.descriptors_outputs', 'uncertainty_rfr.descriptors_outputs', (['df_test'], {'d_start': '(5)', 'o': '(0)'}), '(df_test, d_start=5, o=0)\n', (1403, 1428), False, 'import uncertainty_rfr\n'), ((1854, 1885), 'pandas.api.types.is_numeric_dtype', 'ptypes.is_numeric_dtype', (['y_test'], {}), '(y_test)\n', (1877, 1885), True, 'import pandas.api.types as ptypes\n'), ((2116, 2135), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (2124, 2135), True, 'import numpy as np\n'), ((2156, 2172), 'numpy.array', 'np.array', (['[3, 4]'], {}), '([3, 4])\n', (2164, 2172), True, 'import numpy as np\n'), ((2217, 2289), 'uncertainty_rfr.traintest', 'uncertainty_rfr.traintest', (['X_test', 'y_test', 'train_idx_test', 'test_idx_test'], {}), '(X_test, y_test, train_idx_test, test_idx_test)\n', (2242, 2289), False, 'import uncertainty_rfr\n'), ((2695, 2756), 'uncertainty_rfr.descriptors_outputs', 'uncertainty_rfr.descriptors_outputs', (['df_test2'], {'d_start': '(5)', 'o': '(0)'}), '(df_test2, d_start=5, o=0)\n', (2730, 2756), False, 'import uncertainty_rfr\n'), ((2829, 2868), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'random_state': '(130)'}), '(random_state=130)\n', (2850, 2868), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((2919, 2982), 'numpy.array', 'np.array', (['[[3.98069889, 0.38048415], [-0.78001682, 0.20058657]]'], {}), '([[3.98069889, 0.38048415], [-0.78001682, 0.20058657]])\n', (2927, 2982), True, 'import numpy as np\n'), ((3062, 3134), 'uncertainty_rfr.predict_append', 'uncertainty_rfr.predict_append', (['clf_test', 'N_arr_test', 'n_test', 'preds_test'], {}), '(clf_test, N_arr_test, n_test, preds_test)\n', (3092, 3134), False, 'import uncertainty_rfr\n'), ((3673, 3736), 'numpy.array', 'np.array', (['[[3.98069889, 0.38048415], [-0.78001682, 0.20058657]]'], {}), '([[3.98069889, 0.38048415], [-0.78001682, 0.20058657]])\n', (3681, 3736), True, 'import numpy as np\n'), ((3781, 3833), 'uncertainty_rfr.dft_points', 'uncertainty_rfr.dft_points', (['(True)', 'Y_test', 'N_arr_test'], {}), '(True, Y_test, N_arr_test)\n', (3807, 3833), False, 'import uncertainty_rfr\n'), ((3852, 3905), 'uncertainty_rfr.dft_points', 'uncertainty_rfr.dft_points', (['(False)', 'Y_test', 'N_arr_test'], {}), '(False, Y_test, N_arr_test)\n', (3878, 3905), False, 'import uncertainty_rfr\n'), ((4648, 4664), 'numpy.array', 'np.array', (['[3, 5]'], {}), '([3, 5])\n', (4656, 4664), True, 'import numpy as 
np\n'), ((4753, 4832), 'uncertainty_rfr.uncert_table', 'uncertainty_rfr.uncert_table', (['N_test', 'X', '(1)', '(2)', '(3)', '(4)', 'Y_arr_test', 'pred_desc_test'], {}), '(N_test, X, 1, 2, 3, 4, Y_arr_test, pred_desc_test)\n', (4781, 4832), False, 'import uncertainty_rfr\n'), ((5754, 5846), 'uncertainty_rfr.uncertainty_rfr_cv', 'uncertainty_rfr.uncertainty_rfr_cv', (['df_test', 'X', 'Y', 'o', 'd_start', 'x_start'], {'folds': 'folds_test'}), '(df_test, X, Y, o, d_start, x_start,\n folds=folds_test)\n', (5788, 5846), False, 'import uncertainty_rfr\n'), ((6626, 6689), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'err_int': [1, 2, 3], 'std_dev': [4, 5, 6]}"}), "(data={'err_int': [1, 2, 3], 'std_dev': [4, 5, 6]})\n", (6638, 6689), True, 'import pandas as pd\n'), ((6724, 6784), 'uncertainty_rfr.largest_uncertainty', 'uncertainty_rfr.largest_uncertainty', (['df', 'num_vals', '"""std_dev"""'], {}), "(df, num_vals, 'std_dev')\n", (6759, 6784), False, 'import uncertainty_rfr\n'), ((4686, 4736), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'mean': [1, 2], 'std': [3, 4]}"}), "(data={'mean': [1, 2], 'std': [3, 4]})\n", (4698, 4736), True, 'import pandas as pd\n'), ((1674, 1710), 'pandas.api.types.is_numeric_dtype', 'ptypes.is_numeric_dtype', (['X_test[col]'], {}), '(X_test[col])\n', (1697, 1710), True, 'import pandas.api.types as ptypes\n'), ((5113, 5149), 'pandas.api.types.is_numeric_dtype', 'ptypes.is_numeric_dtype', (['err_df[col]'], {}), '(err_df[col])\n', (5136, 5149), True, 'import pandas.api.types as ptypes\n')]
|
# MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2018
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Provides black-box gradient estimation using NES.
"""
import logging
from typing import List, Optional, Tuple, Union, TYPE_CHECKING
import numpy as np
from scipy.stats import entropy
from art.estimators.estimator import BaseEstimator
from art.estimators.classification.classifier import ClassifierMixin, ClassifierLossGradients
from art.utils import clip_and_round
if TYPE_CHECKING:
from art.utils import CLASSIFIER_CLASS_LOSS_GRADIENTS_TYPE
logger = logging.getLogger(__name__)
import itertools
class QueryEfficientGradientEstimationClassifier(ClassifierLossGradients, ClassifierMixin, BaseEstimator):
"""
Implementation of Query-Efficient Black-box Adversarial Examples. The attack approximates the gradient by
maximizing the loss function over samples drawn from random Gaussian noise around the input.
| Paper link: https://arxiv.org/abs/1712.07113
"""
estimator_params = ["num_basis", "sigma", "round_samples"]
def __init__(
self,
classifier: "CLASSIFIER_CLASS_LOSS_GRADIENTS_TYPE",
num_basis: int,
sigma: float,
round_samples: float = 0.0,
) -> None:
"""
:param classifier: An instance of a classification estimator whose loss_gradient is being approximated.
:param num_basis: The number of samples to draw to approximate the gradient.
:param sigma: Scaling on the Gaussian noise N(0,1).
:param round_samples: The resolution of the input domain to round the data to, e.g., 1.0, or 1/255. Set to 0 to
disable.
"""
super().__init__(model=classifier.model, clip_values=classifier.clip_values)
# pylint: disable=E0203
self._classifier = classifier
self.num_basis = num_basis
self.sigma = sigma
        self.round_samples = round_samples
        # toggles the batched ("amortized") loss-gradient estimator; False is an
        # assumed default (the flag can be overwritten after construction)
        self.amortized_attack = False
self._nb_classes = self._classifier.nb_classes
@property
def input_shape(self) -> Tuple[int, ...]:
"""
Return the shape of one input sample.
:return: Shape of one input sample.
"""
return self._classifier.input_shape # type: ignore
def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> np.ndarray: # pylint: disable=W0221
"""
Perform prediction of the classifier for input `x`. Rounds results first.
:param x: Features in array of shape (nb_samples, nb_features) or (nb_samples, nb_pixels_1, nb_pixels_2,
nb_channels) or (nb_samples, nb_channels, nb_pixels_1, nb_pixels_2).
:param batch_size: Size of batches.
:return: Array of predictions of shape `(nb_inputs, nb_classes)`.
"""
return self._classifier.predict(clip_and_round(x, self.clip_values, self.round_samples), batch_size=batch_size)
def fit(self, x: np.ndarray, y: np.ndarray, **kwargs) -> None:
"""
Fit the classifier using the training data `(x, y)`.
:param x: Features in array of shape (nb_samples, nb_features) or (nb_samples, nb_pixels_1, nb_pixels_2,
nb_channels) or (nb_samples, nb_channels, nb_pixels_1, nb_pixels_2).
:param y: Target values (class labels in classification) in array of shape (nb_samples, nb_classes) in
one-hot encoding format.
:param kwargs: Dictionary of framework-specific arguments.
"""
raise NotImplementedError
def _generate_samples(self, x: np.ndarray, epsilon_map: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
Generate samples around the current image.
:param x: Sample input with shape as expected by the model.
:param epsilon_map: Samples drawn from search space.
:return: Two arrays of new input samples to approximate gradient.
"""
minus = clip_and_round(
np.repeat(x, self.num_basis, axis=0) - epsilon_map,
self.clip_values,
self.round_samples,
)
plus = clip_and_round(
np.repeat(x, self.num_basis, axis=0) + epsilon_map,
self.clip_values,
self.round_samples,
)
return minus, plus
def class_gradient(self, x: np.ndarray, label: Union[int, List[int], None] = None, **kwargs) -> np.ndarray:
"""
Compute per-class derivatives w.r.t. `x`.
:param x: Input with shape as expected by the classifier's model.
:param label: Index of a specific per-class derivative. If an integer is provided, the gradient of that class
output is computed for all samples. If multiple values as provided, the first dimension should
match the batch size of `x`, and each value will be used as target for its corresponding sample in
`x`. If `None`, then gradients for all classes will be computed for each sample.
:return: Array of gradients of input features w.r.t. each class in the form
`(batch_size, nb_classes, input_shape)` when computing for all classes, otherwise shape becomes
`(batch_size, 1, input_shape)` when `label` parameter is specified.
"""
raise NotImplementedError
def _generate_sample_i(self, x: np.ndarray, epsilon_map: np.ndarray, i: int) -> Tuple[np.ndarray, np.ndarray]:
minus = clip_and_round(
x - epsilon_map[i],
self.clip_values,
self.round_samples,
)
plus = clip_and_round(
x + epsilon_map[i],
self.clip_values,
self.round_samples,
)
return minus, plus
def loss_gradient(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray:
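        # Dispatch between the batched ("amortized") estimator and the original
        # per-sample estimator. Both approximate the loss gradient with antithetic
        # Gaussian samples: grad ~ sum_j eps_j * (L(x + eps_j) - L(x - eps_j)) / (num_basis * sigma).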
if self.amortized_attack:
return self.loss_gradient_new_efficient(x, y)
#return self.loss_gradient_new(x, y)
else:
return self.loss_gradient_old(x, y)
#return self.loss_gradient_new(x, y)
#return self.loss_gradient_new_efficient(x, y)
def loss_gradient_new_efficient(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray:
"""
Compute the gradient of the loss function w.r.t. `x`.
:param x: Sample input with shape as expected by the model.
:param y: Correct labels, one-vs-rest encoding.
:return: Array of gradients of the same shape as `x`.
"""
epsilon_map = self.sigma * np.random.normal(size=([self.num_basis] + list(self.input_shape)))
#print(epsilon_map.shape)
#print(epsilon_map.reshape(self.num_basis, -1).shape)
grads = [0.0] * len(x)
#print('eps map shape', epsilon_map.shape)
#print('epsmap 11', epsilon_map[11])
#batch over multiple examples
reps_per_batch = 10
reps = epsilon_map.shape[0]
for jb in range(0, reps, reps_per_batch):
minus_preds = []
len_x = len(x)
pm_len = 2*len_x*reps_per_batch
minuses_pluses = [None]*pm_len
for b in range(reps_per_batch):
j = jb + b
#print('j', j, 'b', b)
if j >= reps:
b -= 1
#print('b after dec', b)
break
for i in range(len(x)):
minus, plus = self._generate_sample_i(x[i : i + 1], epsilon_map, j)
#print('j', j)
#print('minus i', i + b*2*len_x, 'plus i', i + len_x + b*2*len_x)
minuses_pluses[i + b*2*len_x] = minus
minuses_pluses[i + len_x + b*2*len_x] = plus
#print('b after loop', b)
if jb + reps_per_batch > reps:
#print(minuses_pluses[:(b+1)*2*len_x])
#print(minuses_pluses[(b+1)*2*len_x:])
minuses_pluses = minuses_pluses[:(b+1)*2*len_x]
#print('len(minuses_pluses)', len(minuses_pluses))
minuses_pluses = np.array(minuses_pluses)
minuses_pluses = np.squeeze(minuses_pluses, 1)
#print(minuses_pluses.shape)
pm_preds = self.predict(minuses_pluses, batch_size=4000)
#minus_preds, plus_preds = np.split(pm_preds, 2)
#print('num pm preds', pm_preds.shape)
#print('b', b+1)
rounds = np.split(pm_preds, b+1)
#print('len(rounds)', len(rounds))
for rn, r in enumerate(rounds):
minus_preds, plus_preds = np.split(r, 2)
#print(minus_preds.shape, plus_preds.shape)
j = jb + rn
for i, (mp, pp) in enumerate(zip(minus_preds, plus_preds)):
new_y_minus = entropy(y[i], mp)
new_y_plus = entropy(y[i], pp)
one_grad = epsilon_map[j] * (new_y_plus - new_y_minus)
grads[i] += one_grad
for i in range(len(grads)):
grads[i] = grads[i] / (self.num_basis * self.sigma)
grads = self._apply_preprocessing_gradient(x, np.array(grads))
return grads
def loss_gradient_new(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray:
"""
Compute the gradient of the loss function w.r.t. `x`.
:param x: Sample input with shape as expected by the model.
:param y: Correct labels, one-vs-rest encoding.
:return: Array of gradients of the same shape as `x`.
"""
epsilon_map = self.sigma * np.random.normal(size=([self.num_basis] + list(self.input_shape)))
#print(epsilon_map.shape)
#print(epsilon_map.reshape(self.num_basis, -1).shape)
grads = [0.0] * len(x)
for j in range(epsilon_map.shape[0]):
minus_preds = []
#plus_preds = []
pluses = []
minus = None
plus = None
for r in range(2):
for i in range(len(x)):
if r == 0:
minus, plus = self._generate_sample_i(x[i : i + 1], epsilon_map, j)
minus_preds.append(self.predict(minus)[0])
pluses.append(plus)
else:
plus_pred = self.predict(pluses[i])[0]
new_y_minus = entropy(y[i], minus_preds[i])
new_y_plus = entropy(y[i], plus_pred)
one_grad = epsilon_map[j] * (new_y_plus - new_y_minus)
grads[i] += one_grad
#for j in range(epsilon_map.shape[0]):
# for i in range(len(x)):
# minus, plus = self._generate_sample_i(x[i : i + 1], epsilon_map, j)
# pred = self.predict(np.concatenate((minus, plus)))
# new_y_minus = entropy(y[i], pred[0])
# new_y_plus = entropy(y[i], pred[1])
# one_grad = epsilon_map[j] * (new_y_plus - new_y_minus)
# grads[i] += one_grad
# #pluses = [self._generate_sample_i(x[i : i + 1], epsilon_map, j)[1][0] for i in range(len(x))]
# #plus_preds = self.predict(pluses)
# #print('plus_preds.shape', plus_preds.shape)
# #print(len(pluses))
# #minuses = [self._generate_sample_i(x[i : i + 1], epsilon_map, j)[0][0] for i in range(len(x))]
# #minus_preds = self.predict(minuses)
# #print('minus_preds.shape', minus_preds.shape)
# #for i in range(len(x)):
# # grads[i] += epsilon_map[j] * (plus_preds[i] - minus_preds[i])
for i in range(len(grads)):
grads[i] = grads[i]* 2/self.num_basis / (2 * self.sigma)
grads = self._apply_preprocessing_gradient(x, np.array(grads))
return grads
def loss_gradient_old(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray:
#new_grads = self.loss_gradient_new(x, y)
"""
Compute the gradient of the loss function w.r.t. `x`.
:param x: Sample input with shape as expected by the model.
:param y: Correct labels, one-vs-rest encoding.
:return: Array of gradients of the same shape as `x`.
"""
epsilon_map = self.sigma * np.random.normal(size=([self.num_basis] + list(self.input_shape)))
#print(epsilon_map.shape)
#print(epsilon_map.reshape(self.num_basis, -1).shape)
grads = []
for i in range(len(x)):
#print('i', i)
minus, plus = self._generate_samples(x[i : i + 1], epsilon_map)
#print('shape', minus.shape, plus.shape)
# Vectorized; small tests weren't faster
# ent_vec = np.vectorize(lambda p: entropy(y[i], p), signature='(n)->()')
# new_y_minus = ent_vec(self.predict(minus))
# new_y_plus = ent_vec(self.predict(plus))
# Vanilla
new_y_minus = np.array([entropy(y[i], p) for p in self.predict(minus, batch_size=4000)])
new_y_plus = np.array([entropy(y[i], p) for p in self.predict(plus, batch_size=4000)])
#print('term1 shape', epsilon_map.reshape(self.num_basis, -1).shape)
#print('term2 shape', ((new_y_plus - new_y_minus).reshape(self.num_basis, -1) / (2 * self.sigma)).shape)
query_efficient_grad = 2 * np.mean(
np.multiply(
epsilon_map.reshape(self.num_basis, -1),
(new_y_plus - new_y_minus).reshape(self.num_basis, -1) / (2 * self.sigma),
).reshape([-1] + list(self.input_shape)),
axis=0,
)
grads.append(query_efficient_grad)
grads = self._apply_preprocessing_gradient(x, np.array(grads))
#print('old grads', grads)
#print('new grads', new_grads)
#print('equal', grads == new_grads)
return grads
def get_activations(self, x: np.ndarray, layer: Union[int, str], batch_size: int) -> np.ndarray:
"""
Return the output of the specified layer for input `x`. `layer` is specified by layer index (between 0 and
`nb_layers - 1`) or by name. The number of layers can be determined by counting the results returned by
calling `layer_names`.
:param x: Input for computing the activations.
:param layer: Layer for computing the activations.
:param batch_size: Size of batches.
:return: The output of `layer`, where the first dimension is the batch size corresponding to `x`.
"""
raise NotImplementedError
def save(self, filename: str, path: Optional[str] = None) -> None:
"""
Save a model to file specific to the backend framework.
:param filename: Name of the file where to save the model.
:param path: Path of the directory where to save the model. If no path is specified, the model will be stored in
the default data location of ART at `ART_DATA_PATH`.
"""
raise NotImplementedError
|
[
"scipy.stats.entropy",
"numpy.split",
"numpy.array",
"numpy.squeeze",
"art.utils.clip_and_round",
"logging.getLogger",
"numpy.repeat"
] |
[((1598, 1625), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1615, 1625), False, 'import logging\n'), ((6455, 6527), 'art.utils.clip_and_round', 'clip_and_round', (['(x - epsilon_map[i])', 'self.clip_values', 'self.round_samples'], {}), '(x - epsilon_map[i], self.clip_values, self.round_samples)\n', (6469, 6527), False, 'from art.utils import clip_and_round\n'), ((6590, 6662), 'art.utils.clip_and_round', 'clip_and_round', (['(x + epsilon_map[i])', 'self.clip_values', 'self.round_samples'], {}), '(x + epsilon_map[i], self.clip_values, self.round_samples)\n', (6604, 6662), False, 'from art.utils import clip_and_round\n'), ((3849, 3904), 'art.utils.clip_and_round', 'clip_and_round', (['x', 'self.clip_values', 'self.round_samples'], {}), '(x, self.clip_values, self.round_samples)\n', (3863, 3904), False, 'from art.utils import clip_and_round\n'), ((9075, 9099), 'numpy.array', 'np.array', (['minuses_pluses'], {}), '(minuses_pluses)\n', (9083, 9099), True, 'import numpy as np\n'), ((9129, 9158), 'numpy.squeeze', 'np.squeeze', (['minuses_pluses', '(1)'], {}), '(minuses_pluses, 1)\n', (9139, 9158), True, 'import numpy as np\n'), ((9434, 9459), 'numpy.split', 'np.split', (['pm_preds', '(b + 1)'], {}), '(pm_preds, b + 1)\n', (9442, 9459), True, 'import numpy as np\n'), ((10124, 10139), 'numpy.array', 'np.array', (['grads'], {}), '(grads)\n', (10132, 10139), True, 'import numpy as np\n'), ((12810, 12825), 'numpy.array', 'np.array', (['grads'], {}), '(grads)\n', (12818, 12825), True, 'import numpy as np\n'), ((14768, 14783), 'numpy.array', 'np.array', (['grads'], {}), '(grads)\n', (14776, 14783), True, 'import numpy as np\n'), ((4968, 5004), 'numpy.repeat', 'np.repeat', (['x', 'self.num_basis'], {'axis': '(0)'}), '(x, self.num_basis, axis=0)\n', (4977, 5004), True, 'import numpy as np\n'), ((5135, 5171), 'numpy.repeat', 'np.repeat', (['x', 'self.num_basis'], {'axis': '(0)'}), '(x, self.num_basis, axis=0)\n', (5144, 5171), True, 'import numpy as np\n'), ((9590, 9604), 'numpy.split', 'np.split', (['r', '(2)'], {}), '(r, 2)\n', (9598, 9604), True, 'import numpy as np\n'), ((9794, 9811), 'scipy.stats.entropy', 'entropy', (['y[i]', 'mp'], {}), '(y[i], mp)\n', (9801, 9811), False, 'from scipy.stats import entropy\n'), ((9841, 9858), 'scipy.stats.entropy', 'entropy', (['y[i]', 'pp'], {}), '(y[i], pp)\n', (9848, 9858), False, 'from scipy.stats import entropy\n'), ((13974, 13990), 'scipy.stats.entropy', 'entropy', (['y[i]', 'p'], {}), '(y[i], p)\n', (13981, 13990), False, 'from scipy.stats import entropy\n'), ((14074, 14090), 'scipy.stats.entropy', 'entropy', (['y[i]', 'p'], {}), '(y[i], p)\n', (14081, 14090), False, 'from scipy.stats import entropy\n'), ((11388, 11417), 'scipy.stats.entropy', 'entropy', (['y[i]', 'minus_preds[i]'], {}), '(y[i], minus_preds[i])\n', (11395, 11417), False, 'from scipy.stats import entropy\n'), ((11455, 11479), 'scipy.stats.entropy', 'entropy', (['y[i]', 'plus_pred'], {}), '(y[i], plus_pred)\n', (11462, 11479), False, 'from scipy.stats import entropy\n')]
|
import numpy as np
import straxen
import tempfile
import os
import unittest
import shutil
import uuid
test_run_id_1T = '180423_1021'
class TestBasics(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
temp_folder = uuid.uuid4().hex
# Keep one temp dir because we don't want to download the data every time.
cls.tempdir = os.path.join(tempfile.gettempdir(), temp_folder)
assert not os.path.exists(cls.tempdir)
print("Downloading test data (if needed)")
st = straxen.contexts.demo()
cls.run_id = test_run_id_1T
cls.st = st
@classmethod
def tearDownClass(cls):
# Make sure to only cleanup this dir after we have done all the tests
if os.path.exists(cls.tempdir):
shutil.rmtree(cls.tempdir)
def test_run_selection(self):
st = self.st
# Ignore strax-internal warnings
st.set_context_config({'free_options': tuple(st.config.keys())})
run_df = st.select_runs(available='raw_records')
print(run_df)
run_id = run_df.iloc[0]['name']
assert run_id == test_run_id_1T
def test_processing(self):
st = self.st
df = st.get_df(self.run_id, 'event_info')
assert len(df) > 0
assert 'cs1' in df.columns
assert df['cs1'].sum() > 0
assert not np.all(np.isnan(df['x'].values))
def test_get_livetime_sec(self):
st = self.st
events = st.get_array(self.run_id, 'peaks')
straxen.get_livetime_sec(st, test_run_id_1T, things=events)
def test_mini_analysis(self):
@straxen.mini_analysis(requires=('raw_records',))
def count_rr(raw_records):
return len(raw_records)
n = self.st.count_rr(self.run_id)
assert n > 100
|
[
"uuid.uuid4",
"straxen.contexts.demo",
"tempfile.gettempdir",
"os.path.exists",
"numpy.isnan",
"straxen.get_livetime_sec",
"straxen.mini_analysis",
"shutil.rmtree"
] |
[((529, 552), 'straxen.contexts.demo', 'straxen.contexts.demo', ([], {}), '()\n', (550, 552), False, 'import straxen\n'), ((744, 771), 'os.path.exists', 'os.path.exists', (['cls.tempdir'], {}), '(cls.tempdir)\n', (758, 771), False, 'import os\n'), ((1514, 1573), 'straxen.get_livetime_sec', 'straxen.get_livetime_sec', (['st', 'test_run_id_1T'], {'things': 'events'}), '(st, test_run_id_1T, things=events)\n', (1538, 1573), False, 'import straxen\n'), ((1618, 1666), 'straxen.mini_analysis', 'straxen.mini_analysis', ([], {'requires': "('raw_records',)"}), "(requires=('raw_records',))\n", (1639, 1666), False, 'import straxen\n'), ((246, 258), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (256, 258), False, 'import uuid\n'), ((381, 402), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (400, 402), False, 'import tempfile\n'), ((436, 463), 'os.path.exists', 'os.path.exists', (['cls.tempdir'], {}), '(cls.tempdir)\n', (450, 463), False, 'import os\n'), ((785, 811), 'shutil.rmtree', 'shutil.rmtree', (['cls.tempdir'], {}), '(cls.tempdir)\n', (798, 811), False, 'import shutil\n'), ((1369, 1393), 'numpy.isnan', 'np.isnan', (["df['x'].values"], {}), "(df['x'].values)\n", (1377, 1393), True, 'import numpy as np\n')]
|
from __future__ import annotations
from asyncio.events import AbstractEventLoop, TimerHandle
from asyncio.futures import Future
from typing import Mapping
from safe_set_result import safe_set_result
import scrypted_sdk
import numpy as np
import re
import tflite_runtime.interpreter as tflite
from pycoral.utils.edgetpu import make_interpreter
from pycoral.utils.edgetpu import list_edge_tpus
from pycoral.utils.edgetpu import run_inference
from pycoral.adapters.common import input_size
from pycoral.adapters import detect
from PIL import Image
import common
import io
import gstreamer
import json
import asyncio
import time
import os
import binascii
from urllib.parse import urlparse
from gi.repository import Gst
import multiprocessing
from third_party.sort import Sort
from scrypted_sdk.types import FFMpegInput, Lock, MediaObject, ObjectDetection, ObjectDetectionModel, ObjectDetectionResult, ObjectDetectionSession, OnOff, ObjectsDetected, ScryptedInterface, ScryptedMimeTypes
def parse_label_contents(contents: str):
lines = contents.splitlines()
ret = {}
for row_number, content in enumerate(lines):
pair = re.split(r'[:\s]+', content.strip(), maxsplit=1)
if len(pair) == 2 and pair[0].strip().isdigit():
ret[int(pair[0])] = pair[1].strip()
else:
ret[row_number] = content.strip()
return ret
class DetectionSession:
id: str
timerHandle: TimerHandle
future: Future
loop: AbstractEventLoop
score_threshold: float
running: bool
def __init__(self) -> None:
self.timerHandle = None
self.future = Future()
self.tracker = Sort()
self.running = False
def cancel(self):
if self.timerHandle:
self.timerHandle.cancel()
self.timerHandle = None
def timedOut(self):
safe_set_result(self.future)
def setTimeout(self, duration: float):
self.cancel()
self.loop.call_later(duration, lambda: self.timedOut())
class CoralPlugin(scrypted_sdk.ScryptedDeviceBase, ObjectDetection):
detection_sessions: Mapping[str, DetectionSession] = {}
session_mutex = multiprocessing.Lock()
def __init__(self, nativeId: str | None = None):
super().__init__(nativeId=nativeId)
labels_contents = scrypted_sdk.zip.open(
'fs/coco_labels.txt').read().decode('utf8')
self.labels = parse_label_contents(labels_contents)
edge_tpus = list_edge_tpus()
if len(edge_tpus):
model = scrypted_sdk.zip.open(
'fs/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite').read()
self.interpreter = make_interpreter(model)
else:
model = scrypted_sdk.zip.open(
'fs/mobilenet_ssd_v2_coco_quant_postprocess.tflite').read()
self.interpreter = tflite.Interpreter(model_content=model)
self.interpreter.allocate_tensors()
self.mutex = multiprocessing.Lock()
async def getInferenceModels(self) -> list[ObjectDetectionModel]:
ret = list[ObjectDetectionModel]()
_, height, width, channels = self.interpreter.get_input_details()[
0]['shape']
d = {
'id': 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu',
'name': '<NAME>',
'classes': list(self.labels.values()),
'inputShape': [int(width), int(height), int(channels)],
}
ret.append(d)
return ret
def create_detection_result(self, objs, size, tracker: Sort = None):
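        # Convert pycoral detections into Scrypted detection results; when a SORT
        # tracker is supplied, feed it [x1, y1, x2, y2, score] rows and match each
        # returned track back to the detection with the largest overlap so a stable
        # track id can be attached.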
detections = list[ObjectDetectionResult]()
detection_result: ObjectsDetected = {}
detection_result['detections'] = detections
detection_result['inputDimensions'] = size
tracker_detections = []
for obj in objs:
element = [] # np.array([])
element.append(obj.bbox.xmin)
element.append(obj.bbox.ymin)
element.append(obj.bbox.xmax)
element.append(obj.bbox.ymax)
element.append(obj.score) # print('element= ',element)
tracker_detections.append(element)
tracker_detections = np.array(tracker_detections)
trdata = []
trackerFlag = False
if tracker and tracker_detections.any():
trdata = tracker.update(tracker_detections)
trackerFlag = True
if trackerFlag and (np.array(trdata)).size:
for td in trdata:
x0, y0, x1, y1, trackID = td[0].item(), td[1].item(
), td[2].item(), td[3].item(), td[4].item()
overlap = 0
for ob in objs:
dx0, dy0, dx1, dy1 = ob.bbox.xmin, ob.bbox.ymin, ob.bbox.xmax, ob.bbox.ymax
area = (min(dx1, x1)-max(dx0, x0)) * \
(min(dy1, y1)-max(dy0, y0))
if (area > overlap):
overlap = area
obj = ob
detection: ObjectDetectionResult = {}
detection['id'] = str(trackID)
detection['boundingBox'] = (
obj.bbox.xmin, obj.bbox.ymin, obj.bbox.ymax, obj.bbox.ymax)
detection['className'] = self.labels.get(obj.id, obj.id)
detection['score'] = obj.score
detections.append(detection)
else:
for obj in objs:
detection: ObjectDetectionResult = {}
detection['boundingBox'] = (
obj.bbox.xmin, obj.bbox.ymin, obj.bbox.ymax, obj.bbox.ymax)
detection['className'] = self.labels.get(obj.id, obj.id)
detection['score'] = obj.score
detections.append(detection)
return detection_result
def detection_event(self, detection_session: DetectionSession, detection_result: ObjectsDetected, event_buffer: bytes = None):
detection_result['detectionId'] = detection_session.id
detection_result['timestamp'] = int(time.time() * 1000)
asyncio.run_coroutine_threadsafe(self.onDeviceEvent(
ScryptedInterface.ObjectDetection.value, detection_result), loop=detection_session.loop)
def end_session(self, detection_session: DetectionSession):
print('detection ended', detection_session.id)
detection_session.cancel()
with self.session_mutex:
self.detection_sessions.pop(detection_session.id, None)
detection_result: ObjectsDetected = {}
detection_result['running'] = False
self.detection_event(detection_session, detection_result)
async def detectObjects(self, mediaObject: MediaObject, session: ObjectDetectionSession = None) -> ObjectsDetected:
score_threshold = -float('inf')
duration = None
detection_id = None
if session:
detection_id = session.get('detectionId', -float('inf'))
duration = session.get('duration', None)
score_threshold = session.get('minScore', score_threshold)
is_image = mediaObject and mediaObject.mimeType.startswith('image/')
with self.session_mutex:
if not is_image and not detection_id:
detection_id = binascii.b2a_hex(os.urandom(15)).decode('utf8')
if detection_id:
detection_session = self.detection_sessions.get(detection_id, None)
if not duration and not is_image:
if detection_session:
self.end_session(detection_session)
return
elif detection_id and not detection_session:
if not mediaObject:
raise Exception(
'session %s inactive and no mediaObject provided' % detection_id)
detection_session = DetectionSession()
detection_session.id = detection_id
detection_session.score_threshold = score_threshold
loop = asyncio.get_event_loop()
detection_session.loop = loop
self.detection_sessions[detection_id] = detection_session
detection_session.future.add_done_callback(
lambda _: self.end_session(detection_session))
if is_image:
stream = io.BytesIO(bytes(await scrypted_sdk.mediaManager.convertMediaObjectToBuffer(mediaObject, 'image/jpeg')))
image = Image.open(stream)
_, scale = common.set_resized_input(
self.interpreter, image.size, lambda size: image.resize(size, Image.ANTIALIAS))
tracker = None
if detection_session:
tracker = detection_session.tracker
with self.mutex:
self.interpreter.invoke()
objs = detect.get_objects(
self.interpreter, score_threshold=score_threshold, image_scale=scale)
return self.create_detection_result(objs, image.size, tracker = tracker)
new_session = not detection_session.running
if new_session:
detection_session.running = True
detection_session.setTimeout(duration / 1000)
if not new_session:
return
print('detection starting', detection_id)
b = await scrypted_sdk.mediaManager.convertMediaObjectToBuffer(mediaObject, ScryptedMimeTypes.MediaStreamUrl.value)
s = b.decode('utf8')
j: FFMpegInput = json.loads(s)
container = j['container']
videofmt = 'raw'
videosrc = j['url']
if container == 'mpegts' and videosrc.startswith('tcp://'):
parsed_url = urlparse(videosrc)
videofmt = 'gst'
videosrc = 'tcpclientsrc port=%s host=%s ! tsdemux' % (
parsed_url.port, parsed_url.hostname)
size = j['mediaStreamOptions']['video']
inference_size = input_size(self.interpreter)
width, height = inference_size
w, h = (size['width'], size['height'])
scale = min(width / w, height / h)
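# scale: the smaller of the width/height ratios between the model's inference size and the stream size; passed to detect.get_objects as image_scale below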
def user_callback(input_tensor, src_size, inference_box):
with self.mutex:
run_inference(self.interpreter, input_tensor)
objs = detect.get_objects(
self.interpreter, score_threshold=score_threshold, image_scale=(scale, scale))
# (result, mapinfo) = input_tensor.map(Gst.MapFlags.READ)
try:
detection_result = self.create_detection_result(objs,
src_size, detection_session.tracker)
# self.detection_event(detection_session, detection_result, mapinfo.data.tobytes())
self.detection_event(detection_session, detection_result)
if not session or not duration:
safe_set_result(detection_session.future)
finally:
# input_tensor.unmap(mapinfo)
pass
pipeline = gstreamer.run_pipeline(detection_session.future, user_callback,
src_size=(
size['width'], size['height']),
appsink_size=inference_size,
videosrc=videosrc,
videofmt=videofmt)
task = pipeline.run()
asyncio.ensure_future(task)
detection_result: ObjectsDetected = {}
detection_result['detectionId'] = detection_id
detection_result['running'] = True
return detection_result
def create_scrypted_plugin():
return CoralPlugin()
#
|
[
"pycoral.utils.edgetpu.make_interpreter",
"multiprocessing.Lock",
"pycoral.utils.edgetpu.run_inference",
"urllib.parse.urlparse",
"safe_set_result.safe_set_result",
"scrypted_sdk.mediaManager.convertMediaObjectToBuffer",
"json.loads",
"pycoral.adapters.common.input_size",
"asyncio.ensure_future",
"gstreamer.run_pipeline",
"tflite_runtime.interpreter.Interpreter",
"os.urandom",
"pycoral.utils.edgetpu.list_edge_tpus",
"asyncio.get_event_loop",
"pycoral.adapters.detect.get_objects",
"third_party.sort.Sort",
"asyncio.futures.Future",
"scrypted_sdk.zip.open",
"PIL.Image.open",
"time.time",
"numpy.array"
] |
[((2148, 2170), 'multiprocessing.Lock', 'multiprocessing.Lock', ([], {}), '()\n', (2168, 2170), False, 'import multiprocessing\n'), ((1611, 1619), 'asyncio.futures.Future', 'Future', ([], {}), '()\n', (1617, 1619), False, 'from asyncio.futures import Future\n'), ((1643, 1649), 'third_party.sort.Sort', 'Sort', ([], {}), '()\n', (1647, 1649), False, 'from third_party.sort import Sort\n'), ((1838, 1866), 'safe_set_result.safe_set_result', 'safe_set_result', (['self.future'], {}), '(self.future)\n', (1853, 1866), False, 'from safe_set_result import safe_set_result\n'), ((2454, 2470), 'pycoral.utils.edgetpu.list_edge_tpus', 'list_edge_tpus', ([], {}), '()\n', (2468, 2470), False, 'from pycoral.utils.edgetpu import list_edge_tpus\n'), ((2945, 2967), 'multiprocessing.Lock', 'multiprocessing.Lock', ([], {}), '()\n', (2965, 2967), False, 'import multiprocessing\n'), ((4153, 4181), 'numpy.array', 'np.array', (['tracker_detections'], {}), '(tracker_detections)\n', (4161, 4181), True, 'import numpy as np\n'), ((9435, 9448), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (9445, 9448), False, 'import json\n'), ((9874, 9902), 'pycoral.adapters.common.input_size', 'input_size', (['self.interpreter'], {}), '(self.interpreter)\n', (9884, 9902), False, 'from pycoral.adapters.common import input_size\n'), ((10985, 11167), 'gstreamer.run_pipeline', 'gstreamer.run_pipeline', (['detection_session.future', 'user_callback'], {'src_size': "(size['width'], size['height'])", 'appsink_size': 'inference_size', 'videosrc': 'videosrc', 'videofmt': 'videofmt'}), "(detection_session.future, user_callback, src_size=(\n size['width'], size['height']), appsink_size=inference_size, videosrc=\n videosrc, videofmt=videofmt)\n", (11007, 11167), False, 'import gstreamer\n'), ((11411, 11438), 'asyncio.ensure_future', 'asyncio.ensure_future', (['task'], {}), '(task)\n', (11432, 11438), False, 'import asyncio\n'), ((2656, 2679), 'pycoral.utils.edgetpu.make_interpreter', 'make_interpreter', (['model'], {}), '(model)\n', (2672, 2679), False, 'from pycoral.utils.edgetpu import make_interpreter\n'), ((2840, 2879), 'tflite_runtime.interpreter.Interpreter', 'tflite.Interpreter', ([], {'model_content': 'model'}), '(model_content=model)\n', (2858, 2879), True, 'import tflite_runtime.interpreter as tflite\n'), ((8411, 8429), 'PIL.Image.open', 'Image.open', (['stream'], {}), '(stream)\n', (8421, 8429), False, 'from PIL import Image\n'), ((9275, 9384), 'scrypted_sdk.mediaManager.convertMediaObjectToBuffer', 'scrypted_sdk.mediaManager.convertMediaObjectToBuffer', (['mediaObject', 'ScryptedMimeTypes.MediaStreamUrl.value'], {}), '(mediaObject,\n ScryptedMimeTypes.MediaStreamUrl.value)\n', (9327, 9384), False, 'import scrypted_sdk\n'), ((9630, 9648), 'urllib.parse.urlparse', 'urlparse', (['videosrc'], {}), '(videosrc)\n', (9638, 9648), False, 'from urllib.parse import urlparse\n'), ((4395, 4411), 'numpy.array', 'np.array', (['trdata'], {}), '(trdata)\n', (4403, 4411), True, 'import numpy as np\n'), ((6008, 6019), 'time.time', 'time.time', ([], {}), '()\n', (6017, 6019), False, 'import time\n'), ((8785, 8877), 'pycoral.adapters.detect.get_objects', 'detect.get_objects', (['self.interpreter'], {'score_threshold': 'score_threshold', 'image_scale': 'scale'}), '(self.interpreter, score_threshold=score_threshold,\n image_scale=scale)\n', (8803, 8877), False, 'from pycoral.adapters import detect\n'), ((10144, 10189), 'pycoral.utils.edgetpu.run_inference', 'run_inference', (['self.interpreter', 'input_tensor'], {}), '(self.interpreter, input_tensor)\n', 
(10157, 10189), False, 'from pycoral.utils.edgetpu import run_inference\n'), ((10213, 10314), 'pycoral.adapters.detect.get_objects', 'detect.get_objects', (['self.interpreter'], {'score_threshold': 'score_threshold', 'image_scale': '(scale, scale)'}), '(self.interpreter, score_threshold=score_threshold,\n image_scale=(scale, scale))\n', (10231, 10314), False, 'from pycoral.adapters import detect\n'), ((2518, 2605), 'scrypted_sdk.zip.open', 'scrypted_sdk.zip.open', (['"""fs/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite"""'], {}), "(\n 'fs/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite')\n", (2539, 2605), False, 'import scrypted_sdk\n'), ((2714, 2788), 'scrypted_sdk.zip.open', 'scrypted_sdk.zip.open', (['"""fs/mobilenet_ssd_v2_coco_quant_postprocess.tflite"""'], {}), "('fs/mobilenet_ssd_v2_coco_quant_postprocess.tflite')\n", (2735, 2788), False, 'import scrypted_sdk\n'), ((7970, 7994), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (7992, 7994), False, 'import asyncio\n'), ((10835, 10876), 'safe_set_result.safe_set_result', 'safe_set_result', (['detection_session.future'], {}), '(detection_session.future)\n', (10850, 10876), False, 'from safe_set_result import safe_set_result\n'), ((2295, 2338), 'scrypted_sdk.zip.open', 'scrypted_sdk.zip.open', (['"""fs/coco_labels.txt"""'], {}), "('fs/coco_labels.txt')\n", (2316, 2338), False, 'import scrypted_sdk\n'), ((8309, 8388), 'scrypted_sdk.mediaManager.convertMediaObjectToBuffer', 'scrypted_sdk.mediaManager.convertMediaObjectToBuffer', (['mediaObject', '"""image/jpeg"""'], {}), "(mediaObject, 'image/jpeg')\n", (8361, 8388), False, 'import scrypted_sdk\n'), ((7242, 7256), 'os.urandom', 'os.urandom', (['(15)'], {}), '(15)\n', (7252, 7256), False, 'import os\n')]
|
r"""
Monte Carlo vs Black-Scholes-Merton
===========================================
Time values of options and guarantees for various levels of in-the-moneyness
are calculated using Monte Carlo simulations and the Black-Scholes-Merton
pricing formula for European put options.
The Black-Scholes-Merton pricing formula for European put options
can be expressed as below, where
:math:`X` and :math:`S_{0}` correspond to the sum assured
and the initial account value in this example.
.. math::
p=Xe^{-rT}N\left(-d_{2}\right)-S_{0}N\left(-d_{1}\right)
d_{1}=\frac{\ln\left(\frac{S_{0}}{X}\right)+\left(r+\frac{\sigma^{2}}{2}\right)T}{\sigma\sqrt{T}}
d_{2}=d_{1}-\sigma\sqrt{T}
The graph below shows the results obtained from
the Monte Carlo simulations with 10,000 risk neutral scenarios,
and from the Black-Scholes-Merton formula.
Reference: *Options, Futures, and Other Derivatives* by <NAME>
.. seealso::
* :doc:`/libraries/notebooks/savings/savings_example1` notebook in the :mod:`~savings` library
"""
import modelx as mx
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import norm, lognorm
import numpy as np
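# Illustrative only (not part of the original example): the closed-form Black-Scholes-Merton
# put price from the docstring above, using the norm already imported; argument names are assumptions.
# def bsm_put(S0, X, r, sigma, T):
#     d1 = (np.log(S0 / X) + (r + 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
#     d2 = d1 - sigma * np.sqrt(T)
#     return X * np.exp(-r * T) * norm.cdf(-d2) - S0 * norm.cdf(-d1)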
model = mx.read_model("CashValue_ME_EX1")
proj = model.Projection
proj.model_point_table = proj.model_point_moneyness
monte_carlo = pd.Series(proj.pv_claims_over_av('MATURITY'), index=proj.model_point().index)
monte_carlo = list(np.average(monte_carlo[i]) for i in range(1, 10))
S0 = proj.model_point_table['premium_pp'] * proj.model_point_table['policy_count']
fig, ax = plt.subplots()
ax.scatter(S0, monte_carlo, s=10, alpha=1, label='Monte Carlo')
ax.scatter(S0, proj.formula_option_put(120), alpha=0.5, label='Black-Scholes-Merton')
ax.legend()
ax.grid(True)
fig.suptitle('TVOG by ITM')
|
[
"numpy.average",
"matplotlib.pyplot.subplots",
"modelx.read_model"
] |
[((1161, 1194), 'modelx.read_model', 'mx.read_model', (['"""CashValue_ME_EX1"""'], {}), "('CashValue_ME_EX1')\n", (1174, 1194), True, 'import modelx as mx\n'), ((1531, 1545), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1543, 1545), True, 'import matplotlib.pyplot as plt\n'), ((1382, 1408), 'numpy.average', 'np.average', (['monte_carlo[i]'], {}), '(monte_carlo[i])\n', (1392, 1408), True, 'import numpy as np\n')]
|
import os
import numpy as np
from sst import Fisher
from sst import camb_tools as ct
from sst import plot_tools
opj = os.path.join
def get_cls(cls_path, lmax, A_lens=1):
'''
returns
-------
cls : array-like
Lensed Cls (shape (4,lmax-1) with BB lensing power
reduced depending on A_lens.
order: TT, EE, BB, TE
'''
cls_nolens, _ = ct.get_spectra(cls_path, tag='r0',
lensed=False, prim_type='tot')
cls_lensed, _ = ct.get_spectra(cls_path, tag='r0',
lensed=True, prim_type='tot')
# truncate to lmax
cls_nolens = cls_nolens[:,:lmax-1]
cls_lensed = cls_lensed[:,:lmax-1]
BB_nolens = cls_nolens[2]
BB_lensed = cls_lensed[2]
# difference BB (lensed - unlensed = lens_contribution)
BB_lens_contr = BB_lensed - BB_nolens
# depending on A_lens, remove lensing contribution
cls_lensed[2] -= (1. - A_lens) * BB_lens_contr
return cls_lensed
def get_nls(lat_path, lmax, sac_path=None,
deproj_level=0):
'''
Arguments
-----------------
lat_path : str
Path to folder containing LAT noise curves
lmax : int
Keyword Arguments
-----------------
sac_path : str, None
Path to folder containing SAC noise curves
deproj_level : int
Foreground cleaning assumption, 0 - 4
0 is most optimistic
Returns
-------
nls : array-like
Shape (6, lmax - 1), order: TT, EE, BB, TE, TB, EB
Notes
-----
Looks like SAC noise curves are only for pol, so use
SAT TT for TT.
'''
# Add option to skip SAT.
# SO V3 (deproj0, S2(goal) 16000 deg2
# init noise curves (fill with 1K^2 noise)
# truncate later
nls = np.ones((6, 20000)) * 1e12
# load up LAT
# lat_tt_file = 'S4_2LAT_T_default_noisecurves_'\
# 'deproj{}_SENS0_mask_16000_ell_TT_yy.txt'.format(deproj_level) # NOTE
lat_tt_file = 'SOV3_T_default1-4-2_noisecurves_'\
'deproj{}_SENS2_mask_16000_ell_TT_yy.txt'.format(deproj_level)
lat_pol_file = lat_tt_file.replace('_T_', '_pol_')
lat_pol_file = lat_pol_file.replace('_TT_yy', '_EE_BB')
lat_tt_file = opj(lat_path, lat_tt_file)
lat_pol_file = opj(lat_path, lat_pol_file)
# load lat
ells_tt, nl_tt, ells_pol, nl_ee, nl_bb = ct.get_so_noise(
tt_file=lat_tt_file, pol_file=lat_pol_file, sat_file=None)
lmin_tt = int(ells_tt[0])
lmax_tt = int(ells_tt[-1])
#lmin_pol = int(ells_pol[0])
lmin_pol = 30 # as suggested on wiki
lmax_pol = int(ells_pol[-1])
if sac_path is not None:
sac_file = 'Db_noise_04.00_ilc_bin3_av.dat'
sac_file = opj(sac_path, sac_file)
# load sac, note these are Dell bandpowers
ell, sac_ee, sac_bb = np.loadtxt(sac_file).transpose()
dell = ell * (ell + 1) / 2. / np.pi
sac_ee /= dell
sac_bb /= dell
# interpolate
lmin_sac = int(ell[0])
lmax_sac = int(ell[-1])
ell_f = np.arange(lmin_sac, lmax_sac+1)
sac_ee = np.interp(ell_f, ell, sac_ee)
sac_bb = np.interp(ell_f, ell, sac_bb)
# combine, first lat then (if needed) sac because lat has lower lmin
nls[0,lmin_tt - 2:lmax_tt - 1] = nl_tt
nls[1,lmin_pol - 2:lmax_pol - 1] = nl_ee[ells_pol >= lmin_pol]
nls[2,lmin_pol - 2:lmax_pol - 1] = nl_bb[ells_pol >= lmin_pol]
nls[3] *= 0.
nls[4] *= 0.
nls[5] *= 0.
if sac_path is not None:
nls[1,lmin_sac - 2:lmax_sac - 1] = sac_ee
nls[2,lmin_sac - 2:lmax_sac - 1] = sac_bb
# truncate to lmax
nls = nls[:,:lmax - 1]
return nls
def get_fiducial_nls(noise_amp_temp, noise_amp_pol, lmax):
'''
Create N_{\ell} = noise_amp^2 noise arrays.
Arguments
-----------------
noise_amp_temp : float
Noise amplitude in uK arcmin.
noise_amp_pol : float
lmax : int
Returns
-------
nls : array-like
Shape (6, lmax - 1), order: TT, EE, BB, TE, TB, EB
'''
# init noise curves (fill with 1K^2 noise)
# truncate later
nls = np.ones((6, 20000)) * 1e12
# N_{\ell} = uK^2 radians^2
arcmin2radians = np.pi / 180. / 60.
noise_amp_temp *= arcmin2radians
noise_amp_pol *= arcmin2radians
# combine, first lat then sac because lat has lower lmin
nls[0,:] = noise_amp_temp ** 2
nls[1,:] = noise_amp_pol ** 2
nls[2,:] = noise_amp_pol ** 2
nls[3] *= 0.
nls[4] *= 0.
nls[5] *= 0.
# truncate to lmax
nls = nls[:,:lmax - 1]
return nls
def get_prim_amp(prim_template='local', scalar_amp=2.1e-9):
common_amp = 16 * np.pi**4 * scalar_amp**2
if prim_template == 'local':
return 2 * common_amp
elif prim_template == 'equilateral':
return 6 * common_amp
elif prim_template == 'orthogonal':
return 6 * common_amp
def get_totcov(cls, nls, no_ee=False, no_tt=False):
totcov = nls.copy()
totcov[:4,:] += cls
if no_ee:
totcov[1,:] = 1e12
if no_tt:
totcov[0,:] = 1e12
return totcov
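# Hedged usage sketch (path and numbers below are placeholders, not taken from this script):
# cls = get_cls('/path/to/camb_output', lmax=4000, A_lens=1.)
# nls = get_fiducial_nls(noise_amp_temp=6., noise_amp_pol=6. * np.sqrt(2), lmax=4000)
# totcov = get_totcov(cls, nls)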
def run_fisher(template, ana_dir, camb_dir, totcov, ells, lmin=2, lmax=4999,fsky=0.03,
plot_tag='', tag=None):
F = Fisher(ana_dir)
camb_opts = dict(camb_out_dir=camb_dir,
tag='r0',
lensed=False,
high_ell=True)
F.get_camb_output(**camb_opts)
radii = F.get_updated_radii()
radii = radii[::2]
F.get_bins(lmin=lmin, lmax=lmax, load=True, verbose=False,
parity='odd', tag=tag)
# F.get_beta(func='equilateral', load=True, verbose=False, radii=radii, tag=tag)
F.get_beta(func='equilateral', load=True, verbose=True, radii=radii, tag=tag,
interp_factor=10)
# F.get_binned_bispec(template, load=True, tag=tag)
F.get_binned_bispec(template, load=True, tag=tag)
bin_invcov, bin_cov = F.get_binned_invcov(ells, totcov, return_bin_cov=True)
# Plot invcov, cov
plot_opts = dict(lmin=2)
bins = F.bins['bins']
plot_tools.cls_matrix(plot_tag, bins, bin_invcov, log=False, plot_dell=False,
inv=True, **plot_opts)
plot_tools.cls_matrix(plot_tag.replace('invcov', 'cov_dell'),
bins, bin_cov, log=False, plot_dell=True,
**plot_opts)
print(lmin, lmax)
fisher = F.naive_fisher(bin_invcov, lmin=lmin, lmax=lmax, fsky=fsky)
sigma = 1/np.sqrt(fisher)
return fisher, sigma
###### OLD
amp = get_prim_amp(template)
F.bispec['bispec'] *= amp
F.get_binned_invcov(nls=totcov)
bin_invcov = F.bin_invcov
bin_cov = F.bin_cov
bin_size = F.bins['bins'].size
bins = F.bins['bins']
num_pass = F.bins['num_pass_full']
bispec = F.bispec['bispec']
# Plot invcov, cov
plot_opts = dict(lmin=2)
plot_tools.cls_matrix(plot_tag, bins, bin_invcov, log=False, plot_dell=False,
inv=True, **plot_opts)
plot_tools.cls_matrix(plot_tag.replace('invcov', 'cov_dell'),
bins, bin_cov, log=False, plot_dell=True,
**plot_opts)
plot_tools.cls_matrix(plot_tag.replace('invcov', 'cov'),
bins, bin_cov, log=False, plot_dell=False,
**plot_opts)
# allocate bin-sized fisher matrix (same size as outer loop)
fisher_per_bin = np.ones(bin_size) * np.nan
# allocate 12 x 12 cov for use in inner loop
invcov = np.zeros((F.bispec['pol_trpl'].size, F.bispec['pol_trpl'].size))
# create (binned) inverse cov matrix for each ell
# i.e. use the fact that 12x12 pol invcov can be factored
# as (Cl-1)_l1^ip (Cl-1)_l2^jq (Cl-1)_l3^kr
invcov1 = np.ones((bin_size, 12, 12))
invcov2 = np.ones((bin_size, 12, 12))
invcov3 = np.ones((bin_size, 12, 12))
f_check = 0
for tidx_a, ptrp_a in enumerate(F.bispec['pol_trpl']):
# ptrp_a = ijk
for tidx_b, ptrp_b in enumerate(F.bispec['pol_trpl']):
# ptrp_a = pqr
# a is first bispectrum, b second one
# ptrp = pol triplet
ptrp_a1 = ptrp_a[0]
ptrp_a2 = ptrp_a[1]
ptrp_a3 = ptrp_a[2]
ptrp_b1 = ptrp_b[0]
ptrp_b2 = ptrp_b[1]
ptrp_b3 = ptrp_b[2]
invcov1[:,tidx_a,tidx_b] = bin_invcov[:,ptrp_a1,ptrp_b1]
invcov2[:,tidx_a,tidx_b] = bin_invcov[:,ptrp_a2,ptrp_b2]
invcov3[:,tidx_a,tidx_b] = bin_invcov[:,ptrp_a3,ptrp_b3]
# Depending on lmin, start outer loop not at first bin.
start_bidx = np.where(bins >= lmin)[0][0]
end_bidx = np.where(bins >= min(lmax, bins[-1]))[0][0] + 1
# loop same loop as in binned_bispectrum
for idx1, i1 in enumerate(bins[start_bidx:end_bidx]):
idx1 += start_bidx
cl1 = invcov1[idx1,:,:] # 12x12
# init
fisher_per_bin[idx1] = 0.
for idx2, i2 in enumerate(bins[idx1:end_bidx]):
idx2 += idx1
cl2 = invcov2[idx1,:,:] # 12x12
cl12 = cl1 * cl2
for idx3, i3 in enumerate(bins[idx2:end_bidx]):
idx3 += idx2
num = num_pass[idx1,idx2,idx3]
if num == 0:
continue
cl123 = cl12 * invcov3[idx3,:,:] #12x12
B = bispec[idx1,idx2,idx3,:]
f = np.einsum("i,ij,j", B, cl123, B)
# f0 = np.einsum("i,i", B, B)
# b0 = np.einsum("ij,ij", cl123, cl123)
# both B's have num
f /= float(num)
if i1 == i2 == i3:
f /= 6.
elif i1 != i2 != i3:
pass
else:
f /= 2.
fisher_per_bin[idx1] += f
f_check += f
fisher_per_bin *= fsky
f_check *= fsky
min_f = []
# print 'fisher_check:', f_check * (4*np.pi / np.sqrt(8))**2
# print 'sigma:', 1/np.sqrt(f_check) * (np.sqrt(8)/4./np.pi)
fisher_check = f_check * (4*np.pi / np.sqrt(8))**2
sigma = 1/np.sqrt(f_check) * (np.sqrt(8)/4./np.pi)
return fisher_check, sigma
# for lidx, lmin in enumerate(range(2, 40)):
# f = np.sum(fisher_per_bin[lmin-2:])
# min_f.append(np.sqrt(f))
if __name__ == '__main__':
# ana_dir = '/mn/stornext/d8/ITA/spider/adri/analysis/20181112_sst/' # S5
# ana_dir = '/mn/stornext/d8/ITA/spider/adri/analysis/20181123_sst/'
ana_dir = '/mn/stornext/d8/ITA/spider/adri/analysis/20181214_sst_debug/'
out_dir = opj(ana_dir, 'fisher')
camb_base = '/mn/stornext/d8/ITA/spider/adri/analysis/20171217_sst'
camb_dir = opj(camb_base, 'camb_output/high_acy/sparse_5000')
noise_base = '/mn/stornext/u3/adriaand/cmb_sst_ksw/ancillary/noise_curves'
# noise_base = '/mn/stornext/u3/adriaand/cmb_sst_ksw/ancillary/noise_curves/so/v3/so'
# lat_path = opj(noise_base, 's4/S4_2LAT_Tpol_default_noisecurves')
lat_path = opj(noise_base, 'so/v3/so')
# sac_path = noise_base
sac_path = None
# fixed
lmin = 2
# lmax = 4999
lmax = 4000 # has to match beta etc
lmax_f = 3000 # for fisher
lmin_f = 250
# A_lens = 0.13
A_lens = 1.
noise_amp_temp = 6.
noise_amp_pol = 6 * np.sqrt(2)
# NOTE
# noise_amp_temp = .0
# noise_amp_pol = .0 * np.sqrt(2)
opts = {}
# opts['nominal'] = dict(fsky=0.03, no_ee=False, no_tt=False, no_noise=False)
# opts['no_ee'] = dict(fsky=0.03, no_ee=True, no_tt=False, no_noise=False)
# opts['no_tt'] = dict(fsky=0.03, no_ee=False, no_tt=True, no_noise=False)
opts['nominal'] = dict(fsky=1., no_ee=False, no_tt=False, no_noise=False)
opts['no_ee'] = dict(fsky=1., no_ee=True, no_tt=False, no_noise=False)
opts['no_tt'] = dict(fsky=1., no_ee=False, no_tt=True, no_noise=False)
opts['cv_lim'] = dict(fsky=1., no_ee=False, no_tt=False, no_noise=True)
opts['no_ee_cv_lim'] = dict(fsky=1., no_ee=True, no_tt=False, no_noise=True)
opts['no_tt_cv_lim'] = dict(fsky=1., no_ee=False, no_tt=True, no_noise=True)
# for template in ['local', 'equilateral']:
for template in ['local']:
# with open(opj(out_dir, 'fisher_{}.txt'.format(template)), 'w') as text_file:
with open(opj(out_dir, 'fisher_so_{}.txt'.format(template)), 'w') as text_file:
for key in opts:
opt = opts[key]
no_noise = opt.get('no_noise')
fsky = opt.get('fsky')
no_ee = opt.get('no_ee')
no_tt = opt.get('no_tt')
cls = get_cls(camb_dir, lmax, A_lens=A_lens)
nls = get_nls(lat_path, lmax, sac_path=sac_path)
#nls = get_fiducial_nls(noise_amp_temp, noise_amp_pol, lmax)
if no_noise:
nls *= 0.
totcov = get_totcov(cls, nls, no_ee=no_ee, no_tt=no_tt)
ells = np.arange(2, lmax+1)
# plot_name = opj(out_dir, 'b_invcov_{}.png'.format(key))
plot_name = opj(out_dir, 'b_so_invcov_{}.png'.format(key))
# for template in ['local', 'equilateral', 'orthogonal']:
text_file.write('template: {}\n'.format(template))
text_file.write('option: {}\n'.format(key))
text_file.write('no_noise: {}\n'.format(no_noise))
text_file.write('fsky: {}\n'.format(fsky))
text_file.write('A_lens: {}\n'.format(A_lens))
text_file.write('no_ee: {}\n'.format(no_ee))
text_file.write('no_tt: {}\n'.format(no_tt))
fisher_check, sigma = run_fisher(template,
ana_dir, camb_dir, totcov, ells,
lmin=lmin_f, lmax=lmax_f, fsky=fsky,
plot_tag=plot_name, tag='r1_i10_b4')
text_file.write('fisher: {}\n'.format(fisher_check))
text_file.write('sigma: {}\n'.format(sigma))
text_file.write('\n')
|
[
"sst.camb_tools.get_so_noise",
"sst.plot_tools.cls_matrix",
"numpy.zeros",
"sst.Fisher",
"numpy.ones",
"numpy.einsum",
"numpy.where",
"numpy.arange",
"numpy.loadtxt",
"numpy.interp",
"sst.camb_tools.get_spectra",
"numpy.sqrt"
] |
[((387, 452), 'sst.camb_tools.get_spectra', 'ct.get_spectra', (['cls_path'], {'tag': '"""r0"""', 'lensed': '(False)', 'prim_type': '"""tot"""'}), "(cls_path, tag='r0', lensed=False, prim_type='tot')\n", (401, 452), True, 'from sst import camb_tools as ct\n'), ((502, 566), 'sst.camb_tools.get_spectra', 'ct.get_spectra', (['cls_path'], {'tag': '"""r0"""', 'lensed': '(True)', 'prim_type': '"""tot"""'}), "(cls_path, tag='r0', lensed=True, prim_type='tot')\n", (516, 566), True, 'from sst import camb_tools as ct\n'), ((2417, 2491), 'sst.camb_tools.get_so_noise', 'ct.get_so_noise', ([], {'tt_file': 'lat_tt_file', 'pol_file': 'lat_pol_file', 'sat_file': 'None'}), '(tt_file=lat_tt_file, pol_file=lat_pol_file, sat_file=None)\n', (2432, 2491), True, 'from sst import camb_tools as ct\n'), ((5317, 5332), 'sst.Fisher', 'Fisher', (['ana_dir'], {}), '(ana_dir)\n', (5323, 5332), False, 'from sst import Fisher\n'), ((6147, 6252), 'sst.plot_tools.cls_matrix', 'plot_tools.cls_matrix', (['plot_tag', 'bins', 'bin_invcov'], {'log': '(False)', 'plot_dell': '(False)', 'inv': '(True)'}), '(plot_tag, bins, bin_invcov, log=False, plot_dell=\n False, inv=True, **plot_opts)\n', (6168, 6252), False, 'from sst import plot_tools\n'), ((6963, 7068), 'sst.plot_tools.cls_matrix', 'plot_tools.cls_matrix', (['plot_tag', 'bins', 'bin_invcov'], {'log': '(False)', 'plot_dell': '(False)', 'inv': '(True)'}), '(plot_tag, bins, bin_invcov, log=False, plot_dell=\n False, inv=True, **plot_opts)\n', (6984, 7068), False, 'from sst import plot_tools\n'), ((7614, 7678), 'numpy.zeros', 'np.zeros', (["(F.bispec['pol_trpl'].size, F.bispec['pol_trpl'].size)"], {}), "((F.bispec['pol_trpl'].size, F.bispec['pol_trpl'].size))\n", (7622, 7678), True, 'import numpy as np\n'), ((7859, 7886), 'numpy.ones', 'np.ones', (['(bin_size, 12, 12)'], {}), '((bin_size, 12, 12))\n', (7866, 7886), True, 'import numpy as np\n'), ((7901, 7928), 'numpy.ones', 'np.ones', (['(bin_size, 12, 12)'], {}), '((bin_size, 12, 12))\n', (7908, 7928), True, 'import numpy as np\n'), ((7943, 7970), 'numpy.ones', 'np.ones', (['(bin_size, 12, 12)'], {}), '((bin_size, 12, 12))\n', (7950, 7970), True, 'import numpy as np\n'), ((1789, 1808), 'numpy.ones', 'np.ones', (['(6, 20000)'], {}), '((6, 20000))\n', (1796, 1808), True, 'import numpy as np\n'), ((3107, 3140), 'numpy.arange', 'np.arange', (['lmin_sac', '(lmax_sac + 1)'], {}), '(lmin_sac, lmax_sac + 1)\n', (3116, 3140), True, 'import numpy as np\n'), ((3156, 3185), 'numpy.interp', 'np.interp', (['ell_f', 'ell', 'sac_ee'], {}), '(ell_f, ell, sac_ee)\n', (3165, 3185), True, 'import numpy as np\n'), ((3203, 3232), 'numpy.interp', 'np.interp', (['ell_f', 'ell', 'sac_bb'], {}), '(ell_f, ell, sac_bb)\n', (3212, 3232), True, 'import numpy as np\n'), ((4194, 4213), 'numpy.ones', 'np.ones', (['(6, 20000)'], {}), '((6, 20000))\n', (4201, 4213), True, 'import numpy as np\n'), ((6556, 6571), 'numpy.sqrt', 'np.sqrt', (['fisher'], {}), '(fisher)\n', (6563, 6571), True, 'import numpy as np\n'), ((7524, 7541), 'numpy.ones', 'np.ones', (['bin_size'], {}), '(bin_size)\n', (7531, 7541), True, 'import numpy as np\n'), ((11430, 11440), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (11437, 11440), True, 'import numpy as np\n'), ((8724, 8746), 'numpy.where', 'np.where', (['(bins >= lmin)'], {}), '(bins >= lmin)\n', (8732, 8746), True, 'import numpy as np\n'), ((10245, 10261), 'numpy.sqrt', 'np.sqrt', (['f_check'], {}), '(f_check)\n', (10252, 10261), True, 'import numpy as np\n'), ((2882, 2902), 'numpy.loadtxt', 'np.loadtxt', (['sac_file'], {}), 
'(sac_file)\n', (2892, 2902), True, 'import numpy as np\n'), ((9514, 9546), 'numpy.einsum', 'np.einsum', (['"""i,ij,j"""', 'B', 'cl123', 'B'], {}), "('i,ij,j', B, cl123, B)\n", (9523, 9546), True, 'import numpy as np\n'), ((10216, 10226), 'numpy.sqrt', 'np.sqrt', (['(8)'], {}), '(8)\n', (10223, 10226), True, 'import numpy as np\n'), ((10265, 10275), 'numpy.sqrt', 'np.sqrt', (['(8)'], {}), '(8)\n', (10272, 10275), True, 'import numpy as np\n'), ((13117, 13139), 'numpy.arange', 'np.arange', (['(2)', '(lmax + 1)'], {}), '(2, lmax + 1)\n', (13126, 13139), True, 'import numpy as np\n')]
|
import weakref
import numpy as np
class Tree:
'''
Implementation of an N-ary tree.
The source code is adapted from https://github.com/lianemeth/forest/blob/master/forest/NaryTree.py
Parameters
----------
key: object
key of the node
num_branch: int
how many branches in each node
children: Iterable[Tree]
reference of the children
parent: Tree
reference of the parent node
Returns
-------
an N-ary tree.
'''
def __init__(self, key, num_branch, children=None, parent=None):
self.key = key
self.children = children or [None for _ in range(num_branch)]
self._parent = weakref.ref(parent) if parent else None
@property
def parent(self):
if self._parent:
return self._parent()
def __getstate__(self):
# drop the weakref to the parent (weakrefs cannot be pickled) and return the remaining state
self._parent = None
return self.__dict__
def __setstate__(self, state):
self.__dict__ = state
# restore parent references, skipping empty child slots
for child in self.children:
if child is not None:
child._parent = weakref.ref(self)
def traversal(self, visit=None, *args, **kwargs):
if visit is not None:
visit(self, *args, **kwargs)
l = [self]
for child in self.children:
if child is not None:
l += child.traversal(visit, *args, **kwargs)
return l
def tree_based_non_dominated_sort(F):
"""
Tree-based efficient non-dominated sorting (T-ENS).
This algorithm is very efficient in many-objective optimization problems (MaOPs).
Parameters
----------
F: np.array
objective values for each individual.
Returns
-------
indices of the individuals in each front.
References
----------
<NAME>, <NAME>, <NAME>, and <NAME>,
A decision variable clustering based evolutionary algorithm for large-scale many-objective optimization,
IEEE Transactions on Evolutionary Computation, 2018, 22(1): 97-112.
"""
N, M = F.shape
# sort the rows in F
indices = np.lexsort(F.T[::-1])
F = F[indices]
obj_seq = np.argsort(F[:, :0:-1], axis=1) + 1
k = 0
forest = []
left = np.full(N, True)
while np.any(left):
forest.append(None)
for p, flag in enumerate(left):
if flag:
update_tree(F, p, forest, k, left, obj_seq)
k += 1
# convert forest to fronts
fronts = [[] for _ in range(k)]
for k, tree in enumerate(forest):
fronts[k].extend([indices[node.key] for node in tree.traversal()])
return fronts
def update_tree(F, p, forest, k, left, obj_seq):
_, M = F.shape
if forest[k] is None:
forest[k] = Tree(key=p, num_branch=M - 1)
left[p] = False
elif check_tree(F, p, forest[k], obj_seq, True):
left[p] = False
def check_tree(F, p, tree, obj_seq, add_pos):
if tree is None:
return True
N, M = F.shape
# find the minimal index m such that F[p, obj_seq[tree.key, m]] < F[tree.key, obj_seq[tree.key, m]]
m = 0
while m < M - 1 and F[p, obj_seq[tree.key, m]] >= F[tree.key, obj_seq[tree.key, m]]:
m += 1
# if m not found
if m == M - 1:
# p is dominated by the solution at the root
return False
else:
for i in range(m + 1):
# p is dominated by a solution in the branch of the tree
if not check_tree(F, p, tree.children[i], obj_seq, i == m and add_pos):
return False
if tree.children[m] is None and add_pos:
# add p to the branch of the tree
tree.children[m] = Tree(key=p, num_branch=M - 1)
return True
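# Minimal usage sketch (shapes are assumptions, not from the original source):
# F = np.random.rand(12, 4)  # 12 individuals, 4 objectives to be minimized
# fronts = tree_based_non_dominated_sort(F)
# fronts[0] holds the indices of the non-dominated individuals, fronts[1] the next front, and so on.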
|
[
"numpy.full",
"numpy.lexsort",
"numpy.any",
"numpy.argsort",
"weakref.ref"
] |
[((1995, 2016), 'numpy.lexsort', 'np.lexsort', (['F.T[::-1]'], {}), '(F.T[::-1])\n', (2005, 2016), True, 'import numpy as np\n'), ((2127, 2143), 'numpy.full', 'np.full', (['N', '(True)'], {}), '(N, True)\n', (2134, 2143), True, 'import numpy as np\n'), ((2154, 2166), 'numpy.any', 'np.any', (['left'], {}), '(left)\n', (2160, 2166), True, 'import numpy as np\n'), ((2051, 2082), 'numpy.argsort', 'np.argsort', (['F[:, :0:-1]'], {'axis': '(1)'}), '(F[:, :0:-1], axis=1)\n', (2061, 2082), True, 'import numpy as np\n'), ((688, 707), 'weakref.ref', 'weakref.ref', (['parent'], {}), '(parent)\n', (699, 707), False, 'import weakref\n'), ((1011, 1028), 'weakref.ref', 'weakref.ref', (['self'], {}), '(self)\n', (1022, 1028), False, 'import weakref\n')]
|
import sys
import pickle
import numpy as np
sys.path.append('./../')
sys.path.append('./../../')
from src.LocalGlobalAttentionModel.model import Model as parent_model
from .vel_param import VelParam as vel_param
from src.HMC.hmc import HMC
class Model(parent_model):
"""
This class describes a model where fixations are drawn from the static saliency map
weighted by a Gaussian centered on the current fixation.
p(z_t|z_{t-1}) = s(z_t) * n(z_t|z_{t-1}, xi)
"""
def __init__(self, saliencies, xi):
super().__init__(saliencies)
self.xi = xi
self.gammas = None
def get_next_fix(self, im_ind, sub_ind, prev_fix, cur_fix, s_t):
"""
This method samples the next fixation given the current fixation from
p(z_t|z_{t-1}) = s(z_t) * n(z_t|z_{t-1}, xi).
Several arguments are unused here and are kept only for interface consistency.
:param im_ind: index of the current image
:param sub_ind: index of the current subject (unused in this model)
:param prev_fix: coordinates of the previous fixation (unused in this model)
:param cur_fix: coordinates of the current fixation
:param s_t: saliency time series (unused in this model)
:return: [z_x, z_y] coordinates of the next fixation location.
"""
xi_val = self.xi.value
mean = cur_fix
rad_rows = (self.rows_grid - mean[0]) ** 2
rad_cols = (self.cols_grid - mean[1]) ** 2
# normal distribution over the entire image
gauss = np.exp(- rad_rows / (2 * xi_val[0]) - rad_cols / (2 * xi_val[1])) / \
(2 * np.pi * np.sqrt(xi_val[0] * xi_val[1]))
prob = gauss * self.saliencies[im_ind]
prob /= prob.sum()
# chose a pixel in the image from the distribution defined above
inds = np.random.choice(range(self.pixels_num), 1,
p=prob.flatten()) # choice uses the inverse transform method in 1d
next_fix = np.unravel_index(inds, self.saliencies[im_ind].shape)
next_fix = np.array([next_fix[0][0], next_fix[1][0]])
return next_fix, 0
def generate_gammas(self):
"""
In this model gamma = 1 for each data point.
"""
self.gammas = []
for i in range(len(self.fix_dists_2)):
self.gammas.append([])
for s in range(len(self.fix_dists_2[i])):
self.gammas[-1].append(np.zeros(self.fix_dists_2[i][s].shape[1]))
def sample(self, num_samples, save_steps, file_path):
"""
This method generates samples from the posterior distribution of xi.
Since there is no explicit form for the posterior distribution of xi, an HMC sampler is used.
See paper for further information.
:param num_samples: number of samples to be generated.
:param save_steps: whether to save the chain
:param file_path: path where to save the chain
:return: list of length num_samples with samples of xi
"""
if not self.gammas:
self.generate_gammas()
vel = vel_param([0.1, 0.1])
delta = 1.5
n = 10
m = num_samples
# initiate an HMC instance
hmc = HMC(self.xi, vel, delta, n, m)
gammas_xi = [[self.gammas[i][s].copy() - 1] for i in range(len(self.gammas)) for s in
range(len(self.gammas[i]))]
# perform the sampling
hmc.HMC(gammas_xi, self.saliencies, self.fix_dists_2, self.dist_mat_per_fix)
samples_xi = hmc.get_samples()
if save_steps:
with open(file_path, 'wb') as f:
pickle.dump([samples_xi], f)
return samples_xi
def calc_prob_local(self, *args):
"""
This method calculates the probability of a local step, which is always 0 for this model.
:return: 0
"""
return 0
def calc_prob_global(self, im_ind, fixs_dists_2, sal_ts, fixs, for_nss=False):
"""
This method calculates the probability of a global step according to the local saliency model,
for an entire scanpath.
p(z_t|z_{t-1}) = s(z_t) * n(z_t|z_{t-1}, xi)
:param im_ind: index of the image
:param fixs_dists_2: an array of shape 3 x (T -1). see set_fix_dist_2 for description.
:param sal_ts: time series of the saliency value for each fixation. Array of length T.
:param fixs: fixation locations. Array of shape 2 x T
:param for_nss: whether to standardize the density for NSS or not.
:return: array of length T with the probability of each fixation
"""
xi = self.xi.value
radx = (self.rows_grid[:, :, np.newaxis] - fixs[im_ind][0][0, :-1]) ** 2
rady = (self.cols_grid[:, :, np.newaxis] - fixs[im_ind][0][1, :-1]) ** 2
gauss = np.exp(- radx / (2 * xi[0]) - rady / (2 * xi[1])) / (2 * np.pi * np.sqrt(xi[0] * xi[1]))
prob_all_pixels = gauss * self.saliencies[im_ind][:, :, np.newaxis]
if for_nss:
prob_global = prob_all_pixels / prob_all_pixels.sum(axis=(0, 1))
else:
# we assume here just one subject
sub = 0
X = fixs_dists_2[im_ind][sub]
nominator_gauss = np.exp(- 0.5 * X[0] / xi[0] - 0.5 * X[1] / xi[1]) / \
(2 * np.pi * np.sqrt(xi[0] * xi[1]))
nominator = nominator_gauss * sal_ts[im_ind][0][1:]
prob_global = nominator / prob_all_pixels.sum(axis=(0, 1))
return prob_global
def calc_ros(self, *args):
"""
This method calculates the probability of a local step. In this model it is always 0.
:return: 0
"""
return 0
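# Hedged usage sketch (inputs are assumptions; the fixation data required by the parent class
# must already be attached before sampling):
# model = Model(saliencies, xi)
# samples_xi = model.sample(num_samples=1000, save_steps=False, file_path=None)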
|
[
"sys.path.append",
"pickle.dump",
"numpy.zeros",
"numpy.unravel_index",
"numpy.array",
"numpy.exp",
"numpy.sqrt",
"src.HMC.hmc.HMC"
] |
[((46, 70), 'sys.path.append', 'sys.path.append', (['"""./../"""'], {}), "('./../')\n", (61, 70), False, 'import sys\n'), ((71, 98), 'sys.path.append', 'sys.path.append', (['"""./../../"""'], {}), "('./../../')\n", (86, 98), False, 'import sys\n'), ((1755, 1808), 'numpy.unravel_index', 'np.unravel_index', (['inds', 'self.saliencies[im_ind].shape'], {}), '(inds, self.saliencies[im_ind].shape)\n', (1771, 1808), True, 'import numpy as np\n'), ((1828, 1870), 'numpy.array', 'np.array', (['[next_fix[0][0], next_fix[1][0]]'], {}), '([next_fix[0][0], next_fix[1][0]])\n', (1836, 1870), True, 'import numpy as np\n'), ((2998, 3028), 'src.HMC.hmc.HMC', 'HMC', (['self.xi', 'vel', 'delta', 'n', 'm'], {}), '(self.xi, vel, delta, n, m)\n', (3001, 3028), False, 'from src.HMC.hmc import HMC\n'), ((1297, 1361), 'numpy.exp', 'np.exp', (['(-rad_rows / (2 * xi_val[0]) - rad_cols / (2 * xi_val[1]))'], {}), '(-rad_rows / (2 * xi_val[0]) - rad_cols / (2 * xi_val[1]))\n', (1303, 1361), True, 'import numpy as np\n'), ((4621, 4669), 'numpy.exp', 'np.exp', (['(-radx / (2 * xi[0]) - rady / (2 * xi[1]))'], {}), '(-radx / (2 * xi[0]) - rady / (2 * xi[1]))\n', (4627, 4669), True, 'import numpy as np\n'), ((1396, 1426), 'numpy.sqrt', 'np.sqrt', (['(xi_val[0] * xi_val[1])'], {}), '(xi_val[0] * xi_val[1])\n', (1403, 1426), True, 'import numpy as np\n'), ((3414, 3442), 'pickle.dump', 'pickle.dump', (['[samples_xi]', 'f'], {}), '([samples_xi], f)\n', (3425, 3442), False, 'import pickle\n'), ((4686, 4708), 'numpy.sqrt', 'np.sqrt', (['(xi[0] * xi[1])'], {}), '(xi[0] * xi[1])\n', (4693, 4708), True, 'import numpy as np\n'), ((5037, 5085), 'numpy.exp', 'np.exp', (['(-0.5 * X[0] / xi[0] - 0.5 * X[1] / xi[1])'], {}), '(-0.5 * X[0] / xi[0] - 0.5 * X[1] / xi[1])\n', (5043, 5085), True, 'import numpy as np\n'), ((2208, 2249), 'numpy.zeros', 'np.zeros', (['self.fix_dists_2[i][s].shape[1]'], {}), '(self.fix_dists_2[i][s].shape[1])\n', (2216, 2249), True, 'import numpy as np\n'), ((5134, 5156), 'numpy.sqrt', 'np.sqrt', (['(xi[0] * xi[1])'], {}), '(xi[0] * xi[1])\n', (5141, 5156), True, 'import numpy as np\n')]
|
import torch
import torch.nn as nn
import numpy as np
from itertools import combinations
import torch.nn.functional as F
def sigmoid(x):
return 1 / (1 + np.exp(-x))
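# cal_l2 below returns the total squared Euclidean distance: squared differences summed over the last (feature) dimension and then over the batch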
def cal_l2(x, y):
return torch.pow((x - y), 2).sum(-1).sum()
class ContrastiveLoss(nn.Module):
"""
Contrastive loss
Takes embeddings of two samples and a target label: 1 if the samples are from the same class, 0 otherwise.
"""
def __init__(self, margin):
super(ContrastiveLoss, self).__init__()
self.margin = margin
self.eps = 1e-9
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def forward(self, f_dic, B, N, size_average=True):
out = torch.zeros(1).to(self.device)
# Postive Samples Within Group Loss
# Assume the size of each feature is (B x N)
for kk in f_dic.keys():
# pdb.set_trace()
mat = f_dic[kk]
L = mat.size(0)
if L != 1:
mat_dup = mat.unsqueeze(0).expand(L, L, N)
batch_dup = mat.unsqueeze(1).expand(L, L, N)
distances = (mat_dup - batch_dup).pow(2).sum(dim=-1).sum()
out += (0.5 * distances / 6)
if len(f_dic) == 1:
pass
else:
for k1, k2 in list(combinations(f_dic, 2)):
b1 = len(f_dic[k1])
b2 = len(f_dic[k2])
for bb in range(b2):
# pdb.set_trace()
distances = cal_l2(f_dic[k1], f_dic[k2][bb].unsqueeze(0).expand(b1, N))/(b1+b2)
out += (0.5 * F.relu(self.margin - (distances + self.eps)).pow(2))
return out
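# Hedged usage sketch (shapes, margin and class keys are assumptions, not from the original repo):
# criterion = ContrastiveLoss(margin=1.0)
# f_dic = {0: torch.randn(4, 128), 1: torch.randn(3, 128)}  # per-class feature matrices of shape (B_k, N)
# loss = criterion(f_dic, B=7, N=128)  # tensors should live on the device the loss selects internally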
|
[
"itertools.combinations",
"torch.cuda.is_available",
"numpy.exp",
"torch.pow",
"torch.nn.functional.relu",
"torch.zeros"
] |
[((159, 169), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (165, 169), True, 'import numpy as np\n'), ((607, 632), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (630, 632), False, 'import torch\n'), ((724, 738), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (735, 738), False, 'import torch\n'), ((1333, 1355), 'itertools.combinations', 'combinations', (['f_dic', '(2)'], {}), '(f_dic, 2)\n', (1345, 1355), False, 'from itertools import combinations\n'), ((201, 220), 'torch.pow', 'torch.pow', (['(x - y)', '(2)'], {}), '(x - y, 2)\n', (210, 220), False, 'import torch\n'), ((1639, 1683), 'torch.nn.functional.relu', 'F.relu', (['(self.margin - (distances + self.eps))'], {}), '(self.margin - (distances + self.eps))\n', (1645, 1683), True, 'import torch.nn.functional as F\n')]
|
from ..testutils import BaseTestCase, compare_files, temp_files, regenerate_references
import unittest
import numpy as np
import pickle
import time
import warnings
import pygsti
from pygsti.extras import idletomography as idt
#Helper functions
#Global dicts describing how to prep and measure in various bases
prepDict = { 'X': ('Gy',), 'Y': ('Gx',)*3, 'Z': (),
'-X': ('Gy',)*3, '-Y': ('Gx',), '-Z': ('Gx','Gx')}
measDict = { 'X': ('Gy',)*3, 'Y': ('Gx',), 'Z': (),
'-X': ('Gy',), '-Y': ('Gx',)*3, '-Z': ('Gx','Gx')}
#Global switches for debugging
hamiltonian=True
stochastic=True
affine=True
#Mimics a function that used to be in pyGSTi, replaced with build_cloudnoise_model_from_hops_and_weights
def build_XYCNOT_cloudnoise_model(nQubits, geometry="line", cnot_edges=None,
maxIdleWeight=1, maxSpamWeight=1, maxhops=0,
extraWeight1Hops=0, extraGateWeight=0, sparse=False,
roughNoise=None, sim_type="matrix", parameterization="H+S",
spamtype="lindblad", addIdleNoiseToAllGates=True,
errcomp_type="gates", return_clouds=False, verbosity=0):
availability = {}; nonstd_gate_unitaries = {}
if cnot_edges is not None: availability['Gcnot'] = cnot_edges
return pygsti.construction.build_cloudnoise_model_from_hops_and_weights(
nQubits, ['Gx','Gy','Gcnot'], nonstd_gate_unitaries, None, availability,
None, geometry, maxIdleWeight, maxSpamWeight, maxhops,
extraWeight1Hops, extraGateWeight, sparse,
roughNoise, sim_type, parameterization,
spamtype, addIdleNoiseToAllGates,
errcomp_type, True, return_clouds, verbosity)
def get_fileroot(nQubits, maxMaxLen, errMag, spamMag, nSamples, simtype, idleErrorInFiducials):
return temp_files + "/idletomog_%dQ_maxLen%d_errMag%.5f_spamMag%.5f_%s_%s_%s" % \
(nQubits,maxMaxLen,errMag,spamMag,
"nosampleerr" if (nSamples == "inf") else ("%dsamples" % nSamples),
simtype, 'idleErrInFids' if idleErrorInFiducials else 'noIdleErrInFids')
def make_idle_tomography_data(nQubits, maxLengths=(0,1,2,4), errMags=(0.01,0.001), spamMag=0,
nSamplesList=(100,'inf'), simtype="map"):
base_param = []
if hamiltonian: base_param.append('H')
if stochastic: base_param.append('S')
if affine: base_param.append('A')
base_param = '+'.join(base_param)
parameterization = base_param+" terms" if simtype.startswith('termorder') else base_param # "H+S+A"
gateset_idleInFids = build_XYCNOT_cloudnoise_model(nQubits, "line", [], min(2,nQubits), 1,
sim_type=simtype, parameterization=parameterization,
roughNoise=None, addIdleNoiseToAllGates=True)
gateset_noIdleInFids = build_XYCNOT_cloudnoise_model(nQubits, "line", [], min(2,nQubits), 1,
sim_type=simtype, parameterization=parameterization,
roughNoise=None, addIdleNoiseToAllGates=False)
listOfExperiments = idt.make_idle_tomography_list(nQubits, maxLengths, (prepDict,measDict), maxweight=min(2,nQubits),
include_hamiltonian=hamiltonian, include_stochastic=stochastic, include_affine=affine)
base_vec = None
for errMag in errMags:
#ky = 'A(Z%s)' % ('I'*(nQubits-1)); debug_errdict = {ky: 0.01 }
#ky = 'A(ZZ%s)' % ('I'*(nQubits-2)); debug_errdict = {ky: 0.01 }
debug_errdict = {}
if base_vec is None:
rand_vec = idt.set_idle_errors(nQubits, gateset_idleInFids, debug_errdict, rand_default=errMag,
hamiltonian=hamiltonian, stochastic=stochastic, affine=affine)
base_vec = rand_vec / errMag
err_vec = base_vec * errMag # for different errMags just scale the *same* random rates
idt.set_idle_errors(nQubits, gateset_idleInFids, debug_errdict, rand_default=err_vec,
hamiltonian=hamiltonian, stochastic=stochastic, affine=affine)
idt.set_idle_errors(nQubits, gateset_noIdleInFids, debug_errdict, rand_default=err_vec,
hamiltonian=hamiltonian, stochastic=stochastic, affine=affine) # same errors for w/ and w/out idle fiducial error
for nSamples in nSamplesList:
if nSamples == 'inf':
sampleError = 'none'; Nsamp = 100
else:
sampleError = 'multinomial'; Nsamp = nSamples
ds_idleInFids = pygsti.construction.generate_fake_data(
gateset_idleInFids, listOfExperiments, nSamples=Nsamp,
sampleError=sampleError, seed=8675309)
fileroot = get_fileroot(nQubits, maxLengths[-1], errMag, spamMag, nSamples, simtype, True)
pickle.dump(gateset_idleInFids, open("%s_gs.pkl" % fileroot, "wb"))
pickle.dump(ds_idleInFids, open("%s_ds.pkl" % fileroot, "wb"))
print("Wrote fileroot ",fileroot)
ds_noIdleInFids = pygsti.construction.generate_fake_data(
gateset_noIdleInFids, listOfExperiments, nSamples=Nsamp,
sampleError=sampleError, seed=8675309)
fileroot = get_fileroot(nQubits, maxLengths[-1], errMag, spamMag, nSamples, simtype, False)
pickle.dump(gateset_noIdleInFids, open("%s_gs.pkl" % fileroot, "wb"))
pickle.dump(ds_noIdleInFids, open("%s_ds.pkl" % fileroot, "wb"))
#FROM DEBUGGING Python2 vs Python3 issue (ended up being an ordered-dict)
##pygsti.io.write_dataset("%s_ds_chk.txt" % fileroot, ds_noIdleInFids)
#chk = pygsti.io.load_dataset("%s_ds_chk.txt" % fileroot)
#for opstr,dsrow in ds_noIdleInFids.items():
# for outcome in dsrow.counts:
# cnt1, cnt2 = dsrow.counts.get(outcome,0.0),chk[opstr].counts.get(outcome,0.0)
# if not np.isclose(cnt1,cnt2):
# raise ValueError("NOT EQUAL: %s != %s" % (str(dsrow.counts), str(chk[opstr].counts)))
#print("EQUAL!")
print("Wrote fileroot ",fileroot)
def helper_idle_tomography(nQubits, maxLengths=(1,2,4), file_maxLen=4, errMag=0.01, spamMag=0, nSamples=100,
simtype="map", idleErrorInFiducials=True, fitOrder=1, fileroot=None):
if fileroot is None:
fileroot = get_fileroot(nQubits, file_maxLen, errMag, spamMag, nSamples, simtype, idleErrorInFiducials)
mdl_datagen = pickle.load(open("%s_gs.pkl" % fileroot, "rb"))
ds = pickle.load(open("%s_ds.pkl" % fileroot, "rb"))
#print("DB: ",ds[ ('Gi',) ])
#print("DB: ",ds[ ('Gi','Gi') ])
#print("DB: ",ds[ ((('Gx',0),('Gx',1)),(('Gx',0),('Gx',1)),'Gi',(('Gx',0),('Gx',1)),(('Gx',0),('Gx',1))) ])
advanced = {'fit order': fitOrder}
results = idt.do_idle_tomography(nQubits, ds, maxLengths, (prepDict,measDict), maxweight=min(2,nQubits),
advancedOptions=advanced, include_hamiltonian=hamiltonian,
include_stochastic=stochastic, include_affine=affine)
if hamiltonian: ham_intrinsic_rates = results.intrinsic_rates['hamiltonian']
if stochastic: sto_intrinsic_rates = results.intrinsic_rates['stochastic']
if affine: aff_intrinsic_rates = results.intrinsic_rates['affine']
maxErrWeight=2 # hardcoded for now
datagen_ham_rates, datagen_sto_rates, datagen_aff_rates = \
idt.predicted_intrinsic_rates(nQubits, maxErrWeight, mdl_datagen, hamiltonian, stochastic, affine)
print("Predicted HAM = ",datagen_ham_rates)
print("Predicted STO = ",datagen_sto_rates)
print("Predicted AFF = ",datagen_aff_rates)
print("Intrinsic HAM = ",ham_intrinsic_rates)
print("Intrinsic STO = ",sto_intrinsic_rates)
print("Intrinsic AFF = ",aff_intrinsic_rates)
ham_diff = sto_diff = aff_diff = [0] # so max()=0 below for types we exclude
if hamiltonian: ham_diff = np.abs(ham_intrinsic_rates - datagen_ham_rates)
if stochastic: sto_diff = np.abs(sto_intrinsic_rates - datagen_sto_rates)
if affine: aff_diff = np.abs(aff_intrinsic_rates - datagen_aff_rates)
print("Err labels:", [ x.rep for x in results.error_list])
if hamiltonian: print("Ham diffs:", ham_diff)
if stochastic: print("Sto diffs:", sto_diff)
#if stochastic:
# for x,y in zip(sto_intrinsic_rates,datagen_sto_rates):
# print(" %g <--> %g" % (x,y))
if affine: print("Aff diffs:", aff_diff)
print("%s\n MAX DIFFS: " % fileroot, max(ham_diff),max(sto_diff),max(aff_diff))
return max(ham_diff),max(sto_diff),max(aff_diff)
#OLD - leftover from when we put data into a pandas data frame
# #add hamiltonian data to df
# N = len(labels) # number of hamiltonian/stochastic rates
# data = pd.DataFrame({'nQubits': [nQubits]*N, 'maxL':[maxLengths[-1]]*N,
# 'errMag': [errMag]*N, 'spamMag': [spamMag]*N,
# 'nSamples': [nSamples]*N,
# 'simtype': [simtype]*N, 'type': ['hamiltonian']*N,
# 'true_val': datagen_ham_rates, 'estimate': ham_intrinsic_rates,
# 'diff': ham_intrinsic_rates - datagen_ham_rates, 'abs_diff': ham_diff,
# 'fitOrder': [fitOrder]*N, 'idleErrorInFiducials': [idleErrorInFiducials]*N })
# df = df.append(data, ignore_index=True)
# #add stochastic data to df
# data = pd.DataFrame({'nQubits': [nQubits]*N, 'maxL':[maxLengths[-1]]*N,
# 'errMag': [errMag]*N, 'spamMag': [spamMag]*N,
# 'nSamples': [nSamples]*N,
# 'simtype': [simtype]*N, 'type': ['stochastic']*N,
# 'true_val': datagen_sto_rates, 'estimate': sto_intrinsic_rates,
# 'diff': sto_intrinsic_rates - datagen_sto_rates,'abs_diff': sto_diff,
# 'fitOrder': [fitOrder]*N, 'idleErrorInFiducials': [idleErrorInFiducials]*N })
# df = df.append(data, ignore_index=True)
# return df
class IDTTestCase(BaseTestCase):
def test_idletomography_1Q(self):
nQ = 1
#make perfect data - using termorder:1 here means the data is not CPTP and
# therefore won't be in [0,1], and creating a data set with sampleError="none"
# means that probabilities *won't* be clipped to [0,1] - so we get really
# funky and unphysical data here, but data that idle tomography should be
# able to fit *exactly* (with any errMags, so be pick a big one).
make_idle_tomography_data(nQ, maxLengths=(0,1,2,4), errMags=(0.01,), spamMag=0,
nSamplesList=('inf',), simtype="termorder") # how specify order
# Note: no spam error, as accounting for this isn't build into idle tomography yet.
maxH, maxS, maxA = helper_idle_tomography(nQ, maxLengths=(1,2,4), file_maxLen=4,
errMag=0.01, spamMag=0, nSamples='inf',
idleErrorInFiducials=False, fitOrder=1, simtype="termorder") # how specify order
#Make sure exact identification of errors was possible
self.assertLess(maxH, 1e-6)
self.assertLess(maxS, 1e-6)
self.assertLess(maxA, 1e-6)
def test_idletomography_2Q(self):
#Same thing but for 2 qubits
nQ = 2
make_idle_tomography_data(nQ, maxLengths=(0,1,2,4), errMags=(0.01,), spamMag=0,
nSamplesList=('inf',), simtype="termorder") #How specify order?
maxH, maxS, maxA = helper_idle_tomography(nQ, maxLengths=(1,2,4), file_maxLen=4,
errMag=0.01, spamMag=0, nSamples='inf',
idleErrorInFiducials=False, fitOrder=1, simtype="termorder") # how specify order?
self.assertLess(maxH, 1e-6)
self.assertLess(maxS, 1e-6)
self.assertLess(maxA, 1e-6)
def test_idletomog_gstdata_std1Q(self):
from pygsti.modelpacks.legacy import std1Q_XYI as std
std = pygsti.construction.stdmodule_to_smqmodule(std)
maxLens = [1,2,4]
expList = pygsti.construction.make_lsgst_experiment_list(std.target_model(), std.prepStrs,
std.effectStrs, std.germs_lite, maxLens)
ds = pygsti.construction.generate_fake_data(std.target_model().depolarize(0.01, 0.01),
expList, 1000, 'multinomial', seed=1234)
result = pygsti.do_long_sequence_gst(ds, std.target_model(), std.prepStrs, std.effectStrs, std.germs_lite, maxLens, verbosity=3)
#standard report will run idle tomography
pygsti.report.create_standard_report(result, temp_files + "/gstWithIdleTomogTestReportStd1Q",
"Test GST Report w/Idle Tomography Tab: StdXYI",
verbosity=3, auto_open=False)
def test_idletomog_gstdata_1Qofstd2Q(self):
# perform idle tomography on first qubit of 2Q
from pygsti.modelpacks.legacy import std2Q_XYICNOT as std2Q
from pygsti.modelpacks.legacy import std1Q_XYI as std
std2Q = pygsti.construction.stdmodule_to_smqmodule(std2Q)
std = pygsti.construction.stdmodule_to_smqmodule(std)
maxLens = [1,2,4]
expList = pygsti.construction.make_lsgst_experiment_list(std2Q.target_model(), std2Q.prepStrs,
std2Q.effectStrs, std2Q.germs_lite, maxLens)
mdl_datagen = std2Q.target_model().depolarize(0.01, 0.01)
ds2Q = pygsti.construction.generate_fake_data(mdl_datagen, expList, 1000, 'multinomial', seed=1234)
#Just analyze first qubit (qubit 0)
ds = pygsti.construction.filter_dataset(ds2Q, (0,))
start = std.target_model()
start.set_all_parameterizations("TP")
result = pygsti.do_long_sequence_gst(ds, start, std.prepStrs[0:4], std.effectStrs[0:4],
std.germs_lite, maxLens, verbosity=3, advancedOptions={'objective': 'chi2'})
#result = pygsti.do_model_test(start.depolarize(0.009,0.009), ds, std.target_model(), std.prepStrs[0:4],
# std.effectStrs[0:4], std.germs_lite, maxLens)
pygsti.report.create_standard_report(result, temp_files + "/gstWithIdleTomogTestReportStd1Qfrom2Q",
"Test GST Report w/Idle Tomog.: StdXYI from StdXYICNOT",
verbosity=3, auto_open=False)
def test_idletomog_gstdata_nQ(self):
try: from pygsti.objects import fastreplib
except ImportError:
warnings.warn("Skipping test_idletomog_gstdata_nQ b/c no fastreps!")
return
#Global dicts describing how to prep and measure in various bases
prepDict = { 'X': ('Gy',), 'Y': ('Gx',)*3, 'Z': (),
'-X': ('Gy',)*3, '-Y': ('Gx',), '-Z': ('Gx','Gx')}
measDict = { 'X': ('Gy',)*3, 'Y': ('Gx',), 'Z': (),
'-X': ('Gy',), '-Y': ('Gx',)*3, '-Z': ('Gx','Gx')}
nQubits = 2
maxLengths = [1,2,4]
## ----- Generate n-qubit operation sequences -----
if regenerate_references():
c = {} #Uncomment to re-generate cache SAVE
else:
c = pickle.load(open(compare_files+"/idt_nQsequenceCache.pkl", 'rb'))
t = time.time()
gss = pygsti.construction.create_XYCNOT_cloudnoise_sequences(
nQubits, maxLengths, 'line', [(0,1)], maxIdleWeight=2,
idleOnly=False, paramroot="H+S", cache=c, verbosity=3)
#print("GSS STRINGS: ")
#print('\n'.join(["%s: %s" % (s.str,str(s.tup)) for s in gss.allstrs]))
gss_strs = gss.allstrs
print("%.1fs" % (time.time()-t))
if regenerate_references():
pickle.dump(c, open(compare_files+"/idt_nQsequenceCache.pkl", 'wb'))
#Uncomment to re-generate cache
# To run idle tomography, we need "pauli fiducial pairs", so
# get fiducial pairs for Gi germ from gss and convert
# to "Pauli fidicual pairs" (which pauli state/basis is prepared or measured)
GiStr = pygsti.obj.Circuit(((),), num_lines=nQubits)
self.assertTrue(GiStr in gss.germs)
self.assertTrue(gss.Ls == maxLengths)
L0 = maxLengths[0] # all lengths should have same fidpairs, just take first one
plaq = gss.get_plaquette(L0, GiStr)
pauli_fidpairs = idt.fidpairs_to_pauli_fidpairs(plaq.fidpairs, (prepDict,measDict), nQubits)
print(plaq.fidpairs)
print()
print('\n'.join([ "%s, %s" % (p[0],p[1]) for p in pauli_fidpairs]))
self.assertEqual(len(plaq.fidpairs), len(pauli_fidpairs))
self.assertEqual(len(plaq.fidpairs), 16) # (will need to change this if use H+S+A above)
# ---- Create some fake data ----
target_model = build_XYCNOT_cloudnoise_model(nQubits, "line", [(0,1)], 2, 1,
sim_type="map", parameterization="H+S")
#Note: generate data with affine errors too (H+S+A used below)
mdl_datagen = build_XYCNOT_cloudnoise_model(nQubits, "line", [(0,1)], 2, 1,
sim_type="map", parameterization="H+S+A",
roughNoise=(1234,0.001))
#This *only* (re)sets Gi errors...
idt.set_idle_errors(nQubits, mdl_datagen, {}, rand_default=0.001,
hamiltonian=True, stochastic=True, affine=True) # no seed? FUTURE?
problemStr = pygsti.obj.Circuit([()], num_lines=nQubits)
print("Problem: ",problemStr.str)
assert(problemStr in gss.allstrs)
ds = pygsti.construction.generate_fake_data(mdl_datagen, gss.allstrs, 1000, 'multinomial', seed=1234)
# ----- Run idle tomography with our custom (GST) set of pauli fiducial pairs ----
advanced = {'pauli_fidpairs': pauli_fidpairs, 'jacobian mode': "together"}
idtresults = idt.do_idle_tomography(nQubits, ds, maxLengths, (prepDict,measDict), maxweight=2,
advancedOptions=advanced, include_hamiltonian='auto',
include_stochastic='auto', include_affine='auto')
#Note: inclue_affine="auto" should have detected that we don't have the sequences to
# determine the affine intrinsic rates:
self.assertEqual(set(idtresults.intrinsic_rates.keys()), set(['hamiltonian','stochastic']))
idt.create_idletomography_report(idtresults, temp_files + "/idleTomographyGSTSeqTestReport",
"Test idle tomography report w/GST seqs", auto_open=False)
#Run GST on the data (set tolerance high so this 2Q-GST run doesn't take long)
gstresults = pygsti.do_long_sequence_gst_base(ds, target_model, gss,
advancedOptions={'tolerance': 1e-1}, verbosity=3)
#In FUTURE, we shouldn't need to set need to set the basis of our nQ GST results in order to make a report
for estkey in gstresults.estimates: # 'default'
gstresults.estimates[estkey].models['go0'].basis = pygsti.obj.Basis.cast("pp",16)
gstresults.estimates[estkey].models['target'].basis = pygsti.obj.Basis.cast("pp",16)
#pygsti.report.create_standard_report(gstresults, temp_files + "/gstWithIdleTomogTestReport",
# "Test GST Report w/Idle Tomography Tab",
# verbosity=3, auto_open=False)
pygsti.report.create_nqnoise_report(gstresults, temp_files + "/gstWithIdleTomogTestReport",
"Test nQNoise Report w/Idle Tomography Tab",
verbosity=3, auto_open=False)
def test_automatic_paulidicts(self):
expected_prepDict = { 'X': ('Gy',), 'Y': ('Gx',)*3, 'Z': (),
'-X': ('Gy',)*3, '-Y': ('Gx',), '-Z': ('Gx','Gx')}
expected_measDict = { 'X': ('Gy',)*3, 'Y': ('Gx',), 'Z': (),
'-X': ('Gy',), '-Y': ('Gx',)*3, '-Z': ('Gx','Gx')}
target_model = build_XYCNOT_cloudnoise_model(3, "line", [(0,1)], 2, 1,
sim_type="map", parameterization="H+S+A")
prepDict, measDict = idt.determine_paulidicts(target_model)
self.assertEqual(prepDict, expected_prepDict)
self.assertEqual(measDict, expected_measDict)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
[
"pygsti.obj.Circuit",
"numpy.abs",
"pygsti.do_long_sequence_gst",
"pygsti.construction.build_cloudnoise_model_from_hops_and_weights",
"pygsti.construction.filter_dataset",
"unittest.main",
"pygsti.extras.idletomography.predicted_intrinsic_rates",
"pygsti.do_long_sequence_gst_base",
"pygsti.modelpacks.legacy.std2Q_XYICNOT.target_model",
"pygsti.construction.stdmodule_to_smqmodule",
"pygsti.extras.idletomography.determine_paulidicts",
"pygsti.construction.create_XYCNOT_cloudnoise_sequences",
"pygsti.construction.generate_fake_data",
"pygsti.report.create_standard_report",
"pygsti.extras.idletomography.create_idletomography_report",
"pygsti.report.create_nqnoise_report",
"pygsti.obj.Basis.cast",
"pygsti.extras.idletomography.fidpairs_to_pauli_fidpairs",
"pygsti.extras.idletomography.do_idle_tomography",
"pygsti.extras.idletomography.set_idle_errors",
"pygsti.modelpacks.legacy.std1Q_XYI.target_model",
"time.time",
"warnings.warn"
] |
[((1386, 1764), 'pygsti.construction.build_cloudnoise_model_from_hops_and_weights', 'pygsti.construction.build_cloudnoise_model_from_hops_and_weights', (['nQubits', "['Gx', 'Gy', 'Gcnot']", 'nonstd_gate_unitaries', 'None', 'availability', 'None', 'geometry', 'maxIdleWeight', 'maxSpamWeight', 'maxhops', 'extraWeight1Hops', 'extraGateWeight', 'sparse', 'roughNoise', 'sim_type', 'parameterization', 'spamtype', 'addIdleNoiseToAllGates', 'errcomp_type', '(True)', 'return_clouds', 'verbosity'], {}), "(nQubits, [\n 'Gx', 'Gy', 'Gcnot'], nonstd_gate_unitaries, None, availability, None,\n geometry, maxIdleWeight, maxSpamWeight, maxhops, extraWeight1Hops,\n extraGateWeight, sparse, roughNoise, sim_type, parameterization,\n spamtype, addIdleNoiseToAllGates, errcomp_type, True, return_clouds,\n verbosity)\n", (1450, 1764), False, 'import pygsti\n'), ((7676, 7778), 'pygsti.extras.idletomography.predicted_intrinsic_rates', 'idt.predicted_intrinsic_rates', (['nQubits', 'maxErrWeight', 'mdl_datagen', 'hamiltonian', 'stochastic', 'affine'], {}), '(nQubits, maxErrWeight, mdl_datagen,\n hamiltonian, stochastic, affine)\n', (7705, 7778), True, 'from pygsti.extras import idletomography as idt\n'), ((20978, 21004), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (20991, 21004), False, 'import unittest\n'), ((4021, 4177), 'pygsti.extras.idletomography.set_idle_errors', 'idt.set_idle_errors', (['nQubits', 'gateset_idleInFids', 'debug_errdict'], {'rand_default': 'err_vec', 'hamiltonian': 'hamiltonian', 'stochastic': 'stochastic', 'affine': 'affine'}), '(nQubits, gateset_idleInFids, debug_errdict,\n rand_default=err_vec, hamiltonian=hamiltonian, stochastic=stochastic,\n affine=affine)\n', (4040, 4177), True, 'from pygsti.extras import idletomography as idt\n'), ((4204, 4362), 'pygsti.extras.idletomography.set_idle_errors', 'idt.set_idle_errors', (['nQubits', 'gateset_noIdleInFids', 'debug_errdict'], {'rand_default': 'err_vec', 'hamiltonian': 'hamiltonian', 'stochastic': 'stochastic', 'affine': 'affine'}), '(nQubits, gateset_noIdleInFids, debug_errdict,\n rand_default=err_vec, hamiltonian=hamiltonian, stochastic=stochastic,\n affine=affine)\n', (4223, 4362), True, 'from pygsti.extras import idletomography as idt\n'), ((8182, 8229), 'numpy.abs', 'np.abs', (['(ham_intrinsic_rates - datagen_ham_rates)'], {}), '(ham_intrinsic_rates - datagen_ham_rates)\n', (8188, 8229), True, 'import numpy as np\n'), ((8261, 8308), 'numpy.abs', 'np.abs', (['(sto_intrinsic_rates - datagen_sto_rates)'], {}), '(sto_intrinsic_rates - datagen_sto_rates)\n', (8267, 8308), True, 'import numpy as np\n'), ((8340, 8387), 'numpy.abs', 'np.abs', (['(aff_intrinsic_rates - datagen_aff_rates)'], {}), '(aff_intrinsic_rates - datagen_aff_rates)\n', (8346, 8387), True, 'import numpy as np\n'), ((12234, 12281), 'pygsti.construction.stdmodule_to_smqmodule', 'pygsti.construction.stdmodule_to_smqmodule', (['std'], {}), '(std)\n', (12276, 12281), False, 'import pygsti\n'), ((12899, 13084), 'pygsti.report.create_standard_report', 'pygsti.report.create_standard_report', (['result', "(temp_files + '/gstWithIdleTomogTestReportStd1Q')", '"""Test GST Report w/Idle Tomography Tab: StdXYI"""'], {'verbosity': '(3)', 'auto_open': '(False)'}), "(result, temp_files +\n '/gstWithIdleTomogTestReportStd1Q',\n 'Test GST Report w/Idle Tomography Tab: StdXYI', verbosity=3, auto_open\n =False)\n", (12935, 13084), False, 'import pygsti\n'), ((13412, 13461), 'pygsti.construction.stdmodule_to_smqmodule', 
'pygsti.construction.stdmodule_to_smqmodule', (['std2Q'], {}), '(std2Q)\n', (13454, 13461), False, 'import pygsti\n'), ((13476, 13523), 'pygsti.construction.stdmodule_to_smqmodule', 'pygsti.construction.stdmodule_to_smqmodule', (['std'], {}), '(std)\n', (13518, 13523), False, 'import pygsti\n'), ((13845, 13941), 'pygsti.construction.generate_fake_data', 'pygsti.construction.generate_fake_data', (['mdl_datagen', 'expList', '(1000)', '"""multinomial"""'], {'seed': '(1234)'}), "(mdl_datagen, expList, 1000,\n 'multinomial', seed=1234)\n", (13883, 13941), False, 'import pygsti\n'), ((13996, 14042), 'pygsti.construction.filter_dataset', 'pygsti.construction.filter_dataset', (['ds2Q', '(0,)'], {}), '(ds2Q, (0,))\n', (14030, 14042), False, 'import pygsti\n'), ((14060, 14078), 'pygsti.modelpacks.legacy.std1Q_XYI.target_model', 'std.target_model', ([], {}), '()\n', (14076, 14078), True, 'from pygsti.modelpacks.legacy import std1Q_XYI as std\n'), ((14142, 14306), 'pygsti.do_long_sequence_gst', 'pygsti.do_long_sequence_gst', (['ds', 'start', 'std.prepStrs[0:4]', 'std.effectStrs[0:4]', 'std.germs_lite', 'maxLens'], {'verbosity': '(3)', 'advancedOptions': "{'objective': 'chi2'}"}), "(ds, start, std.prepStrs[0:4], std.effectStrs[0:\n 4], std.germs_lite, maxLens, verbosity=3, advancedOptions={'objective':\n 'chi2'})\n", (14169, 14306), False, 'import pygsti\n'), ((14549, 14747), 'pygsti.report.create_standard_report', 'pygsti.report.create_standard_report', (['result', "(temp_files + '/gstWithIdleTomogTestReportStd1Qfrom2Q')", '"""Test GST Report w/Idle Tomog.: StdXYI from StdXYICNOT"""'], {'verbosity': '(3)', 'auto_open': '(False)'}), "(result, temp_files +\n '/gstWithIdleTomogTestReportStd1Qfrom2Q',\n 'Test GST Report w/Idle Tomog.: StdXYI from StdXYICNOT', verbosity=3,\n auto_open=False)\n", (14585, 14747), False, 'import pygsti\n'), ((15700, 15711), 'time.time', 'time.time', ([], {}), '()\n', (15709, 15711), False, 'import time\n'), ((15726, 15899), 'pygsti.construction.create_XYCNOT_cloudnoise_sequences', 'pygsti.construction.create_XYCNOT_cloudnoise_sequences', (['nQubits', 'maxLengths', '"""line"""', '[(0, 1)]'], {'maxIdleWeight': '(2)', 'idleOnly': '(False)', 'paramroot': '"""H+S"""', 'cache': 'c', 'verbosity': '(3)'}), "(nQubits, maxLengths,\n 'line', [(0, 1)], maxIdleWeight=2, idleOnly=False, paramroot='H+S',\n cache=c, verbosity=3)\n", (15780, 15899), False, 'import pygsti\n'), ((16500, 16544), 'pygsti.obj.Circuit', 'pygsti.obj.Circuit', (['((),)'], {'num_lines': 'nQubits'}), '(((),), num_lines=nQubits)\n', (16518, 16544), False, 'import pygsti\n'), ((16792, 16868), 'pygsti.extras.idletomography.fidpairs_to_pauli_fidpairs', 'idt.fidpairs_to_pauli_fidpairs', (['plaq.fidpairs', '(prepDict, measDict)', 'nQubits'], {}), '(plaq.fidpairs, (prepDict, measDict), nQubits)\n', (16822, 16868), True, 'from pygsti.extras import idletomography as idt\n'), ((17783, 17900), 'pygsti.extras.idletomography.set_idle_errors', 'idt.set_idle_errors', (['nQubits', 'mdl_datagen', '{}'], {'rand_default': '(0.001)', 'hamiltonian': '(True)', 'stochastic': '(True)', 'affine': '(True)'}), '(nQubits, mdl_datagen, {}, rand_default=0.001,\n hamiltonian=True, stochastic=True, affine=True)\n', (17802, 17900), True, 'from pygsti.extras import idletomography as idt\n'), ((17955, 17998), 'pygsti.obj.Circuit', 'pygsti.obj.Circuit', (['[()]'], {'num_lines': 'nQubits'}), '([()], num_lines=nQubits)\n', (17973, 17998), False, 'import pygsti\n'), ((18096, 18196), 'pygsti.construction.generate_fake_data', 
'pygsti.construction.generate_fake_data', (['mdl_datagen', 'gss.allstrs', '(1000)', '"""multinomial"""'], {'seed': '(1234)'}), "(mdl_datagen, gss.allstrs, 1000,\n 'multinomial', seed=1234)\n", (18134, 18196), False, 'import pygsti\n'), ((18389, 18583), 'pygsti.extras.idletomography.do_idle_tomography', 'idt.do_idle_tomography', (['nQubits', 'ds', 'maxLengths', '(prepDict, measDict)'], {'maxweight': '(2)', 'advancedOptions': 'advanced', 'include_hamiltonian': '"""auto"""', 'include_stochastic': '"""auto"""', 'include_affine': '"""auto"""'}), "(nQubits, ds, maxLengths, (prepDict, measDict),\n maxweight=2, advancedOptions=advanced, include_hamiltonian='auto',\n include_stochastic='auto', include_affine='auto')\n", (18411, 18583), True, 'from pygsti.extras import idletomography as idt\n'), ((18899, 19058), 'pygsti.extras.idletomography.create_idletomography_report', 'idt.create_idletomography_report', (['idtresults', "(temp_files + '/idleTomographyGSTSeqTestReport')", '"""Test idle tomography report w/GST seqs"""'], {'auto_open': '(False)'}), "(idtresults, temp_files +\n '/idleTomographyGSTSeqTestReport',\n 'Test idle tomography report w/GST seqs', auto_open=False)\n", (18931, 19058), True, 'from pygsti.extras import idletomography as idt\n'), ((19194, 19303), 'pygsti.do_long_sequence_gst_base', 'pygsti.do_long_sequence_gst_base', (['ds', 'target_model', 'gss'], {'advancedOptions': "{'tolerance': 0.1}", 'verbosity': '(3)'}), "(ds, target_model, gss, advancedOptions={\n 'tolerance': 0.1}, verbosity=3)\n", (19226, 19303), False, 'import pygsti\n'), ((19996, 20170), 'pygsti.report.create_nqnoise_report', 'pygsti.report.create_nqnoise_report', (['gstresults', "(temp_files + '/gstWithIdleTomogTestReport')", '"""Test nQNoise Report w/Idle Tomography Tab"""'], {'verbosity': '(3)', 'auto_open': '(False)'}), "(gstresults, temp_files +\n '/gstWithIdleTomogTestReport',\n 'Test nQNoise Report w/Idle Tomography Tab', verbosity=3, auto_open=False)\n", (20031, 20170), False, 'import pygsti\n'), ((20798, 20836), 'pygsti.extras.idletomography.determine_paulidicts', 'idt.determine_paulidicts', (['target_model'], {}), '(target_model)\n', (20822, 20836), True, 'from pygsti.extras import idletomography as idt\n'), ((3688, 3843), 'pygsti.extras.idletomography.set_idle_errors', 'idt.set_idle_errors', (['nQubits', 'gateset_idleInFids', 'debug_errdict'], {'rand_default': 'errMag', 'hamiltonian': 'hamiltonian', 'stochastic': 'stochastic', 'affine': 'affine'}), '(nQubits, gateset_idleInFids, debug_errdict,\n rand_default=errMag, hamiltonian=hamiltonian, stochastic=stochastic,\n affine=affine)\n', (3707, 3843), True, 'from pygsti.extras import idletomography as idt\n'), ((4664, 4800), 'pygsti.construction.generate_fake_data', 'pygsti.construction.generate_fake_data', (['gateset_idleInFids', 'listOfExperiments'], {'nSamples': 'Nsamp', 'sampleError': 'sampleError', 'seed': '(8675309)'}), '(gateset_idleInFids,\n listOfExperiments, nSamples=Nsamp, sampleError=sampleError, seed=8675309)\n', (4702, 4800), False, 'import pygsti\n'), ((5197, 5335), 'pygsti.construction.generate_fake_data', 'pygsti.construction.generate_fake_data', (['gateset_noIdleInFids', 'listOfExperiments'], {'nSamples': 'Nsamp', 'sampleError': 'sampleError', 'seed': '(8675309)'}), '(gateset_noIdleInFids,\n listOfExperiments, nSamples=Nsamp, sampleError=sampleError, seed=8675309)\n', (5235, 5335), False, 'import pygsti\n'), ((12374, 12392), 'pygsti.modelpacks.legacy.std1Q_XYI.target_model', 'std.target_model', ([], {}), '()\n', (12390, 12392), True, 'from 
pygsti.modelpacks.legacy import std1Q_XYI as std\n'), ((12752, 12770), 'pygsti.modelpacks.legacy.std1Q_XYI.target_model', 'std.target_model', ([], {}), '()\n', (12768, 12770), True, 'from pygsti.modelpacks.legacy import std1Q_XYI as std\n'), ((13616, 13636), 'pygsti.modelpacks.legacy.std2Q_XYICNOT.target_model', 'std2Q.target_model', ([], {}), '()\n', (13634, 13636), True, 'from pygsti.modelpacks.legacy import std2Q_XYICNOT as std2Q\n'), ((19597, 19628), 'pygsti.obj.Basis.cast', 'pygsti.obj.Basis.cast', (['"""pp"""', '(16)'], {}), "('pp', 16)\n", (19618, 19628), False, 'import pygsti\n'), ((19694, 19725), 'pygsti.obj.Basis.cast', 'pygsti.obj.Basis.cast', (['"""pp"""', '(16)'], {}), "('pp', 16)\n", (19715, 19725), False, 'import pygsti\n'), ((13786, 13806), 'pygsti.modelpacks.legacy.std2Q_XYICNOT.target_model', 'std2Q.target_model', ([], {}), '()\n', (13804, 13806), True, 'from pygsti.modelpacks.legacy import std2Q_XYICNOT as std2Q\n'), ((14960, 15028), 'warnings.warn', 'warnings.warn', (['"""Skipping test_idletomog_gstdata_nQ b/c no fastreps!"""'], {}), "('Skipping test_idletomog_gstdata_nQ b/c no fastreps!')\n", (14973, 15028), False, 'import warnings\n'), ((12566, 12584), 'pygsti.modelpacks.legacy.std1Q_XYI.target_model', 'std.target_model', ([], {}), '()\n', (12582, 12584), True, 'from pygsti.modelpacks.legacy import std1Q_XYI as std\n'), ((16085, 16096), 'time.time', 'time.time', ([], {}), '()\n', (16094, 16096), False, 'import time\n')]
|
#
# Author : <NAME>
# Copyright (c) 2020 <NAME>. All rights reserved.
# Licensed under the MIT License. See LICENSE file in the project root for full license information.
#
#
# Test function helpers.
#
import numpy as np
def constantode(t,x):
"""Function containing a constant ODE x' = 1.
"""
xprime = np.empty([1], float);
xprime[0] = 1;
return xprime;
def constantodeJ(t, x):
"""Function containing the Jacobian of constantode.
"""
df = np.empty([1,1], float);
df[0,0] = 0;
return df;
def stableode(t,x):
"""Function containing the ODE x' = -x.
"""
xprime = np.empty([1], float);
xprime[0] = -x[0];
return xprime;
def stableodeJ(t, x):
"""Function containing the Jacobian of stableode.
"""
df = np.empty([1,1], float);
df[0,0] = -1;
return df;
def multivariableode(t,x):
"""Function containing the ODE x_1' = -x_1 + x_2
x_2' = -x_2 .
"""
xprime = np.empty([2], float);
xprime[0] = -x[0] + x[1];
xprime[1] = -x[1];
return xprime;
def multivariableodeJ(t, x):
"""Function containing the Jacobian of multivariableode.
"""
df = np.empty([2,2], float);
df[0,0] = -1;
df[0,1] = +1;
df[1,0] = 0;
df[1,1] = -1;
return df;
def stiffode(t, x):
"""Function containing the stiff ODE x_1' = -x_1
x_2' = -100(x_2 - sin(t)) + cos(t).
"""
xprime = np.empty([2], float);
xprime[0] = -x[0];
xprime[1] = -100*(x[1] - np.sin(t)) + np.cos(t);
return xprime;
def stiffodeJ(t, x):
"""Function containing the Jacobian of stiffode.
"""
df = np.empty([2,2], float);
df[0,0] = -1;
df[0,1] = 0;
df[1,0] = 0;
df[1,1] = -100;
return df;
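# Illustrative cross-check (assumes SciPy is available; the helpers above do not require it).
# Each (f, J) pair follows the signature used by Jacobian-aware integrators:
# f(t, x) -> x' and J(t, x) -> df/dx, so a stiff solver such as SciPy's BDF
# can consume them directly. The time span and tolerances below are arbitrary choices.
if __name__ == '__main__':
    from scipy.integrate import solve_ivp
    sol = solve_ivp(stiffode, (0.0, 1.0), np.array([1.0, 0.0]),
                    method='BDF', jac=stiffodeJ, rtol=1e-8, atol=1e-10)
    # The second component should closely track sin(t) once the fast transient decays.
    print('t =', sol.t[-1], 'x =', sol.y[:, -1])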
|
[
"numpy.empty",
"numpy.sin",
"numpy.cos"
] |
[((312, 332), 'numpy.empty', 'np.empty', (['[1]', 'float'], {}), '([1], float)\n', (320, 332), True, 'import numpy as np\n'), ((456, 479), 'numpy.empty', 'np.empty', (['[1, 1]', 'float'], {}), '([1, 1], float)\n', (464, 479), True, 'import numpy as np\n'), ((587, 607), 'numpy.empty', 'np.empty', (['[1]', 'float'], {}), '([1], float)\n', (595, 607), True, 'import numpy as np\n'), ((731, 754), 'numpy.empty', 'np.empty', (['[1, 1]', 'float'], {}), '([1, 1], float)\n', (739, 754), True, 'import numpy as np\n'), ((903, 923), 'numpy.empty', 'np.empty', (['[2]', 'float'], {}), '([2], float)\n', (911, 923), True, 'import numpy as np\n'), ((1088, 1111), 'numpy.empty', 'np.empty', (['[2, 2]', 'float'], {}), '([2, 2], float)\n', (1096, 1111), True, 'import numpy as np\n'), ((1321, 1341), 'numpy.empty', 'np.empty', (['[2]', 'float'], {}), '([2], float)\n', (1329, 1341), True, 'import numpy as np\n'), ((1515, 1538), 'numpy.empty', 'np.empty', (['[2, 2]', 'float'], {}), '([2, 2], float)\n', (1523, 1538), True, 'import numpy as np\n'), ((1403, 1412), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (1409, 1412), True, 'import numpy as np\n'), ((1390, 1399), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (1396, 1399), True, 'import numpy as np\n')]
|
from sklearn import datasets
import numpy as np
def get_info():
return {
'name': 'sklearn_iris',
'description': 'ScikitLearn | Iris',
'class_names': ['Iris Setosa', 'Iris Versicolor', 'Iris Virginica']
}
def get_data(datasets_path):
data = datasets.load_iris()
return {
'X_train': np.array(data.data),
'y_train': np.array(data.target),
'X_test': np.array([]),
'y_test': np.array([]),
'class_names': ['Iris Setosa', 'Iris Versicolor', 'Iris Virginica']
}
|
[
"sklearn.datasets.load_iris",
"numpy.array"
] |
[((280, 300), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (298, 300), False, 'from sklearn import datasets\n'), ((334, 353), 'numpy.array', 'np.array', (['data.data'], {}), '(data.data)\n', (342, 353), True, 'import numpy as np\n'), ((374, 395), 'numpy.array', 'np.array', (['data.target'], {}), '(data.target)\n', (382, 395), True, 'import numpy as np\n'), ((415, 427), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (423, 427), True, 'import numpy as np\n'), ((447, 459), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (455, 459), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
import WindFarmGenetic # wind farm layout optimization using genetic algorithms classes
from datetime import datetime
import os
from sklearn.svm import SVR
import pickle
# Wind farm settings and algorithm settings
# parameters for the genetic algorithm
elite_rate = 0.2
cross_rate = 0.6
random_rate = 0.5
mutate_rate = 0.1
wt_N = 25 # number of wind turbines 15, 20, or 25
# NA_loc_array : not available location array, the index starting from 1
# L1 : wind farm, cells 121(inclusive) to 144(inclusive)
NA_loc_array = np.arange(121, 145, 1)
#
# # L2
# NA_loc_array = np.arange(61, 85, 1)
#
# # L3
# NA_loc_array = np.concatenate((np.arange(11, 144, 12), np.arange(12, 145, 12)))
#
# # L4
# NA_loc_array = np.concatenate((np.arange(6, 144, 12), np.arange(7, 145, 12)))
#
# # L5
# NA_loc_array = np.concatenate((np.arange(41, 105, 12), np.arange(42, 105, 12),
# np.arange(43, 105, 12),
# np.arange(44, 105, 12)))
#
# # L6
# NA_loc_array = np.concatenate((np.arange(1, 28, 12), np.arange(2, 28, 12),
# np.arange(12, 37, 12),
# np.arange(11, 37, 12),
# np.arange(109, 145, 12), np.arange(119, 145, 12),
# np.arange(110, 145, 12),
# np.arange(120, 145, 12),
# ))
#
# # L7
# NA_loc_array = np.arange(133, 145, 1)
#
# # L8
# NA_loc_array = np.arange(61, 73, 1)
#
# # L9
# NA_loc_array = np.arange(12, 145, 12)
#
# # L10
# NA_loc_array = np.arange(6, 145, 12)
#
# # L11
# NA_loc_array = np.concatenate((np.arange(42, 105, 12),
# np.arange(43, 105, 12)))
#
# # L12
# NA_loc_array = np.array((1, 2, 11, 12, 13, 24, 121, 132, 133, 134, 143, 144))
# convert numpy array to list, datatype convert
NA_loc = NA_loc_array.tolist()
# L0
# NA_loc = []
population_size = 120 # how many layouts in a population
iteration_times = 200 # how many iterations in a genetic algorithm run
n_inits = 100 # number of initial populations n_inits >= run_times
run_times = 100 # number of different initial populations
# wind farm size, cells
cols_cells = 12 # number of cells each row
rows_cells = 12 # number of cells each column
cell_width = 77.0 * 3 # unit : m
# all data will be saved in the data folder
data_folder = "data"
if not os.path.exists(data_folder):
os.makedirs(data_folder)
# Create a WindFarmGenetic object
# create a WindFarmGenetic object. Specify the number of rows and the number of columns of the wind farm land. N is the number of wind turbines.
# NA_loc lists the unavailable locations on the wind farm land (cells whose landowners do not want to participate in the wind farm).
# pop_size: how many individuals in the population
# iteration: iteration times of the genetic algorithm
wfg = WindFarmGenetic.WindFarmGenetic(rows=rows_cells, cols=cols_cells, N=wt_N, NA_loc=NA_loc, pop_size=population_size,
iteration=iteration_times, cell_width=cell_width, elite_rate=elite_rate,
cross_rate=cross_rate, random_rate=random_rate, mutate_rate=mutate_rate)
# Specify the wind distribution
# wind distribution is discrete (number of wind speeds) by (number of wind directions)
# wfg.init_1_direction_1_N_speed_13()
# file name to store the wind power distribution SVR model
# svr_model_filename = 'svr_1s1d_N_13.svr'
# wfg.init_4_direction_1_speed_13()
# svr_model_filename = 'svr_1s4d_13.svr'
wfg.init_6_direction_1_speed_13()
# svr_model_filename = 'svr_1s6d_13.svr'
################################################
# generate initial populations
################################################
# initial population saved folder
init_pops_data_folder = "{}/init_data".format(data_folder)
if not os.path.exists(init_pops_data_folder):
os.makedirs(init_pops_data_folder)
# generate initial populations to start with and store them
# in order to start from the same initial population for different methods
# so it is fair to compare the final results
for i in range(n_inits):
wfg.gen_init_pop_NA()
wfg.save_init_pop_NA("{}/init_{}.dat".format(init_pops_data_folder, i),
"{}/init_{}_NA.dat".format(init_pops_data_folder, i))
# Create results folder
# results folder
# adaptive_best_layouts_N60_9_20190422213718.dat : best layout for AGA of run index 9
# result_CGA_20190422213715.dat : run time and best eta for CGA method
results_data_folder = "data/results"
if not os.path.exists(results_data_folder):
os.makedirs(results_data_folder)
# if the cg, ag and sg folders do not exist, create them. These folders store the running results
# cg: conventional genetic algorithm
# ag: adaptive genetic algorithm
# sg: support vector regression guided genetic algorithm
cg_result_folder = "{}/cg".format(results_data_folder)
if not os.path.exists(cg_result_folder):
os.makedirs(cg_result_folder)
ag_result_folder = "{}/ag".format(results_data_folder)
if not os.path.exists(ag_result_folder):
os.makedirs(ag_result_folder)
sg_result_folder = "{}/sg".format(results_data_folder)
if not os.path.exists(sg_result_folder):
os.makedirs(sg_result_folder)
# result_arr: run_times by 2; the first column is the run time in seconds for each run and the second column is the conversion efficiency of that run
result_arr = np.zeros((run_times, 2), dtype=np.float32)
# Run conventional genetic algorithm (CGA)
# CGA: conventional genetic algorithm
for i in range(0, run_times): # run times
print("run times {} ...".format(i))
# load initial population
wfg.load_init_pop_NA("{}/init_{}.dat".format(init_pops_data_folder, i),
"{}/init_{}_NA.dat".format(init_pops_data_folder, i))
# run the conventional genetic algorithm and return run time and conversion efficiency
run_time, eta = wfg.conventional_genetic_alg(i, result_folder=cg_result_folder)
result_arr[i, 0] = run_time
result_arr[i, 1] = eta
time_stamp = datetime.now().strftime("%Y%m%d%H%M%S")
# save the run time and etas to a file
filename = "{}/result_conventional_{}.dat".format(cg_result_folder, time_stamp)
np.savetxt(filename, result_arr, fmt='%f', delimiter=" ")
# Run adaptive genetic algorithm (AGA)
# AGA: adaptive genetic algorithm
for i in range(0, run_times): # run times
print("run times {} ...".format(i))
wfg.load_init_pop_NA("{}/init_{}.dat".format(init_pops_data_folder, i),
"{}/init_{}_NA.dat".format(init_pops_data_folder, i))
run_time, eta = wfg.adaptive_genetic_alg(i, result_folder=ag_result_folder)
result_arr[i, 0] = run_time
result_arr[i, 1] = eta
time_stamp = datetime.now().strftime("%Y%m%d%H%M%S")
filename = "{}/result_adaptive_{}.dat".format(ag_result_folder, time_stamp)
np.savetxt(filename, result_arr, fmt='%f', delimiter=" ")
# Run support vector regression guided genetic algorithm (SUGGA)
# Generate wind distribution surface
#############################################
# generate wind distribution surface
#############################################
n_mc_samples = 10000 # svr train data, number of layouts to average
wds_data_folder = "{}/wds".format(data_folder)
if not os.path.exists(wds_data_folder):
os.makedirs(wds_data_folder)
# mc : monte-carlo
# number of layouts to generate as the training data for regression
# to build the power distribution surface
# mc_layout.dat file stores layouts only with 0s and 1s. 0 means no turbine here. 1 means one turbine here.
# mc_layout_NA.dat file stores layouts with 0s, 1s and 2s. 2 means no turbine and not available for turbine.
# These two files are used to generate wind power distribution.
# Each file has 10000 lines. Each line is layout.
# gen_mc_grid_with_NA_loc function generates these two files.
train_mc_layouts, train_mc_layouts_NA = WindFarmGenetic.LayoutGridMCGenerator.gen_mc_grid_with_NA_loc(rows_cells,
cols_cells,
n_mc_samples,
wt_N, NA_loc,
"{}/mc_layout.dat".format(
wds_data_folder),
"{}/mc_layout_NA.dat".format(
wds_data_folder))
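# Hedged illustration: each line of mc_layout_NA.dat is one flattened rows_cells x cols_cells
# layout, so reshaping recovers the grid (0 = empty cell, 1 = turbine, 2 = unavailable cell).
# The reshape below is only an example of reading the generated data, not a step of the algorithm:
# example_grid = np.asarray(train_mc_layouts_NA)[0].reshape(rows_cells, cols_cells)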
# wfg.init_1_direction_1_N_speed_13()
# file name to store the wind power distribution SVR model
# svr_model_filename = 'svr_1s1d_N_13.svr'
# wfg.init_4_direction_1_speed_13()
# svr_model_filename = 'svr_1s4d_13.svr'
# wfg.init_6_direction_1_speed_13()
svr_model_filename = 'svr_1s6d_13.svr'
# load Monte-Carlo layouts from a text file. 10000 random layouts
layouts = np.genfromtxt("{}/mc_layout.dat".format(wds_data_folder), delimiter=" ", dtype=np.int32)
# generate the location index coordinate and average power output at each location index coordinate
# location index coordinate : in the cells, the cell with index 1 has location index (0,0) and the cell 2 has (1,0)
# store the location index coordinate in x.dat and average power in y.dat
wfg.mc_gen_xy_NA(rows=rows_cells, cols=cols_cells, layouts=layouts, n=n_mc_samples, N=wt_N,
xfname="{}/x.dat".format(wds_data_folder),
yfname="{}/y.dat".format(wds_data_folder))
# read index location coordinates
x_original = pd.read_csv("{}/x.dat".format(wds_data_folder), header=None, nrows=rows_cells * cols_cells,
delim_whitespace=True, dtype=np.float32)
x_original = x_original.values
# read the power output of each index location coordinate
y_original = pd.read_csv("{}/y.dat".format(wds_data_folder), header=None, nrows=rows_cells * cols_cells,
delim_whitespace=True, dtype=np.float32)
y_original = y_original.values.flatten()
# create an SVR object and specify the kernel and other parameters
svr_model = SVR(kernel='rbf', C=2000.0, gamma=0.3, epsilon=.1)
# build the SVR power distribution model
svr_model.fit(x_original, y_original)
# save the SVR model to a file
pickle.dump(svr_model, open("{}/{}".format(wds_data_folder, svr_model_filename), 'wb'))
# This is how to load SVR model from a file
# svr_model = pickle.load(open("{}/{}".format(wds_data_folder,svr_model_filename), 'rb'))
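# Hedged illustration: the fitted SVR surface maps a location index coordinate (as stored
# in x.dat) to an estimated average power output; SUGGA uses these estimates to guide
# selection. The probe point below is an arbitrary example, not something the algorithm needs.
probe = np.array([[5.0, 5.0]], dtype=np.float32)
print("SVR-estimated power at index coordinate (5, 5):", svr_model.predict(probe)[0])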
# SUGGA: support vector regression guided genetic algorithm
for i in range(0, run_times): # run times
print("run times {} ...".format(i))
wfg.load_init_pop_NA("{}/init_{}.dat".format(init_pops_data_folder, i),
"{}/init_{}_NA.dat".format(init_pops_data_folder, i))
run_time, eta = wfg.sugga_genetic_alg(i, svr_model=svr_model, result_folder=sg_result_folder)
result_arr[i, 0] = run_time
result_arr[i, 1] = eta
time_stamp = datetime.now().strftime("%Y%m%d%H%M%S")
filename = "{}/result_sugga_{}.dat".format(sg_result_folder, time_stamp)
np.savetxt(filename, result_arr, fmt='%f', delimiter=" ")
|
[
"sklearn.svm.SVR",
"os.makedirs",
"numpy.savetxt",
"numpy.zeros",
"os.path.exists",
"numpy.arange",
"WindFarmGenetic.WindFarmGenetic",
"datetime.datetime.now"
] |
[((581, 603), 'numpy.arange', 'np.arange', (['(121)', '(145)', '(1)'], {}), '(121, 145, 1)\n', (590, 603), True, 'import numpy as np\n'), ((2996, 3268), 'WindFarmGenetic.WindFarmGenetic', 'WindFarmGenetic.WindFarmGenetic', ([], {'rows': 'rows_cells', 'cols': 'cols_cells', 'N': 'wt_N', 'NA_loc': 'NA_loc', 'pop_size': 'population_size', 'iteration': 'iteration_times', 'cell_width': 'cell_width', 'elite_rate': 'elite_rate', 'cross_rate': 'cross_rate', 'random_rate': 'random_rate', 'mutate_rate': 'mutate_rate'}), '(rows=rows_cells, cols=cols_cells, N=wt_N,\n NA_loc=NA_loc, pop_size=population_size, iteration=iteration_times,\n cell_width=cell_width, elite_rate=elite_rate, cross_rate=cross_rate,\n random_rate=random_rate, mutate_rate=mutate_rate)\n', (3027, 3268), False, 'import WindFarmGenetic\n'), ((5590, 5632), 'numpy.zeros', 'np.zeros', (['(run_times, 2)'], {'dtype': 'np.float32'}), '((run_times, 2), dtype=np.float32)\n', (5598, 5632), True, 'import numpy as np\n'), ((6401, 6459), 'numpy.savetxt', 'np.savetxt', (['filename', 'result_arr'], {'fmt': '"""%f"""', 'delimiter': '""" """'}), "(filename, result_arr, fmt='%f', delimiter=' ')\n", (6411, 6459), True, 'import numpy as np\n'), ((7053, 7111), 'numpy.savetxt', 'np.savetxt', (['filename', 'result_arr'], {'fmt': '"""%f"""', 'delimiter': '""" """'}), "(filename, result_arr, fmt='%f', delimiter=' ')\n", (7063, 7111), True, 'import numpy as np\n'), ((10641, 10692), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""rbf"""', 'C': '(2000.0)', 'gamma': '(0.3)', 'epsilon': '(0.1)'}), "(kernel='rbf', C=2000.0, gamma=0.3, epsilon=0.1)\n", (10644, 10692), False, 'from sklearn.svm import SVR\n'), ((11630, 11688), 'numpy.savetxt', 'np.savetxt', (['filename', 'result_arr'], {'fmt': '"""%f"""', 'delimiter': '""" """'}), "(filename, result_arr, fmt='%f', delimiter=' ')\n", (11640, 11688), True, 'import numpy as np\n'), ((2520, 2547), 'os.path.exists', 'os.path.exists', (['data_folder'], {}), '(data_folder)\n', (2534, 2547), False, 'import os\n'), ((2554, 2578), 'os.makedirs', 'os.makedirs', (['data_folder'], {}), '(data_folder)\n', (2565, 2578), False, 'import os\n'), ((3996, 4033), 'os.path.exists', 'os.path.exists', (['init_pops_data_folder'], {}), '(init_pops_data_folder)\n', (4010, 4033), False, 'import os\n'), ((4040, 4074), 'os.makedirs', 'os.makedirs', (['init_pops_data_folder'], {}), '(init_pops_data_folder)\n', (4051, 4074), False, 'import os\n'), ((4720, 4755), 'os.path.exists', 'os.path.exists', (['results_data_folder'], {}), '(results_data_folder)\n', (4734, 4755), False, 'import os\n'), ((4762, 4794), 'os.makedirs', 'os.makedirs', (['results_data_folder'], {}), '(results_data_folder)\n', (4773, 4794), False, 'import os\n'), ((5086, 5118), 'os.path.exists', 'os.path.exists', (['cg_result_folder'], {}), '(cg_result_folder)\n', (5100, 5118), False, 'import os\n'), ((5125, 5154), 'os.makedirs', 'os.makedirs', (['cg_result_folder'], {}), '(cg_result_folder)\n', (5136, 5154), False, 'import os\n'), ((5221, 5253), 'os.path.exists', 'os.path.exists', (['ag_result_folder'], {}), '(ag_result_folder)\n', (5235, 5253), False, 'import os\n'), ((5260, 5289), 'os.makedirs', 'os.makedirs', (['ag_result_folder'], {}), '(ag_result_folder)\n', (5271, 5289), False, 'import os\n'), ((5356, 5388), 'os.path.exists', 'os.path.exists', (['sg_result_folder'], {}), '(sg_result_folder)\n', (5370, 5388), False, 'import os\n'), ((5395, 5424), 'os.makedirs', 'os.makedirs', (['sg_result_folder'], {}), '(sg_result_folder)\n', (5406, 5424), False, 'import os\n'), ((7480, 7511), 
'os.path.exists', 'os.path.exists', (['wds_data_folder'], {}), '(wds_data_folder)\n', (7494, 7511), False, 'import os\n'), ((7518, 7546), 'os.makedirs', 'os.makedirs', (['wds_data_folder'], {}), '(wds_data_folder)\n', (7529, 7546), False, 'import os\n'), ((6239, 6253), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6251, 6253), False, 'from datetime import datetime\n'), ((6935, 6949), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6947, 6949), False, 'from datetime import datetime\n'), ((11515, 11529), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (11527, 11529), False, 'from datetime import datetime\n')]
|
# -*- coding: utf-8 -*-
"""
Purpose
- The very first preprocessing of the raw image data, before annotation work
- Shrink the images & make the image sizes uniform
"""
import os
import glob
import numpy as np
from PIL import Image
import argparse
def main(args):
img_files = glob.glob(os.path.join(args.img_dir, args.img_filter))
print('image_dir : ', args.img_dir, ', filter : ', args.img_filter)
print('image file number : ', len(img_files))
"""
    Some images differ in size, but the aspect ratio is assumed to be the same.
    Resize with the height fixed at 302.
    If this causes problems, simply force a resize to (402, 302) instead.
"""
height_size = 302
for img_file in img_files:
org_img = Image.open(img_file)
img = org_img.copy()
        if img.height > img.width: # make the orientation consistent (landscape)
img = img.rotate(90, expand=True)
scale = float(height_size) / img.height
res_img = img.resize((int(img.width*scale), height_size))
res_img.save(os.path.join(args.out_dir, img_file.split('/')[-1]))
print(img_file, np.array(org_img).shape, '->', np.array(res_img).shape)
return 0
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='argparser')
parser.add_argument('--img_dir', type=str, default='data/org_images')
parser.add_argument('--out_dir', type=str, default='data/res_images')
parser.add_argument('--img_filter', type=str, default='*.JPG')
args = parser.parse_args()
main(args)
|
[
"numpy.array",
"os.path.join",
"argparse.ArgumentParser",
"PIL.Image.open"
] |
[((1035, 1083), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""argparser"""'}), "(description='argparser')\n", (1058, 1083), False, 'import argparse\n'), ((216, 259), 'os.path.join', 'os.path.join', (['args.img_dir', 'args.img_filter'], {}), '(args.img_dir, args.img_filter)\n', (228, 259), False, 'import os\n'), ((569, 589), 'PIL.Image.open', 'Image.open', (['img_file'], {}), '(img_file)\n', (579, 589), False, 'from PIL import Image\n'), ((924, 941), 'numpy.array', 'np.array', (['org_img'], {}), '(org_img)\n', (932, 941), True, 'import numpy as np\n'), ((955, 972), 'numpy.array', 'np.array', (['res_img'], {}), '(res_img)\n', (963, 972), True, 'import numpy as np\n')]
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training Loop."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow_graphics.projects.cvxnet.lib import datasets
from tensorflow_graphics.projects.cvxnet.lib import models
from tensorflow_graphics.projects.cvxnet.lib import utils
tf.disable_eager_execution()
flags = tf.app.flags
logging = tf.logging
tf.logging.set_verbosity(tf.logging.INFO)
utils.define_flags()
FLAGS = flags.FLAGS
def main(unused_argv):
tf.set_random_seed(2191997)
np.random.seed(6281996)
logging.info("=> Starting ...")
# Select dataset.
logging.info("=> Preparing datasets ...")
data = datasets.get_dataset(FLAGS.dataset, "train", FLAGS)
batch = tf.data.make_one_shot_iterator(data).get_next()
# Select model.
logging.info("=> Creating {} model".format(FLAGS.model))
model = models.get_model(FLAGS.model, FLAGS)
optimizer = tf.train.AdamOptimizer(FLAGS.lr)
# Set up the graph
train_loss, train_op, global_step = model.compute_loss(
batch, training=True, optimizer=optimizer)
# Training hooks
stop_hook = tf.train.StopAtStepHook(last_step=FLAGS.max_steps)
summary_writer = tf.summary.FileWriter(FLAGS.train_dir)
ops = tf.get_collection(tf.GraphKeys.SUMMARIES)
summary_hook = tf.train.SummarySaverHook(
save_steps=100, summary_writer=summary_writer, summary_op=ops)
step_counter_hook = tf.train.StepCounterHook(summary_writer=summary_writer)
hooks = [stop_hook, step_counter_hook, summary_hook]
logging.info("=> Start training loop ...")
with tf.train.MonitoredTrainingSession(
checkpoint_dir=FLAGS.train_dir,
hooks=hooks,
scaffold=None,
save_checkpoint_steps=FLAGS.save_every,
save_checkpoint_secs=None,
save_summaries_steps=None,
save_summaries_secs=None,
log_step_count_steps=None,
max_wait_secs=3600) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run([batch, train_loss, global_step, train_op])
if __name__ == "__main__":
tf.app.run(main)
|
[
"numpy.random.seed",
"tensorflow.compat.v1.train.SummarySaverHook",
"tensorflow_graphics.projects.cvxnet.lib.utils.define_flags",
"tensorflow.compat.v1.data.make_one_shot_iterator",
"tensorflow.compat.v1.disable_eager_execution",
"tensorflow.compat.v1.set_random_seed",
"tensorflow.compat.v1.train.AdamOptimizer",
"tensorflow.compat.v1.train.StopAtStepHook",
"tensorflow.compat.v1.logging.set_verbosity",
"tensorflow_graphics.projects.cvxnet.lib.models.get_model",
"tensorflow.compat.v1.summary.FileWriter",
"tensorflow.compat.v1.get_collection",
"tensorflow.compat.v1.train.StepCounterHook",
"tensorflow.compat.v1.train.MonitoredTrainingSession",
"tensorflow_graphics.projects.cvxnet.lib.datasets.get_dataset",
"tensorflow.compat.v1.app.run"
] |
[((951, 979), 'tensorflow.compat.v1.disable_eager_execution', 'tf.disable_eager_execution', ([], {}), '()\n', (977, 979), True, 'import tensorflow.compat.v1 as tf\n'), ((1023, 1064), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (1047, 1064), True, 'import tensorflow.compat.v1 as tf\n'), ((1066, 1086), 'tensorflow_graphics.projects.cvxnet.lib.utils.define_flags', 'utils.define_flags', ([], {}), '()\n', (1084, 1086), False, 'from tensorflow_graphics.projects.cvxnet.lib import utils\n'), ((1134, 1161), 'tensorflow.compat.v1.set_random_seed', 'tf.set_random_seed', (['(2191997)'], {}), '(2191997)\n', (1152, 1161), True, 'import tensorflow.compat.v1 as tf\n'), ((1164, 1187), 'numpy.random.seed', 'np.random.seed', (['(6281996)'], {}), '(6281996)\n', (1178, 1187), True, 'import numpy as np\n'), ((1297, 1348), 'tensorflow_graphics.projects.cvxnet.lib.datasets.get_dataset', 'datasets.get_dataset', (['FLAGS.dataset', '"""train"""', 'FLAGS'], {}), "(FLAGS.dataset, 'train', FLAGS)\n", (1317, 1348), False, 'from tensorflow_graphics.projects.cvxnet.lib import datasets\n'), ((1495, 1531), 'tensorflow_graphics.projects.cvxnet.lib.models.get_model', 'models.get_model', (['FLAGS.model', 'FLAGS'], {}), '(FLAGS.model, FLAGS)\n', (1511, 1531), False, 'from tensorflow_graphics.projects.cvxnet.lib import models\n'), ((1546, 1578), 'tensorflow.compat.v1.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['FLAGS.lr'], {}), '(FLAGS.lr)\n', (1568, 1578), True, 'import tensorflow.compat.v1 as tf\n'), ((1742, 1792), 'tensorflow.compat.v1.train.StopAtStepHook', 'tf.train.StopAtStepHook', ([], {'last_step': 'FLAGS.max_steps'}), '(last_step=FLAGS.max_steps)\n', (1765, 1792), True, 'import tensorflow.compat.v1 as tf\n'), ((1812, 1850), 'tensorflow.compat.v1.summary.FileWriter', 'tf.summary.FileWriter', (['FLAGS.train_dir'], {}), '(FLAGS.train_dir)\n', (1833, 1850), True, 'import tensorflow.compat.v1 as tf\n'), ((1859, 1900), 'tensorflow.compat.v1.get_collection', 'tf.get_collection', (['tf.GraphKeys.SUMMARIES'], {}), '(tf.GraphKeys.SUMMARIES)\n', (1876, 1900), True, 'import tensorflow.compat.v1 as tf\n'), ((1918, 2010), 'tensorflow.compat.v1.train.SummarySaverHook', 'tf.train.SummarySaverHook', ([], {'save_steps': '(100)', 'summary_writer': 'summary_writer', 'summary_op': 'ops'}), '(save_steps=100, summary_writer=summary_writer,\n summary_op=ops)\n', (1943, 2010), True, 'import tensorflow.compat.v1 as tf\n'), ((2036, 2091), 'tensorflow.compat.v1.train.StepCounterHook', 'tf.train.StepCounterHook', ([], {'summary_writer': 'summary_writer'}), '(summary_writer=summary_writer)\n', (2060, 2091), True, 'import tensorflow.compat.v1 as tf\n'), ((2661, 2677), 'tensorflow.compat.v1.app.run', 'tf.app.run', (['main'], {}), '(main)\n', (2671, 2677), True, 'import tensorflow.compat.v1 as tf\n'), ((2200, 2473), 'tensorflow.compat.v1.train.MonitoredTrainingSession', 'tf.train.MonitoredTrainingSession', ([], {'checkpoint_dir': 'FLAGS.train_dir', 'hooks': 'hooks', 'scaffold': 'None', 'save_checkpoint_steps': 'FLAGS.save_every', 'save_checkpoint_secs': 'None', 'save_summaries_steps': 'None', 'save_summaries_secs': 'None', 'log_step_count_steps': 'None', 'max_wait_secs': '(3600)'}), '(checkpoint_dir=FLAGS.train_dir, hooks=\n hooks, scaffold=None, save_checkpoint_steps=FLAGS.save_every,\n save_checkpoint_secs=None, save_summaries_steps=None,\n save_summaries_secs=None, log_step_count_steps=None, max_wait_secs=3600)\n', (2233, 2473), True, 'import tensorflow.compat.v1 
as tf\n'), ((1359, 1395), 'tensorflow.compat.v1.data.make_one_shot_iterator', 'tf.data.make_one_shot_iterator', (['data'], {}), '(data)\n', (1389, 1395), True, 'import tensorflow.compat.v1 as tf\n')]
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import sys
import tensorflow as tf
from tensorflow.python.client import timeline
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
import seq2seq_model
from tensorflow.python.framework import graph_util
flags = tf.flags
logging = tf.logging
logging.set_verbosity(tf.logging.ERROR)
flags.DEFINE_integer("encoder_step", 100, "sequence length")
flags.DEFINE_integer("encoder_layer", 8, "num layer")
flags.DEFINE_integer("decoder_step", 30, "sequence length")
flags.DEFINE_integer("decoder_layer", 4, "num layer")
flags.DEFINE_integer("hidden_size", 128, "hidden size")
flags.DEFINE_integer("batch_size", 1, "mini batch size")
flags.DEFINE_boolean('profile', False, 'profile kernel runtime')
flags.DEFINE_string('backend', 'tf', 'tf or wolong or ngraph')
flags.DEFINE_integer("num_iter", 10, "mini batch size")
flags.DEFINE_integer("warmup", 5, "mini batch size")
flags.DEFINE_boolean('xla', False, 'enable xla')
flags.DEFINE_string('frozen_file', '', 'output path for the frozen pb file')
flags.DEFINE_integer("parallel", 0, "tf.ConfigProto.inter_op_parallelism_threads")
FLAGS = flags.FLAGS
import ctypes
_cudart = ctypes.CDLL('libcudart.so')
def profile_start():
ret = _cudart.cudaProfilerStart()
if ret != 0:
raise Exception("cudaProfilerStart() returned %d" % ret)
def profile_stop():
ret = _cudart.cudaProfilerStop()
if ret != 0:
raise Exception("cudaProfilerStop() returned %d" % ret)
def main(_):
profile_stop()
session_conf = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False,
graph_options=tf.GraphOptions(infer_shapes=True),
inter_op_parallelism_threads=FLAGS.parallel
)
if FLAGS.xla:
session_conf.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
with tf.Graph().as_default(), tf.Session(config=session_conf) as session:
profile_stop()
batch_size = FLAGS.batch_size
model = seq2seq_model.Seq2SeqModel(
batch_size, FLAGS.hidden_size, FLAGS.encoder_layer, FLAGS.encoder_step, FLAGS.decoder_layer, FLAGS.decoder_step)
eval_inputs = tf.placeholder(
tf.float32, [FLAGS.encoder_step, FLAGS.batch_size, FLAGS.hidden_size], 'eval_input')
eval_inputs_list = tf.split(value=eval_inputs, axis=0, num_or_size_splits=FLAGS.encoder_step)
for i in range(len(eval_inputs_list)):
eval_inputs_list[i] = tf.squeeze(eval_inputs_list[i],axis=[0])
logits = model(eval_inputs_list)
lstm_inputs = np.ones(
(FLAGS.encoder_step, FLAGS.batch_size, FLAGS.hidden_size))
session.run(tf.global_variables_initializer())
if FLAGS.frozen_file != '':
constant_graph = graph_util.convert_variables_to_constants(session, session.graph_def, [logits.name.split(':')[0]])
with tf.gfile.GFile(FLAGS.frozen_file, "wb") as f:
f.write(constant_graph.SerializeToString())
if not FLAGS.profile:
# warm up
for i in range(FLAGS.warmup):
res = session.run(logits, {
eval_inputs: lstm_inputs})
out_flat = res.flat
if (len(out_flat) > 0):
max_len = min(10, len(out_flat))
print(logits.name)
print(out_flat[:max_len], "...(size=", len(out_flat), "end with", out_flat[-1], ")")
iter_times = []
profile_start()
for i in range(FLAGS.num_iter):
start_time = time.time()
res = session.run(logits, {
eval_inputs: lstm_inputs})
iter_time = (time.time() - start_time) * 1000
iter_times.append(iter_time)
print("Iteration time %f ms" % (iter_time))
profile_stop()
print("Summary: [min, max, mean] = [%f, %f, %f] ms" % (
min(iter_times), max(iter_times), sum(iter_times) / len(iter_times)))
else:
profile_start()
options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
for i in range(5):
start_time = time.time()
res = session.run(logits, {
eval_inputs: lstm_inputs},
options=options,
run_metadata=run_metadata)
end_time = (time.time() - start_time) * 1000
print("iteration time %f ms" % (end_time))
fetched_timeline = timeline.Timeline(run_metadata.step_stats)
chrome_trace = fetched_timeline.generate_chrome_trace_format()
with open('timelines/timeline_step_%d.json' % i, 'w') as f:
f.write(chrome_trace)
profile_stop()
if __name__ == "__main__":
tf.app.run()
|
[
"tensorflow.GraphOptions",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"numpy.ones",
"time.time",
"tensorflow.RunOptions",
"tensorflow.placeholder",
"tensorflow.python.client.timeline.Timeline",
"ctypes.CDLL",
"tensorflow.RunMetadata",
"seq2seq_model.Seq2SeqModel",
"tensorflow.Graph",
"tensorflow.squeeze",
"tensorflow.gfile.GFile",
"tensorflow.split",
"tensorflow.app.run"
] |
[((1369, 1396), 'ctypes.CDLL', 'ctypes.CDLL', (['"""libcudart.so"""'], {}), "('libcudart.so')\n", (1380, 1396), False, 'import ctypes\n'), ((5180, 5192), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (5190, 5192), True, 'import tensorflow as tf\n'), ((2080, 2111), 'tensorflow.Session', 'tf.Session', ([], {'config': 'session_conf'}), '(config=session_conf)\n', (2090, 2111), True, 'import tensorflow as tf\n'), ((2202, 2346), 'seq2seq_model.Seq2SeqModel', 'seq2seq_model.Seq2SeqModel', (['batch_size', 'FLAGS.hidden_size', 'FLAGS.encoder_layer', 'FLAGS.encoder_step', 'FLAGS.decoder_layer', 'FLAGS.decoder_step'], {}), '(batch_size, FLAGS.hidden_size, FLAGS.\n encoder_layer, FLAGS.encoder_step, FLAGS.decoder_layer, FLAGS.decoder_step)\n', (2228, 2346), False, 'import seq2seq_model\n'), ((2378, 2482), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[FLAGS.encoder_step, FLAGS.batch_size, FLAGS.hidden_size]', '"""eval_input"""'], {}), "(tf.float32, [FLAGS.encoder_step, FLAGS.batch_size, FLAGS.\n hidden_size], 'eval_input')\n", (2392, 2482), True, 'import tensorflow as tf\n'), ((2519, 2593), 'tensorflow.split', 'tf.split', ([], {'value': 'eval_inputs', 'axis': '(0)', 'num_or_size_splits': 'FLAGS.encoder_step'}), '(value=eval_inputs, axis=0, num_or_size_splits=FLAGS.encoder_step)\n', (2527, 2593), True, 'import tensorflow as tf\n'), ((2781, 2847), 'numpy.ones', 'np.ones', (['(FLAGS.encoder_step, FLAGS.batch_size, FLAGS.hidden_size)'], {}), '((FLAGS.encoder_step, FLAGS.batch_size, FLAGS.hidden_size))\n', (2788, 2847), True, 'import numpy as np\n'), ((1837, 1871), 'tensorflow.GraphOptions', 'tf.GraphOptions', ([], {'infer_shapes': '(True)'}), '(infer_shapes=True)\n', (1852, 1871), True, 'import tensorflow as tf\n'), ((2675, 2716), 'tensorflow.squeeze', 'tf.squeeze', (['eval_inputs_list[i]'], {'axis': '[0]'}), '(eval_inputs_list[i], axis=[0])\n', (2685, 2716), True, 'import tensorflow as tf\n'), ((2882, 2915), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2913, 2915), True, 'import tensorflow as tf\n'), ((4340, 4391), 'tensorflow.RunOptions', 'tf.RunOptions', ([], {'trace_level': 'tf.RunOptions.FULL_TRACE'}), '(trace_level=tf.RunOptions.FULL_TRACE)\n', (4353, 4391), True, 'import tensorflow as tf\n'), ((4419, 4435), 'tensorflow.RunMetadata', 'tf.RunMetadata', ([], {}), '()\n', (4433, 4435), True, 'import tensorflow as tf\n'), ((2055, 2065), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2063, 2065), True, 'import tensorflow as tf\n'), ((3099, 3138), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['FLAGS.frozen_file', '"""wb"""'], {}), "(FLAGS.frozen_file, 'wb')\n", (3113, 3138), True, 'import tensorflow as tf\n'), ((3809, 3820), 'time.time', 'time.time', ([], {}), '()\n', (3818, 3820), False, 'import time\n'), ((4496, 4507), 'time.time', 'time.time', ([], {}), '()\n', (4505, 4507), False, 'import time\n'), ((4880, 4922), 'tensorflow.python.client.timeline.Timeline', 'timeline.Timeline', (['run_metadata.step_stats'], {}), '(run_metadata.step_stats)\n', (4897, 4922), False, 'from tensorflow.python.client import timeline\n'), ((3955, 3966), 'time.time', 'time.time', ([], {}), '()\n', (3964, 3966), False, 'import time\n'), ((4753, 4764), 'time.time', 'time.time', ([], {}), '()\n', (4762, 4764), False, 'import time\n')]
|
#!/usr/bin/env python3
import numpy as np
import sys
def make_instance():
    # normal, fire, water, electric, grass, ice, fighting, poison, ground,
# flying, psychic, bug, rock, ghost, dragon, dark, steel, fairy
type_matrix = np.array([[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5, 0.0, 1.0, 1.0, 0.5, 1.0],
[1.0, 0.5, 0.5, 1.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 0.5, 1.0, 0.5, 1.0, 2.0, 1.0],
[1.0, 2.0, 0.5, 1.0, 0.5, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 1.0, 0.5, 1.0, 1.0, 1.0],
[1.0, 1.0, 2.0, 0.5, 0.5, 1.0, 1.0, 1.0, 0.0, 2.0, 1.0, 1.0, 1.0, 1.0, 0.5, 1.0, 1.0, 1.0],
[1.0, 0.5, 2.0, 1.0, 0.5, 1.0, 1.0, 0.5, 2.0, 0.5, 1.0, 0.5, 2.0, 1.0, 0.5, 1.0, 0.5, 1.0],
[1.0, 0.5, 0.5, 1.0, 2.0, 0.5, 1.0, 1.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 0.5, 1.0],
[2.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 0.5, 1.0, 0.5, 0.5, 0.5, 2.0, 0.0, 1.0, 2.0, 2.0, 0.5],
[1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 0.5, 0.5, 1.0, 1.0, 1.0, 0.5, 0.5, 1.0, 1.0, 0.0, 2.0],
[1.0, 2.0, 1.0, 2.0, 0.5, 1.0, 1.0, 2.0, 1.0, 0.0, 1.0, 0.5, 2.0, 1.0, 1.0, 1.0, 2.0, 1.0],
[1.0, 1.0, 1.0, 0.5, 2.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 2.0, 0.5, 1.0, 1.0, 1.0, 0.5, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 1.0, 1.0, 0.5, 1.0, 1.0, 1.0, 1.0, 0.0, 0.5, 1.0],
[1.0, 0.5, 1.0, 1.0, 2.0, 1.0, 0.5, 0.5, 1.0, 0.5, 2.0, 1.0, 1.0, 0.5, 1.0, 2.0, 0.5, 0.5],
[1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 0.5, 1.0, 0.5, 2.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 0.5, 1.0],
[0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 2.0, 1.0, 0.5, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 0.5, 0.0],
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 2.0, 1.0, 0.5, 1.0, 0.5],
[1.0, 0.5, 0.5, 0.5, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 0.5, 2.0],
[1.0, 0.5, 1.0, 1.0, 1.0, 1.0, 2.0, 0.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 0.5, 1.0]])
# make weak_matrix
weak_matrix = np.where(type_matrix==2.0, 1.0, 0.0)
resist_matrix = np.where(type_matrix<1.0, 1.0, 0.0)
# set enemy & skill
# enemy1
enemy1 = [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
skill1 = [[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
# enemy2
enemy2 = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
skill2 = [[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
# enemy3
enemy3 = [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
skill3 = [[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
# combine enemy into one list
enemy = [enemy1, enemy2, enemy3]
# combine skill into one list
skill = [skill1, skill2, skill3]
return type_matrix, weak_matrix, resist_matrix, enemy, skill
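# Hedged illustration of combining the returned matrices: a skill's type-effectiveness
# multiplier against an enemy is the bilinear form skill_vector @ type_matrix @ enemy_vector
# (rows = attacking type, columns = defending type). This driver is only an example of
# intended use, not part of the instance definition itself.
if __name__ == '__main__':
    type_matrix, weak_matrix, resist_matrix, enemy, skill = make_instance()
    first_enemy = np.array(enemy[0], dtype=float)  # the fire-type enemy
    for k, sk in enumerate(skill[1]):  # skills of the second skill set
        mult = np.array(sk, dtype=float) @ type_matrix @ first_enemy
        print("skill {}: x{} against enemy 1".format(k, mult))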
|
[
"numpy.where",
"numpy.array"
] |
[((232, 1995), 'numpy.array', 'np.array', (['[[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5, 0.0, 1.0,\n 1.0, 0.5, 1.0], [1.0, 0.5, 0.5, 1.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0,\n 2.0, 0.5, 1.0, 0.5, 1.0, 2.0, 1.0], [1.0, 2.0, 0.5, 1.0, 0.5, 1.0, 1.0,\n 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 1.0, 0.5, 1.0, 1.0, 1.0], [1.0, 1.0, 2.0,\n 0.5, 0.5, 1.0, 1.0, 1.0, 0.0, 2.0, 1.0, 1.0, 1.0, 1.0, 0.5, 1.0, 1.0, \n 1.0], [1.0, 0.5, 2.0, 1.0, 0.5, 1.0, 1.0, 0.5, 2.0, 0.5, 1.0, 0.5, 2.0,\n 1.0, 0.5, 1.0, 0.5, 1.0], [1.0, 0.5, 0.5, 1.0, 2.0, 0.5, 1.0, 1.0, 2.0,\n 2.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 0.5, 1.0], [2.0, 1.0, 1.0, 1.0, 1.0,\n 2.0, 1.0, 0.5, 1.0, 0.5, 0.5, 0.5, 2.0, 0.0, 1.0, 2.0, 2.0, 0.5], [1.0,\n 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 0.5, 0.5, 1.0, 1.0, 1.0, 0.5, 0.5, 1.0, \n 1.0, 0.0, 2.0], [1.0, 2.0, 1.0, 2.0, 0.5, 1.0, 1.0, 2.0, 1.0, 0.0, 1.0,\n 0.5, 2.0, 1.0, 1.0, 1.0, 2.0, 1.0], [1.0, 1.0, 1.0, 0.5, 2.0, 1.0, 2.0,\n 1.0, 1.0, 1.0, 1.0, 2.0, 0.5, 1.0, 1.0, 1.0, 0.5, 1.0], [1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 2.0, 2.0, 1.0, 1.0, 0.5, 1.0, 1.0, 1.0, 1.0, 0.0, 0.5, \n 1.0], [1.0, 0.5, 1.0, 1.0, 2.0, 1.0, 0.5, 0.5, 1.0, 0.5, 2.0, 1.0, 1.0,\n 0.5, 1.0, 2.0, 0.5, 0.5], [1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 0.5, 1.0, 0.5,\n 2.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 0.5, 1.0], [0.0, 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 2.0, 1.0, 0.5, 1.0, 1.0], [1.0,\n 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, \n 1.0, 0.5, 0.0], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5, 1.0, 1.0, 1.0, 2.0,\n 1.0, 1.0, 2.0, 1.0, 0.5, 1.0, 0.5], [1.0, 0.5, 0.5, 0.5, 1.0, 2.0, 1.0,\n 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 0.5, 2.0], [1.0, 0.5, 1.0,\n 1.0, 1.0, 1.0, 2.0, 0.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 0.5, 1.0]]'], {}), '([[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5,\n 0.0, 1.0, 1.0, 0.5, 1.0], [1.0, 0.5, 0.5, 1.0, 2.0, 2.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 2.0, 0.5, 1.0, 0.5, 1.0, 2.0, 1.0], [1.0, 2.0, 0.5, 1.0, 0.5,\n 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 1.0, 0.5, 1.0, 1.0, 1.0], [1.0,\n 1.0, 2.0, 0.5, 0.5, 1.0, 1.0, 1.0, 0.0, 2.0, 1.0, 1.0, 1.0, 1.0, 0.5, \n 1.0, 1.0, 1.0], [1.0, 0.5, 2.0, 1.0, 0.5, 1.0, 1.0, 0.5, 2.0, 0.5, 1.0,\n 0.5, 2.0, 1.0, 0.5, 1.0, 0.5, 1.0], [1.0, 0.5, 0.5, 1.0, 2.0, 0.5, 1.0,\n 1.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 0.5, 1.0], [2.0, 1.0, 1.0,\n 1.0, 1.0, 2.0, 1.0, 0.5, 1.0, 0.5, 0.5, 0.5, 2.0, 0.0, 1.0, 2.0, 2.0, \n 0.5], [1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 0.5, 0.5, 1.0, 1.0, 1.0, 0.5,\n 0.5, 1.0, 1.0, 0.0, 2.0], [1.0, 2.0, 1.0, 2.0, 0.5, 1.0, 1.0, 2.0, 1.0,\n 0.0, 1.0, 0.5, 2.0, 1.0, 1.0, 1.0, 2.0, 1.0], [1.0, 1.0, 1.0, 0.5, 2.0,\n 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 2.0, 0.5, 1.0, 1.0, 1.0, 0.5, 1.0], [1.0,\n 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 1.0, 1.0, 0.5, 1.0, 1.0, 1.0, 1.0, \n 0.0, 0.5, 1.0], [1.0, 0.5, 1.0, 1.0, 2.0, 1.0, 0.5, 0.5, 1.0, 0.5, 2.0,\n 1.0, 1.0, 0.5, 1.0, 2.0, 0.5, 0.5], [1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 0.5,\n 1.0, 0.5, 2.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 0.5, 1.0], [0.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 2.0, 1.0, 0.5, 1.0, \n 1.0], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,\n 1.0, 2.0, 1.0, 0.5, 0.0], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5, 1.0, 1.0,\n 1.0, 2.0, 1.0, 1.0, 2.0, 1.0, 0.5, 1.0, 0.5], [1.0, 0.5, 0.5, 0.5, 1.0,\n 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 0.5, 2.0], [1.0,\n 0.5, 1.0, 1.0, 1.0, 1.0, 2.0, 0.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, \n 2.0, 0.5, 1.0]])\n', (240, 1995), True, 'import numpy as np\n'), ((2432, 2470), 'numpy.where', 
'np.where', (['(type_matrix == 2.0)', '(1.0)', '(0.0)'], {}), '(type_matrix == 2.0, 1.0, 0.0)\n', (2440, 2470), True, 'import numpy as np\n'), ((2489, 2526), 'numpy.where', 'np.where', (['(type_matrix < 1.0)', '(1.0)', '(0.0)'], {}), '(type_matrix < 1.0, 1.0, 0.0)\n', (2497, 2526), True, 'import numpy as np\n')]
|
import logging
import matplotlib.pyplot as plt
import numpy as np
import pytest
from shapely.affinity import rotate
from pyroll.core import SquareGroove, Profile
groove = SquareGroove(0, 3, tip_depth=20, tip_angle=91 / 180 * np.pi)
def test_from_groove():
Profile.from_groove(groove, width=45, height=50)
Profile.from_groove(groove, filling=0.9, gap=3)
def test_from_groove_errors():
with pytest.raises(TypeError):
Profile.from_groove(groove, width=55, filling=0.9, height=50, gap=3)
with pytest.raises(TypeError):
Profile.from_groove(groove, width=55, height=50, gap=3)
with pytest.raises(TypeError):
Profile.from_groove(groove, width=55, filling=0.9, height=50)
with pytest.raises(TypeError):
Profile.from_groove(groove, height=50)
with pytest.raises(TypeError):
Profile.from_groove(groove, gap=3)
with pytest.raises(TypeError):
Profile.from_groove(groove, width=55)
with pytest.raises(TypeError):
Profile.from_groove(groove, filling=0.9)
with pytest.raises(ValueError):
Profile.from_groove(groove, height=-1, width=50)
with pytest.raises(ValueError):
Profile.from_groove(groove, gap=-1, width=50)
with pytest.raises(ValueError):
Profile.from_groove(groove, width=-1, height=50)
with pytest.raises(ValueError):
Profile.from_groove(groove, filling=0, height=50)
def test_from_groove_warnings(caplog):
logging.getLogger("pyroll").error("Marker Error")
Profile.from_groove(groove, width=55, height=50)
Profile.from_groove(groove, filling=1.1, gap=3)
if not caplog.records:
pytest.xfail("Expected to fail if ran together with CLI tests, since CLI is modifying logging, so pytest does not capture.")
assert len([r for r in caplog.records if r.levelname == "WARNING" and r.msg.startswith("Encountered")]) > 1
def test_round():
p1 = Profile.round(radius=15)
p2 = Profile.round(diameter=30)
assert p1.cross_section == p2.cross_section
def test_round_errors():
with pytest.raises(ValueError):
Profile.round(radius=-1)
with pytest.raises(ValueError):
Profile.round(diameter=0)
def test_square():
p1 = Profile.square(side=10, corner_radius=1)
p2 = Profile.square(diagonal=10 * np.sqrt(2), corner_radius=1)
assert p1.cross_section == p2.cross_section
p3 = Profile.square(side=10)
p4 = Profile.square(diagonal=10 * np.sqrt(2))
assert p3.cross_section == p4.cross_section
def test_square_errors():
with pytest.raises(TypeError):
Profile.square(side=10, diagonal=10)
with pytest.raises(TypeError):
Profile.square()
with pytest.raises(ValueError):
Profile.square(side=-1)
with pytest.raises(ValueError):
Profile.square(diagonal=0)
with pytest.raises(ValueError):
Profile.square(corner_radius=-1, side=10)
def test_box():
Profile.box(height=10, width=20)
Profile.box(height=10, width=20, corner_radius=1)
def test_box_errors():
with pytest.raises(ValueError):
Profile.box(height=-1, width=5)
with pytest.raises(ValueError):
Profile.box(height=10, width=-1)
with pytest.raises(ValueError):
Profile.box(corner_radius=-1, height=10, width=5)
def test_diamond():
Profile.diamond(height=10, width=20)
Profile.diamond(height=10, width=20, corner_radius=1)
def test_diamond_errors():
with pytest.raises(ValueError):
Profile.diamond(height=-1, width=5)
with pytest.raises(ValueError):
Profile.diamond(height=10, width=-1)
with pytest.raises(ValueError):
Profile.diamond(corner_radius=-1, height=10, width=5)
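# A square profile should coincide with a box profile of the same side length
# once the box is rotated by 45 degrees about the origin (symmetric difference area ~ 0).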
def test_square_box_equivalence():
p1 = Profile.square(side=10, corner_radius=0)
p2 = Profile.box(height=10, width=10, corner_radius=0)
assert np.isclose(p1.cross_section.symmetric_difference(rotate(p2.cross_section, angle=45, origin=(0, 0))).area, 0)
p1 = Profile.square(side=10, corner_radius=2)
p2 = Profile.box(height=10, width=10, corner_radius=2)
assert np.isclose(p1.cross_section.symmetric_difference(rotate(p2.cross_section, angle=45, origin=(0, 0))).area, 0)
|
[
"pyroll.core.Profile.from_groove",
"pyroll.core.Profile.round",
"pyroll.core.Profile.diamond",
"pytest.raises",
"pytest.xfail",
"shapely.affinity.rotate",
"pyroll.core.Profile.square",
"pyroll.core.SquareGroove",
"pyroll.core.Profile.box",
"logging.getLogger",
"numpy.sqrt"
] |
[((174, 234), 'pyroll.core.SquareGroove', 'SquareGroove', (['(0)', '(3)'], {'tip_depth': '(20)', 'tip_angle': '(91 / 180 * np.pi)'}), '(0, 3, tip_depth=20, tip_angle=91 / 180 * np.pi)\n', (186, 234), False, 'from pyroll.core import SquareGroove, Profile\n'), ((265, 313), 'pyroll.core.Profile.from_groove', 'Profile.from_groove', (['groove'], {'width': '(45)', 'height': '(50)'}), '(groove, width=45, height=50)\n', (284, 313), False, 'from pyroll.core import SquareGroove, Profile\n'), ((318, 365), 'pyroll.core.Profile.from_groove', 'Profile.from_groove', (['groove'], {'filling': '(0.9)', 'gap': '(3)'}), '(groove, filling=0.9, gap=3)\n', (337, 365), False, 'from pyroll.core import SquareGroove, Profile\n'), ((1509, 1557), 'pyroll.core.Profile.from_groove', 'Profile.from_groove', (['groove'], {'width': '(55)', 'height': '(50)'}), '(groove, width=55, height=50)\n', (1528, 1557), False, 'from pyroll.core import SquareGroove, Profile\n'), ((1562, 1609), 'pyroll.core.Profile.from_groove', 'Profile.from_groove', (['groove'], {'filling': '(1.1)', 'gap': '(3)'}), '(groove, filling=1.1, gap=3)\n', (1581, 1609), False, 'from pyroll.core import SquareGroove, Profile\n'), ((1913, 1937), 'pyroll.core.Profile.round', 'Profile.round', ([], {'radius': '(15)'}), '(radius=15)\n', (1926, 1937), False, 'from pyroll.core import SquareGroove, Profile\n'), ((1947, 1973), 'pyroll.core.Profile.round', 'Profile.round', ([], {'diameter': '(30)'}), '(diameter=30)\n', (1960, 1973), False, 'from pyroll.core import SquareGroove, Profile\n'), ((2219, 2259), 'pyroll.core.Profile.square', 'Profile.square', ([], {'side': '(10)', 'corner_radius': '(1)'}), '(side=10, corner_radius=1)\n', (2233, 2259), False, 'from pyroll.core import SquareGroove, Profile\n'), ((2386, 2409), 'pyroll.core.Profile.square', 'Profile.square', ([], {'side': '(10)'}), '(side=10)\n', (2400, 2409), False, 'from pyroll.core import SquareGroove, Profile\n'), ((2924, 2956), 'pyroll.core.Profile.box', 'Profile.box', ([], {'height': '(10)', 'width': '(20)'}), '(height=10, width=20)\n', (2935, 2956), False, 'from pyroll.core import SquareGroove, Profile\n'), ((2961, 3010), 'pyroll.core.Profile.box', 'Profile.box', ([], {'height': '(10)', 'width': '(20)', 'corner_radius': '(1)'}), '(height=10, width=20, corner_radius=1)\n', (2972, 3010), False, 'from pyroll.core import SquareGroove, Profile\n'), ((3309, 3345), 'pyroll.core.Profile.diamond', 'Profile.diamond', ([], {'height': '(10)', 'width': '(20)'}), '(height=10, width=20)\n', (3324, 3345), False, 'from pyroll.core import SquareGroove, Profile\n'), ((3350, 3403), 'pyroll.core.Profile.diamond', 'Profile.diamond', ([], {'height': '(10)', 'width': '(20)', 'corner_radius': '(1)'}), '(height=10, width=20, corner_radius=1)\n', (3365, 3403), False, 'from pyroll.core import SquareGroove, Profile\n'), ((3738, 3778), 'pyroll.core.Profile.square', 'Profile.square', ([], {'side': '(10)', 'corner_radius': '(0)'}), '(side=10, corner_radius=0)\n', (3752, 3778), False, 'from pyroll.core import SquareGroove, Profile\n'), ((3788, 3837), 'pyroll.core.Profile.box', 'Profile.box', ([], {'height': '(10)', 'width': '(10)', 'corner_radius': '(0)'}), '(height=10, width=10, corner_radius=0)\n', (3799, 3837), False, 'from pyroll.core import SquareGroove, Profile\n'), ((3968, 4008), 'pyroll.core.Profile.square', 'Profile.square', ([], {'side': '(10)', 'corner_radius': '(2)'}), '(side=10, corner_radius=2)\n', (3982, 4008), False, 'from pyroll.core import SquareGroove, Profile\n'), ((4018, 4067), 'pyroll.core.Profile.box', 'Profile.box', 
([], {'height': '(10)', 'width': '(10)', 'corner_radius': '(2)'}), '(height=10, width=10, corner_radius=2)\n', (4029, 4067), False, 'from pyroll.core import SquareGroove, Profile\n'), ((408, 432), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (421, 432), False, 'import pytest\n'), ((442, 510), 'pyroll.core.Profile.from_groove', 'Profile.from_groove', (['groove'], {'width': '(55)', 'filling': '(0.9)', 'height': '(50)', 'gap': '(3)'}), '(groove, width=55, filling=0.9, height=50, gap=3)\n', (461, 510), False, 'from pyroll.core import SquareGroove, Profile\n'), ((520, 544), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (533, 544), False, 'import pytest\n'), ((554, 609), 'pyroll.core.Profile.from_groove', 'Profile.from_groove', (['groove'], {'width': '(55)', 'height': '(50)', 'gap': '(3)'}), '(groove, width=55, height=50, gap=3)\n', (573, 609), False, 'from pyroll.core import SquareGroove, Profile\n'), ((619, 643), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (632, 643), False, 'import pytest\n'), ((653, 714), 'pyroll.core.Profile.from_groove', 'Profile.from_groove', (['groove'], {'width': '(55)', 'filling': '(0.9)', 'height': '(50)'}), '(groove, width=55, filling=0.9, height=50)\n', (672, 714), False, 'from pyroll.core import SquareGroove, Profile\n'), ((724, 748), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (737, 748), False, 'import pytest\n'), ((758, 796), 'pyroll.core.Profile.from_groove', 'Profile.from_groove', (['groove'], {'height': '(50)'}), '(groove, height=50)\n', (777, 796), False, 'from pyroll.core import SquareGroove, Profile\n'), ((806, 830), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (819, 830), False, 'import pytest\n'), ((840, 874), 'pyroll.core.Profile.from_groove', 'Profile.from_groove', (['groove'], {'gap': '(3)'}), '(groove, gap=3)\n', (859, 874), False, 'from pyroll.core import SquareGroove, Profile\n'), ((884, 908), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (897, 908), False, 'import pytest\n'), ((918, 955), 'pyroll.core.Profile.from_groove', 'Profile.from_groove', (['groove'], {'width': '(55)'}), '(groove, width=55)\n', (937, 955), False, 'from pyroll.core import SquareGroove, Profile\n'), ((965, 989), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (978, 989), False, 'import pytest\n'), ((999, 1039), 'pyroll.core.Profile.from_groove', 'Profile.from_groove', (['groove'], {'filling': '(0.9)'}), '(groove, filling=0.9)\n', (1018, 1039), False, 'from pyroll.core import SquareGroove, Profile\n'), ((1049, 1074), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1062, 1074), False, 'import pytest\n'), ((1084, 1132), 'pyroll.core.Profile.from_groove', 'Profile.from_groove', (['groove'], {'height': '(-1)', 'width': '(50)'}), '(groove, height=-1, width=50)\n', (1103, 1132), False, 'from pyroll.core import SquareGroove, Profile\n'), ((1142, 1167), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1155, 1167), False, 'import pytest\n'), ((1177, 1222), 'pyroll.core.Profile.from_groove', 'Profile.from_groove', (['groove'], {'gap': '(-1)', 'width': '(50)'}), '(groove, gap=-1, width=50)\n', (1196, 1222), False, 'from pyroll.core import SquareGroove, Profile\n'), ((1232, 1257), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1245, 1257), False, 'import pytest\n'), ((1267, 1315), 'pyroll.core.Profile.from_groove', 
'Profile.from_groove', (['groove'], {'width': '(-1)', 'height': '(50)'}), '(groove, width=-1, height=50)\n', (1286, 1315), False, 'from pyroll.core import SquareGroove, Profile\n'), ((1325, 1350), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1338, 1350), False, 'import pytest\n'), ((1360, 1409), 'pyroll.core.Profile.from_groove', 'Profile.from_groove', (['groove'], {'filling': '(0)', 'height': '(50)'}), '(groove, filling=0, height=50)\n', (1379, 1409), False, 'from pyroll.core import SquareGroove, Profile\n'), ((1646, 1780), 'pytest.xfail', 'pytest.xfail', (['"""Expected to fail if ran together with CLI tests, since CLI is modifying logging, so pytest does not capture."""'], {}), "(\n 'Expected to fail if ran together with CLI tests, since CLI is modifying logging, so pytest does not capture.'\n )\n", (1658, 1780), False, 'import pytest\n'), ((2059, 2084), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2072, 2084), False, 'import pytest\n'), ((2094, 2118), 'pyroll.core.Profile.round', 'Profile.round', ([], {'radius': '(-1)'}), '(radius=-1)\n', (2107, 2118), False, 'from pyroll.core import SquareGroove, Profile\n'), ((2128, 2153), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2141, 2153), False, 'import pytest\n'), ((2163, 2188), 'pyroll.core.Profile.round', 'Profile.round', ([], {'diameter': '(0)'}), '(diameter=0)\n', (2176, 2188), False, 'from pyroll.core import SquareGroove, Profile\n'), ((2546, 2570), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (2559, 2570), False, 'import pytest\n'), ((2580, 2616), 'pyroll.core.Profile.square', 'Profile.square', ([], {'side': '(10)', 'diagonal': '(10)'}), '(side=10, diagonal=10)\n', (2594, 2616), False, 'from pyroll.core import SquareGroove, Profile\n'), ((2626, 2650), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (2639, 2650), False, 'import pytest\n'), ((2660, 2676), 'pyroll.core.Profile.square', 'Profile.square', ([], {}), '()\n', (2674, 2676), False, 'from pyroll.core import SquareGroove, Profile\n'), ((2686, 2711), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2699, 2711), False, 'import pytest\n'), ((2721, 2744), 'pyroll.core.Profile.square', 'Profile.square', ([], {'side': '(-1)'}), '(side=-1)\n', (2735, 2744), False, 'from pyroll.core import SquareGroove, Profile\n'), ((2754, 2779), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2767, 2779), False, 'import pytest\n'), ((2789, 2815), 'pyroll.core.Profile.square', 'Profile.square', ([], {'diagonal': '(0)'}), '(diagonal=0)\n', (2803, 2815), False, 'from pyroll.core import SquareGroove, Profile\n'), ((2825, 2850), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2838, 2850), False, 'import pytest\n'), ((2860, 2901), 'pyroll.core.Profile.square', 'Profile.square', ([], {'corner_radius': '(-1)', 'side': '(10)'}), '(corner_radius=-1, side=10)\n', (2874, 2901), False, 'from pyroll.core import SquareGroove, Profile\n'), ((3045, 3070), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3058, 3070), False, 'import pytest\n'), ((3080, 3111), 'pyroll.core.Profile.box', 'Profile.box', ([], {'height': '(-1)', 'width': '(5)'}), '(height=-1, width=5)\n', (3091, 3111), False, 'from pyroll.core import SquareGroove, Profile\n'), ((3121, 3146), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3134, 3146), False, 'import pytest\n'), ((3156, 3188), 
'pyroll.core.Profile.box', 'Profile.box', ([], {'height': '(10)', 'width': '(-1)'}), '(height=10, width=-1)\n', (3167, 3188), False, 'from pyroll.core import SquareGroove, Profile\n'), ((3198, 3223), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3211, 3223), False, 'import pytest\n'), ((3233, 3282), 'pyroll.core.Profile.box', 'Profile.box', ([], {'corner_radius': '(-1)', 'height': '(10)', 'width': '(5)'}), '(corner_radius=-1, height=10, width=5)\n', (3244, 3282), False, 'from pyroll.core import SquareGroove, Profile\n'), ((3442, 3467), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3455, 3467), False, 'import pytest\n'), ((3477, 3512), 'pyroll.core.Profile.diamond', 'Profile.diamond', ([], {'height': '(-1)', 'width': '(5)'}), '(height=-1, width=5)\n', (3492, 3512), False, 'from pyroll.core import SquareGroove, Profile\n'), ((3522, 3547), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3535, 3547), False, 'import pytest\n'), ((3557, 3593), 'pyroll.core.Profile.diamond', 'Profile.diamond', ([], {'height': '(10)', 'width': '(-1)'}), '(height=10, width=-1)\n', (3572, 3593), False, 'from pyroll.core import SquareGroove, Profile\n'), ((3603, 3628), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3616, 3628), False, 'import pytest\n'), ((3638, 3691), 'pyroll.core.Profile.diamond', 'Profile.diamond', ([], {'corner_radius': '(-1)', 'height': '(10)', 'width': '(5)'}), '(corner_radius=-1, height=10, width=5)\n', (3653, 3691), False, 'from pyroll.core import SquareGroove, Profile\n'), ((1454, 1481), 'logging.getLogger', 'logging.getLogger', (['"""pyroll"""'], {}), "('pyroll')\n", (1471, 1481), False, 'import logging\n'), ((2298, 2308), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2305, 2308), True, 'import numpy as np\n'), ((2448, 2458), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2455, 2458), True, 'import numpy as np\n'), ((3898, 3947), 'shapely.affinity.rotate', 'rotate', (['p2.cross_section'], {'angle': '(45)', 'origin': '(0, 0)'}), '(p2.cross_section, angle=45, origin=(0, 0))\n', (3904, 3947), False, 'from shapely.affinity import rotate\n'), ((4128, 4177), 'shapely.affinity.rotate', 'rotate', (['p2.cross_section'], {'angle': '(45)', 'origin': '(0, 0)'}), '(p2.cross_section, angle=45, origin=(0, 0))\n', (4134, 4177), False, 'from shapely.affinity import rotate\n')]
|
import numpy as np
from matplotlib import pyplot
try:
import ConfigParser
except ModuleNotFoundError:
import configparser as ConfigParser
import argparse
import h5py
from scipy.signal import savgol_filter
import Pointing
from os import listdir, getcwd
from os.path import isfile, join
import Mapping
import mpi4py
import FitSource
import EphemNew
import healpy as hp
def cel2gal(ra,dec, inverse=False):
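    # Rotate between celestial (RA/Dec) and galactic coordinates in degrees;
    # healpy's Rotator works in colatitude, hence the pi/2 - dec conversion.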
_r, _d = ra*np.pi/180., (np.pi/2. - dec*np.pi/180.)
if inverse:
r = hp.Rotator(coord=['G','C'])
else:
r = hp.Rotator(coord=['C','G'])
_d, _r = r(_d, _r)
return _r*180./np.pi, (np.pi/2. - _d)*180./np.pi
def SlewDistance(az):
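    # Estimate the scan slew distance from the azimuth stream: locate the
    # largest sample-to-sample jumps (scan turnarounds), pair up the
    # corresponding azimuth values and return their median separation.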
daz = np.abs(az[:az.size-1] - az[1:az.size])
# loop over spikes
start = np.argmax(daz)
peaks = [start]
searchRange = 1000
indices = np.arange(daz.size).astype(int)
find = np.zeros(daz.size).astype(bool)
thres = 0.01
while True:
find = find | (indices > start-searchRange) & (indices < start + searchRange)
if (np.sum(find) == daz.size):
break
start = (indices[~find])[np.argmax(daz[~find])]
peaks += [start]
if np.max(daz[find]) < thres:
break
peaks = np.sort(np.array(peaks))
peakAz = az[peaks]
slewDist = np.abs(peakAz[:peakAz.size//2 *2:2] - peakAz[1:peakAz.size//2 *2:2])
return np.median(slewDist)
def main(filename, plotDir='Plots/'):
"""
"""
# Which pixels and sidebands?
pixelOffsets = Pointing.GetPixelOffsets('COMAP_FEEDS.dat')
# READ IN THE DATA
d = h5py.File(filename)
tod = d['spectrometer/tod']
mjd = d['spectrometer/MJD'][:]
if len(d['pointing/az'].shape) > 1:
az = d['pointing/az'][0,:]
el = d['pointing/el'][0,:]
else:
az = d['pointing/az'][:]
el = d['pointing/el'][:]
mjdpoint = d['pointing/MJD'][:]
slewDist = SlewDistance(az)
ra, dec, pa, az, el, mjd = Pointing.GetPointing(az, el, mjd,
mjdpoint, pixelOffsets,
lon=Pointing.comap_lon,
lat=Pointing.comap_lat)
# Calculate data sizes:
nHorns = tod.shape[0]
nSBs = tod.shape[1]
nFreqs = tod.shape[2]
nSamps = tod.shape[3]
# Calculate the position of Jupiter
clon, clat, diam = EphemNew.rdplan(mjd[0:1], 5,
Pointing.comap_lon*np.pi/180.,
Pointing.comap_lat*np.pi/180.)
EphemNew.precess(clon, clat, mjd[0:1])
# Loop over horns/SBs
P1out = None
prefix = filename.split('/')[-1].split('.')[0]
for iHorn in range(nHorns):
print('Processing Horn {:d}'.format(iHorn+1))
_tod = np.nanmean(np.nanmean(tod[iHorn,:,5:-5,:],axis=0),axis=0)
#Tim: Pass this function whatever chunk of time-ordered data you have in memory
P1, P1e, cross, mweight, weight, model = FitSource.FitTOD(_tod,
ra[0,:], # horn 0 because we want the relative offset from Focal Plane
dec[0,:],
clon*180./np.pi,
clat*180./np.pi,
pa[0,:],
prefix='{}_Horn{}'.format(prefix, iHorn+1),
plotDir=plotDir)
if isinstance(P1out, type(None)):
P1out = np.zeros((nHorns, len(P1)))
Peout = np.zeros((nHorns, len(P1e)))
mout = np.zeros(mweight.shape)
hout = np.zeros(weight.shape)
if not isinstance(P1, type(None)):
P1out[iHorn, :] = P1
Peout[iHorn, :] = P1e
mout += mweight*(model+1)**2
hout += weight*(model+1)**2
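    # Combined, weight-normalised map over all feeds, plotted in Az/El offsets (arcmin).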
pyplot.imshow(mout/hout, extent=[-100/2. * 1.5, 100/2.*1.5,-100/2. * 1.5, 100/2.*1.5] )
pyplot.xlabel('Az offset (arcmin)')
pyplot.ylabel('EL offset (arcmin)')
pyplot.title('{}'.format(prefix))
pyplot.grid(True)
pyplot.savefig('{}/FeedPositions_{}.png'.format(plotDir, prefix), bbox_inches='tight')
pyplot.clf()
meanMJD = np.mean(mjd)
meanEl = np.median(el)
meanAz = np.median(az)
d.close()
print('SLEW DISTANCE', slewDist)
return P1out, Peout, mout/hout, meanMJD, meanEl, meanAz
from mpi4py import MPI
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--filename', type=str)
parser.add_argument('--filelist', default=None, type=str)
parser.add_argument('--fitoutputdir', default='.', type=str)
args = parser.parse_args()
P1 = None
if isinstance(args.filelist, type(None)):
main(args.filename)
else:
filelist = np.loadtxt(args.filelist, dtype=str)
for i, f in enumerate(filelist):
print('Opening',f)
_P1, _P1e, m, meanMJD, meanEl, meanAz = main(f)
prefix = f.split('/')[-1].split('.h')[0]
            output = h5py.File('{}/{}_JupiterFits.h5'.format(args.fitoutputdir, prefix), 'w')
output['P1'] = _P1
output['P1e'] = _P1e
coords = np.zeros(3)
coords[:] = meanAz, meanEl, meanMJD,
output['coords'] = coords
output['map'] = m
output.close()
|
[
"numpy.abs",
"argparse.ArgumentParser",
"numpy.sum",
"numpy.argmax",
"matplotlib.pyplot.clf",
"EphemNew.rdplan",
"numpy.mean",
"numpy.arange",
"Pointing.GetPixelOffsets",
"numpy.nanmean",
"matplotlib.pyplot.imshow",
"healpy.Rotator",
"numpy.max",
"numpy.loadtxt",
"Pointing.GetPointing",
"h5py.File",
"numpy.median",
"EphemNew.precess",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid",
"numpy.zeros",
"numpy.array",
"matplotlib.pyplot.xlabel"
] |
[((706, 746), 'numpy.abs', 'np.abs', (['(az[:az.size - 1] - az[1:az.size])'], {}), '(az[:az.size - 1] - az[1:az.size])\n', (712, 746), True, 'import numpy as np\n'), ((785, 799), 'numpy.argmax', 'np.argmax', (['daz'], {}), '(daz)\n', (794, 799), True, 'import numpy as np\n'), ((1321, 1395), 'numpy.abs', 'np.abs', (['(peakAz[:peakAz.size // 2 * 2:2] - peakAz[1:peakAz.size // 2 * 2:2])'], {}), '(peakAz[:peakAz.size // 2 * 2:2] - peakAz[1:peakAz.size // 2 * 2:2])\n', (1327, 1395), True, 'import numpy as np\n'), ((1402, 1421), 'numpy.median', 'np.median', (['slewDist'], {}), '(slewDist)\n', (1411, 1421), True, 'import numpy as np\n'), ((1549, 1592), 'Pointing.GetPixelOffsets', 'Pointing.GetPixelOffsets', (['"""COMAP_FEEDS.dat"""'], {}), "('COMAP_FEEDS.dat')\n", (1573, 1592), False, 'import Pointing\n'), ((1625, 1644), 'h5py.File', 'h5py.File', (['filename'], {}), '(filename)\n', (1634, 1644), False, 'import h5py\n'), ((2001, 2111), 'Pointing.GetPointing', 'Pointing.GetPointing', (['az', 'el', 'mjd', 'mjdpoint', 'pixelOffsets'], {'lon': 'Pointing.comap_lon', 'lat': 'Pointing.comap_lat'}), '(az, el, mjd, mjdpoint, pixelOffsets, lon=Pointing.\n comap_lon, lat=Pointing.comap_lat)\n', (2021, 2111), False, 'import Pointing\n'), ((2469, 2574), 'EphemNew.rdplan', 'EphemNew.rdplan', (['mjd[0:1]', '(5)', '(Pointing.comap_lon * np.pi / 180.0)', '(Pointing.comap_lat * np.pi / 180.0)'], {}), '(mjd[0:1], 5, Pointing.comap_lon * np.pi / 180.0, Pointing.\n comap_lat * np.pi / 180.0)\n', (2484, 2574), False, 'import EphemNew\n'), ((2645, 2683), 'EphemNew.precess', 'EphemNew.precess', (['clon', 'clat', 'mjd[0:1]'], {}), '(clon, clat, mjd[0:1])\n', (2661, 2683), False, 'import EphemNew\n'), ((4181, 4290), 'matplotlib.pyplot.imshow', 'pyplot.imshow', (['(mout / hout)'], {'extent': '[-100 / 2.0 * 1.5, 100 / 2.0 * 1.5, -100 / 2.0 * 1.5, 100 / 2.0 * 1.5]'}), '(mout / hout, extent=[-100 / 2.0 * 1.5, 100 / 2.0 * 1.5, -100 /\n 2.0 * 1.5, 100 / 2.0 * 1.5])\n', (4194, 4290), False, 'from matplotlib import pyplot\n'), ((4273, 4308), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Az offset (arcmin)"""'], {}), "('Az offset (arcmin)')\n", (4286, 4308), False, 'from matplotlib import pyplot\n'), ((4313, 4348), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""EL offset (arcmin)"""'], {}), "('EL offset (arcmin)')\n", (4326, 4348), False, 'from matplotlib import pyplot\n'), ((4391, 4408), 'matplotlib.pyplot.grid', 'pyplot.grid', (['(True)'], {}), '(True)\n', (4402, 4408), False, 'from matplotlib import pyplot\n'), ((4504, 4516), 'matplotlib.pyplot.clf', 'pyplot.clf', ([], {}), '()\n', (4514, 4516), False, 'from matplotlib import pyplot\n'), ((4537, 4549), 'numpy.mean', 'np.mean', (['mjd'], {}), '(mjd)\n', (4544, 4549), True, 'import numpy as np\n'), ((4564, 4577), 'numpy.median', 'np.median', (['el'], {}), '(el)\n', (4573, 4577), True, 'import numpy as np\n'), ((4592, 4605), 'numpy.median', 'np.median', (['az'], {}), '(az)\n', (4601, 4605), True, 'import numpy as np\n'), ((4782, 4807), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4805, 4807), False, 'import argparse\n'), ((514, 542), 'healpy.Rotator', 'hp.Rotator', ([], {'coord': "['G', 'C']"}), "(coord=['G', 'C'])\n", (524, 542), True, 'import healpy as hp\n'), ((564, 592), 'healpy.Rotator', 'hp.Rotator', ([], {'coord': "['C', 'G']"}), "(coord=['C', 'G'])\n", (574, 592), True, 'import healpy as hp\n'), ((1265, 1280), 'numpy.array', 'np.array', (['peaks'], {}), '(peaks)\n', (1273, 1280), True, 'import numpy as np\n'), ((5132, 5168), 
'numpy.loadtxt', 'np.loadtxt', (['args.filelist'], {'dtype': 'str'}), '(args.filelist, dtype=str)\n', (5142, 5168), True, 'import numpy as np\n'), ((857, 876), 'numpy.arange', 'np.arange', (['daz.size'], {}), '(daz.size)\n', (866, 876), True, 'import numpy as np\n'), ((900, 918), 'numpy.zeros', 'np.zeros', (['daz.size'], {}), '(daz.size)\n', (908, 918), True, 'import numpy as np\n'), ((1063, 1075), 'numpy.sum', 'np.sum', (['find'], {}), '(find)\n', (1069, 1075), True, 'import numpy as np\n'), ((1141, 1162), 'numpy.argmax', 'np.argmax', (['daz[~find]'], {}), '(daz[~find])\n', (1150, 1162), True, 'import numpy as np\n'), ((1200, 1217), 'numpy.max', 'np.max', (['daz[find]'], {}), '(daz[find])\n', (1206, 1217), True, 'import numpy as np\n'), ((2891, 2933), 'numpy.nanmean', 'np.nanmean', (['tod[iHorn, :, 5:-5, :]'], {'axis': '(0)'}), '(tod[iHorn, :, 5:-5, :], axis=0)\n', (2901, 2933), True, 'import numpy as np\n'), ((3918, 3941), 'numpy.zeros', 'np.zeros', (['mweight.shape'], {}), '(mweight.shape)\n', (3926, 3941), True, 'import numpy as np\n'), ((3961, 3983), 'numpy.zeros', 'np.zeros', (['weight.shape'], {}), '(weight.shape)\n', (3969, 3983), True, 'import numpy as np\n'), ((5529, 5540), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (5537, 5540), True, 'import numpy as np\n')]
|
#///////////////////////////////////////////////////////////////////////////////
#// BSD 3-Clause License
#//
#// Copyright (C) 2018-2019, New York University , Max Planck Gesellschaft
#// Copyright note valid unless otherwise stated in individual files.
#// All rights reserved.
#///////////////////////////////////////////////////////////////////////////////
# brief Example for using the PinBulletWrapper for a quadruped robot.
from __future__ import print_function
import os
import rospkg
import numpy as np
import time
import robot_properties_solo
from robot_properties_solo.config import SoloConfig
import pybullet as p
import pinocchio as se3
from pinocchio.utils import zero
from py_pinocchio_bullet.wrapper import PinBulletWrapper
class QuadrupedRobot(PinBulletWrapper):
def __init__(self, physicsClient=None):
if physicsClient is None:
self.physicsClient = p.connect(p.DIRECT)
p.setGravity(0,0, -9.81)
p.setPhysicsEngineParameter(fixedTimeStep=1.0/1000.0, numSubSteps=1)
        # Load the ground plane.
plain_urdf = (rospkg.RosPack().get_path("robot_properties_solo") +
"/urdf/plane_with_restitution.urdf")
self.planeId = p.loadURDF(plain_urdf)
# Load the robot
robotStartPos = [0.,0,0.40]
robotStartOrientation = p.getQuaternionFromEuler([0,0,0])
self.urdf_path = SoloConfig.urdf_path
self.robotId = p.loadURDF(self.urdf_path, robotStartPos,
robotStartOrientation, flags=p.URDF_USE_INERTIA_FROM_FILE,
useFixedBase=False)
p.getBasePositionAndOrientation(self.robotId)
# Create the robot wrapper in pinocchio.
package_dirs = [os.path.dirname(os.path.dirname(self.urdf_path)) + '/urdf']
self.pin_robot = SoloConfig.buildRobotWrapper()
# Query all the joints.
num_joints = p.getNumJoints(self.robotId)
for ji in range(num_joints):
p.changeDynamics(self.robotId, ji, linearDamping=.04,
angularDamping=0.04, restitution=0.0, lateralFriction=0.5)
self.base_link_name = "base_link"
self.joint_names = ['FL_HFE', 'FL_KFE', 'FR_HFE', 'FR_KFE', 'HL_HFE',
'HL_KFE', 'HR_HFE', 'HR_KFE']
controlled_joints = ['FL_HFE', 'FL_KFE', 'FR_HFE', 'FR_KFE', 'HL_HFE',
'HL_KFE', 'HR_HFE', 'HR_KFE']
# Creates the wrapper by calling the super.__init__.
super(QuadrupedRobot,self).__init__(self.robotId, self.pin_robot,
controlled_joints,
['FL_ANKLE', 'FR_ANKLE', 'HL_ANKLE', 'HR_ANKLE']
)
if __name__ == "__main__":
np.set_printoptions(precision=2, suppress=True)
# Setup pybullet for the quadruped and a wrapper to pinocchio.
quad = QuadrupedRobot()
# Get the current state and modify the joints to have the legs
# bend inwards.
q, dq = quad.get_state()
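    # q holds the floating base first (3 position + 4 quaternion entries),
    # so the actuated joint angles start at index 7.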
q[7] = q[9] = 0.8
q[11] = q[13] = -0.8
q[8] = q[10] = -1.6
q[12] = q[14] = 1.6
# Take the initial joint states as desired state.
q_des = q[7:].copy()
# Update the simulation state to the new initial configuration.
quad.reset_state(q, dq)
# Run the simulator for 2000 steps = 2 seconds.
for i in range(2000):
# Get the current state (position and velocity)
q, dq = quad.get_state()
active_contact_frames, contact_forces = quad.get_force()
# Alternative, if you want to use properties from the pinocchio robot
# like the jacobian or similar, you can also get the state and update
# the pinocchio internals with one call:
#
# q, dq = quad.get_state_update_pinocchio()
if i % 100 == 0:
print('Forces:', active_contact_frames, contact_forces)
# Compute the command torques at the joints. The torque
# vector only takes the actuated joints (excluding the base)
tau = 5. * (q_des - q[7:]) - 0.1 * dq[6:]
# Send the commands to the robot.
quad.send_joint_command(tau)
# Step the simulator and sleep.
p.stepSimulation()
time.sleep(0.001)
# Print the final active force frames and the forces
force_frames, forces = quad.get_force()
print("Active force_frames:", force_frames)
print("Corresponding forces:", forces)
|
[
"pybullet.getQuaternionFromEuler",
"pybullet.connect",
"numpy.set_printoptions",
"pybullet.stepSimulation",
"pybullet.setGravity",
"pybullet.changeDynamics",
"pybullet.getBasePositionAndOrientation",
"rospkg.RosPack",
"os.path.dirname",
"time.sleep",
"pybullet.setPhysicsEngineParameter",
"robot_properties_solo.config.SoloConfig.buildRobotWrapper",
"pybullet.getNumJoints",
"pybullet.loadURDF"
] |
[((2638, 2685), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)', 'suppress': '(True)'}), '(precision=2, suppress=True)\n', (2657, 2685), True, 'import numpy as np\n'), ((1220, 1242), 'pybullet.loadURDF', 'p.loadURDF', (['plain_urdf'], {}), '(plain_urdf)\n', (1230, 1242), True, 'import pybullet as p\n'), ((1337, 1372), 'pybullet.getQuaternionFromEuler', 'p.getQuaternionFromEuler', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (1361, 1372), True, 'import pybullet as p\n'), ((1441, 1566), 'pybullet.loadURDF', 'p.loadURDF', (['self.urdf_path', 'robotStartPos', 'robotStartOrientation'], {'flags': 'p.URDF_USE_INERTIA_FROM_FILE', 'useFixedBase': '(False)'}), '(self.urdf_path, robotStartPos, robotStartOrientation, flags=p.\n URDF_USE_INERTIA_FROM_FILE, useFixedBase=False)\n', (1451, 1566), True, 'import pybullet as p\n'), ((1594, 1639), 'pybullet.getBasePositionAndOrientation', 'p.getBasePositionAndOrientation', (['self.robotId'], {}), '(self.robotId)\n', (1625, 1639), True, 'import pybullet as p\n'), ((1799, 1829), 'robot_properties_solo.config.SoloConfig.buildRobotWrapper', 'SoloConfig.buildRobotWrapper', ([], {}), '()\n', (1827, 1829), False, 'from robot_properties_solo.config import SoloConfig\n'), ((1884, 1912), 'pybullet.getNumJoints', 'p.getNumJoints', (['self.robotId'], {}), '(self.robotId)\n', (1898, 1912), True, 'import pybullet as p\n'), ((4080, 4098), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (4096, 4098), True, 'import pybullet as p\n'), ((4107, 4124), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (4117, 4124), False, 'import time\n'), ((898, 917), 'pybullet.connect', 'p.connect', (['p.DIRECT'], {}), '(p.DIRECT)\n', (907, 917), True, 'import pybullet as p\n'), ((930, 955), 'pybullet.setGravity', 'p.setGravity', (['(0)', '(0)', '(-9.81)'], {}), '(0, 0, -9.81)\n', (942, 955), True, 'import pybullet as p\n'), ((967, 1037), 'pybullet.setPhysicsEngineParameter', 'p.setPhysicsEngineParameter', ([], {'fixedTimeStep': '(1.0 / 1000.0)', 'numSubSteps': '(1)'}), '(fixedTimeStep=1.0 / 1000.0, numSubSteps=1)\n', (994, 1037), True, 'import pybullet as p\n'), ((1963, 2080), 'pybullet.changeDynamics', 'p.changeDynamics', (['self.robotId', 'ji'], {'linearDamping': '(0.04)', 'angularDamping': '(0.04)', 'restitution': '(0.0)', 'lateralFriction': '(0.5)'}), '(self.robotId, ji, linearDamping=0.04, angularDamping=0.04,\n restitution=0.0, lateralFriction=0.5)\n', (1979, 2080), True, 'import pybullet as p\n'), ((1085, 1101), 'rospkg.RosPack', 'rospkg.RosPack', ([], {}), '()\n', (1099, 1101), False, 'import rospkg\n'), ((1730, 1761), 'os.path.dirname', 'os.path.dirname', (['self.urdf_path'], {}), '(self.urdf_path)\n', (1745, 1761), False, 'import os\n')]
|
import numpy as np
def clamp(value, min, max):
return np.clip(value, min, max)
def lerp(a, b, fraction):
fraction = clamp(fraction, 0, 1)
return a * (1 - fraction) + b * fraction
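# Range-remapping helpers: fit() maps `value` from [omin, omax] to [nmin, nmax];
# fit01/fit10/fit11 assume the input lies in [0, 1] (fit10 inverts the direction)
# or [-1, 1]; fit_to_01 and fit_11_to_01 normalise back down to [0, 1].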
def fit(value, omin, omax, nmin, nmax):
v = (value - omin) / (omax - omin)
return v * (nmax - nmin) + nmin
def fit01(value, min, max):
return value * (max - min) + min
def fit10(value, min, max):
return (1.0 - value) * (max - min) + min
def fit11(value, min, max):
return fit(value, -1, 1, min, max)
def fit_to_01(value, min, max):
return (value - min) / (max - min)
def fit_11_to_01(value):
return (value + 1.0) * 0.5
|
[
"numpy.clip"
] |
[((60, 84), 'numpy.clip', 'np.clip', (['value', 'min', 'max'], {}), '(value, min, max)\n', (67, 84), True, 'import numpy as np\n')]
|
"""
Run few-shot learning on FashionProductImaes dataset using code from github
repo https://github.com/oscarknagg/few-shot under
MIT License
Copyright (c) 2019 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
reproducing the results of Snell et al.'s Prototypical Networks. In places
where substantial changes have been made to the original code, this is marked
with an ADAPTED/BEFORE comment.
"""
import torch
from torch.optim import Adam
import torch.nn.parallel
from torch.utils.data import DataLoader
from torchvision import transforms, models
import warnings
import numpy as np
from typing import Callable, Tuple
from few_shot.models import get_few_shot_encoder
from few_shot.core import NShotTaskSampler, create_nshot_task_label
from few_shot.proto import proto_net_episode
from few_shot.train import fit
from few_shot.callbacks import *
from few_shot.utils import setup_dirs
from few_shot.metrics import categorical_accuracy
from few_shot_learning.datasets import FashionProductImages, \
FashionProductImagesSmall
from few_shot_learning.models import Identity
from config import DATA_PATH, PATH
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
def few_shot_training(
datadir=DATA_PATH,
dataset='fashion',
num_input_channels=3,
drop_lr_every=20,
validation_episodes=200,
evaluation_episodes=1000,
episodes_per_epoch=100,
n_epochs=80,
small_dataset=False,
n_train=1,
n_test=1,
k_train=30,
k_test=5,
q_train=5,
q_test=1,
distance='l2',
pretrained=False,
monitor_validation=False,
n_val_classes=10,
architecture='resnet18',
gpu=None
):
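    # Episodic few-shot convention: n = support shots per class, k = classes
    # (ways) per task, q = query samples per class; the *_train values are used
    # for meta-training tasks and the *_test values for validation/evaluation tasks.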
setup_dirs()
if dataset == 'fashion':
dataset_class = FashionProductImagesSmall if small_dataset \
else FashionProductImages
else:
raise (ValueError, 'Unsupported dataset')
param_str = f'{dataset}_nt={n_train}_kt={k_train}_qt={q_train}_' \
f'nv={n_test}_kv={k_test}_qv={q_test}_small={small_dataset}_' \
f'pretrained={pretrained}_validate={monitor_validation}'
print(param_str)
###################
# Create datasets #
###################
# ADAPTED: data transforms including augmentation
resize = (80, 60) if small_dataset else (400, 300)
background_transform = transforms.Compose([
transforms.RandomResizedCrop(resize, scale=(0.8, 1.0)),
# transforms.RandomGrayscale(),
transforms.RandomPerspective(),
transforms.RandomHorizontalFlip(),
# transforms.Resize(resize),
transforms.ToTensor(),
# transforms.Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
])
evaluation_transform = transforms.Compose([
transforms.Resize(resize),
# transforms.CenterCrop(224),
transforms.ToTensor(),
# transforms.Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
])
if monitor_validation:
if not n_val_classes >= k_test:
n_val_classes = k_test
print("Warning: `n_val_classes` < `k_test`. Take a larger number"
" of validation classes next time. Increased to `k_test`"
" classes")
# class structure for background (training), validation (validation),
# evaluation (test): take a random subset of background classes
validation_classes = list(
np.random.choice(dataset_class.background_classes, n_val_classes))
background_classes = list(set(dataset_class.background_classes).difference(
set(validation_classes)))
# use keyword for evaluation classes
evaluation_classes = 'evaluation'
# Meta-validation set
validation = dataset_class(datadir, split='all',
classes=validation_classes,
transform=evaluation_transform)
# ADAPTED: in the original code, `episodes_per_epoch` was provided to
# `NShotTaskSampler` instead of `validation_episodes`.
validation_sampler = NShotTaskSampler(validation, validation_episodes,
n_test, k_test, q_test)
validation_taskloader = DataLoader(
validation,
batch_sampler=validation_sampler,
num_workers=4
)
else:
# use keyword for both background and evaluation classes
background_classes = 'background'
evaluation_classes = 'evaluation'
# Meta-training set
background = dataset_class(datadir, split='all',
classes=background_classes,
transform=background_transform)
background_sampler = NShotTaskSampler(background, episodes_per_epoch,
n_train, k_train, q_train)
background_taskloader = DataLoader(
background,
batch_sampler=background_sampler,
num_workers=4
)
# Meta-test set
evaluation = dataset_class(datadir, split='all',
classes=evaluation_classes,
transform=evaluation_transform)
# ADAPTED: in the original code, `episodes_per_epoch` was provided to
# `NShotTaskSampler` instead of `evaluation_episodes`.
evaluation_sampler = NShotTaskSampler(evaluation, evaluation_episodes,
n_test, k_test, q_test)
evaluation_taskloader = DataLoader(
evaluation,
batch_sampler=evaluation_sampler,
num_workers=4
)
#########
# Model #
#########
if torch.cuda.is_available():
if gpu is not None:
device = torch.device('cuda', gpu)
else:
device = torch.device('cuda')
torch.backends.cudnn.benchmark = True
else:
device = torch.device('cpu')
if not pretrained:
model = get_few_shot_encoder(num_input_channels)
# ADAPTED
model.to(device)
# BEFORE
# model.to(device, dtype=torch.double)
else:
assert torch.cuda.is_available()
model = models.__dict__[architecture](pretrained=True)
model.fc = Identity()
if gpu is not None:
model = model.cuda(gpu)
else:
model = model.cuda()
# TODO this is too risky: I'm not sure that this can work, since in
# the few-shot github repo the batch axis is actually split into
# support and query samples
# model = torch.nn.DataParallel(model).cuda()
def lr_schedule(epoch, lr):
# Drop lr every 2000 episodes
if epoch % drop_lr_every == 0:
return lr / 2
else:
return lr
############
# Training #
############
print(f'Training Prototypical network on {dataset}...')
optimiser = Adam(model.parameters(), lr=1e-3)
loss_fn = torch.nn.NLLLoss().to(device)
callbacks = [
# ADAPTED: this is the test monitoring now - and is only done at the
# end of training.
EvaluateFewShot(
eval_fn=proto_net_episode,
num_tasks=evaluation_episodes, # THIS IS NOT USED
n_shot=n_test,
k_way=k_test,
q_queries=q_test,
taskloader=evaluation_taskloader,
prepare_batch=prepare_nshot_task(n_test, k_test, q_test, device=device),
distance=distance,
on_epoch_end=False,
on_train_end=True,
prefix='test_'
)
]
if monitor_validation:
callbacks.append(
# ADAPTED: this is the validation monitoring now - computed
# after every epoch.
EvaluateFewShot(
eval_fn=proto_net_episode,
num_tasks=evaluation_episodes, # THIS IS NOT USED
n_shot=n_test,
k_way=k_test,
q_queries=q_test,
# BEFORE taskloader=evaluation_taskloader,
taskloader=validation_taskloader, # ADAPTED
prepare_batch=prepare_nshot_task(n_test, k_test, q_test, device=device),
distance=distance,
on_epoch_end=True, # ADAPTED
on_train_end=False, # ADAPTED
prefix='val_'
)
)
callbacks.extend([
ModelCheckpoint(
filepath=PATH + f'/models/proto_nets/{param_str}.pth',
monitor=f'val_{n_test}-shot_{k_test}-way_acc',
verbose=1, # ADAPTED
save_best_only=monitor_validation # ADAPTED
),
LearningRateScheduler(schedule=lr_schedule),
CSVLogger(PATH + f'/logs/proto_nets/{param_str}.csv'),
])
fit(
model,
optimiser,
loss_fn,
epochs=n_epochs,
dataloader=background_taskloader,
prepare_batch=prepare_nshot_task(n_train, k_train, q_train, device=device),
callbacks=callbacks,
metrics=['categorical_accuracy'],
fit_function=proto_net_episode,
fit_function_kwargs={'n_shot': n_train, 'k_way': k_train,
'q_queries': q_train, 'train': True,
'distance': distance},
)
# ADAPTED: the original code used torch.double
def prepare_nshot_task(n: int, k: int, q: int, device=None) -> Callable:
"""Typical n-shot task preprocessing.
# Arguments
n: Number of samples for each class in the n-shot classification task
k: Number of classes in the n-shot classification task
q: Number of query samples for each class in the n-shot classification task
# Returns
prepare_nshot_task_: A Callable that processes a few shot tasks with specified n, k and q
"""
def prepare_nshot_task_(batch: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[
torch.Tensor, torch.Tensor]:
"""Create 0-k label and move to GPU.
TODO: Move to arbitrary device
"""
x, y = batch
        # BEFORE x = x.double().cuda()
        x = x.to(device)  # ADAPTED
# Create dummy 0-(num_classes - 1) label
y = create_nshot_task_label(k, q).to(device)
return x, y
return prepare_nshot_task_
class EvaluateFewShot(Callback):
"""Evaluate a network on an n-shot, k-way classification tasks after every epoch.
# Arguments
eval_fn: Callable to perform few-shot classification. Examples include `proto_net_episode`,
`matching_net_episode` and `meta_gradient_step` (MAML).
num_tasks: int. Number of n-shot classification tasks to evaluate the model with.
n_shot: int. Number of samples for each class in the n-shot classification tasks.
k_way: int. Number of classes in the n-shot classification tasks.
        q_queries: int. Number of query samples for each class in the n-shot classification tasks.
        taskloader: torch.utils.data.DataLoader yielding n-shot evaluation tasks (built with NShotTaskSampler).
prepare_batch: function. The preprocessing function to apply to samples from the dataset.
prefix: str. Prefix to identify dataset.
"""
def __init__(self,
eval_fn: Callable,
num_tasks: int,
n_shot: int,
k_way: int,
q_queries: int,
taskloader: torch.utils.data.DataLoader,
prepare_batch: Callable,
prefix: str = 'val_',
on_epoch_end: bool = True,
on_train_end: bool = False,
**kwargs):
super(EvaluateFewShot, self).__init__()
self.eval_fn = eval_fn
self.num_tasks = num_tasks
self.n_shot = n_shot
self.k_way = k_way
self.q_queries = q_queries
self.taskloader = taskloader
self.prepare_batch = prepare_batch
self.prefix = prefix
self.kwargs = kwargs
self.metric_name = f'{self.prefix}{self.n_shot}-shot_{self.k_way}-way_acc'
# ADAPTED
self._on_epoch_end = on_epoch_end
self._on_train_end = on_train_end
def on_train_begin(self, logs=None):
self.loss_fn = self.params['loss_fn']
self.optimiser = self.params['optimiser']
# ADAPTED
def on_epoch_end(self, epoch, logs=None):
if self._on_epoch_end:
self._validate(epoch, logs=logs)
# ADAPTED
def on_train_end(self, epoch, logs=None):
if self._on_train_end:
self._validate(epoch, logs=logs)
# ADAPTED
def _validate(self, epoch, logs=None):
logs = logs or {}
seen = 0
totals = {'loss': 0, self.metric_name: 0}
for batch_index, batch in enumerate(self.taskloader):
x, y = self.prepare_batch(batch)
loss, y_pred = self.eval_fn(
self.model,
self.optimiser,
self.loss_fn,
x,
y,
n_shot=self.n_shot,
k_way=self.k_way,
q_queries=self.q_queries,
train=False,
**self.kwargs
)
seen += y_pred.shape[0]
totals['loss'] += loss.item() * y_pred.shape[0]
totals[self.metric_name] += categorical_accuracy(y, y_pred) * \
y_pred.shape[0]
logs[self.prefix + 'loss'] = totals['loss'] / seen
logs[self.metric_name] = totals[self.metric_name] / seen
class ModelCheckpoint(Callback):
"""Save the model after every epoch.
    `filepath` can contain named formatting options, which will be filled with the value of `epoch` and keys in `logs`
(passed in `on_epoch_end`).
For example: if `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`, then the model checkpoints will be saved
with the epoch number and the validation loss in the filename.
# Arguments
filepath: string, path to save the model file.
monitor: quantity to monitor.
verbose: verbosity mode, 0 or 1.
save_best_only: if `save_best_only=True`,
the latest best model according to
the quantity monitored will not be overwritten.
mode: one of {auto, min, max}.
If `save_best_only=True`, the decision
to overwrite the current save file is made
based on either the maximization or the
minimization of the monitored quantity. For `val_acc`,
this should be `max`, for `val_loss` this should
be `min`, etc. In `auto` mode, the direction is
automatically inferred from the name of the monitored quantity.
save_weights_only: if True, then only the model's weights will be
saved (`model.save_weights(filepath)`), else the full model
is saved (`model.save(filepath)`).
period: Interval (number of epochs) between checkpoints.
"""
def __init__(self, filepath, monitor='val_loss', verbose=0,
save_best_only=False, mode='auto', period=1):
super(ModelCheckpoint, self).__init__()
self.monitor = monitor
self.verbose = verbose
self.filepath = filepath
self.save_best_only = save_best_only
self.period = period
self.epochs_since_last_save = 0
if mode not in ['auto', 'min', 'max']:
raise ValueError('Mode must be one of (auto, min, max).')
if mode == 'min':
self.monitor_op = np.less
self.best = np.Inf
elif mode == 'max':
self.monitor_op = np.greater
self.best = -np.Inf
else:
if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
self.monitor_op = np.greater
self.best = -np.Inf
else:
self.monitor_op = np.less
# BEFORE: THIS IS A BUG
# self.best = np.Inf
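                # NOTE: `self.best` is left unset on this code path; in this
                # script the monitored metric name always contains 'acc', so
                # the np.greater branch above is the one actually taken.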
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
self.epochs_since_last_save += 1
if self.epochs_since_last_save >= self.period:
self.epochs_since_last_save = 0
filepath = self.filepath.format(epoch=epoch + 1, **logs)
if self.save_best_only:
current = logs.get(self.monitor)
if current is None:
warnings.warn(
'Can save best model only with %s available, '
'skipping.' % (self.monitor), RuntimeWarning)
else:
if self.monitor_op(current, self.best):
if self.verbose > 0:
print(
'\nEpoch %05d: %s improved from %0.5f to %0.5f,'
' saving model to %s'
% (epoch + 1, self.monitor, self.best,
current, filepath))
self.best = current
torch.save(self.model.state_dict(), filepath)
else:
if self.verbose > 0:
print(
'\nEpoch %05d: %s did not improve from %0.5f' %
(epoch + 1, self.monitor, self.best))
else:
if self.verbose > 0:
print('\nEpoch %05d: saving model to %s' % (
epoch + 1, filepath))
torch.save(self.model.state_dict(), filepath)
|
[
"few_shot.metrics.categorical_accuracy",
"numpy.random.choice",
"few_shot.core.NShotTaskSampler",
"torch.utils.data.DataLoader",
"torchvision.transforms.RandomHorizontalFlip",
"few_shot.core.create_nshot_task_label",
"few_shot_learning.models.Identity",
"torchvision.transforms.RandomPerspective",
"torch.nn.NLLLoss",
"torch.cuda.is_available",
"torch.device",
"few_shot.models.get_few_shot_encoder",
"few_shot.utils.setup_dirs",
"torchvision.transforms.RandomResizedCrop",
"torchvision.transforms.Resize",
"warnings.warn",
"torchvision.transforms.ToTensor"
] |
[((2831, 2843), 'few_shot.utils.setup_dirs', 'setup_dirs', ([], {}), '()\n', (2841, 2843), False, 'from few_shot.utils import setup_dirs\n'), ((5978, 6053), 'few_shot.core.NShotTaskSampler', 'NShotTaskSampler', (['background', 'episodes_per_epoch', 'n_train', 'k_train', 'q_train'], {}), '(background, episodes_per_epoch, n_train, k_train, q_train)\n', (5994, 6053), False, 'from few_shot.core import NShotTaskSampler, create_nshot_task_label\n'), ((6124, 6195), 'torch.utils.data.DataLoader', 'DataLoader', (['background'], {'batch_sampler': 'background_sampler', 'num_workers': '(4)'}), '(background, batch_sampler=background_sampler, num_workers=4)\n', (6134, 6195), False, 'from torch.utils.data import DataLoader\n'), ((6580, 6653), 'few_shot.core.NShotTaskSampler', 'NShotTaskSampler', (['evaluation', 'evaluation_episodes', 'n_test', 'k_test', 'q_test'], {}), '(evaluation, evaluation_episodes, n_test, k_test, q_test)\n', (6596, 6653), False, 'from few_shot.core import NShotTaskSampler, create_nshot_task_label\n'), ((6724, 6795), 'torch.utils.data.DataLoader', 'DataLoader', (['evaluation'], {'batch_sampler': 'evaluation_sampler', 'num_workers': '(4)'}), '(evaluation, batch_sampler=evaluation_sampler, num_workers=4)\n', (6734, 6795), False, 'from torch.utils.data import DataLoader\n'), ((6881, 6906), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6904, 6906), False, 'import torch\n'), ((5324, 5397), 'few_shot.core.NShotTaskSampler', 'NShotTaskSampler', (['validation', 'validation_episodes', 'n_test', 'k_test', 'q_test'], {}), '(validation, validation_episodes, n_test, k_test, q_test)\n', (5340, 5397), False, 'from few_shot.core import NShotTaskSampler, create_nshot_task_label\n'), ((5476, 5547), 'torch.utils.data.DataLoader', 'DataLoader', (['validation'], {'batch_sampler': 'validation_sampler', 'num_workers': '(4)'}), '(validation, batch_sampler=validation_sampler, num_workers=4)\n', (5486, 5547), False, 'from torch.utils.data import DataLoader\n'), ((7112, 7131), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (7124, 7131), False, 'import torch\n'), ((7176, 7216), 'few_shot.models.get_few_shot_encoder', 'get_few_shot_encoder', (['num_input_channels'], {}), '(num_input_channels)\n', (7196, 7216), False, 'from few_shot.models import get_few_shot_encoder\n'), ((7349, 7374), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7372, 7374), False, 'import torch\n'), ((7457, 7467), 'few_shot_learning.models.Identity', 'Identity', ([], {}), '()\n', (7465, 7467), False, 'from few_shot_learning.models import Identity\n'), ((3528, 3582), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['resize'], {'scale': '(0.8, 1.0)'}), '(resize, scale=(0.8, 1.0))\n', (3556, 3582), False, 'from torchvision import transforms, models\n'), ((3632, 3662), 'torchvision.transforms.RandomPerspective', 'transforms.RandomPerspective', ([], {}), '()\n', (3660, 3662), False, 'from torchvision import transforms, models\n'), ((3672, 3705), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (3703, 3705), False, 'from torchvision import transforms, models\n'), ((3752, 3773), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3771, 3773), False, 'from torchvision import transforms, models\n'), ((3955, 3980), 'torchvision.transforms.Resize', 'transforms.Resize', (['resize'], {}), '(resize)\n', (3972, 3980), False, 'from torchvision import transforms, models\n'), ((4028, 
4049), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4047, 4049), False, 'from torchvision import transforms, models\n'), ((4659, 4724), 'numpy.random.choice', 'np.random.choice', (['dataset_class.background_classes', 'n_val_classes'], {}), '(dataset_class.background_classes, n_val_classes)\n', (4675, 4724), True, 'import numpy as np\n'), ((6957, 6982), 'torch.device', 'torch.device', (['"""cuda"""', 'gpu'], {}), "('cuda', gpu)\n", (6969, 6982), False, 'import torch\n'), ((7018, 7038), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (7030, 7038), False, 'import torch\n'), ((8168, 8186), 'torch.nn.NLLLoss', 'torch.nn.NLLLoss', ([], {}), '()\n', (8184, 8186), False, 'import torch\n'), ((11393, 11422), 'few_shot.core.create_nshot_task_label', 'create_nshot_task_label', (['k', 'q'], {}), '(k, q)\n', (11416, 11422), False, 'from few_shot.core import NShotTaskSampler, create_nshot_task_label\n'), ((14483, 14514), 'few_shot.metrics.categorical_accuracy', 'categorical_accuracy', (['y', 'y_pred'], {}), '(y, y_pred)\n', (14503, 14514), False, 'from few_shot.metrics import categorical_accuracy\n'), ((17561, 17666), 'warnings.warn', 'warnings.warn', (["('Can save best model only with %s available, skipping.' % self.monitor)", 'RuntimeWarning'], {}), "('Can save best model only with %s available, skipping.' %\n self.monitor, RuntimeWarning)\n", (17574, 17666), False, 'import warnings\n')]
|
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_series_equal
from visions import StandardSet
from compressio.compress import compress_func
from compressio.type_compressor import DefaultCompressor
bool_dtype = "boolean" if int(pd.__version__.split(".")[0]) >= 1 else "Bool"
@pytest.mark.parametrize(
"series,before,expected",
[
(
pd.Series([10.0, 100.0, np.iinfo(np.int16).max * 1.0], dtype=np.float64),
np.float64,
"int16",
),
(pd.Series([np.nan, 1], dtype=np.float64), np.float64, "Int8"),
(
pd.Series([True, False, None, None, None, None, True, False] * 1000),
            object,  # np.object was removed in NumPy 1.24; the plain builtin is equivalent here
bool_dtype,
),
],
)
def test_compress_series(series, before, expected):
assert series.dtype == before
compressed_series = compress_func(
series,
typeset=StandardSet(),
compressor=DefaultCompressor(),
with_inference=True,
inplace=False,
)
assert str(compressed_series.dtype) == expected
assert_series_equal(series, compressed_series, check_dtype=False)
|
[
"pandas.__version__.split",
"numpy.iinfo",
"visions.StandardSet",
"pandas.Series",
"compressio.type_compressor.DefaultCompressor",
"pandas.testing.assert_series_equal"
] |
[((1089, 1154), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['series', 'compressed_series'], {'check_dtype': '(False)'}), '(series, compressed_series, check_dtype=False)\n', (1108, 1154), False, 'from pandas.testing import assert_series_equal\n'), ((918, 931), 'visions.StandardSet', 'StandardSet', ([], {}), '()\n', (929, 931), False, 'from visions import StandardSet\n'), ((952, 971), 'compressio.type_compressor.DefaultCompressor', 'DefaultCompressor', ([], {}), '()\n', (969, 971), False, 'from compressio.type_compressor import DefaultCompressor\n'), ((539, 579), 'pandas.Series', 'pd.Series', (['[np.nan, 1]'], {'dtype': 'np.float64'}), '([np.nan, 1], dtype=np.float64)\n', (548, 579), True, 'import pandas as pd\n'), ((624, 692), 'pandas.Series', 'pd.Series', (['([True, False, None, None, None, None, True, False] * 1000)'], {}), '([True, False, None, None, None, None, True, False] * 1000)\n', (633, 692), True, 'import pandas as pd\n'), ((267, 292), 'pandas.__version__.split', 'pd.__version__.split', (['"""."""'], {}), "('.')\n", (287, 292), True, 'import pandas as pd\n'), ((424, 442), 'numpy.iinfo', 'np.iinfo', (['np.int16'], {}), '(np.int16)\n', (432, 442), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate
plt.rc('text', usetex=True)
plt.rc('text.latex', preamble=r'\usepackage{amsmath}\usepackage{amssymb}\usepackage{siunitx}')
# Colours
col_b16agss09 = '#A50026'
col_b16gs98 = '#D73027'
col_agss09 = '#F46D43'
col_agss09ph = '#FDAE61'
col_ags05 = '#fEE090'
col_bs05agsop = '#FFFFBF'
col_bs05op = '#E0F3F8'
col_bp04 = '#ABD9E9'
col_bp00 = '#74ADD1'
col_bp98 = '#4575B4'
col_gs98 = '#313695'
def plot_setup(size=6,ratio=0.618):
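    # Note: operates on the module-level `fig` and `ax` created just before each call.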
fig.set_size_inches(size,ratio*size)
ax.tick_params(which='both', direction='in', bottom=True, top=True, left=True, right=True)
ax.tick_params(which='major', length=6)
ax.tick_params(which='minor', length=4)
#plt.minorticks_on()
conversion = 365.0*24.0*60.0*60.0*1.0e4*1.0e-20
res1 = np.genfromtxt("primakoff.dat")
res2 = np.genfromtxt("compton.dat")
res3 = np.genfromtxt("all_ff.dat")
res4 = np.genfromtxt("all_gaee.dat")
res5 = np.genfromtxt("metals.dat")
res6 = np.genfromtxt("TP.dat")
res7 = np.genfromtxt("LP.dat")
res8 = np.genfromtxt("TP_Rosseland.dat")
res9 = np.genfromtxt("LP_Rosseland.dat")
#corr = np.genfromtxt("weighted_compton.dat")
#weighted_compton = interpolate.interp1d(corr[:,0], corr[:,1], bounds_error=False, fill_value=0)
common_path = "../data/benchmarks/"
ref1 = np.genfromtxt(common_path+"2013_redondo_primakoff.dat")
ref2 = np.genfromtxt(common_path+"2013_redondo_compton.dat")
compton = interpolate.interp1d(ref2[:,0], ref2[:,1], bounds_error=False, fill_value=0)
ref3 = np.genfromtxt(common_path+"2013_redondo_ff.dat")
ref4 = np.genfromtxt(common_path+"2013_redondo_all.dat")
ref5 = np.genfromtxt(common_path+"2020_giannotti_TP.dat")
ref6 = np.genfromtxt(common_path+"2020_giannotti_LP.dat")
ref7 = np.genfromtxt(common_path+"2020-o'hare.dat")
ref8 = np.genfromtxt(common_path+"2020_caputo_LP.dat")
conv_fac = 1.0e-4/(365.0*24.0*60.0*60.0*1.0e10)
## Validation plots for axion-photon interactions
# Primakoff approximation [hep-ex/0702006] based on [astro-ph/0402114]
omega = np.linspace(0,10,300)
fig, ax = plt.subplots()
plot_setup()
plt.plot(omega, 6.02*omega**2.481*np.exp(-omega/1.205),':', color=col_agss09, label=r'Primakoff approx. (BP04)')
plt.plot(ref1[:,0], conv_fac*(1.0e4/50.0)*ref1[:,1], '-', color=col_b16agss09, label=r'Primakoff (Redondo)')
plt.plot(res1[:,0], res1[:,1]/1.0e10, 'k--', label=r'Primakoff (AGSS09)')
plt.plot(res6[:,0], res6[:,1]/1.0e10, 'k--', label=r'TP (AGSS09)')
plt.title(r'Axion-photon interactions, $g_{a\gamma\gamma} = \SI{e-10}{\GeV^{-1}}$, OP opacities')
plt.xlabel(r'Energy $\omega$ [keV]')
plt.ylabel(r'Axion flux $\mathrm{d}\Phi_a/\mathrm{d}\omega$ [\SI{e10}{\per\cm\squared\per\keV\per\s}]')
plt.xlim([0,10])
#plt.ylim([0,8])
plt.legend(frameon=False)
plt.savefig("validation_gagg.pdf", bbox_inches='tight')
#plt.show()
plt.close()
fig, ax = plt.subplots()
plot_setup()
plt.plot(omega, 6.02*omega**2.481*np.exp(-omega/1.205),':', color=col_agss09, label=r'Primakoff approx. (BP04)')
plt.plot(ref1[:,0], conv_fac*(1.0e4/50.0)*ref1[:,1], '-', color=col_b16agss09, label=r'Primakoff (Redondo)')
plt.plot(res1[:,0], res1[:,1]/1.0e10, 'k--', label=r'Primakoff (AGSS09)')
plt.plot(res6[:,0], res6[:,1]/1.0e10, 'k-', label=r'TP (AGSS09)')
plt.plot(res8[:,0], res8[:,1]/1.0e10, 'k--', label=r'TP Rosseland (AGSS09)')
plt.plot(ref5[:,0], ref5[:,1]*4.0*1.4995, '-', color='green', label=r'TP (Giannotti)')#correct B conversion in giannotti result and adjust coupling constant
plt.title(r'Axion-photon interactions, $g_{a\gamma\gamma} = \SI{e-10}{\GeV^{-1}}$, OP opacities')
plt.xlabel(r'Energy $\omega$ [keV]')
plt.ylabel(r'Axion flux $\mathrm{d}\Phi_a/\mathrm{d}\omega$ [\SI{e10}{\per\cm\squared\per\keV\per\s}]')
plt.xlim([0.1,10])
plt.yscale('log')
plt.xscale('log')
#plt.ylim([0,8])
plt.legend(frameon=False)
plt.savefig("validation_Tplasmon.pdf", bbox_inches='tight')
plt.show()
plt.close()
fig, ax = plt.subplots()
plot_setup()
plt.plot(omega, 6.02*omega**2.481*np.exp(-omega/1.205),':', color=col_agss09, label=r'Primakoff approx. (BP04)')
plt.plot(ref1[:,0], conv_fac*(1.0e4/50.0)*ref1[:,1], '-', color=col_b16agss09, label=r'Primakoff (Redondo)')
plt.plot(res1[:,0], res1[:,1]/1.0e10, 'k--', label=r'Primakoff (AGSS09)')
plt.plot(res7[:,0], res7[:,1]/1.0e10, 'k-', label=r'LP (AGSS09)')
plt.plot(res9[:,0], res9[:,1]/1.0e10, 'k--', label=r'LP Rosseland (AGSS09)')
plt.plot(ref6[:,0], ref6[:,1]*4.0, '--', color='green', label=r'LP (Giannotti)') # correct coupling
plt.plot(ref7[:,0], ref7[:,1]/1.0e10*4.0/1.7856, '--', color='orange', label=r'LP (O´Hare)') # correct coupling and angular average
plt.plot(ref8[:,0], ref8[:,1]/1.0e10*(3.0/5.0)**2, '--', color='gold', label=r'LP (Caputo)') #correct field values
plt.title(r'Axion-photon interactions, $g_{a\gamma\gamma} = \SI{e-10}{\GeV^{-1}}$, OP opacities')
plt.xlabel(r'Energy $\omega$ [keV]')
plt.ylabel(r'Axion flux $\mathrm{d}\Phi_a/\mathrm{d}\omega$ [\SI{e10}{\per\cm\squared\per\keV\per\s}]')
plt.xlim([0.001,0.4])
plt.yscale('log')
plt.xscale('log')
plt.ylim([0.0,37])
plt.legend(frameon=False)
plt.savefig("validation_Lplasmon.pdf", bbox_inches='tight')
plt.show()
plt.close()
fig, ax = plt.subplots()
## Validation plots for axion-electron interactions
plot_setup()
plt.plot(ref2[:,0], 100.0*conv_fac*(0.5*ref2[:,1]), 'b-', label=r'Compton (Redondo)')
plt.plot(ref3[:,0], 100.0*conv_fac*ref3[:,1], 'm-', label=r'FF (Redondo)')
plt.plot(ref4[:,0], 1.0e11*ref4[:,1]*(1.0e-13/0.511e-10)**2/(24.0*60.0*60.0) - 100.0*conv_fac*(0.5*compton(ref4[:,0])), 'g-', label=r'All')
plt.plot(res2[:,0], res2[:,1]/1.0e8, 'k--', label=r'Compton (B16-AGSS09)')
plt.plot(res3[:,0], res3[:,1]/1.0e8, 'k--', label=r'FF (B16-AGSS09)')
plt.plot(res4[:,0], res4[:,1]/1.0e8, 'k--', label=r'All (B16-AGSS09)')
plt.plot(res5[:,0], res5[:,1]/1.0e8, 'k--', label=r'Metals (B16-AGSS09)')
plt.title(r'Axion-electron interactions, $g_{aee} = \num{e-13}$, OP opacities')
plt.xlabel(r'Energy $\omega$ [keV]')
plt.ylabel(r'Axion flux $\mathrm{d}\Phi_a/\mathrm{d}\omega$ [\SI{e8}{\per\cm\squared\per\keV\per\s}]')
plt.xlim([0,10])
plt.ylim([0,12])
plt.legend(ncol=2, frameon=False)
plt.savefig("validation_gaee.pdf")
#plt.show()
plt.close()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"numpy.genfromtxt",
"matplotlib.pyplot.rc",
"numpy.linspace",
"numpy.exp",
"scipy.interpolate.interp1d",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] |
[((83, 110), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (89, 110), True, 'import matplotlib.pyplot as plt\n'), ((111, 212), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text.latex"""'], {'preamble': '"""\\\\usepackage{amsmath}\\\\usepackage{amssymb}\\\\usepackage{siunitx}"""'}), "('text.latex', preamble=\n '\\\\usepackage{amsmath}\\\\usepackage{amssymb}\\\\usepackage{siunitx}')\n", (117, 212), True, 'import matplotlib.pyplot as plt\n'), ((812, 842), 'numpy.genfromtxt', 'np.genfromtxt', (['"""primakoff.dat"""'], {}), "('primakoff.dat')\n", (825, 842), True, 'import numpy as np\n'), ((850, 878), 'numpy.genfromtxt', 'np.genfromtxt', (['"""compton.dat"""'], {}), "('compton.dat')\n", (863, 878), True, 'import numpy as np\n'), ((886, 913), 'numpy.genfromtxt', 'np.genfromtxt', (['"""all_ff.dat"""'], {}), "('all_ff.dat')\n", (899, 913), True, 'import numpy as np\n'), ((921, 950), 'numpy.genfromtxt', 'np.genfromtxt', (['"""all_gaee.dat"""'], {}), "('all_gaee.dat')\n", (934, 950), True, 'import numpy as np\n'), ((958, 985), 'numpy.genfromtxt', 'np.genfromtxt', (['"""metals.dat"""'], {}), "('metals.dat')\n", (971, 985), True, 'import numpy as np\n'), ((993, 1016), 'numpy.genfromtxt', 'np.genfromtxt', (['"""TP.dat"""'], {}), "('TP.dat')\n", (1006, 1016), True, 'import numpy as np\n'), ((1024, 1047), 'numpy.genfromtxt', 'np.genfromtxt', (['"""LP.dat"""'], {}), "('LP.dat')\n", (1037, 1047), True, 'import numpy as np\n'), ((1055, 1088), 'numpy.genfromtxt', 'np.genfromtxt', (['"""TP_Rosseland.dat"""'], {}), "('TP_Rosseland.dat')\n", (1068, 1088), True, 'import numpy as np\n'), ((1096, 1129), 'numpy.genfromtxt', 'np.genfromtxt', (['"""LP_Rosseland.dat"""'], {}), "('LP_Rosseland.dat')\n", (1109, 1129), True, 'import numpy as np\n'), ((1318, 1375), 'numpy.genfromtxt', 'np.genfromtxt', (["(common_path + '2013_redondo_primakoff.dat')"], {}), "(common_path + '2013_redondo_primakoff.dat')\n", (1331, 1375), True, 'import numpy as np\n'), ((1381, 1436), 'numpy.genfromtxt', 'np.genfromtxt', (["(common_path + '2013_redondo_compton.dat')"], {}), "(common_path + '2013_redondo_compton.dat')\n", (1394, 1436), True, 'import numpy as np\n'), ((1445, 1523), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['ref2[:, 0]', 'ref2[:, 1]'], {'bounds_error': '(False)', 'fill_value': '(0)'}), '(ref2[:, 0], ref2[:, 1], bounds_error=False, fill_value=0)\n', (1465, 1523), False, 'from scipy import interpolate\n'), ((1529, 1579), 'numpy.genfromtxt', 'np.genfromtxt', (["(common_path + '2013_redondo_ff.dat')"], {}), "(common_path + '2013_redondo_ff.dat')\n", (1542, 1579), True, 'import numpy as np\n'), ((1585, 1636), 'numpy.genfromtxt', 'np.genfromtxt', (["(common_path + '2013_redondo_all.dat')"], {}), "(common_path + '2013_redondo_all.dat')\n", (1598, 1636), True, 'import numpy as np\n'), ((1642, 1694), 'numpy.genfromtxt', 'np.genfromtxt', (["(common_path + '2020_giannotti_TP.dat')"], {}), "(common_path + '2020_giannotti_TP.dat')\n", (1655, 1694), True, 'import numpy as np\n'), ((1700, 1752), 'numpy.genfromtxt', 'np.genfromtxt', (["(common_path + '2020_giannotti_LP.dat')"], {}), "(common_path + '2020_giannotti_LP.dat')\n", (1713, 1752), True, 'import numpy as np\n'), ((1758, 1804), 'numpy.genfromtxt', 'np.genfromtxt', (['(common_path + "2020-o\'hare.dat")'], {}), '(common_path + "2020-o\'hare.dat")\n', (1771, 1804), True, 'import numpy as np\n'), ((1810, 1859), 'numpy.genfromtxt', 'np.genfromtxt', (["(common_path + '2020_caputo_LP.dat')"], {}), "(common_path + 
'2020_caputo_LP.dat')\n", (1823, 1859), True, 'import numpy as np\n'), ((2038, 2061), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(300)'], {}), '(0, 10, 300)\n', (2049, 2061), True, 'import numpy as np\n'), ((2071, 2085), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2083, 2085), True, 'import matplotlib.pyplot as plt\n'), ((2212, 2334), 'matplotlib.pyplot.plot', 'plt.plot', (['ref1[:, 0]', '(conv_fac * (10000.0 / 50.0) * ref1[:, 1])', '"""-"""'], {'color': 'col_b16agss09', 'label': '"""Primakoff (Redondo)"""'}), "(ref1[:, 0], conv_fac * (10000.0 / 50.0) * ref1[:, 1], '-', color=\n col_b16agss09, label='Primakoff (Redondo)')\n", (2220, 2334), True, 'import matplotlib.pyplot as plt\n'), ((2321, 2409), 'matplotlib.pyplot.plot', 'plt.plot', (['res1[:, 0]', '(res1[:, 1] / 10000000000.0)', '"""k--"""'], {'label': '"""Primakoff (AGSS09)"""'}), "(res1[:, 0], res1[:, 1] / 10000000000.0, 'k--', label=\n 'Primakoff (AGSS09)')\n", (2329, 2409), True, 'import matplotlib.pyplot as plt\n'), ((2395, 2471), 'matplotlib.pyplot.plot', 'plt.plot', (['res6[:, 0]', '(res6[:, 1] / 10000000000.0)', '"""k--"""'], {'label': '"""TP (AGSS09)"""'}), "(res6[:, 0], res6[:, 1] / 10000000000.0, 'k--', label='TP (AGSS09)')\n", (2403, 2471), True, 'import matplotlib.pyplot as plt\n'), ((2463, 2573), 'matplotlib.pyplot.title', 'plt.title', (['"""Axion-photon interactions, $g_{a\\\\gamma\\\\gamma} = \\\\SI{e-10}{\\\\GeV^{-1}}$, OP opacities"""'], {}), "(\n 'Axion-photon interactions, $g_{a\\\\gamma\\\\gamma} = \\\\SI{e-10}{\\\\GeV^{-1}}$, OP opacities'\n )\n", (2472, 2573), True, 'import matplotlib.pyplot as plt\n'), ((2561, 2597), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Energy $\\\\omega$ [keV]"""'], {}), "('Energy $\\\\omega$ [keV]')\n", (2571, 2597), True, 'import matplotlib.pyplot as plt\n'), ((2598, 2722), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Axion flux $\\\\mathrm{d}\\\\Phi_a/\\\\mathrm{d}\\\\omega$ [\\\\SI{e10}{\\\\per\\\\cm\\\\squared\\\\per\\\\keV\\\\per\\\\s}]"""'], {}), "(\n 'Axion flux $\\\\mathrm{d}\\\\Phi_a/\\\\mathrm{d}\\\\omega$ [\\\\SI{e10}{\\\\per\\\\cm\\\\squared\\\\per\\\\keV\\\\per\\\\s}]'\n )\n", (2608, 2722), True, 'import matplotlib.pyplot as plt\n'), ((2702, 2719), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 10]'], {}), '([0, 10])\n', (2710, 2719), True, 'import matplotlib.pyplot as plt\n'), ((2737, 2762), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'frameon': '(False)'}), '(frameon=False)\n', (2747, 2762), True, 'import matplotlib.pyplot as plt\n'), ((2764, 2819), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""validation_gagg.pdf"""'], {'bbox_inches': '"""tight"""'}), "('validation_gagg.pdf', bbox_inches='tight')\n", (2775, 2819), True, 'import matplotlib.pyplot as plt\n'), ((2832, 2843), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2841, 2843), True, 'import matplotlib.pyplot as plt\n'), ((2856, 2870), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2868, 2870), True, 'import matplotlib.pyplot as plt\n'), ((2997, 3119), 'matplotlib.pyplot.plot', 'plt.plot', (['ref1[:, 0]', '(conv_fac * (10000.0 / 50.0) * ref1[:, 1])', '"""-"""'], {'color': 'col_b16agss09', 'label': '"""Primakoff (Redondo)"""'}), "(ref1[:, 0], conv_fac * (10000.0 / 50.0) * ref1[:, 1], '-', color=\n col_b16agss09, label='Primakoff (Redondo)')\n", (3005, 3119), True, 'import matplotlib.pyplot as plt\n'), ((3106, 3194), 'matplotlib.pyplot.plot', 'plt.plot', (['res1[:, 0]', '(res1[:, 1] / 10000000000.0)', '"""k--"""'], {'label': '"""Primakoff 
(AGSS09)"""'}), "(res1[:, 0], res1[:, 1] / 10000000000.0, 'k--', label=\n 'Primakoff (AGSS09)')\n", (3114, 3194), True, 'import matplotlib.pyplot as plt\n'), ((3180, 3255), 'matplotlib.pyplot.plot', 'plt.plot', (['res6[:, 0]', '(res6[:, 1] / 10000000000.0)', '"""k-"""'], {'label': '"""TP (AGSS09)"""'}), "(res6[:, 0], res6[:, 1] / 10000000000.0, 'k-', label='TP (AGSS09)')\n", (3188, 3255), True, 'import matplotlib.pyplot as plt\n'), ((3246, 3337), 'matplotlib.pyplot.plot', 'plt.plot', (['res8[:, 0]', '(res8[:, 1] / 10000000000.0)', '"""k--"""'], {'label': '"""TP Rosseland (AGSS09)"""'}), "(res8[:, 0], res8[:, 1] / 10000000000.0, 'k--', label=\n 'TP Rosseland (AGSS09)')\n", (3254, 3337), True, 'import matplotlib.pyplot as plt\n'), ((3323, 3419), 'matplotlib.pyplot.plot', 'plt.plot', (['ref5[:, 0]', '(ref5[:, 1] * 4.0 * 1.4995)', '"""-"""'], {'color': '"""green"""', 'label': '"""TP (Giannotti)"""'}), "(ref5[:, 0], ref5[:, 1] * 4.0 * 1.4995, '-', color='green', label=\n 'TP (Giannotti)')\n", (3331, 3419), True, 'import matplotlib.pyplot as plt\n'), ((3481, 3591), 'matplotlib.pyplot.title', 'plt.title', (['"""Axion-photon interactions, $g_{a\\\\gamma\\\\gamma} = \\\\SI{e-10}{\\\\GeV^{-1}}$, OP opacities"""'], {}), "(\n 'Axion-photon interactions, $g_{a\\\\gamma\\\\gamma} = \\\\SI{e-10}{\\\\GeV^{-1}}$, OP opacities'\n )\n", (3490, 3591), True, 'import matplotlib.pyplot as plt\n'), ((3579, 3615), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Energy $\\\\omega$ [keV]"""'], {}), "('Energy $\\\\omega$ [keV]')\n", (3589, 3615), True, 'import matplotlib.pyplot as plt\n'), ((3616, 3740), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Axion flux $\\\\mathrm{d}\\\\Phi_a/\\\\mathrm{d}\\\\omega$ [\\\\SI{e10}{\\\\per\\\\cm\\\\squared\\\\per\\\\keV\\\\per\\\\s}]"""'], {}), "(\n 'Axion flux $\\\\mathrm{d}\\\\Phi_a/\\\\mathrm{d}\\\\omega$ [\\\\SI{e10}{\\\\per\\\\cm\\\\squared\\\\per\\\\keV\\\\per\\\\s}]'\n )\n", (3626, 3740), True, 'import matplotlib.pyplot as plt\n'), ((3720, 3739), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.1, 10]'], {}), '([0.1, 10])\n', (3728, 3739), True, 'import matplotlib.pyplot as plt\n'), ((3739, 3756), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (3749, 3756), True, 'import matplotlib.pyplot as plt\n'), ((3757, 3774), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (3767, 3774), True, 'import matplotlib.pyplot as plt\n'), ((3793, 3818), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'frameon': '(False)'}), '(frameon=False)\n', (3803, 3818), True, 'import matplotlib.pyplot as plt\n'), ((3820, 3879), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""validation_Tplasmon.pdf"""'], {'bbox_inches': '"""tight"""'}), "('validation_Tplasmon.pdf', bbox_inches='tight')\n", (3831, 3879), True, 'import matplotlib.pyplot as plt\n'), ((3880, 3890), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3888, 3890), True, 'import matplotlib.pyplot as plt\n'), ((3891, 3902), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3900, 3902), True, 'import matplotlib.pyplot as plt\n'), ((3915, 3929), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3927, 3929), True, 'import matplotlib.pyplot as plt\n'), ((4056, 4178), 'matplotlib.pyplot.plot', 'plt.plot', (['ref1[:, 0]', '(conv_fac * (10000.0 / 50.0) * ref1[:, 1])', '"""-"""'], {'color': 'col_b16agss09', 'label': '"""Primakoff (Redondo)"""'}), "(ref1[:, 0], conv_fac * (10000.0 / 50.0) * ref1[:, 1], '-', color=\n col_b16agss09, label='Primakoff (Redondo)')\n", (4064, 
4178), True, 'import matplotlib.pyplot as plt\n'), ((4165, 4253), 'matplotlib.pyplot.plot', 'plt.plot', (['res1[:, 0]', '(res1[:, 1] / 10000000000.0)', '"""k--"""'], {'label': '"""Primakoff (AGSS09)"""'}), "(res1[:, 0], res1[:, 1] / 10000000000.0, 'k--', label=\n 'Primakoff (AGSS09)')\n", (4173, 4253), True, 'import matplotlib.pyplot as plt\n'), ((4239, 4314), 'matplotlib.pyplot.plot', 'plt.plot', (['res7[:, 0]', '(res7[:, 1] / 10000000000.0)', '"""k-"""'], {'label': '"""LP (AGSS09)"""'}), "(res7[:, 0], res7[:, 1] / 10000000000.0, 'k-', label='LP (AGSS09)')\n", (4247, 4314), True, 'import matplotlib.pyplot as plt\n'), ((4305, 4396), 'matplotlib.pyplot.plot', 'plt.plot', (['res9[:, 0]', '(res9[:, 1] / 10000000000.0)', '"""k--"""'], {'label': '"""LP Rosseland (AGSS09)"""'}), "(res9[:, 0], res9[:, 1] / 10000000000.0, 'k--', label=\n 'LP Rosseland (AGSS09)')\n", (4313, 4396), True, 'import matplotlib.pyplot as plt\n'), ((4382, 4470), 'matplotlib.pyplot.plot', 'plt.plot', (['ref6[:, 0]', '(ref6[:, 1] * 4.0)', '"""--"""'], {'color': '"""green"""', 'label': '"""LP (Giannotti)"""'}), "(ref6[:, 0], ref6[:, 1] * 4.0, '--', color='green', label=\n 'LP (Giannotti)')\n", (4390, 4470), True, 'import matplotlib.pyplot as plt\n'), ((4482, 4593), 'matplotlib.pyplot.plot', 'plt.plot', (['ref7[:, 0]', '(ref7[:, 1] / 10000000000.0 * 4.0 / 1.7856)', '"""--"""'], {'color': '"""orange"""', 'label': '"""LP (O´Hare)"""'}), "(ref7[:, 0], ref7[:, 1] / 10000000000.0 * 4.0 / 1.7856, '--', color\n ='orange', label='LP (O´Hare)')\n", (4490, 4593), True, 'import matplotlib.pyplot as plt\n'), ((4614, 4726), 'matplotlib.pyplot.plot', 'plt.plot', (['ref8[:, 0]', '(ref8[:, 1] / 10000000000.0 * (3.0 / 5.0) ** 2)', '"""--"""'], {'color': '"""gold"""', 'label': '"""LP (Caputo)"""'}), "(ref8[:, 0], ref8[:, 1] / 10000000000.0 * (3.0 / 5.0) ** 2, '--',\n color='gold', label='LP (Caputo)')\n", (4622, 4726), True, 'import matplotlib.pyplot as plt\n'), ((4731, 4841), 'matplotlib.pyplot.title', 'plt.title', (['"""Axion-photon interactions, $g_{a\\\\gamma\\\\gamma} = \\\\SI{e-10}{\\\\GeV^{-1}}$, OP opacities"""'], {}), "(\n 'Axion-photon interactions, $g_{a\\\\gamma\\\\gamma} = \\\\SI{e-10}{\\\\GeV^{-1}}$, OP opacities'\n )\n", (4740, 4841), True, 'import matplotlib.pyplot as plt\n'), ((4829, 4865), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Energy $\\\\omega$ [keV]"""'], {}), "('Energy $\\\\omega$ [keV]')\n", (4839, 4865), True, 'import matplotlib.pyplot as plt\n'), ((4866, 4990), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Axion flux $\\\\mathrm{d}\\\\Phi_a/\\\\mathrm{d}\\\\omega$ [\\\\SI{e10}{\\\\per\\\\cm\\\\squared\\\\per\\\\keV\\\\per\\\\s}]"""'], {}), "(\n 'Axion flux $\\\\mathrm{d}\\\\Phi_a/\\\\mathrm{d}\\\\omega$ [\\\\SI{e10}{\\\\per\\\\cm\\\\squared\\\\per\\\\keV\\\\per\\\\s}]'\n )\n", (4876, 4990), True, 'import matplotlib.pyplot as plt\n'), ((4970, 4992), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.001, 0.4]'], {}), '([0.001, 0.4])\n', (4978, 4992), True, 'import matplotlib.pyplot as plt\n'), ((4992, 5009), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (5002, 5009), True, 'import matplotlib.pyplot as plt\n'), ((5010, 5027), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (5020, 5027), True, 'import matplotlib.pyplot as plt\n'), ((5028, 5047), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 37]'], {}), '([0.0, 37])\n', (5036, 5047), True, 'import matplotlib.pyplot as plt\n'), ((5048, 5073), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'frameon': 
'(False)'}), '(frameon=False)\n', (5058, 5073), True, 'import matplotlib.pyplot as plt\n'), ((5075, 5134), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""validation_Lplasmon.pdf"""'], {'bbox_inches': '"""tight"""'}), "('validation_Lplasmon.pdf', bbox_inches='tight')\n", (5086, 5134), True, 'import matplotlib.pyplot as plt\n'), ((5135, 5145), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5143, 5145), True, 'import matplotlib.pyplot as plt\n'), ((5146, 5157), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5155, 5157), True, 'import matplotlib.pyplot as plt\n'), ((5169, 5183), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5181, 5183), True, 'import matplotlib.pyplot as plt\n'), ((5249, 5346), 'matplotlib.pyplot.plot', 'plt.plot', (['ref2[:, 0]', '(100.0 * conv_fac * (0.5 * ref2[:, 1]))', '"""b-"""'], {'label': '"""Compton (Redondo)"""'}), "(ref2[:, 0], 100.0 * conv_fac * (0.5 * ref2[:, 1]), 'b-', label=\n 'Compton (Redondo)')\n", (5257, 5346), True, 'import matplotlib.pyplot as plt\n'), ((5335, 5414), 'matplotlib.pyplot.plot', 'plt.plot', (['ref3[:, 0]', '(100.0 * conv_fac * ref3[:, 1])', '"""m-"""'], {'label': '"""FF (Redondo)"""'}), "(ref3[:, 0], 100.0 * conv_fac * ref3[:, 1], 'm-', label='FF (Redondo)')\n", (5343, 5414), True, 'import matplotlib.pyplot as plt\n'), ((5550, 5638), 'matplotlib.pyplot.plot', 'plt.plot', (['res2[:, 0]', '(res2[:, 1] / 100000000.0)', '"""k--"""'], {'label': '"""Compton (B16-AGSS09)"""'}), "(res2[:, 0], res2[:, 1] / 100000000.0, 'k--', label=\n 'Compton (B16-AGSS09)')\n", (5558, 5638), True, 'import matplotlib.pyplot as plt\n'), ((5625, 5703), 'matplotlib.pyplot.plot', 'plt.plot', (['res3[:, 0]', '(res3[:, 1] / 100000000.0)', '"""k--"""'], {'label': '"""FF (B16-AGSS09)"""'}), "(res3[:, 0], res3[:, 1] / 100000000.0, 'k--', label='FF (B16-AGSS09)')\n", (5633, 5703), True, 'import matplotlib.pyplot as plt\n'), ((5695, 5774), 'matplotlib.pyplot.plot', 'plt.plot', (['res4[:, 0]', '(res4[:, 1] / 100000000.0)', '"""k--"""'], {'label': '"""All (B16-AGSS09)"""'}), "(res4[:, 0], res4[:, 1] / 100000000.0, 'k--', label='All (B16-AGSS09)')\n", (5703, 5774), True, 'import matplotlib.pyplot as plt\n'), ((5766, 5853), 'matplotlib.pyplot.plot', 'plt.plot', (['res5[:, 0]', '(res5[:, 1] / 100000000.0)', '"""k--"""'], {'label': '"""Metals (B16-AGSS09)"""'}), "(res5[:, 0], res5[:, 1] / 100000000.0, 'k--', label=\n 'Metals (B16-AGSS09)')\n", (5774, 5853), True, 'import matplotlib.pyplot as plt\n'), ((5841, 5920), 'matplotlib.pyplot.title', 'plt.title', (['"""Axion-electron interactions, $g_{aee} = \\\\num{e-13}$, OP opacities"""'], {}), "('Axion-electron interactions, $g_{aee} = \\\\num{e-13}$, OP opacities')\n", (5850, 5920), True, 'import matplotlib.pyplot as plt\n'), ((5921, 5957), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Energy $\\\\omega$ [keV]"""'], {}), "('Energy $\\\\omega$ [keV]')\n", (5931, 5957), True, 'import matplotlib.pyplot as plt\n'), ((5958, 6081), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Axion flux $\\\\mathrm{d}\\\\Phi_a/\\\\mathrm{d}\\\\omega$ [\\\\SI{e8}{\\\\per\\\\cm\\\\squared\\\\per\\\\keV\\\\per\\\\s}]"""'], {}), "(\n 'Axion flux $\\\\mathrm{d}\\\\Phi_a/\\\\mathrm{d}\\\\omega$ [\\\\SI{e8}{\\\\per\\\\cm\\\\squared\\\\per\\\\keV\\\\per\\\\s}]'\n )\n", (5968, 6081), True, 'import matplotlib.pyplot as plt\n'), ((6061, 6078), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 10]'], {}), '([0, 10])\n', (6069, 6078), True, 'import matplotlib.pyplot as plt\n'), ((6078, 6095), 'matplotlib.pyplot.ylim', 'plt.ylim', 
(['[0, 12]'], {}), '([0, 12])\n', (6086, 6095), True, 'import matplotlib.pyplot as plt\n'), ((6096, 6129), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'ncol': '(2)', 'frameon': '(False)'}), '(ncol=2, frameon=False)\n', (6106, 6129), True, 'import matplotlib.pyplot as plt\n'), ((6131, 6165), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""validation_gaee.pdf"""'], {}), "('validation_gaee.pdf')\n", (6142, 6165), True, 'import matplotlib.pyplot as plt\n'), ((6178, 6189), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6187, 6189), True, 'import matplotlib.pyplot as plt\n'), ((2133, 2155), 'numpy.exp', 'np.exp', (['(-omega / 1.205)'], {}), '(-omega / 1.205)\n', (2139, 2155), True, 'import numpy as np\n'), ((2918, 2940), 'numpy.exp', 'np.exp', (['(-omega / 1.205)'], {}), '(-omega / 1.205)\n', (2924, 2940), True, 'import numpy as np\n'), ((3977, 3999), 'numpy.exp', 'np.exp', (['(-omega / 1.205)'], {}), '(-omega / 1.205)\n', (3983, 3999), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# Copyright (C) 2014 Open Data ("Open Data" refers to
# one or more of the following companies: Open Data Partners LLC,
# Open Data Research LLC, or Open Data Capital LLC.)
#
# This file is part of Hadrian.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import unittest
import numpy
from titus.genpy import PFAEngine
from titus.producer.tools import look
from titus.producer.cart import *
class TestProducerCart(unittest.TestCase):
@staticmethod
def data():
while True:
x = random.uniform(0, 10)
y = random.uniform(0, 10)
if x < 4.0:
if y < 6.0:
z = random.gauss(5, 1)
else:
z = random.gauss(8, 1)
else:
if y < 2.0:
z = random.gauss(1, 1)
else:
z = random.gauss(2, 1)
if z < 0.0:
z = 0.0
elif z >= 10.0:
z = 9.99999
a = "A" + str(int(x))
b = "B" + str(int(y/2) * 2)
c = "C" + str(int(z/3) * 3)
yield (x, y, z, a, b, c)
def testCartMustBuildNumericalNumerical(self):
random.seed(12345)
numpy.seterr(divide="ignore", invalid="ignore")
dataset = Dataset.fromIterable(((x, y, z) for (x, y, z, a, b, c) in TestProducerCart.data()), 100000, ("x", "y", "z"))
tree = TreeNode.fromWholeDataset(dataset, "z")
tree.splitMaxDepth(2)
doc = tree.pfaDocument({"type": "record", "name": "Datum", "fields": [{"name": "x", "type": "double"}, {"name": "y", "type": "double"}]}, "TreeNode")
# look(doc, maxDepth=8)
self.assertEqual(doc["cells"]["tree"]["init"]["field"], "x")
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["value"], 4.00, places=2)
self.assertEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["field"], "y")
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["value"], 6.00, places=2)
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["pass"]["double"], 5.00, places=2)
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["fail"]["double"], 8.02, places=2)
self.assertEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["field"], "y")
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["value"], 2.00, places=2)
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["pass"]["double"], 1.09, places=2)
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["fail"]["double"], 2.00, places=2)
engine, = PFAEngine.fromJson(doc)
self.assertAlmostEqual(engine.action({"x": 2.0, "y": 3.0}), 5.00, places=2)
self.assertAlmostEqual(engine.action({"x": 2.0, "y": 8.0}), 8.02, places=2)
self.assertAlmostEqual(engine.action({"x": 7.0, "y": 1.0}), 1.09, places=2)
self.assertAlmostEqual(engine.action({"x": 7.0, "y": 5.0}), 2.00, places=2)
doc = tree.pfaDocument(
{"type": "record", "name": "Datum", "fields": [{"name": "x", "type": "double"}, {"name": "y", "type": "double"}]},
"TreeNode",
nodeScores=True, datasetSize=True, predictandUnique=True, nTimesVariance=True, gain=True)
# look(doc, maxDepth=8)
engine, = PFAEngine.fromJson(doc)
def testCartMustBuildNumericalCategorical(self):
random.seed(12345)
numpy.seterr(divide="ignore", invalid="ignore")
dataset = Dataset.fromIterable(((x, y, c) for (x, y, z, a, b, c) in TestProducerCart.data()), 100000, ("x", "y", "c"))
tree = TreeNode.fromWholeDataset(dataset, "c")
tree.splitMaxDepth(2)
doc = tree.pfaDocument({"type": "record", "name": "Datum", "fields": [{"name": "x", "type": "double"}, {"name": "y", "type": "double"}]}, "TreeNode")
# look(doc, maxDepth=8)
self.assertEqual(doc["cells"]["tree"]["init"]["field"], "x")
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["value"], 4.00, places=2)
self.assertEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["field"], "y")
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["value"], 6.00, places=2)
self.assertEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["pass"]["string"], "C3")
self.assertEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["fail"]["string"], "C6")
self.assertEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["field"], "y")
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["value"], 2.00, places=2)
self.assertEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["pass"]["string"], "C0")
self.assertEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["fail"]["string"], "C0")
engine, = PFAEngine.fromJson(doc)
self.assertEqual(engine.action({"x": 2.0, "y": 3.0}), "C3")
self.assertEqual(engine.action({"x": 2.0, "y": 8.0}), "C6")
self.assertEqual(engine.action({"x": 7.0, "y": 1.0}), "C0")
self.assertEqual(engine.action({"x": 7.0, "y": 5.0}), "C0")
doc = tree.pfaDocument(
{"type": "record", "name": "Datum", "fields": [{"name": "x", "type": "double"}, {"name": "y", "type": "double"}]},
"TreeNode",
nodeScores=True, datasetSize=True, predictandDistribution=True, predictandUnique=True, entropy=True, gain=True)
# look(doc, maxDepth=8)
engine, = PFAEngine.fromJson(doc)
def testCartMustBuildCategoricalNumerical(self):
random.seed(12345)
numpy.seterr(divide="ignore", invalid="ignore")
dataset = Dataset.fromIterable(((a, b, z) for (x, y, z, a, b, c) in TestProducerCart.data()), 100000, ("a", "b", "z"))
tree = TreeNode.fromWholeDataset(dataset, "z")
tree.splitMaxDepth(2)
doc = tree.pfaDocument({"type": "record", "name": "Datum", "fields": [{"name": "a", "type": "string"}, {"name": "b", "type": "string"}]}, "TreeNode")
# look(doc, maxDepth=8)
self.assertEqual(doc["cells"]["tree"]["init"]["field"], "a")
self.assertEqual(doc["cells"]["tree"]["init"]["value"], ["A0", "A1", "A2", "A3"])
self.assertEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["field"], "b")
self.assertEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["value"], ["B6", "B8"])
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["pass"]["double"], 8.02, places=2)
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["fail"]["double"], 5.00, places=2)
self.assertEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["field"], "b")
self.assertEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["value"], ["B0"])
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["pass"]["double"], 1.09, places=2)
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["fail"]["double"], 2.00, places=2)
engine, = PFAEngine.fromJson(doc)
self.assertAlmostEqual(engine.action({"a": "A1", "b": "B6"}), 8.02, places=2)
self.assertAlmostEqual(engine.action({"a": "A1", "b": "B2"}), 5.00, places=2)
self.assertAlmostEqual(engine.action({"a": "A5", "b": "B0"}), 1.09, places=2)
self.assertAlmostEqual(engine.action({"a": "A5", "b": "B4"}), 2.00, places=2)
doc = tree.pfaDocument(
{"type": "record", "name": "Datum", "fields": [{"name": "a", "type": "string"}, {"name": "b", "type": "string"}]},
"TreeNode",
nodeScores=True, datasetSize=True, predictandUnique=True, nTimesVariance=True, gain=True)
# look(doc, maxDepth=8)
engine, = PFAEngine.fromJson(doc)
def testCartMustBuildCategoricalCategorical(self):
random.seed(12345)
numpy.seterr(divide="ignore", invalid="ignore")
dataset = Dataset.fromIterable(((a, b, c) for (x, y, z, a, b, c) in TestProducerCart.data()), 100000, ("a", "b", "c"))
tree = TreeNode.fromWholeDataset(dataset, "c")
tree.splitMaxDepth(2)
doc = tree.pfaDocument({"type": "record", "name": "Datum", "fields": [{"name": "a", "type": "string"}, {"name": "b", "type": "string"}]}, "TreeNode")
# look(doc, maxDepth=8)
self.assertEqual(doc["cells"]["tree"]["init"]["field"], "a")
self.assertEqual(doc["cells"]["tree"]["init"]["value"], ["A0", "A1", "A2", "A3"])
self.assertEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["field"], "b")
self.assertEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["value"], ["B6", "B8"])
self.assertEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["pass"]["string"], "C6")
self.assertEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["fail"]["string"], "C3")
self.assertEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["field"], "b")
self.assertEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["value"], ["B0"])
self.assertEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["pass"]["string"], "C0")
self.assertEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["fail"]["string"], "C0")
engine, = PFAEngine.fromJson(doc)
self.assertEqual(engine.action({"a": "A1", "b": "B6"}), "C6")
self.assertEqual(engine.action({"a": "A1", "b": "B2"}), "C3")
self.assertEqual(engine.action({"a": "A5", "b": "B0"}), "C0")
self.assertEqual(engine.action({"a": "A5", "b": "B4"}), "C0")
doc = tree.pfaDocument(
{"type": "record", "name": "Datum", "fields": [{"name": "a", "type": "string"}, {"name": "b", "type": "string"}]},
"TreeNode",
nodeScores=True, datasetSize=True, predictandDistribution=True, predictandUnique=True, entropy=True, gain=True)
# look(doc, maxDepth=8)
engine, = PFAEngine.fromJson(doc)
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"random.uniform",
"numpy.seterr",
"random.seed",
"random.gauss",
"titus.genpy.PFAEngine.fromJson"
] |
[((10616, 10631), 'unittest.main', 'unittest.main', ([], {}), '()\n', (10629, 10631), False, 'import unittest\n'), ((1732, 1750), 'random.seed', 'random.seed', (['(12345)'], {}), '(12345)\n', (1743, 1750), False, 'import random\n'), ((1759, 1806), 'numpy.seterr', 'numpy.seterr', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (1771, 1806), False, 'import numpy\n'), ((3236, 3259), 'titus.genpy.PFAEngine.fromJson', 'PFAEngine.fromJson', (['doc'], {}), '(doc)\n', (3254, 3259), False, 'from titus.genpy import PFAEngine\n'), ((3932, 3955), 'titus.genpy.PFAEngine.fromJson', 'PFAEngine.fromJson', (['doc'], {}), '(doc)\n', (3950, 3955), False, 'from titus.genpy import PFAEngine\n'), ((4018, 4036), 'random.seed', 'random.seed', (['(12345)'], {}), '(12345)\n', (4029, 4036), False, 'import random\n'), ((4045, 4092), 'numpy.seterr', 'numpy.seterr', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (4057, 4092), False, 'import numpy\n'), ((5458, 5481), 'titus.genpy.PFAEngine.fromJson', 'PFAEngine.fromJson', (['doc'], {}), '(doc)\n', (5476, 5481), False, 'from titus.genpy import PFAEngine\n'), ((6112, 6135), 'titus.genpy.PFAEngine.fromJson', 'PFAEngine.fromJson', (['doc'], {}), '(doc)\n', (6130, 6135), False, 'from titus.genpy import PFAEngine\n'), ((6198, 6216), 'random.seed', 'random.seed', (['(12345)'], {}), '(12345)\n', (6209, 6216), False, 'import random\n'), ((6225, 6272), 'numpy.seterr', 'numpy.seterr', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (6237, 6272), False, 'import numpy\n'), ((7684, 7707), 'titus.genpy.PFAEngine.fromJson', 'PFAEngine.fromJson', (['doc'], {}), '(doc)\n', (7702, 7707), False, 'from titus.genpy import PFAEngine\n'), ((8388, 8411), 'titus.genpy.PFAEngine.fromJson', 'PFAEngine.fromJson', (['doc'], {}), '(doc)\n', (8406, 8411), False, 'from titus.genpy import PFAEngine\n'), ((8476, 8494), 'random.seed', 'random.seed', (['(12345)'], {}), '(12345)\n', (8487, 8494), False, 'import random\n'), ((8503, 8550), 'numpy.seterr', 'numpy.seterr', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (8515, 8550), False, 'import numpy\n'), ((9898, 9921), 'titus.genpy.PFAEngine.fromJson', 'PFAEngine.fromJson', (['doc'], {}), '(doc)\n', (9916, 9921), False, 'from titus.genpy import PFAEngine\n'), ((10560, 10583), 'titus.genpy.PFAEngine.fromJson', 'PFAEngine.fromJson', (['doc'], {}), '(doc)\n', (10578, 10583), False, 'from titus.genpy import PFAEngine\n'), ((1041, 1062), 'random.uniform', 'random.uniform', (['(0)', '(10)'], {}), '(0, 10)\n', (1055, 1062), False, 'import random\n'), ((1079, 1100), 'random.uniform', 'random.uniform', (['(0)', '(10)'], {}), '(0, 10)\n', (1093, 1100), False, 'import random\n'), ((1177, 1195), 'random.gauss', 'random.gauss', (['(5)', '(1)'], {}), '(5, 1)\n', (1189, 1195), False, 'import random\n'), ((1242, 1260), 'random.gauss', 'random.gauss', (['(8)', '(1)'], {}), '(8, 1)\n', (1254, 1260), False, 'import random\n'), ((1331, 1349), 'random.gauss', 'random.gauss', (['(1)', '(1)'], {}), '(1, 1)\n', (1343, 1349), False, 'import random\n'), ((1396, 1414), 'random.gauss', 'random.gauss', (['(2)', '(1)'], {}), '(2, 1)\n', (1408, 1414), False, 'import random\n')]
|
import os
import unittest
import numpy
import moments
import time
class ResultsTestCase(unittest.TestCase):
def setUp(self):
self.startTime = time.time()
def tearDown(self):
t = time.time() - self.startTime
print("%s: %.3f seconds" % (self.id(), t))
def test_1d_ic(self):
        # This is just the standard neutral model.
n = 10
fs = moments.Spectrum(numpy.zeros(n+1))
fs.integrate([1], tf=10, dt_fac=0.01)
answer = moments.Spectrum(1./numpy.arange(n+1))
self.assert_(numpy.ma.allclose(fs, answer, atol=5e-5))
def test_1pop(self):
n = 15
f = lambda x: [1+0.0001*x]
sfs = moments.Spectrum(numpy.zeros([n+1]))
sfs.integrate(f, 5, 0.01, theta=1.0, h=0.1, gamma=-1)
sfs_ref = moments.Spectrum.from_file('test_files/1_pop.fs')
self.assertTrue(numpy.allclose(sfs, sfs_ref))
def test_2pops_neutral(self):
n = 20
mig = numpy.ones([2, 2])
f = lambda x: [1, 1+0.0001*x]
sfs = moments.Spectrum(numpy.zeros([n+1, n+1]))
sfs.integrate(f, 10, 0.005, theta=1.0, h=[0.5, 0.5], gamma=[0, 0], m=mig)
sfs_ref = moments.Spectrum.from_file('test_files/2_pops_neutral.fs')
self.assertTrue(numpy.allclose(sfs, sfs_ref))
def test_2pops(self):
n1, n2 = 15, 20
mig = numpy.ones([2, 2])
f = lambda x: [1, 1+0.0001*x]
sfs = moments.Spectrum(numpy.zeros([n1+1, n2+1]))
sfs.integrate(f, 10, 0.005, theta=1.0, h=[0.6, 0.6], gamma=[2, 2], m=mig)
sfs_ref = moments.Spectrum.from_file('test_files/2_pops.fs')
self.assertTrue(numpy.allclose(sfs, sfs_ref))
def test_3pops_slow(self):
n1, n2, n3 = 15, 20, 18
gamma = [0, 0.5, -2]
h = [0.5, 0.1, 0.9]
mig = numpy.array([[0, 5, 2],[1, 0, 1],[10, 0, 1]])
f = lambda x: [1, 1, 1+0.0001*x]
sfs = moments.Spectrum(numpy.zeros([n1+1, n2+1, n3+1]))
sfs.integrate(f, 10, 0.01, theta=1.0, h=h, gamma=gamma, m=mig)
sfs_ref = moments.Spectrum.from_file('test_files/3_pops.fs')
self.assertTrue(numpy.allclose(sfs, sfs_ref))
def test_IM(self):
params = (0.8, 2.0, 0.6, 0.45, 5.0, 0.3)
ns = (20,13)
theta = 1000.
fs = theta*moments.Demographics2D.IM(params, ns)
dadi_fs = moments.Spectrum.from_file('test_files/IM.fs')
resid = moments.Inference.Anscombe_Poisson_residual(fs,dadi_fs)
self.assert_(abs(resid).max() < 0.25)
suite = unittest.TestLoader().loadTestsFromTestCase(ResultsTestCase)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"numpy.ma.allclose",
"moments.Spectrum.from_file",
"moments.Demographics2D.IM",
"moments.Inference.Anscombe_Poisson_residual",
"numpy.allclose",
"numpy.zeros",
"numpy.ones",
"time.time",
"numpy.array",
"unittest.TestLoader",
"numpy.arange"
] |
[((2626, 2641), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2639, 2641), False, 'import unittest\n'), ((156, 167), 'time.time', 'time.time', ([], {}), '()\n', (165, 167), False, 'import time\n'), ((794, 843), 'moments.Spectrum.from_file', 'moments.Spectrum.from_file', (['"""test_files/1_pop.fs"""'], {}), "('test_files/1_pop.fs')\n", (820, 843), False, 'import moments\n'), ((966, 984), 'numpy.ones', 'numpy.ones', (['[2, 2]'], {}), '([2, 2])\n', (976, 984), False, 'import numpy\n'), ((1179, 1237), 'moments.Spectrum.from_file', 'moments.Spectrum.from_file', (['"""test_files/2_pops_neutral.fs"""'], {}), "('test_files/2_pops_neutral.fs')\n", (1205, 1237), False, 'import moments\n'), ((1361, 1379), 'numpy.ones', 'numpy.ones', (['[2, 2]'], {}), '([2, 2])\n', (1371, 1379), False, 'import numpy\n'), ((1576, 1626), 'moments.Spectrum.from_file', 'moments.Spectrum.from_file', (['"""test_files/2_pops.fs"""'], {}), "('test_files/2_pops.fs')\n", (1602, 1626), False, 'import moments\n'), ((1820, 1867), 'numpy.array', 'numpy.array', (['[[0, 5, 2], [1, 0, 1], [10, 0, 1]]'], {}), '([[0, 5, 2], [1, 0, 1], [10, 0, 1]])\n', (1831, 1867), False, 'import numpy\n'), ((2060, 2110), 'moments.Spectrum.from_file', 'moments.Spectrum.from_file', (['"""test_files/3_pops.fs"""'], {}), "('test_files/3_pops.fs')\n", (2086, 2110), False, 'import moments\n'), ((2357, 2403), 'moments.Spectrum.from_file', 'moments.Spectrum.from_file', (['"""test_files/IM.fs"""'], {}), "('test_files/IM.fs')\n", (2383, 2403), False, 'import moments\n'), ((2421, 2477), 'moments.Inference.Anscombe_Poisson_residual', 'moments.Inference.Anscombe_Poisson_residual', (['fs', 'dadi_fs'], {}), '(fs, dadi_fs)\n', (2464, 2477), False, 'import moments\n'), ((2533, 2554), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (2552, 2554), False, 'import unittest\n'), ((205, 216), 'time.time', 'time.time', ([], {}), '()\n', (214, 216), False, 'import time\n'), ((404, 422), 'numpy.zeros', 'numpy.zeros', (['(n + 1)'], {}), '(n + 1)\n', (415, 422), False, 'import numpy\n'), ((545, 586), 'numpy.ma.allclose', 'numpy.ma.allclose', (['fs', 'answer'], {'atol': '(5e-05)'}), '(fs, answer, atol=5e-05)\n', (562, 586), False, 'import numpy\n'), ((694, 714), 'numpy.zeros', 'numpy.zeros', (['[n + 1]'], {}), '([n + 1])\n', (705, 714), False, 'import numpy\n'), ((868, 896), 'numpy.allclose', 'numpy.allclose', (['sfs', 'sfs_ref'], {}), '(sfs, sfs_ref)\n', (882, 896), False, 'import numpy\n'), ((1054, 1081), 'numpy.zeros', 'numpy.zeros', (['[n + 1, n + 1]'], {}), '([n + 1, n + 1])\n', (1065, 1081), False, 'import numpy\n'), ((1262, 1290), 'numpy.allclose', 'numpy.allclose', (['sfs', 'sfs_ref'], {}), '(sfs, sfs_ref)\n', (1276, 1290), False, 'import numpy\n'), ((1449, 1478), 'numpy.zeros', 'numpy.zeros', (['[n1 + 1, n2 + 1]'], {}), '([n1 + 1, n2 + 1])\n', (1460, 1478), False, 'import numpy\n'), ((1651, 1679), 'numpy.allclose', 'numpy.allclose', (['sfs', 'sfs_ref'], {}), '(sfs, sfs_ref)\n', (1665, 1679), False, 'import numpy\n'), ((1938, 1975), 'numpy.zeros', 'numpy.zeros', (['[n1 + 1, n2 + 1, n3 + 1]'], {}), '([n1 + 1, n2 + 1, n3 + 1])\n', (1949, 1975), False, 'import numpy\n'), ((2135, 2163), 'numpy.allclose', 'numpy.allclose', (['sfs', 'sfs_ref'], {}), '(sfs, sfs_ref)\n', (2149, 2163), False, 'import numpy\n'), ((2300, 2337), 'moments.Demographics2D.IM', 'moments.Demographics2D.IM', (['params', 'ns'], {}), '(params, ns)\n', (2325, 2337), False, 'import moments\n'), ((505, 524), 'numpy.arange', 'numpy.arange', (['(n + 1)'], {}), '(n + 1)\n', (517, 524), False, 
'import numpy\n')]
|
import torch
import math
import numpy as np
def convert_locations_to_boxes(locations, priors, center_variance,
size_variance):
"""Convert regressional location results of SSD into boxes in the form of (center_x, center_y, h, w).
The conversion:
$$predicted\_center * center_variance = \frac {real\_center - prior\_center} {prior\_hw}$$
$$exp(predicted\_hw * size_variance) = \frac {real\_hw} {prior\_hw}$$
We do it in the inverse direction here.
Args:
        locations (batch_size, num_priors, 4): the regression output of SSD, i.e. the predicted box offsets.
priors (num_priors, 4) or (batch_size/1, num_priors, 4): prior boxes.
center_variance: a float used to change the scale of center.
        size_variance: a float used to change the scale of size.
Returns:
        boxes (batch_size, num_priors, 4): boxes in (center_x, center_y, h, w) form. All the values
            are relative to the image size.
"""
# priors can have one dimension less.
# if priors.dim() + 1 == locations.dim():
# priors = priors.unsqueeze(0)
# return torch.cat([
# locations[..., :2] * center_variance * priors[..., 2:] + priors[..., :2],
# torch.exp(locations[..., 2:] * size_variance) * priors[..., 2:]
# ], dim=locations.dim() - 1)
#print('locations:',locations)
# print('priors.size():',priors.size)
return locations*center_variance+torch.from_numpy(priors).cuda()
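# Illustrative sketch (not part of the original module): the docstring above
# describes the standard SSD decoding
#     real_center = predicted_center * center_variance * prior_hw + prior_center
#     real_hw     = exp(predicted_hw * size_variance) * prior_hw
# which is exactly what the commented-out torch.cat block implements.  Assuming
# center-form priors given as a torch tensor, a stand-alone version would be:
def _standard_ssd_decode_sketch(locations, priors, center_variance, size_variance):
    if priors.dim() + 1 == locations.dim():
        priors = priors.unsqueeze(0)
    return torch.cat([
        locations[..., :2] * center_variance * priors[..., 2:] + priors[..., :2],
        torch.exp(locations[..., 2:] * size_variance) * priors[..., 2:]
    ], dim=locations.dim() - 1)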
def convert_boxes_to_locations(quad_form_boxes, quad_form_priors, center_variance, size_variance):
# priors can have one dimension less
# if center_form_priors.dim() + 1 == center_form_boxes.dim():
# center_form_priors = center_form_priors.unsqueeze(0)
# return torch.cat([
# (center_form_boxes[..., :2] - center_form_priors[..., :2]) / center_form_priors[..., 2:] / center_variance,
# torch.log(center_form_boxes[..., 2:] / center_form_priors[..., 2:]) / size_variance
# ], dim=center_form_boxes.dim() - 1)
return (quad_form_boxes-quad_form_priors) / center_variance
def area_of(left_top, right_bottom) -> torch.Tensor:
"""Compute the areas of rectangles given two corners.
Args:
left_top (N, 2): left top corner.
right_bottom (N, 2): right bottom corner.
Returns:
area (N): return the area.
"""
hw = torch.clamp(right_bottom - left_top, min=0.0)
return hw[..., 0] * hw[..., 1]
import shapely
from shapely.geometry import Polygon, MultiPoint  # polygon utilities
from itertools import product
import time
# Sutherland-Hodgman polygon clipping algorithm
def clip(subjectPolygon, clipPolygon):
def inside(p):
return(cp2[0]-cp1[0])*(p[1]-cp1[1]) > (cp2[1]-cp1[1])*(p[0]-cp1[0])
def computeIntersection():
dc = [ cp1[0] - cp2[0], cp1[1] - cp2[1] ]
dp = [ s[0] - e[0], s[1] - e[1] ]
n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]
n2 = s[0] * e[1] - s[1] * e[0]
n3 = 1.0/(dc[0] * dp[1] - dc[1] * dp[0])
return [(n1*dp[0] - n2*dc[0]) * n3, (n1*dp[1] - n2*dc[1]) * n3]
outputList = subjectPolygon
cp1 = clipPolygon[-1]
for clipVertex in clipPolygon:
cp2 = clipVertex
inputList = outputList
outputList = []
if inputList==[]:
return [[0,0]]*4
s = inputList[-1]
for subjectVertex in inputList:
e = subjectVertex
if inside(e):
if not inside(s):
outputList.append(computeIntersection())
outputList.append(e)
elif inside(s):
outputList.append(computeIntersection())
s = e
cp1 = cp2
return(outputList)
def PolygonArea(corners):
n = len(corners) # of corners
area = 0.0
for i in range(n):
j = (i + 1) % n
area += corners[i][0] * corners[j][1]
area -= corners[j][0] * corners[i][1]
area = abs(area)/2.0
return area
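# Illustrative sketch (not part of the original module): clipping a unit square
# against a copy shifted by 0.5 in both axes leaves the 0.5 x 0.5 overlap, so
# PolygonArea reports 1.0 for the square itself and 0.25 for the clipped region.
def _sutherland_hodgman_example():
    unit_square = [[0, 0], [1, 0], [1, 1], [0, 1]]              # counter-clockwise
    shifted = [[0.5, 0.5], [1.5, 0.5], [1.5, 1.5], [0.5, 1.5]]  # counter-clockwise
    overlap = clip(unit_square, shifted)
    return PolygonArea(unit_square), PolygonArea(overlap)      # (1.0, 0.25)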
def calc_iou_Hodgman(quad1,quad2):
intersection = clip(quad1, quad2)
if intersection == 0:
return 0
intersection_area = PolygonArea(intersection)
print('intersection_area:',intersection_area)
print('PolygonArea(quad1):',PolygonArea(quad1))
print('PolygonArea(quad2):',PolygonArea(quad2))
print('PolygonArea(quad1) + PolygonArea(quad2):',PolygonArea(quad1) + PolygonArea(quad2))
union_area=(PolygonArea(quad1) + PolygonArea(quad2) - intersection_area)
print('union_area:',union_area)
iou = intersection_area / union_area
return iou
def iou_of(boxes0, boxes1, eps=1e-5):
"""Return intersection-over-union (Jaccard index) of boxes.
Args:
boxes0 (1,N,8): ground truth boxes.
boxes1 (N,1,8): predicted boxes.
eps: a small number to avoid 0 as denominator.
Returns:
iou (N): IoU values.
"""
start = time.time()
# print('boxes0.shape:',np.shape(boxes0))
# print('boxes1.shape:',np.shape(boxes1))
boxes0=np.reshape(boxes0,(-1,4,2))
boxes1=np.reshape(boxes1,(-1,4,2))
iou_result=np.zeros(shape=(np.shape(boxes1)[0],np.shape(boxes0)[0]),dtype=np.float32)
for i, j in product(range(np.shape(boxes1)[0]),range(np.shape(boxes0)[0])):
quad1=boxes0[j]
quad2=boxes1[i]
quad1=np.reshape(np.array(quad1),(4,2))
quad2=np.reshape(np.array(quad2),(4,2))
# iou=calc_iou_Hodgman(quad1,quad2)
# if iou > 1 or iou < 0:
# print('iou:',iou)
# assert iou <= 1 and iou >=0
# iou_result[i][j] = iou
poly1 = Polygon(quad1.reshape(4,2)).convex_hull
poly2 = Polygon(quad2.reshape(4,2)).convex_hull
        union_poly = np.concatenate((quad1.reshape(4,2),quad2.reshape(4,2)))  # stack the two boxes' corner coordinates into an 8x2 array
        if not poly1.intersects(poly2):  # the two quadrilaterals do not intersect
iou = 0
else:
try:
                inter_area = poly1.intersection(poly2).area  # intersection area
#print(inter_area)
union_area = MultiPoint(union_poly).convex_hull.area
if union_area == 0:
iou = 0
else:
iou = float(inter_area) / union_area
iou_result[i][j] = iou
except shapely.geos.TopologicalError:
print('shapely.geos.TopologicalError occured, iou set to 0')
iou = 0
assert iou <= 1 and iou >= 0
end = time.time()
#print('time consuming:',end-start)
return iou_result
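# Illustrative sketch (not part of the original module): note that iou_of measures
# the "union" as the area of the convex hull of both quads' corner points, so a
# unit square compared with a copy shifted by 0.5 gives 0.25 / 2.0 = 0.125 rather
# than the usual 0.25 / 1.75.
def _quad_iou_example():
    square = np.array([[0, 0, 1, 0, 1, 1, 0, 1]], dtype=np.float32)   # one quad as 8 flat coords
    shifted = square + 0.5
    return iou_of(square, shifted)   # array of shape (1, 1), value ~0.125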
def distance_sum(quad_gt,quad_from_priors):
ret = []
# print('quad_gt.size:', np.shape(quad_gt))
quad_gt=np.reshape(np.array(quad_gt),(-1,4,2))
quad_from_priors=np.reshape(np.array(quad_from_priors),(-1,4,2))
for i in range(np.shape(quad_gt)[0]):
# ret_temp=b-a[i,:].sum(axis=1,keepdims=True)
ret_temp = np.sum(np.sqrt(np.sum(np.power(quad_from_priors - quad_gt[i, ...],2), axis=2, keepdims=False)),axis=1,keepdims=True)
#print('ret_temp.shape:',np.shape(ret_temp))
ret.append(ret_temp)
# print('ret.size:',len(ret))
ret = np.concatenate(ret, axis=1)
#print('ret.shape:', np.shape(ret))
# print('quad_gt.shape:',np.shape(quad_gt))
# print('quad_from_priors.shape:',np.shape(quad_from_priors))
# print('ret.shape:',np.shape(ret))
return ret
# overlap_left_top = torch.max(boxes0[..., :2], boxes1[..., :2])
# overlap_right_bottom = torch.min(boxes0[..., 2:], boxes1[..., 2:])
#
# overlap_area = area_of(overlap_left_top, overlap_right_bottom)
# area0 = area_of(boxes0[..., :2], boxes0[..., 2:])
# area1 = area_of(boxes1[..., :2], boxes1[..., 2:])
# return overlap_area / (area0 + area1 - overlap_area + eps)
def get_pos_distance_array(pos_distance_threshold):
    # Adaptively choose the default-box-to-ground-truth distance threshold according to the scale of the default boxes
# print('distance_threshold:',distance_threshold)
# scale = [0.039,0.098,0.156,0.215,0.273,0.332,0.391]
# diff_from_ratio = [1.656,1.588,1.491,1.403,1.323,1.261,1.203,1.068]#this if for different aspect ratio settings
# diff_from_ratio = [1.656,1.656,1.656,1.656,1.656,1.656,1.656,1.656]
# pos_distance_array = []
# pos_distance_array += 64 * 64 * list(np.array([18 * [scale[0] * item] for item in diff_from_ratio]).reshape(-1))
# pos_distance_array += 32 * 32 * list(np.array([18 * [scale[1] * item] for item in diff_from_ratio]).reshape(-1))
# pos_distance_array += 16 * 16 * list(np.array([18 * [scale[2] * item] for item in diff_from_ratio]).reshape(-1))
# pos_distance_array += 8 * 8 * list(np.array([18 * [scale[3] * item] for item in diff_from_ratio]).reshape(-1))
# pos_distance_array += 4 * 4 * list(np.array([18 * [scale[4] * item] for item in diff_from_ratio]).reshape(-1))
# pos_distance_array += 2 * 2 * list(np.array([18 * [scale[5] * item] for item in diff_from_ratio]).reshape(-1))
# pos_distance_array += 1 * 1 * list(np.array([18 * [scale[5] * item] for item in diff_from_ratio]).reshape(-1))
# print('len(pos_distance_array):',len(pos_distance_array))
# print('pos_distance_threshold:',pos_distance_threshold)
n = 144
pos_distance_array = []
pos_distance_array+=64*64*n*[pos_distance_threshold[0]]#0~32768
pos_distance_array+=32*32*n*[pos_distance_threshold[1]]#32768~40960
pos_distance_array+=16*16*n*[pos_distance_threshold[2]]#40960~43008
pos_distance_array+=8*8*n*[pos_distance_threshold[3]]#43008~43520
pos_distance_array+=4*4*n*[pos_distance_threshold[4]]#43520~43648
pos_distance_array+=2*2*n*[pos_distance_threshold[5]]#43648~43680
pos_distance_array+=1*1*n*[pos_distance_threshold[6]]#43680~43688
# print('distance_array.size:',np.shape(distance_array))
# print('len:distance_array:',len(pos_distance_array))
return np.array(pos_distance_array)
def get_ignore_distance_array(ignore_distance_threshold):
    # Adaptively choose the default-box-to-ground-truth distance threshold according to the scale of the default boxes
# print('distance_threshold:',distance_threshold)
ignore_distance_array = []
n = 126
ignore_distance_array+=64*64*n*[ignore_distance_threshold[0]]#0~32768
ignore_distance_array+=32*32*n*[ignore_distance_threshold[1]]#32768~40960
ignore_distance_array+=16*16*n*[ignore_distance_threshold[2]]#40960~43008
ignore_distance_array+=8*8*n*[ignore_distance_threshold[3]]#43008~43520
ignore_distance_array+=4*4*n*[ignore_distance_threshold[4]]#43520~43648
ignore_distance_array+=2*2*n*[ignore_distance_threshold[5]]#43648~43680
ignore_distance_array+=1*1*n*[ignore_distance_threshold[6]]#43680~43688
# print('distance_array.size:',np.shape(distance_array))
return np.array(ignore_distance_array)
def assign_priors(quad_gt, quad_form_priors,iou_threshold,pos_distance_threshold):
"""Assign ground truth boxes and targets to priors.
Args:
        quad_gt (num_targets, 8): ground truth quadrilaterals (4 corner points each).
        quad_form_priors (num_priors, 8): quadrilateral-form priors.
        iou_threshold, pos_distance_threshold: thresholds used for matching.
    Returns:
        quad (num_priors, 8): ground truth quadrilateral assigned to each prior.
        labels (num_priors): labels for priors.
"""
# size: num_priors x num_targets
#ious = iou_of(quad_gt, quad_form_priors)
#ious = iou_of(quad_gt, quad_form_priors)
distance = distance_sum(quad_gt,quad_form_priors)
# size: num_priors
    # distance to the closest (minimum-distance) target for each prior
best_target_per_prior=np.min(distance,axis=1)
    # index of the closest target for each prior
best_target_per_prior_index=np.argmin(distance,axis=1)
#print(np.shape(best_target_per_prior))
#print(np.shape(best_target_per_prior_index))
# size: num_targets
    # distance to the closest prior for each target
best_prior_per_target=np.min(distance,axis=0)
    # index of the closest prior for each target
best_prior_per_target_index=np.argmin(distance,axis=0)
    # assign each target to its best-matching prior, overriding that prior's previous assignment
for target_index, prior_index in enumerate(best_prior_per_target_index):
best_target_per_prior_index[prior_index] = target_index
# 2.0 is used to make sure every target has a prior assigned
best_target_per_prior[best_prior_per_target_index]=2
# size: num_priors
gt_labels=np.ones(shape=np.shape(quad_gt)[0])
labels = gt_labels[best_target_per_prior_index]
# print('distance_threshold:',distance_threshold)
pos_distance_array=get_pos_distance_array(pos_distance_threshold)
    ignore_distance_array=pos_distance_array * 1.995  # 1.995 is a multiplier derived from an IoU of 0.3 under the Manhattan distance metric
labels[best_target_per_prior > pos_distance_array] = 0 # the backgournd id
# print('shape:',np.shape(best_target_per_prior > pos_distance_array))
#ignore_mask = np.multiply(best_target_per_prior > pos_distance_array ,best_target_per_prior < ignore_distance_array)
# print('ignore_mask.size1:',ignore_mask.sum())
#labels[ignore_mask] = -1
quad = quad_gt[best_target_per_prior_index]
# np.savetxt("/home/binchengxiong/boxes.txt", quad)
# np.savetxt("/home/binchengxiong/labels.txt", labels)
return quad,labels
def hard_negative_mining(loss, labels, neg_pos_ratio):
"""
    It is used to suppress the presence of a large number of negative predictions.
It works on image level not batch level.
For any example/image, it keeps all the positive predictions and
cut the number of negative predictions to make sure the ratio
    between the negative examples and positive examples is no more than
the given ratio for an image.
Args:
loss (N, num_priors): the loss for each example.
labels (N, num_priors): the labels.
neg_pos_ratio: the ratio between the negative examples and positive examples.
"""
pos_mask = labels == 1
#ignore_mask = labels == -1
# print('ignore_mask.size',ignore_mask.size())
# print('ignore_mask2.size:',ignore_mask.sum())
num_pos = pos_mask.long().sum(dim=1, keepdim=True)
# print('num_pos:',num_pos)
num_neg = num_pos * neg_pos_ratio
# print('pos_mask.size()[1]:',pos_mask.size()[1])
# print('total train sample num:',num_pos * (neg_pos_ratio + 1))
    # Set the loss of positive samples to -inf so that, when the loss is sorted in descending order, positives fall to the end and are never selected as hard negatives
# print('loss.size',loss.size())
loss[pos_mask] = -math.inf
#loss[ignore_mask] = -math.inf
try:
ordered_loss, indexes = loss.sort(dim=1, descending=True)
# print('ordered_loss:',ordered_loss)
# print('loss.size:',loss.size())
except RuntimeError:
print('loss.size()',loss.size())
print('loss:',loss)
_, orders = indexes.sort(dim=1)
neg_mask = orders < num_neg
return pos_mask | neg_mask
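# Illustrative sketch (not part of the original module): with a single positive
# prior and neg_pos_ratio=3, the returned mask keeps the positive plus the three
# highest-loss negatives, i.e. four True entries in this example.
def _hard_negative_mining_example():
    labels = torch.tensor([[1, 0, 0, 0, 0, 0]])
    loss = torch.tensor([[0.9, 0.1, 0.8, 0.3, 0.7, 0.2]])
    return hard_negative_mining(loss, labels, neg_pos_ratio=3)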
# corner (vertex) form representation of the default boxes
def center_form_to_corner_form(locations):
return torch.cat([locations[..., :2] - locations[..., 2:] / 2,
locations[..., :2] + locations[..., 2:] / 2], locations.dim() - 1)
def corner_form_to_center_form(boxes):
return torch.cat([
(boxes[..., :2] + boxes[..., 2:]) / 2,
boxes[..., 2:] - boxes[..., :2]
], boxes.dim() - 1)
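if __name__ == "__main__":
    # Minimal self-check sketch (not part of the original module): the corner-form
    # box (x_min, y_min, x_max, y_max) = (0, 0, 2, 4) corresponds to the center
    # form (1, 2, 2, 4), and converting back is lossless.
    corner = torch.tensor([[0.0, 0.0, 2.0, 4.0]])
    center = corner_form_to_center_form(corner)
    print(center)                               # tensor([[1., 2., 2., 4.]])
    print(center_form_to_corner_form(center))   # tensor([[0., 0., 2., 4.]])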
|
[
"shapely.geometry.MultiPoint",
"numpy.power",
"numpy.argmin",
"time.time",
"numpy.shape",
"numpy.min",
"torch.clamp",
"numpy.array",
"numpy.reshape",
"numpy.concatenate",
"torch.from_numpy"
] |
[((2362, 2407), 'torch.clamp', 'torch.clamp', (['(right_bottom - left_top)'], {'min': '(0.0)'}), '(right_bottom - left_top, min=0.0)\n', (2373, 2407), False, 'import torch\n'), ((4727, 4738), 'time.time', 'time.time', ([], {}), '()\n', (4736, 4738), False, 'import time\n'), ((4842, 4872), 'numpy.reshape', 'np.reshape', (['boxes0', '(-1, 4, 2)'], {}), '(boxes0, (-1, 4, 2))\n', (4852, 4872), True, 'import numpy as np\n'), ((4881, 4911), 'numpy.reshape', 'np.reshape', (['boxes1', '(-1, 4, 2)'], {}), '(boxes1, (-1, 4, 2))\n', (4891, 4911), True, 'import numpy as np\n'), ((6272, 6283), 'time.time', 'time.time', ([], {}), '()\n', (6281, 6283), False, 'import time\n'), ((6931, 6958), 'numpy.concatenate', 'np.concatenate', (['ret'], {'axis': '(1)'}), '(ret, axis=1)\n', (6945, 6958), True, 'import numpy as np\n'), ((9611, 9639), 'numpy.array', 'np.array', (['pos_distance_array'], {}), '(pos_distance_array)\n', (9619, 9639), True, 'import numpy as np\n'), ((10449, 10480), 'numpy.array', 'np.array', (['ignore_distance_array'], {}), '(ignore_distance_array)\n', (10457, 10480), True, 'import numpy as np\n'), ((11194, 11218), 'numpy.min', 'np.min', (['distance'], {'axis': '(1)'}), '(distance, axis=1)\n', (11200, 11218), True, 'import numpy as np\n'), ((11300, 11327), 'numpy.argmin', 'np.argmin', (['distance'], {'axis': '(1)'}), '(distance, axis=1)\n', (11309, 11327), True, 'import numpy as np\n'), ((11517, 11541), 'numpy.min', 'np.min', (['distance'], {'axis': '(0)'}), '(distance, axis=0)\n', (11523, 11541), True, 'import numpy as np\n'), ((11615, 11642), 'numpy.argmin', 'np.argmin', (['distance'], {'axis': '(0)'}), '(distance, axis=0)\n', (11624, 11642), True, 'import numpy as np\n'), ((6476, 6493), 'numpy.array', 'np.array', (['quad_gt'], {}), '(quad_gt)\n', (6484, 6493), True, 'import numpy as np\n'), ((6536, 6562), 'numpy.array', 'np.array', (['quad_from_priors'], {}), '(quad_from_priors)\n', (6544, 6562), True, 'import numpy as np\n'), ((5152, 5167), 'numpy.array', 'np.array', (['quad1'], {}), '(quad1)\n', (5160, 5167), True, 'import numpy as np\n'), ((5200, 5215), 'numpy.array', 'np.array', (['quad2'], {}), '(quad2)\n', (5208, 5215), True, 'import numpy as np\n'), ((6592, 6609), 'numpy.shape', 'np.shape', (['quad_gt'], {}), '(quad_gt)\n', (6600, 6609), True, 'import numpy as np\n'), ((1434, 1458), 'torch.from_numpy', 'torch.from_numpy', (['priors'], {}), '(priors)\n', (1450, 1458), False, 'import torch\n'), ((5029, 5045), 'numpy.shape', 'np.shape', (['boxes1'], {}), '(boxes1)\n', (5037, 5045), True, 'import numpy as np\n'), ((5056, 5072), 'numpy.shape', 'np.shape', (['boxes0'], {}), '(boxes0)\n', (5064, 5072), True, 'import numpy as np\n'), ((12005, 12022), 'numpy.shape', 'np.shape', (['quad_gt'], {}), '(quad_gt)\n', (12013, 12022), True, 'import numpy as np\n'), ((4940, 4956), 'numpy.shape', 'np.shape', (['boxes1'], {}), '(boxes1)\n', (4948, 4956), True, 'import numpy as np\n'), ((4960, 4976), 'numpy.shape', 'np.shape', (['boxes0'], {}), '(boxes0)\n', (4968, 4976), True, 'import numpy as np\n'), ((6710, 6757), 'numpy.power', 'np.power', (['(quad_from_priors - quad_gt[i, ...])', '(2)'], {}), '(quad_from_priors - quad_gt[i, ...], 2)\n', (6718, 6757), True, 'import numpy as np\n'), ((5848, 5870), 'shapely.geometry.MultiPoint', 'MultiPoint', (['union_poly'], {}), '(union_poly)\n', (5858, 5870), False, 'from shapely.geometry import Polygon, MultiPoint\n')]
|
# import Libraries of other lib packages
import numpy
import bob.core
# import our own Library
import bob.extension
bob.extension.load_bob_library('bob.io.base', __file__)
from ._library import File as _File_C, HDF5File as _HDF5File_C, extensions
from . import version
from .version import module as __version__
from .version import api as __api_version__
import os
class File(_File_C):
__doc__ = _File_C.__doc__
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
class HDF5File(_HDF5File_C):
__doc__ = _HDF5File_C.__doc__
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
return self.close()
def __contains__(self, x):
__doc__ = self.has_key.__doc__
return self.has_key(x)
def __iter__(self):
__doc__ = self.keys.__doc__
return iter(self.keys())
def __getitem__(self, name):
__doc__ = self.get.__doc__
return self.get(name)
def __setitem__(self, name, value):
__doc__ = self.set.__doc__
return self.set(name, value)
def values(self):
'''Yields the datasets contained in the current directory.
Yields
-------
object
The datasets that are being read.
'''
return (self[key] for key in self)
def items(self):
'''Yields the keys and the datasets contained in the current directory.
Yields
-------
tuple
The key and the datasets that are being read in a tuple.
'''
return ((key, self[key]) for key in self)
def _is_string(s):
"""Returns ``True`` if the given object is a string
This method works with both Python 2.x and 3.x, checking against the
string types available in each environment.
"""
from sys import version_info
return (version_info[0] < 3 and isinstance(s, (str, unicode))) or \
isinstance(s, (bytes, str))
@numpy.deprecate(new_name="os.makedirs(directory, exist_ok=True)")
def create_directories_safe(directory, dryrun=False):
"""Creates a directory if it does not exists, with concurrent access support.
This function will also create any parent directories that might be required.
If the dryrun option is selected, it does not actually create the directory,
but just writes the (Linux) command that would have been executed.
**Parameters:**
``directory`` : str
The directory that you want to create.
``dryrun`` : bool
Only ``print`` the command to console, but do not execute it.
"""
if dryrun:
print("[dry-run] mkdir -p '%s'" % directory)
else:
os.makedirs(directory, exist_ok=True)
def load(inputs):
"""load(inputs) -> data
Loads the contents of a file, an iterable of files, or an iterable of
:py:class:`bob.io.base.File`'s into a :py:class:`numpy.ndarray`.
**Parameters:**
``inputs`` : various types
This might represent several different entities:
1. The name of a file (full path) from where to load the data. In this
case, this assumes that the file contains an array and returns a loaded
numpy ndarray.
2. An iterable of filenames to be loaded in memory. In this case, this
would assume that each file contains a single 1D sample or a set of 1D
samples, load them in memory and concatenate them into a single and
returned 2D :py:class:`numpy.ndarray`.
3. An iterable of :py:class:`File`. In this case, this would assume
that each :py:class:`File` contains a single 1D sample or a set
of 1D samples, load them in memory if required and concatenate them into
a single and returned 2D :py:class:`numpy.ndarray`.
4. An iterable with mixed filenames and :py:class:`File`. In this
case, this would returned a 2D :py:class:`numpy.ndarray`, as described
by points 2 and 3 above.
**Returns:**
``data`` : :py:class:`numpy.ndarray`
The data loaded from the given ``inputs``.
"""
from collections import Iterable
import numpy
if _is_string(inputs):
if not os.path.exists(inputs):
raise RuntimeError(f"`{inputs}' does not exist!")
return File(inputs, 'r').read()
elif isinstance(inputs, Iterable):
retval = []
for obj in inputs:
if _is_string(obj):
retval.append(load(obj))
elif isinstance(obj, File):
retval.append(obj.read())
else:
raise TypeError(
"Iterable contains an object which is not a filename nor a "
"bob.io.base.File.")
return numpy.vstack(retval)
else:
raise TypeError(
"Unexpected input object. This function is expecting a filename, "
"or an iterable of filenames and/or bob.io.base.File's")
def merge(filenames):
"""merge(filenames) -> files
Converts an iterable of filenames into an iterable over read-only
:py:class:`bob.io.base.File`'s.
**Parameters:**
``filenames`` : str or [str]
A list of file names.
This might represent:
1. A single filename. In this case, an iterable with a single
:py:class:`File` is returned.
2. An iterable of filenames to be converted into an iterable of
:py:class:`File`'s.
**Returns:**
``files`` : [:py:class:`File`]
The list of files.
"""
from collections import Iterable
from .utils import is_string
if is_string(filenames):
return [File(filenames, 'r')]
elif isinstance(filenames, Iterable):
return [File(k, 'r') for k in filenames]
else:
raise TypeError(
"Unexpected input object. This function is expecting an "
"iterable of filenames.")
def save(array, filename, create_directories=False):
"""Saves the contents of an array-like object to file.
Effectively, this is the same as creating a :py:class:`File` object
with the mode flag set to ``'w'`` (write with truncation) and calling
:py:meth:`File.write` passing ``array`` as parameter.
Parameters:
``array`` : array_like
The array-like object to be saved on the file
``filename`` : str
The name of the file where you need the contents saved to
``create_directories`` : bool
Automatically generate the directories if required (defaults to ``False``
because of compatibility reasons; might change in future to default to
``True``)
"""
# create directory if not existent yet
if create_directories:
create_directories_safe(os.path.dirname(filename))
# requires data is c-contiguous and aligned, will create a copy otherwise
array = numpy.require(array, requirements=('C_CONTIGUOUS', 'ALIGNED'))
return File(filename, 'w').write(array)
# Just to make it homogeneous with the C++ API
write = save
read = load
def append(array, filename):
"""append(array, filename) -> position
Appends the contents of an array-like object to file.
Effectively, this is the same as creating a :py:class:`File` object
with the mode flag set to ``'a'`` (append) and calling
:py:meth:`File.append` passing ``array`` as parameter.
**Parameters:**
``array`` : array_like
The array-like object to be saved on the file
``filename`` : str
The name of the file where you need the contents saved to
**Returns:**
``position`` : int
See :py:meth:`File.append`
"""
# requires data is c-contiguous and aligned, will create a copy otherwise
array = numpy.require(array, requirements=('C_CONTIGUOUS', 'ALIGNED'))
return File(filename, 'a').append(array)
def peek(filename):
"""peek(filename) -> dtype, shape, stride
Returns the type of array (frame or sample) saved in the given file.
Effectively, this is the same as creating a :py:class:`File` object
with the mode flag set to `r` (read-only) and calling
:py:meth:`File.describe`.
**Parameters**:
``filename`` : str
The name of the file to peek information from
**Returns:**
``dtype, shape, stride`` : see :py:meth:`File.describe`
"""
return File(filename, 'r').describe()
def peek_all(filename):
"""peek_all(filename) -> dtype, shape, stride
Returns the type of array (for full readouts) saved in the given file.
Effectively, this is the same as creating a :py:class:`File` object
with the mode flag set to ``'r'`` (read-only) and returning
``File.describe`` with its parameter ``all`` set to ``True``.
**Parameters:**
``filename`` : str
The name of the file to peek information from
**Returns:**
``dtype, shape, stride`` : see :py:meth:`File.describe`
"""
return File(filename, 'r').describe(all=True)
# Keeps compatibility with the previously existing API
open = File
def get_config():
"""Returns a string containing the configuration information.
"""
return bob.extension.get_config(__name__, version.externals, version.api)
def get_include_directories():
"""get_include_directories() -> includes
Returns a list of include directories for dependent libraries, such as HDF5.
This function is automatically used by
:py:func:`bob.extension.get_bob_libraries` to retrieve the non-standard
include directories that are required to use the C bindings of this library
in dependent classes. You shouldn't normally need to call this function by
hand.
**Returns:**
``includes`` : [str]
The list of non-standard include directories required to use the C bindings
of this class. For now, only the directory for the HDF5 headers are
returned.
"""
# try to use pkg_config first
try:
from bob.extension.utils import find_header
# locate pkg-config on our own
header = 'hdf5.h'
candidates = find_header(header)
if not candidates:
raise RuntimeError(
"could not find %s's `%s' - have you installed %s on this "
"machine?" % ('hdf5', header, 'hdf5'))
return [os.path.dirname(candidates[0])]
except RuntimeError:
from bob.extension import pkgconfig
pkg = pkgconfig('hdf5')
return pkg.include_directories()
def get_macros():
"""get_macros() -> macros
Returns a list of preprocessor macros, such as ``(HAVE_HDF5, 1)``. This
function is automatically used by :py:func:`bob.extension.get_bob_libraries`
to retrieve the preprocessor definitions that are required to use the C
bindings of this library in dependent classes. You shouldn't normally need to
call this function by hand.
**Returns:**
``macros`` : [(str,str)]
The list of preprocessor macros required to use the C bindings of this
class. For now, only ``('HAVE_HDF5', '1')`` is returned, when applicable.
"""
# get include directories
if get_include_directories():
return [('HAVE_HDF5', '1')]
def _generate_features(reader, paths, same_size=False):
"""Load and stack features in a memory efficient way. This function is
meant to be used inside :py:func:`vstack_features`.
Parameters
----------
reader : ``collections.Callable``
See the documentation of :py:func:`vstack_features`.
paths : ``collections.Iterable``
See the documentation of :py:func:`vstack_features`.
same_size : :obj:`bool`, optional
See the documentation of :py:func:`vstack_features`.
Yields
------
object
The first object returned is a tuple of :py:class:`numpy.dtype` of
features and the shape of the first feature. The rest of objects are
the actual values in features. The features are returned in C order.
"""
shape_determined = False
for i, path in enumerate(paths):
feature = numpy.atleast_2d(reader(path))
feature = numpy.ascontiguousarray(feature)
if not shape_determined:
shape_determined = True
dtype = feature.dtype
shape = list(feature.shape)
yield (dtype, shape)
else:
# make sure all features have the same shape and dtype
if same_size:
assert shape == list(feature.shape)
else:
assert shape[1:] == list(feature.shape[1:])
assert dtype == feature.dtype
if same_size:
yield (feature.ravel(),)
else:
for feat in feature:
yield (feat.ravel(),)
def vstack_features(reader, paths, same_size=False, dtype=None):
"""Stacks all features in a memory efficient way.
Parameters
----------
reader : ``collections.Callable``
The function to load the features. The function should only take one
argument ``path`` and return loaded features. Use :any:`functools.partial`
to accommodate your reader to this format.
The features returned by ``reader`` are expected to have the same
:py:class:`numpy.dtype` and the same shape except for their first
dimension. First dimension should correspond to the number of samples.
paths : ``collections.Iterable``
An iterable of paths to iterate on. Whatever is inside path is given to
``reader`` so they do not need to be necessarily paths to actual files.
If ``same_size`` is ``True``, ``len(paths)`` must be valid.
same_size : :obj:`bool`, optional
If ``True``, it assumes that arrays inside all the paths are the same
shape. If you know the features are the same size in all paths, set this
to ``True`` to improve the performance.
dtype : :py:class:`numpy.dtype`, optional
If provided, the data will be casted to this format.
Returns
-------
numpy.ndarray
The read features with the shape ``(n_samples, *features_shape[1:])``.
Examples
--------
This function in a simple way is equivalent to calling
``numpy.vstack([reader(p) for p in paths])``.
>>> import numpy
>>> from bob.io.base import vstack_features
>>> def reader(path):
... # in each file, there are 5 samples and features are 2 dimensional.
... return numpy.arange(10).reshape(5,2)
>>> paths = ['path1', 'path2']
>>> all_features = vstack_features(reader, paths)
>>> numpy.allclose(all_features, numpy.array(
... [[0, 1],
... [2, 3],
... [4, 5],
... [6, 7],
... [8, 9],
... [0, 1],
... [2, 3],
... [4, 5],
... [6, 7],
... [8, 9]]))
True
>>> all_features_with_more_memory = numpy.vstack([reader(p) for p in paths])
>>> numpy.allclose(all_features, all_features_with_more_memory)
True
You can allocate the array at once to improve the performance if you know
that all features in paths have the same shape and you know the total number
of the paths:
>>> all_features = vstack_features(reader, paths, same_size=True)
>>> numpy.allclose(all_features, numpy.array(
... [[0, 1],
... [2, 3],
... [4, 5],
... [6, 7],
... [8, 9],
... [0, 1],
... [2, 3],
... [4, 5],
... [6, 7],
... [8, 9]]))
True
"""
iterable = _generate_features(reader, paths, same_size)
data_dtype, shape = next(iterable)
if dtype is None:
dtype = data_dtype
if same_size:
# numpy black magic: https://stackoverflow.com/a/12473478/1286165
field_dtype = [("", (dtype, (numpy.prod(shape),)))]
total_size = len(paths)
all_features = numpy.fromiter(iterable, field_dtype, total_size)
else:
field_dtype = [("", (dtype, (numpy.prod(shape[1:]),)))]
all_features = numpy.fromiter(iterable, field_dtype)
# go from a field array to a normal array
all_features = all_features.view(dtype)
# the shape is assumed to be (n_samples, ...) it can be (5, 2) or (5, 3, 4).
shape = list(shape)
shape[0] = -1
return numpy.reshape(all_features, shape, order="C")
# gets sphinx autodoc done right - don't remove it
__all__ = [_ for _ in dir() if not _.startswith('_')]
|
[
"os.makedirs",
"os.path.dirname",
"os.path.exists",
"numpy.require",
"numpy.prod",
"bob.extension.utils.find_header",
"bob.extension.pkgconfig",
"numpy.reshape",
"numpy.fromiter",
"numpy.deprecate",
"numpy.ascontiguousarray",
"numpy.vstack"
] |
[((1860, 1925), 'numpy.deprecate', 'numpy.deprecate', ([], {'new_name': '"""os.makedirs(directory, exist_ok=True)"""'}), "(new_name='os.makedirs(directory, exist_ok=True)')\n", (1875, 1925), False, 'import numpy\n'), ((6421, 6483), 'numpy.require', 'numpy.require', (['array'], {'requirements': "('C_CONTIGUOUS', 'ALIGNED')"}), "(array, requirements=('C_CONTIGUOUS', 'ALIGNED'))\n", (6434, 6483), False, 'import numpy\n'), ((7254, 7316), 'numpy.require', 'numpy.require', (['array'], {'requirements': "('C_CONTIGUOUS', 'ALIGNED')"}), "(array, requirements=('C_CONTIGUOUS', 'ALIGNED'))\n", (7267, 7316), False, 'import numpy\n'), ((15239, 15284), 'numpy.reshape', 'numpy.reshape', (['all_features', 'shape'], {'order': '"""C"""'}), "(all_features, shape, order='C')\n", (15252, 15284), False, 'import numpy\n'), ((2540, 2577), 'os.makedirs', 'os.makedirs', (['directory'], {'exist_ok': '(True)'}), '(directory, exist_ok=True)\n', (2551, 2577), False, 'import os\n'), ((9473, 9492), 'bob.extension.utils.find_header', 'find_header', (['header'], {}), '(header)\n', (9484, 9492), False, 'from bob.extension.utils import find_header\n'), ((11379, 11411), 'numpy.ascontiguousarray', 'numpy.ascontiguousarray', (['feature'], {}), '(feature)\n', (11402, 11411), False, 'import numpy\n'), ((14851, 14900), 'numpy.fromiter', 'numpy.fromiter', (['iterable', 'field_dtype', 'total_size'], {}), '(iterable, field_dtype, total_size)\n', (14865, 14900), False, 'import numpy\n'), ((14988, 15025), 'numpy.fromiter', 'numpy.fromiter', (['iterable', 'field_dtype'], {}), '(iterable, field_dtype)\n', (15002, 15025), False, 'import numpy\n'), ((3976, 3998), 'os.path.exists', 'os.path.exists', (['inputs'], {}), '(inputs)\n', (3990, 3998), False, 'import os\n'), ((4449, 4469), 'numpy.vstack', 'numpy.vstack', (['retval'], {}), '(retval)\n', (4461, 4469), False, 'import numpy\n'), ((6307, 6332), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (6322, 6332), False, 'import os\n'), ((9674, 9704), 'os.path.dirname', 'os.path.dirname', (['candidates[0]'], {}), '(candidates[0])\n', (9689, 9704), False, 'import os\n'), ((9779, 9796), 'bob.extension.pkgconfig', 'pkgconfig', (['"""hdf5"""'], {}), "('hdf5')\n", (9788, 9796), False, 'from bob.extension import pkgconfig\n'), ((14781, 14798), 'numpy.prod', 'numpy.prod', (['shape'], {}), '(shape)\n', (14791, 14798), False, 'import numpy\n'), ((14942, 14963), 'numpy.prod', 'numpy.prod', (['shape[1:]'], {}), '(shape[1:])\n', (14952, 14963), False, 'import numpy\n')]
|
from __future__ import print_function
import os
import argparse
import numpy as np
from dcase_task2.lasagne_wrapper.network import Network
from utils.data_tut18_task2 import load_data as load_data_tut18_task2
from utils.data_tut18_task2 import ID_CLASS_MAPPING as id_class_mapping_tut18_task2
from config.settings import EXP_ROOT
# set seed for reproducibility
np.random.seed(4711)
def select_model(model_path):
""" select model """
model_str = os.path.basename(model_path)
model_str = model_str.split('.py')[0]
import_root = ".".join((model_path.split(os.path.sep))[:-1])
exec("from %s import %s as model" % (import_root, model_str))
model.EXP_NAME = model_str
return model
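# Note (illustrative): for a call like select_model("models/vgg_gap.py"), the
# exec line above amounts to "from models import vgg_gap as model", i.e. the
# model module is located from its file path. The path "models/vgg_gap.py" is
# a hypothetical example, not necessarily one of the project's actual files.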
def load_data(data_set, fold, args):
""" select data """
if "tut18T2ver" in data_set:
normalize = "norm" in data_set
spec_dir = data_set.split("-")[1]
data = load_data_tut18_task2(fold=fold, n_workers=1, spec_dir=spec_dir,
train_verified=True, train_unverified=False, normalize=normalize,
fix_lengths=args.no_len_fix, max_len=args.max_len, min_len=args.min_len,
train_file=args.train_file, train_on_all=args.train_on_all,
validate_verified=not args.validate_unverified)
id_class_mapping = id_class_mapping_tut18_task2
elif "tut18T2unver" in data_set:
normalize = "norm" in data_set
spec_dir = data_set.split("-")[1]
data = load_data_tut18_task2(fold=fold, n_workers=1, spec_dir=spec_dir,
train_verified=False, train_unverified=True, normalize=normalize,
fix_lengths=args.no_len_fix, max_len=args.max_len, min_len=args.min_len,
train_file=args.train_file, train_on_all=args.train_on_all,
validate_verified=not args.validate_unverified)
id_class_mapping = id_class_mapping_tut18_task2
elif "tut18T2" in data_set:
normalize = "norm" in data_set
spec_dir = data_set.split("-")[1]
data = load_data_tut18_task2(fold=fold, n_workers=1, spec_dir=spec_dir,
train_verified=True, train_unverified=True, normalize=normalize,
fix_lengths=args.no_len_fix, max_len=args.max_len, min_len=args.min_len,
train_file=args.train_file, train_on_all=args.train_on_all,
validate_verified=not args.validate_unverified)
id_class_mapping = id_class_mapping_tut18_task2
return data, id_class_mapping
def get_dump_file_paths(out_path, fold):
par = 'params.pkl' if fold is None else 'params_%d.pkl' % fold
log = 'results.pkl' if fold is None else 'results_%d.pkl' % fold
dump_file = os.path.join(out_path, par)
log_file = os.path.join(out_path, log)
return dump_file, log_file
if __name__ == '__main__':
""" main """
# add argument parser
parser = argparse.ArgumentParser(description='Train audio tagging network.')
parser.add_argument('--model', help='select model to train.')
parser.add_argument('--data', help='select data set to train on.')
parser.add_argument('--fold', help='train split.', type=int, default=None)
parser.add_argument('--ini_params', help='path to pretrained parameters.', type=str, default=None)
parser.add_argument('--tag', help='add tag to result files.', type=str, default=None)
parser.add_argument('--fine_tune', help='use fine-tune train configuration.', action='store_true')
# tut18 task2
parser.add_argument('--train_file', help='train data file.', type=str, default="train.csv")
parser.add_argument('--max_len', help='maximum spectrogram length.', type=int, default=None)
parser.add_argument('--min_len', help='minimum spectrogram length.', type=int, default=None)
parser.add_argument('--no_len_fix', help='do not fix lengths of spectrograms.', action='store_false')
parser.add_argument('--train_on_all', help='use all files for training.', action='store_true')
parser.add_argument('--validate_unverified', help='validate also on unverified samples.', action='store_true')
args = parser.parse_args()
# select model
model = select_model(args.model)
# load data
print("\nLoading data ...")
data, _ = load_data(args.data, args.fold, args)
# set model dump file
print("\nPreparing model ...")
out_path = os.path.join(os.path.join(EXP_ROOT), model.EXP_NAME)
dump_file, log_file = get_dump_file_paths(out_path, args.fold)
# change parameter dump files
if not args.fine_tune:
dump_file = dump_file.replace(".pkl", "_it0.pkl")
log_file = log_file.replace(".pkl", "_it0.pkl")
print("parameter file", dump_file)
print("log file", log_file)
# compile network
net = model.build_model()
# initialize neural network
my_net = Network(net)
# load initial parametrization
if args.ini_params:
ini_params = args.ini_params % args.fold
ini_params = dump_file.replace(os.path.basename(dump_file).split(".")[0], ini_params)
my_net.load(ini_params)
print("initial parameter file %s" % ini_params)
# add tag to results
if args.tag:
dump_file = dump_file.replace(".pkl", "_%s.pkl" % args.tag)
log_file = log_file.replace(".pkl", "_%s.pkl" % args.tag)
print("tagged parameter file %s" % dump_file)
# train network
train_strategy = model.compile_train_strategy(args.fine_tune)
my_net.fit(data, train_strategy, log_file=log_file, dump_file=dump_file)
|
[
"numpy.random.seed",
"argparse.ArgumentParser",
"os.path.basename",
"utils.data_tut18_task2.load_data",
"os.path.join",
"dcase_task2.lasagne_wrapper.network.Network"
] |
[((368, 388), 'numpy.random.seed', 'np.random.seed', (['(4711)'], {}), '(4711)\n', (382, 388), True, 'import numpy as np\n'), ((463, 491), 'os.path.basename', 'os.path.basename', (['model_path'], {}), '(model_path)\n', (479, 491), False, 'import os\n'), ((2947, 2974), 'os.path.join', 'os.path.join', (['out_path', 'par'], {}), '(out_path, par)\n', (2959, 2974), False, 'import os\n'), ((2990, 3017), 'os.path.join', 'os.path.join', (['out_path', 'log'], {}), '(out_path, log)\n', (3002, 3017), False, 'import os\n'), ((3135, 3202), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train audio tagging network."""'}), "(description='Train audio tagging network.')\n", (3158, 3202), False, 'import argparse\n'), ((5079, 5091), 'dcase_task2.lasagne_wrapper.network.Network', 'Network', (['net'], {}), '(net)\n', (5086, 5091), False, 'from dcase_task2.lasagne_wrapper.network import Network\n'), ((907, 1234), 'utils.data_tut18_task2.load_data', 'load_data_tut18_task2', ([], {'fold': 'fold', 'n_workers': '(1)', 'spec_dir': 'spec_dir', 'train_verified': '(True)', 'train_unverified': '(False)', 'normalize': 'normalize', 'fix_lengths': 'args.no_len_fix', 'max_len': 'args.max_len', 'min_len': 'args.min_len', 'train_file': 'args.train_file', 'train_on_all': 'args.train_on_all', 'validate_verified': '(not args.validate_unverified)'}), '(fold=fold, n_workers=1, spec_dir=spec_dir,\n train_verified=True, train_unverified=False, normalize=normalize,\n fix_lengths=args.no_len_fix, max_len=args.max_len, min_len=args.min_len,\n train_file=args.train_file, train_on_all=args.train_on_all,\n validate_verified=not args.validate_unverified)\n', (928, 1234), True, 'from utils.data_tut18_task2 import load_data as load_data_tut18_task2\n'), ((4618, 4640), 'os.path.join', 'os.path.join', (['EXP_ROOT'], {}), '(EXP_ROOT)\n', (4630, 4640), False, 'import os\n'), ((1557, 1884), 'utils.data_tut18_task2.load_data', 'load_data_tut18_task2', ([], {'fold': 'fold', 'n_workers': '(1)', 'spec_dir': 'spec_dir', 'train_verified': '(False)', 'train_unverified': '(True)', 'normalize': 'normalize', 'fix_lengths': 'args.no_len_fix', 'max_len': 'args.max_len', 'min_len': 'args.min_len', 'train_file': 'args.train_file', 'train_on_all': 'args.train_on_all', 'validate_verified': '(not args.validate_unverified)'}), '(fold=fold, n_workers=1, spec_dir=spec_dir,\n train_verified=False, train_unverified=True, normalize=normalize,\n fix_lengths=args.no_len_fix, max_len=args.max_len, min_len=args.min_len,\n train_file=args.train_file, train_on_all=args.train_on_all,\n validate_verified=not args.validate_unverified)\n', (1578, 1884), True, 'from utils.data_tut18_task2 import load_data as load_data_tut18_task2\n'), ((2202, 2528), 'utils.data_tut18_task2.load_data', 'load_data_tut18_task2', ([], {'fold': 'fold', 'n_workers': '(1)', 'spec_dir': 'spec_dir', 'train_verified': '(True)', 'train_unverified': '(True)', 'normalize': 'normalize', 'fix_lengths': 'args.no_len_fix', 'max_len': 'args.max_len', 'min_len': 'args.min_len', 'train_file': 'args.train_file', 'train_on_all': 'args.train_on_all', 'validate_verified': '(not args.validate_unverified)'}), '(fold=fold, n_workers=1, spec_dir=spec_dir,\n train_verified=True, train_unverified=True, normalize=normalize,\n fix_lengths=args.no_len_fix, max_len=args.max_len, min_len=args.min_len,\n train_file=args.train_file, train_on_all=args.train_on_all,\n validate_verified=not args.validate_unverified)\n', (2223, 2528), True, 'from utils.data_tut18_task2 import load_data as 
load_data_tut18_task2\n'), ((5240, 5267), 'os.path.basename', 'os.path.basename', (['dump_file'], {}), '(dump_file)\n', (5256, 5267), False, 'import os\n')]
|
import sys
import numpy
from scipy import special
import statsmodels.api as sm
from galpy.util import bovy_plot
import define_rcsample
def plot_rcdistancecomparison(plotfilename):
# Get the sample
rcdata= define_rcsample.get_rcsample()
# Now plot the difference
bovy_plot.bovy_print()
levels= special.erf(numpy.arange(1,3)/numpy.sqrt(2.))
bovy_plot.scatterplot(rcdata['RC_DIST'],
(rcdata['RC_DIST_H']-rcdata['RC_DIST'])/rcdata['RC_DIST'],
conditional=True,
levels=levels,
linestyle='none',color='k',marker=',',
xrange=[0.,7.49],yrange=[-0.075,0.075],
xlabel=r'$M_{K_s}\!-\!\mathrm{based\ distance\,(kpc)}$',
ylabel=r'$\mathrm{Fractional\ difference\ of}\ M_H\ \mathrm{vs.}\ M_{K_s}$',
onedhistx=True,bins=31)
bovy_plot.bovy_plot([0.,10.],[0.,0.],'--',lw=2.,color='0.75',overplot=True)
# Plot lowess
lowess= sm.nonparametric.lowess
z= lowess((rcdata['RC_DIST_H']-rcdata['RC_DIST'])/rcdata['RC_DIST'],
rcdata['RC_DIST'],frac=.3)
bovy_plot.bovy_plot(z[:,0],z[:,1],'w--',lw=2.,overplot=True)
bovy_plot.bovy_end_print(plotfilename)
return None
if __name__ == '__main__':
plot_rcdistancecomparison(sys.argv[1])
|
[
"galpy.util.bovy_plot.bovy_plot",
"define_rcsample.get_rcsample",
"galpy.util.bovy_plot.bovy_end_print",
"galpy.util.bovy_plot.bovy_print",
"galpy.util.bovy_plot.scatterplot",
"numpy.arange",
"numpy.sqrt"
] |
[((213, 243), 'define_rcsample.get_rcsample', 'define_rcsample.get_rcsample', ([], {}), '()\n', (241, 243), False, 'import define_rcsample\n'), ((277, 299), 'galpy.util.bovy_plot.bovy_print', 'bovy_plot.bovy_print', ([], {}), '()\n', (297, 299), False, 'from galpy.util import bovy_plot\n'), ((362, 780), 'galpy.util.bovy_plot.scatterplot', 'bovy_plot.scatterplot', (["rcdata['RC_DIST']", "((rcdata['RC_DIST_H'] - rcdata['RC_DIST']) / rcdata['RC_DIST'])"], {'conditional': '(True)', 'levels': 'levels', 'linestyle': '"""none"""', 'color': '"""k"""', 'marker': '""","""', 'xrange': '[0.0, 7.49]', 'yrange': '[-0.075, 0.075]', 'xlabel': '"""$M_{K_s}\\\\!-\\\\!\\\\mathrm{based\\\\ distance\\\\,(kpc)}$"""', 'ylabel': '"""$\\\\mathrm{Fractional\\\\ difference\\\\ of}\\\\ M_H\\\\ \\\\mathrm{vs.}\\\\ M_{K_s}$"""', 'onedhistx': '(True)', 'bins': '(31)'}), "(rcdata['RC_DIST'], (rcdata['RC_DIST_H'] - rcdata[\n 'RC_DIST']) / rcdata['RC_DIST'], conditional=True, levels=levels,\n linestyle='none', color='k', marker=',', xrange=[0.0, 7.49], yrange=[-\n 0.075, 0.075], xlabel=\n '$M_{K_s}\\\\!-\\\\!\\\\mathrm{based\\\\ distance\\\\,(kpc)}$', ylabel=\n '$\\\\mathrm{Fractional\\\\ difference\\\\ of}\\\\ M_H\\\\ \\\\mathrm{vs.}\\\\ M_{K_s}$',\n onedhistx=True, bins=31)\n", (383, 780), False, 'from galpy.util import bovy_plot\n'), ((944, 1035), 'galpy.util.bovy_plot.bovy_plot', 'bovy_plot.bovy_plot', (['[0.0, 10.0]', '[0.0, 0.0]', '"""--"""'], {'lw': '(2.0)', 'color': '"""0.75"""', 'overplot': '(True)'}), "([0.0, 10.0], [0.0, 0.0], '--', lw=2.0, color='0.75',\n overplot=True)\n", (963, 1035), False, 'from galpy.util import bovy_plot\n'), ((1205, 1272), 'galpy.util.bovy_plot.bovy_plot', 'bovy_plot.bovy_plot', (['z[:, 0]', 'z[:, 1]', '"""w--"""'], {'lw': '(2.0)', 'overplot': '(True)'}), "(z[:, 0], z[:, 1], 'w--', lw=2.0, overplot=True)\n", (1224, 1272), False, 'from galpy.util import bovy_plot\n'), ((1270, 1308), 'galpy.util.bovy_plot.bovy_end_print', 'bovy_plot.bovy_end_print', (['plotfilename'], {}), '(plotfilename)\n', (1294, 1308), False, 'from galpy.util import bovy_plot\n'), ((324, 342), 'numpy.arange', 'numpy.arange', (['(1)', '(3)'], {}), '(1, 3)\n', (336, 342), False, 'import numpy\n'), ((342, 357), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (352, 357), False, 'import numpy\n')]
|
#!/usr/bin/env python
"""
Given the output of fconv_slopes, plot the thermodynamic
gradients corresponding to an initial model.
<NAME>
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('infile', type=str,
help='Name of file containing thermodynamic gradients to plot.')
parser.add_argument('-f', '--format', type=str, default='png',
help='Format of the desired output files. Can be, e.g. "png" or "eps". Defaults to "png".')
parser.add_argument('-rup', '--radius_upper', type=float,
help='Upper bound for the plotted radius.')
parser.add_argument('-o', '--outname', type=str, help='Base name of output file to use (w/o extension).')
args = parser.parse_args()
class ConvectiveGradients(object):
def __init__(self, infile=None):
if infile:
self.r, self.actual, self.adiabatic, self.ledoux = np.loadtxt(infile, unpack=True)
self.infile = infile
else:
self.r = []
self.actual = []
self.adiabatic = []
self.ledoux = []
self.infile = ''
def plot(self, fmt=None, rup=None, outname=None, show=False):
fig = plt.figure()
ax = fig.add_subplot(111)
idxup = -1
if rup:
ax.set_xlim([0, rup])
# Get the lowest index where radius > rup
idxup = np.where(self.r > rup)[0][0]
ax.set_xlabel('$\mathrm{r (cm)}$')
# ax2 = ax.twinx()
# ax.plot(self.r[:idxup], self.adiabatic[:idxup], color='blue', linestyle='-', label='adiabatic')
# ax.plot(self.r[:idxup], self.actual[:idxup], color='green', linestyle='--', label='actual')
# ax.plot(self.r[:idxup], self.ledoux[:idxup], color='red', linestyle=':', label='ledoux')
dadiabatic = self.actual[:idxup]-self.adiabatic[:idxup]
neg_idx, pos_idx = self.get_signed_indices(dadiabatic)
# ax2.plot(self.r[:idxup][neg_idx], dadiabatic[neg_idx], color='black', marker='v', markersize=8,
# linestyle='-', label='actual-adiabatic (-)')
# ax2.plot(self.r[:idxup][pos_idx], dadiabatic[pos_idx], color='black', marker='^', markersize=8,
# linestyle='-', label='actual-adiabatic (+)')
dledoux = self.actual[:idxup]-self.ledoux[:idxup]
neg_idx, pos_idx = self.get_signed_indices(dledoux)
# ax2.plot(self.r[:idxup][neg_idx], dledoux[neg_idx], color='magenta', marker='v', markersize=8,
# linestyle=':', label='actual-ledoux (-)')
# ax2.plot(self.r[:idxup][pos_idx], dledoux[pos_idx], color='magenta', marker='^', markersize=8,
# linestyle=':', label='actual-ledoux (+)')
ax.plot(self.r[:idxup], dadiabatic, color='blue', linestyle='-', label='adiabatic $\mathrm{\\nabla_{conv}}$')
ax.plot(self.r[:idxup], dledoux, color='red', linestyle='-.', label='ledoux $\mathrm{\\nabla_{conv}}$')
mx = max(np.amax(dadiabatic), np.amax(dledoux))
mn = min(np.amin(dadiabatic), np.amin(dledoux))
mlin = min(abs(mx), abs(mn))
plt.yscale('symlog', linthreshy=0.5*mlin)
ax.set_ylabel('$\mathrm{\\nabla_{actual} - \\nabla_{conv}}$')
plt.legend()
if fmt=='png':
if not outname:
outname = self.infile + '.png'
plt.savefig(outname, dpi=300)
else:
if not outname:
outname = self.infile + '.eps'
plt.savefig(outname)
if show:
plt.show()
plt.close(fig)
def get_signed_indices(self, dvec):
neg_idx = np.where(dvec < 0.0)
pos_idx = np.where(dvec > 0.0)
return neg_idx, pos_idx
if __name__=='__main__':
cg = ConvectiveGradients(args.infile)
cg.plot(args.format, args.radius_upper, args.outname)
|
[
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"numpy.amin",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"numpy.amax",
"matplotlib.pyplot.figure",
"numpy.where",
"numpy.loadtxt",
"matplotlib.pyplot.savefig"
] |
[((256, 281), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (279, 281), False, 'import argparse\n'), ((1303, 1315), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1313, 1315), True, 'import matplotlib.pyplot as plt\n'), ((3213, 3256), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""symlog"""'], {'linthreshy': '(0.5 * mlin)'}), "('symlog', linthreshy=0.5 * mlin)\n", (3223, 3256), True, 'import matplotlib.pyplot as plt\n'), ((3333, 3345), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3343, 3345), True, 'import matplotlib.pyplot as plt\n'), ((3734, 3754), 'numpy.where', 'np.where', (['(dvec < 0.0)'], {}), '(dvec < 0.0)\n', (3742, 3754), True, 'import numpy as np\n'), ((3773, 3793), 'numpy.where', 'np.where', (['(dvec > 0.0)'], {}), '(dvec > 0.0)\n', (3781, 3793), True, 'import numpy as np\n'), ((992, 1023), 'numpy.loadtxt', 'np.loadtxt', (['infile'], {'unpack': '(True)'}), '(infile, unpack=True)\n', (1002, 1023), True, 'import numpy as np\n'), ((3073, 3092), 'numpy.amax', 'np.amax', (['dadiabatic'], {}), '(dadiabatic)\n', (3080, 3092), True, 'import numpy as np\n'), ((3094, 3110), 'numpy.amax', 'np.amax', (['dledoux'], {}), '(dledoux)\n', (3101, 3110), True, 'import numpy as np\n'), ((3129, 3148), 'numpy.amin', 'np.amin', (['dadiabatic'], {}), '(dadiabatic)\n', (3136, 3148), True, 'import numpy as np\n'), ((3150, 3166), 'numpy.amin', 'np.amin', (['dledoux'], {}), '(dledoux)\n', (3157, 3166), True, 'import numpy as np\n'), ((3456, 3485), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outname'], {'dpi': '(300)'}), '(outname, dpi=300)\n', (3467, 3485), True, 'import matplotlib.pyplot as plt\n'), ((3587, 3607), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outname'], {}), '(outname)\n', (3598, 3607), True, 'import matplotlib.pyplot as plt\n'), ((3637, 3647), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3645, 3647), True, 'import matplotlib.pyplot as plt\n'), ((3660, 3674), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (3669, 3674), True, 'import matplotlib.pyplot as plt\n'), ((1493, 1515), 'numpy.where', 'np.where', (['(self.r > rup)'], {}), '(self.r > rup)\n', (1501, 1515), True, 'import numpy as np\n')]
|
from __future__ import print_function
import random
import nltk
from nltk.corpus import treebank
import numpy as np
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import classification_report
from keras.layers import Dense, Dropout, Activation
from keras.models import Sequential
from keras.utils import np_utils, plot_model
from keras.wrappers.scikit_learn import KerasClassifier
import matplotlib.pyplot as plt
CUSTOM_SEED = 42
def add_basic_features(sentence_terms, index):
""" Compute some very basic word features.
:param sentence_terms: [w1, w2, ...]
:type sentence_terms: list
:param index: the index of the word
:type index: int
:return: dict containing features
:rtype: dict
"""
term = sentence_terms[index]
return {
'nb_terms': len(sentence_terms),
'term': term,
'is_first': index == 0,
'is_last': index == len(sentence_terms) - 1,
'is_capitalized': term[0].upper() == term[0],
'is_all_caps': term.upper() == term,
'is_all_lower': term.lower() == term,
'prefix-1': term[0],
'prefix-2': term[:2],
'prefix-3': term[:3],
'suffix-1': term[-1],
'suffix-2': term[-2:],
'suffix-3': term[-3:],
'prev_word': '' if index == 0 else sentence_terms[index - 1],
'next_word': '' if index == len(sentence_terms) - 1 else sentence_terms[index + 1]
}
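# Illustrative example (not part of the original script): for the sentence
# ['The', 'cat', 'sleeps'] and index 1, the returned dict contains, among
# other entries, 'term': 'cat', 'is_first': False, 'prefix-2': 'ca',
# 'suffix-2': 'at', 'prev_word': 'The', 'next_word': 'sleeps'.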
def untag(tagged_sentence):
"""
Remove the tag for each tagged term.
:param tagged_sentence: a POS tagged sentence
:type tagged_sentence: list
:return: a list of bare terms (words with their tags removed)
:rtype: list of strings
"""
return [w for w, _ in tagged_sentence]
def transform_to_dataset(tagged_sentences):
"""
Split tagged sentences to X and y datasets and append some basic features.
:param tagged_sentences: a list of POS tagged sentences
:type tagged_sentences: list of list of tuples (term_i, tag_i)
:return:
"""
X, y = [], []
for pos_tags in tagged_sentences:
for index, (term, class_) in enumerate(pos_tags):
# Add basic NLP features for each sentence term
X.append(add_basic_features(untag(pos_tags), index))
y.append(class_)
return X, y
def build_model(input_dim, hidden_neurons, output_dim):
"""
Construct, compile and return a Keras model which will be used to fit/predict
"""
model = Sequential([
Dense(hidden_neurons, input_dim=input_dim),
Activation('relu'),
Dropout(0.2),
Dense(hidden_neurons),
Activation('relu'),
Dropout(0.2),
Dense(output_dim, activation='softmax')
])
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
def plot_model_performance(train_loss, train_acc, train_val_loss, train_val_acc):
""" Plot model loss and accuracy through epochs. """
green = '#72C29B'
orange = '#FFA577'
with plt.xkcd():
fig, (ax1, ax2) = plt.subplots(2, figsize=(10, 8))
ax1.plot(range(1, len(train_loss) + 1), train_loss, green, linewidth=5,
label='training')
ax1.plot(range(1, len(train_val_loss) + 1), train_val_loss, orange,
linewidth=5, label='validation')
ax1.set_xlabel('# epoch')
ax1.set_ylabel('loss')
ax1.tick_params('y')
ax1.legend(loc='upper right', shadow=False)
ax1.set_title('Model loss through #epochs', fontweight='bold')
ax2.plot(range(1, len(train_acc) + 1), train_acc, green, linewidth=5,
label='training')
ax2.plot(range(1, len(train_val_acc) + 1), train_val_acc, orange,
linewidth=5, label='validation')
ax2.set_xlabel('# epoch')
ax2.set_ylabel('accuracy')
ax2.tick_params('y')
ax2.legend(loc='lower right', shadow=False)
ax2.set_title('Model accuracy through #epochs', fontweight='bold')
plt.tight_layout()
plt.show()
if __name__ == '__main__':
nb_samples = 100
# Ensure reproducibility
np.random.seed(CUSTOM_SEED)
sentences = treebank.tagged_sents(tagset='universal')[:nb_samples]
print('a random sentence: \n-> {}'.format(random.choice(sentences)))
tags = set([tag for sentence in treebank.tagged_sents() for _, tag in sentence])
print('nb_tags: {}\ntags: {}'.format(len(tags), tags))
# We use approximately 60% of the tagged sentences for training,
# 20% as the validation set and 20% to evaluate our model.
train_test_cutoff = int(.80 * len(sentences))
training_sentences = sentences[:train_test_cutoff]
testing_sentences = sentences[train_test_cutoff:]
train_val_cutoff = int(.25 * len(training_sentences))
validation_sentences = training_sentences[:train_val_cutoff]
training_sentences = training_sentences[train_val_cutoff:]
# For training, validation and testing sentences, we split the
# attributes into X (input variables) and y (output variables).
X_train, y_train = transform_to_dataset(training_sentences)
X_test, y_test = transform_to_dataset(testing_sentences)
X_val, y_val = transform_to_dataset(validation_sentences)
# Fit our DictVectorizer with our set of features
dict_vectorizer = DictVectorizer(sparse=False)
dict_vectorizer.fit(X_train + X_test + X_val)
# Convert dict features to vectors
X_train_vect = dict_vectorizer.transform(X_train)
X_test_vect = dict_vectorizer.transform(X_test)
X_val_vect = dict_vectorizer.transform(X_val)
# Fit LabelEncoder with our list of classes
label_encoder = LabelEncoder()
label_encoder.fit(y_train + y_test + y_val)
# Encode class values as integers
y_train_enc = label_encoder.transform(y_train)
y_test_enc = label_encoder.transform(y_test)
y_val_enc = label_encoder.transform(y_val)
# Convert integers to dummy variables (one hot encoded)
y_train_dummy = np_utils.to_categorical(y_train_enc)
y_test_dummy = np_utils.to_categorical(y_test_enc)
y_val_dummy = np_utils.to_categorical(y_val_enc)
# Set model parameters
model_params = {
'build_fn': build_model,
'input_dim': X_train_vect.shape[1],
'hidden_neurons': 512,
'output_dim': y_train_dummy.shape[1],
'epochs': 5,
'batch_size': 256,
'verbose': 1,
'validation_data': (X_val_vect, y_val_dummy),
'shuffle': True
}
# Create a new sklearn classifier
clf = KerasClassifier(**model_params)
# Finally, fit our classifier
hist = clf.fit(X_train_vect, y_train_dummy)
# Plot model performance
plot_model_performance(
train_loss=hist.history.get('loss', []),
train_acc=hist.history.get('acc', []),
train_val_loss=hist.history.get('val_loss', []),
train_val_acc=hist.history.get('val_acc', [])
)
# Evaluate model accuracy
score = clf.score(X_test_vect, y_test_dummy, verbose=0)
print('model accuracy: {}'.format(score))
# Compute classification report
y_preds = clf.predict(X_test_vect)
# Our target names are our label encoded targets
target_names = label_encoder.classes_
# Compute classification report
classif_report = classification_report(
y_true=y_test_enc, y_pred=y_preds,
target_names=target_names
)
print(classif_report)
# Visualize model architecture
plot_model(clf.model, to_file='tmp/model_structure.png', show_shapes=True)
# Finally save model
clf.model.save('/tmp/keras_mlp.h5')
|
[
"nltk.corpus.treebank.tagged_sents",
"keras.wrappers.scikit_learn.KerasClassifier",
"numpy.random.seed",
"matplotlib.pyplot.show",
"keras.layers.Activation",
"keras.layers.Dropout",
"random.choice",
"sklearn.preprocessing.LabelEncoder",
"sklearn.metrics.classification_report",
"matplotlib.pyplot.subplots",
"keras.utils.np_utils.to_categorical",
"keras.utils.plot_model",
"sklearn.feature_extraction.DictVectorizer",
"keras.layers.Dense",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.xkcd"
] |
[((4196, 4214), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4212, 4214), True, 'import matplotlib.pyplot as plt\n'), ((4220, 4230), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4228, 4230), True, 'import matplotlib.pyplot as plt\n'), ((4322, 4349), 'numpy.random.seed', 'np.random.seed', (['CUSTOM_SEED'], {}), '(CUSTOM_SEED)\n', (4336, 4349), True, 'import numpy as np\n'), ((5544, 5572), 'sklearn.feature_extraction.DictVectorizer', 'DictVectorizer', ([], {'sparse': '(False)'}), '(sparse=False)\n', (5558, 5572), False, 'from sklearn.feature_extraction import DictVectorizer\n'), ((5897, 5911), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (5909, 5911), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((6236, 6272), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_train_enc'], {}), '(y_train_enc)\n', (6259, 6272), False, 'from keras.utils import np_utils, plot_model\n'), ((6293, 6328), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_test_enc'], {}), '(y_test_enc)\n', (6316, 6328), False, 'from keras.utils import np_utils, plot_model\n'), ((6348, 6382), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_val_enc'], {}), '(y_val_enc)\n', (6371, 6382), False, 'from keras.utils import np_utils, plot_model\n'), ((6805, 6836), 'keras.wrappers.scikit_learn.KerasClassifier', 'KerasClassifier', ([], {}), '(**model_params)\n', (6820, 6836), False, 'from keras.wrappers.scikit_learn import KerasClassifier\n'), ((7578, 7666), 'sklearn.metrics.classification_report', 'classification_report', ([], {'y_true': 'y_test_enc', 'y_pred': 'y_preds', 'target_names': 'target_names'}), '(y_true=y_test_enc, y_pred=y_preds, target_names=\n target_names)\n', (7599, 7666), False, 'from sklearn.metrics import classification_report\n'), ((7757, 7831), 'keras.utils.plot_model', 'plot_model', (['clf.model'], {'to_file': '"""tmp/model_structure.png"""', 'show_shapes': '(True)'}), "(clf.model, to_file='tmp/model_structure.png', show_shapes=True)\n", (7767, 7831), False, 'from keras.utils import np_utils, plot_model\n'), ((3177, 3187), 'matplotlib.pyplot.xkcd', 'plt.xkcd', ([], {}), '()\n', (3185, 3187), True, 'import matplotlib.pyplot as plt\n'), ((3216, 3248), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {'figsize': '(10, 8)'}), '(2, figsize=(10, 8))\n', (3228, 3248), True, 'import matplotlib.pyplot as plt\n'), ((4369, 4410), 'nltk.corpus.treebank.tagged_sents', 'treebank.tagged_sents', ([], {'tagset': '"""universal"""'}), "(tagset='universal')\n", (4390, 4410), False, 'from nltk.corpus import treebank\n'), ((2622, 2664), 'keras.layers.Dense', 'Dense', (['hidden_neurons'], {'input_dim': 'input_dim'}), '(hidden_neurons, input_dim=input_dim)\n', (2627, 2664), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((2675, 2693), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2685, 2693), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((2704, 2716), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2711, 2716), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((2727, 2748), 'keras.layers.Dense', 'Dense', (['hidden_neurons'], {}), '(hidden_neurons)\n', (2732, 2748), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((2759, 2777), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2769, 2777), False, 'from keras.layers import Dense, Dropout, 
Activation\n'), ((2788, 2800), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2795, 2800), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((2811, 2850), 'keras.layers.Dense', 'Dense', (['output_dim'], {'activation': '"""softmax"""'}), "(output_dim, activation='softmax')\n", (2816, 2850), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((4471, 4495), 'random.choice', 'random.choice', (['sentences'], {}), '(sentences)\n', (4484, 4495), False, 'import random\n'), ((4537, 4560), 'nltk.corpus.treebank.tagged_sents', 'treebank.tagged_sents', ([], {}), '()\n', (4558, 4560), False, 'from nltk.corpus import treebank\n')]
|
import pandas as pd
import numpy as np
from .QCBase import VarNames
class Exporter(object):
""" Export class which writes parsed data to a certain format"""
valid_formats = ["pdf", "xlsx", "txt", "csv", "dataframe"]
def __init__(self, data=None):
self.data = data
# for later: add pandas independent functions to export arrays to file
def arrays_to_dframe(self, **kwargs):
""" Using keyworded arguments, expects arrays """
try:
df = pd.DataFrame(kwargs)
except ValueError: #if arrays do not have the same length
d = {}
for key, value in kwargs.items():
d[key] = pd.Series(value)
df = pd.DataFrame(d)
return df
def ExcitedStateSummary(self, results, fname="es_smry", fmt="csv",
ground_state=False):
""" Exports energy related excited state quantities to file
Parameters
----------
results : CCParser.ParseContainer
Parsing container that holds parsed values.
fname : string
Filename prefix.
fmt : string
Output format ('csv', 'xlsx'/'xls' or 'df' for pandas.DataFrame).
ground_state : bool
Whether to include an empty line in the table for the ground state.
"""
if fmt not in Exporter.valid_formats:
raise ValueError("File format '{0:}' not recognized or supported!".format(fmt))
if False in getattr(results, VarNames.has_converged).data:
raise ValueError("Not converged state detected!")
d = {}
# (1) Excitation energies (default minimum)
#if hasattr(results, VarNames.exc_energy_rel):
d[VarNames.exc_energy_rel] = getattr(results, VarNames.exc_energy_rel).data
n_states = len(d[VarNames.exc_energy_rel])
# (2) Oscillator strengths
if hasattr(results, VarNames.osc_str):
d[VarNames.osc_str] = getattr(results, VarNames.osc_str).data
# (3) Amplitudes
if hasattr(results, VarNames.amplitudes):
ampl = getattr(results, VarNames.amplitudes)
pieces = [a.to_dataframe() for a in ampl]
key = [x for x in range(1,len(pieces)+1)]
amp_df = pd.concat(pieces, keys=key, names=["State", "Row ID"])
# prepare MultiIndex (there has to be a better way to do that...)
arrays = [[x for x in range(1, n_states+1)],
[0 for x in range(n_states)]]
tuples = list(zip(*arrays))# asterisk unpacks
df1 = pd.DataFrame(d)
df1.index = pd.MultiIndex.from_tuples(tuples, names=["State", "Row ID"])
df = pd.concat([df1, amp_df], axis=1)
# add row to MultiIndex, see https://stackoverflow.com/q/24917700
if ground_state:
df.loc[(0,0),:] = np.nan
df.sort_index(level=0, inplace=True)
# EXPORT TO FILE or dataframe
fout = fname + "." + fmt
if fmt == "csv":
df.to_csv(fout, encoding="utf-8")
elif fmt == ("xlsx" or "xls"):
writer = pd.ExcelWriter(fout)
df.to_excel(writer, "Sheet1")
writer.save()
elif fmt.lower() in ("dataframe", "df"):
return df
def ReducedWeights(self, results, nbsfA, extern=None, fmt="print",
fname="AmplAnl", silent=False):
""" Calculate reduced weights based on fragment information.
The reduced weight for a single excitation :math:`i \\rightarrow a` is defined as
:math:`v_{i}^{a} = 0.5\\cdot(c_{i,A}^{2} + c_{a,A}^{2})\\cdot w_{i}^{a}`, with
c and w being the molecular orbital coefficient and transition weight,
respectively.
The MO coefficients from the output first have to be transformed to an
orthonormal basis.
Parameters
----------
results : CCParser.ParseContainer
Container object which contains excited state amplitudes
nbsfA : int
Number of basis functions on System A (assumes system A comes first!)
extern : CCParser.ParseContainer
Optional second container which contains orthonormalisation matrix and/or MO coefficients
fmt : string
Output format. Available are "print", "dataframe", "xlsx" or "csv"
fname : string
Output file name (basename only).
silent : bool
Whether to ignore lengthy printouts.
"""
# consistency
has_extern = extern is not None
if False in getattr(results, VarNames.has_converged).data:
raise ValueError("Not converged state detected!")
if not has_extern and not hasattr(results, VarNames.orthonorm_matrix):
raise AttributeError("Could not find orthonormalization matrix! Was it parsed?")
elif has_extern and not hasattr(extern, VarNames.orthonorm_matrix):
raise AttributeError("Could not find orthonormalization matrix! Was it parsed?")
elif not has_extern and not hasattr(results, VarNames.mo_coefficients):
raise AttributeError("Could not find MO coefficients! Were they parsed?")
elif has_extern and not hasattr(extern, VarNames.mo_coefficients):
raise AttributeError("Could not find MO coefficients! Were they parsed?")
elif not hasattr(results, VarNames.amplitudes):
raise AttributeError("Could not find amplitudes! Were they parsed?")
elif not hasattr(results, VarNames.n_bas):
raise AttributeError("Could not find number of basis functions! Was it parsed?")
else:
# (1) Orthonormalization matrix, hardcoded last
X = getattr(results, VarNames.orthonorm_matrix).get_last() if not \
has_extern else getattr(extern, VarNames.orthonorm_matrix).get_last()
X_inv = np.linalg.inv(X)
# (2) MO coeffiecients, hardcoded last
C = getattr(results, VarNames.mo_coefficients).get_last() if not \
has_extern else getattr(extern, VarNames.mo_coefficients).get_last()
C_prime = C * X_inv # Szabo, Ostlund, page 142
max_mo = C.shape[0]
# (3) Amplitudes
ampl = getattr(results, VarNames.amplitudes)
n_states = len(ampl)
# (4) Number of basis functions
nbsf = getattr(results, VarNames.n_bas).get_last()
# (4) Output variables
sum_weights = [0 for i in range(n_states)]
sum_redweights = [0 for i in range(n_states)]
# --------------
sos_A = [0 for a in range(C_prime.shape[0])]
sos_B = [0 for a in range(C_prime.shape[0])]
for c, vect in enumerate(C_prime):
for n in range(nbsf):
if n < nbsfA:
sos_A[c] += vect[0,n]**2
else:
sos_B[c] += vect[0,n]**2
for i,a in enumerate(ampl):#state
for t in range(len(a.occ)):#transition
if max(a.virt[t]) > max_mo:
if not silent:
print("State {0:>2d}: Omitting transition with weight \
{1:.1%} due to missing MO coefficients.".format(i+1, a.weights[t]))
continue
if len(a.occ[t]) == 1:#single amplitudes
rw = 0.5*(sos_A[a.occ[t][0]-1] + sos_A[a.virt[t][0]-1]) * a.weights[t]
elif len(a.occ[t]) == 2:#double amplitudes
rw = 0.25*(sos_A[a.occ[t][0]-1] + sos_A[a.occ[t][1]-1] +
sos_A[a.virt[t][0]-1] + sos_A[a.virt[t][1]-1]
)*a.weights[t]
else:
raise IndexError("Currently no more than double \
amplitudes are supported!")
sum_weights[i] += a.weights[t]
sum_redweights[i] += rw
#----------------
# Export as
fout = fname + "." + fmt
d = {"State": [i+1 for i in range(n_states)],
"sum_weight" : sum_weights,
"sum_red_weight" : sum_redweights}
df = pd.DataFrame(d)
df = df.assign(diff=df["sum_weight"]-df["sum_red_weight"],
ratio=df["sum_red_weight"]/df["sum_weight"])
if fmt == "print":
print("State | Sum(W) | Sum(P) | Sum(W) - Sum(P) | ratio P/W |\n",50*"-")
for i in range(n_states):
print(" S{0:>2d} | {1:.3f} | {2:.3f} | {3:15.3f} | {4:.1%}".format(
i+1, sum_weights[i], sum_redweights[i], sum_weights[i] -
sum_redweights[i], sum_redweights[i]/sum_weights[i]))
elif fmt == "dataframe":
return df
elif fmt == "csv":
df.to_csv(fout, encoding="utf-8")
elif fmt == "xlsx" or fmt == "xls":
writer = pd.ExcelWriter(fout)
df.to_excel(writer, "Sheet1")
writer.save()
else:
raise ValueError("Output format not supported!")
def MO_Molden(self, results, atom_basis, fname="molecular_orbitals",
tmp_5d=True):
""" Writes molecular orbitals to a molden file.
Expects molecular geometry in Angstrom.
More information on the molden format at
http://www.cmbi.ru.nl/molden/molden_format.html
Parameters
----------
results : CCParser.ParseContainer
Container object which holds MO coefficients.
atom_basis : dict
Dictionary mapping GTO exponents/coefficients to atoms. Expected
format of dictionary entry is list of strings.
fname : string
Output file name.
"""
from .QCBase import PeriodicTable
import re
C = results.C.get_last()
xyz = results.xyz.get_last()
en = results.mo_energies.get_last()
PeTa = PeriodicTable()
#TODO: Permutator needed in case of different formats (Molcas, Gaussian)
with open(fname+".molden", "w") as out:
out.write("[Molden Format]\n")
# write XYZ
out.write("[Atoms] (Angs)\n")
for i,atom in enumerate(xyz):
num = PeTa.get_atomic_num(atom[0])
out.write("{0:>3}{1:7d}{2:5d}".format(atom[0], i+1, num))
out.write("".join("{0:16.8f}".format(c) for c in atom[1:])+"\n")
# write basis exponents
out.write("[GTO]\n")
for n in range(len(xyz)):
# atom sequence number, 0
out.write("{0:d}{1:5d}\n".format(n+1, 0))
symb = xyz[n][0].upper()
#a = atom.upper()
basis = atom_basis[symb]
for coeff in basis:
# shell label, number of primitives, 1.00
if re.search(r"[SDPF]", coeff[0]):
out.write("{0:}{1:6d}{2:12.6f}\n".format(
coeff[0], int(coeff[1]), float(coeff[2])))
# exponent, contraction coefficient
else:
out.write("{0:18.8e}{1:18.8e}\n".format(
float(coeff[0]), float(coeff[1])))
out.write("\n")
for imo in range(C.shape[0]):#assumes counting from MO 1 !!
out.write("[MO]\nSym=X\n")
if imo < en.n_occ:#occupied
out.write("Ene={0:12.6f}\n".format(en.occ[imo]))
out.write("Spin=alpha\n")
out.write("Occup=1\n")
else:#virtual
out.write("Ene={0:12.6f}\n".format(en.virt[imo]))
out.write("Spin=alpha\n")
out.write("Occup=0\n")
for i in range(C.shape[1]):
out.write("{0:6d}{1: 22.12e}\n".format(i+1,C[imo, i]))
if tmp_5d:
out.write("[5D]\n")
print("MOs written to Molden file.")
|
[
"pandas.DataFrame",
"pandas.MultiIndex.from_tuples",
"numpy.linalg.inv",
"pandas.Series",
"re.search",
"pandas.ExcelWriter",
"pandas.concat"
] |
[((2617, 2632), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (2629, 2632), True, 'import pandas as pd\n'), ((2653, 2713), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (['tuples'], {'names': "['State', 'Row ID']"}), "(tuples, names=['State', 'Row ID'])\n", (2678, 2713), True, 'import pandas as pd\n'), ((2736, 2768), 'pandas.concat', 'pd.concat', (['[df1, amp_df]'], {'axis': '(1)'}), '([df1, amp_df], axis=1)\n', (2745, 2768), True, 'import pandas as pd\n'), ((510, 530), 'pandas.DataFrame', 'pd.DataFrame', (['kwargs'], {}), '(kwargs)\n', (522, 530), True, 'import pandas as pd\n'), ((2306, 2360), 'pandas.concat', 'pd.concat', (['pieces'], {'keys': 'key', 'names': "['State', 'Row ID']"}), "(pieces, keys=key, names=['State', 'Row ID'])\n", (2315, 2360), True, 'import pandas as pd\n'), ((721, 736), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (733, 736), True, 'import pandas as pd\n'), ((3165, 3185), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['fout'], {}), '(fout)\n', (3179, 3185), True, 'import pandas as pd\n'), ((687, 703), 'pandas.Series', 'pd.Series', (['value'], {}), '(value)\n', (696, 703), True, 'import pandas as pd\n'), ((11200, 11229), 're.search', 're.search', (['"""[SDPF]"""', 'coeff[0]'], {}), "('[SDPF]', coeff[0])\n", (11209, 11229), False, 'import re\n'), ((6002, 6018), 'numpy.linalg.inv', 'np.linalg.inv', (['X'], {}), '(X)\n', (6015, 6018), True, 'import numpy as np\n'), ((8373, 8388), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (8385, 8388), True, 'import pandas as pd\n'), ((9170, 9190), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['fout'], {}), '(fout)\n', (9184, 9190), True, 'import pandas as pd\n')]
|
"""
4) Sinkhorn vs. blurred Wasserstein distances
==========================================================
Sinkhorn divergences rely on a simple idea:
by **blurring** the transport plan through the addition of
an entropic penalty, we can reduce the effective dimensionality
of the transportation problem and compute **sensible approximations of the
Wasserstein distance at a low computational cost**.
"""
##################################################
# As discussed in previous notebooks, the *vanilla* Sinkhorn loop
# can be symmetrized, de-biased and turned into a genuine
# multiscale algorithm: available through the
# :mod:`SamplesLoss("sinkhorn") <geomloss.SamplesLoss>` layer, the **Sinkhorn divergence**
#
# .. math::
# \text{S}_\varepsilon(\alpha,\beta)~=~ \text{OT}_\varepsilon(\alpha,\beta)
# - \tfrac{1}{2}\text{OT}_\varepsilon(\alpha,\alpha)
# - \tfrac{1}{2}\text{OT}_\varepsilon(\beta,\beta),
#
# is a tractable approximation of the Wasserstein distance
# that **retains its key geometric properties** - positivity, convexity,
# metrization of the convergence in law.
#
# **But is it really the best way of smoothing our transportation problem?**
# When "p = 2" and :math:`\text{C}(x,y)=\tfrac{1}{2}\|x-y\|^2`,
# a very sensible alternative to Sinkhorn divergences is the
# **blurred Wasserstein distance**
#
# .. math::
# \text{B}_\varepsilon(\alpha,\beta) ~=~ \text{W}_2(\,k_{\varepsilon/4}\star\alpha,\,k_{\varepsilon/4}\star\beta\,),
#
# where :math:`\text{W}_2` denotes the *true* Wasserstein distance associated to
# our cost function :math:`\text{C}` and
#
# .. math::
# k_{\varepsilon/4}: (x-y) \mapsto \exp(-\|x-y\|^2 / \tfrac{2}{4}\varepsilon)
#
# is a Gaussian kernel of deviation :math:`\sigma = \sqrt{\varepsilon}/2`.
# On top of making explicit our intuitions on **low-frequency Optimal Transport**, this
# simple divergence enjoys a collection of desirable properties:
#
# - It is the **square of a distance** that metrizes the convergence in law.
# - It takes the "correct" values on atomic **Dirac masses**, lifting
# the ground cost function to the space of positive measures:
#
# .. math::
# \text{B}_\varepsilon(\delta_x,\delta_y)~=~\text{C}(x,y)
# ~=~\tfrac{1}{2}\|x-y\|^2~=~\text{S}_\varepsilon(\delta_x,\delta_y).
#
# - It has the same **asymptotic properties** as the Sinkhorn divergence,
# interpolating between the true Wasserstein distance (when :math:`\varepsilon \rightarrow 0`)
# and a degenerate kernel norm (when :math:`\varepsilon \rightarrow +\infty`).
# - Thanks to the joint convexity of the Wasserstein distance,
# :math:`\text{B}_\varepsilon(\alpha,\beta)` is a **decreasing** function of :math:`\varepsilon`:
# as we remove small-scale details, we lower the overall transport cost.
#
# To compare the Sinkhorn and blurred Wasserstein divergences, a simple experiment
# is to **display their values on pairs of 1D measures** for increasing values of
# the temperature :math:`\varepsilon`:
# having generated random samples :math:`\alpha` and :math:`\beta`
# on the unit interval, we can simply compute :math:`\text{S}_\varepsilon(\alpha,\beta)`
# with our :mod:`SamplesLoss("sinkhorn") <geomloss.SamplesLoss>` layer
# while the blurred Wasserstein loss :math:`\text{B}_\varepsilon(\alpha,\beta)` can be
# quickly approximated with the **addition of Gaussian noise** followed
# by a **sorting pass**.
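#
# As a minimal, hypothetical sketch (an illustration, not part of the GeomLoss
# API), the 1D estimator used below boils down to: jitter both point clouds
# with Gaussian noise of deviation blur/2, sort them, and average the squared
# differences::
#
#   x_b, _ = (x + 0.5 * blur * torch.randn(x.shape)).sort(dim=0)
#   y_b, _ = (y + 0.5 * blur * torch.randn(y.shape)).sort(dim=0)
#   b_eps  = (0.5 / len(x)) * ((x_b - y_b) ** 2).sum()
#
# The experiment below does exactly this, using several noisy copies of each
# sample to get a better approximation of the blurred measures.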
##############################################
# Setup
# ---------------------
# Standard imports:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import KernelDensity # display as density curves
import torch
from geomloss import SamplesLoss
use_cuda = torch.cuda.is_available()
# N.B.: We use float64 numbers to get nice limits when blur -> +infinity
dtype = torch.cuda.DoubleTensor if use_cuda else torch.DoubleTensor
###############################################
# Display routine:
t_plot = np.linspace(-0.5, 1.5, 1000)[:, np.newaxis]
def display_samples(ax, x, color, label=None):
"""Displays samples on the unit interval using a density curve."""
kde = KernelDensity(kernel="gaussian", bandwidth=0.005).fit(x.data.cpu().numpy())
dens = np.exp(kde.score_samples(t_plot))
dens[0] = 0
dens[-1] = 0
ax.fill(t_plot, dens, color=color, label=label)
###############################################
# Experiment
# -------------
def rweight():
"""Random weight."""
return torch.rand(1).type(dtype)
N = 100 if not use_cuda else 10 ** 3 # Number of samples per measure
C = 100 if not use_cuda else 10000 # number of copies for the Gaussian blur
for _ in range(5): # Repeat the experiment 5 times
K = 5 # Generate random 1D measures as the superposition of K=5 intervals
t = torch.linspace(0, 1, N // K).type(dtype).view(-1, 1)
X_i = torch.cat([rweight() ** 2 * t + rweight() - 0.5 for k in range(K)], dim=0)
Y_j = torch.cat([rweight() ** 2 * t + rweight() - 0.5 for k in range(K)], dim=0)
# Compute the limits when blur = 0...
x_, _ = X_i.sort(dim=0)
y_, _ = Y_j.sort(dim=0)
true_wass = (0.5 / len(X_i)) * ((x_ - y_) ** 2).sum()
true_wass = true_wass.item()
# and when blur = +infinity:
mean_diff = 0.5 * ((X_i.mean(0) - Y_j.mean(0)) ** 2).sum()
mean_diff = mean_diff.item()
blurs = [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0]
sink, bwass = [], []
for blur in blurs:
# Compute the Sinkhorn divergence:
# N.B.: To be super-precise, we use the well-tested "online" backend
# with a very large 'scaling' coefficient
loss = SamplesLoss("sinkhorn", p=2, blur=blur, scaling=0.99, backend="online")
sink.append(loss(X_i, Y_j).item())
# Compute the blurred Wasserstein distance:
x_i = torch.cat([X_i] * C, dim=0)
y_j = torch.cat([Y_j] * C, dim=0)
x_i = x_i + 0.5 * blur * torch.randn(x_i.shape).type(dtype)
y_j = y_j + 0.5 * blur * torch.randn(y_j.shape).type(dtype)
x_, _ = x_i.sort(dim=0)
y_, _ = y_j.sort(dim=0)
wass = (0.5 / len(x_i)) * ((x_ - y_) ** 2).sum()
bwass.append(wass.item())
# Fancy display:
plt.figure(figsize=(12, 5))
if N < 10 ** 5:
ax = plt.subplot(1, 2, 1)
display_samples(ax, X_i, (1.0, 0, 0, 0.5), label="$\\alpha$")
display_samples(ax, Y_j, (0, 0, 1.0, 0.5), label="$\\beta$")
plt.axis([-0.5, 1.5, -0.1, 5.5])
plt.ylabel("density")
ax.legend()
plt.tight_layout()
ax = plt.subplot(1, 2, 2)
plt.plot([0.01, 10], [true_wass, true_wass], "g", label="True Wasserstein")
plt.plot(blurs, sink, "r-o", label="Sinkhorn divergence")
plt.plot(blurs, bwass, "b-o", label="Blurred Wasserstein")
plt.plot(
[0.01, 10], [mean_diff, mean_diff], "m", label="Squared difference of means"
)
ax.set_xscale("log")
ax.legend()
plt.axis([0.01, 10.0, 0.0, 1.5 * bwass[0]])
plt.xlabel("blur $\\sqrt{\\varepsilon}$")
plt.tight_layout()
plt.show()
##################################################
# Conclusion
# --------------
#
# In practice, the Sinkhorn and blurred Wasserstein divergences
# are **nearly indistinguishable**. But as far as we can tell *today*,
# these two loss functions have very different properties:
#
# - :math:`\text{B}_\varepsilon` is **easy to define**, compute in 1D and
#   **analyze** from a geometric or statistical point of view...
# But cannot (?) be computed efficiently in higher dimensions,
# where the true OT problem is nearly intractable.
# - :math:`\text{S}_\varepsilon` is simply available through
# the :mod:`SamplesLoss("sinkhorn") <geomloss.SamplesLoss>` layer,
# but has a weird, composite definition and is pretty **hard to**
# **study** rigorously - as evidenced by recent, technical proofs
# of `positivity, definiteness (Feydy et al., 2018) <https://arxiv.org/abs/1810.08278>`_
# and `sample complexity (Genevay et al., 2018) <https://arxiv.org/abs/1810.02733>`_.
#
# **So couldn't we get the best of both worlds?**
# In an ideal world, we'd like to tweak the *efficient* multiscale Sinkhorn algorithm
# to compute the *natural* divergence :math:`\text{B}_\varepsilon`...
# but this may be out of reach. A realistic target could be to **quantify**
# **the difference** between these two objects, thus legitimizing the
# use of the :mod:`SamplesLoss("sinkhorn") <geomloss.SamplesLoss>` layer
# as a **cheap proxy** for the intuitive and well-understood *blurred Wasserstein distance*.
#
# In my opinion, investigating the link between these two quantities
# is one of the most interesting questions left open in the field of discrete entropic OT.
# The geometric loss functions implemented in GeomLoss are probably *good enough*
# for most practical purposes,
# but getting a **rigorous understanding** of the multiscale,
# wavelet-like behavior of our algorithms
# as we add small details through an exponential decay of
# the blurring scale :math:`\sqrt{\varepsilon}` would be truly insightful.
# In some sense, couldn't we prove a
# `Hilbert <https://en.wikipedia.org/wiki/Orthonormal_basis>`_-`Plancherel <https://en.wikipedia.org/wiki/Plancherel_theorem>`_
# theorem for the Wasserstein distance?
#
|
[
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"geomloss.SamplesLoss",
"sklearn.neighbors.KernelDensity",
"matplotlib.pyplot.axis",
"torch.cat",
"torch.randn",
"matplotlib.pyplot.figure",
"torch.cuda.is_available",
"numpy.linspace",
"torch.rand",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tight_layout",
"torch.linspace"
] |
[((3677, 3702), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3700, 3702), False, 'import torch\n'), ((3923, 3951), 'numpy.linspace', 'np.linspace', (['(-0.5)', '(1.5)', '(1000)'], {}), '(-0.5, 1.5, 1000)\n', (3934, 3951), True, 'import numpy as np\n'), ((6170, 6197), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 5)'}), '(figsize=(12, 5))\n', (6180, 6197), True, 'import matplotlib.pyplot as plt\n'), ((6520, 6540), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (6531, 6540), True, 'import matplotlib.pyplot as plt\n'), ((6545, 6620), 'matplotlib.pyplot.plot', 'plt.plot', (['[0.01, 10]', '[true_wass, true_wass]', '"""g"""'], {'label': '"""True Wasserstein"""'}), "([0.01, 10], [true_wass, true_wass], 'g', label='True Wasserstein')\n", (6553, 6620), True, 'import matplotlib.pyplot as plt\n'), ((6625, 6682), 'matplotlib.pyplot.plot', 'plt.plot', (['blurs', 'sink', '"""r-o"""'], {'label': '"""Sinkhorn divergence"""'}), "(blurs, sink, 'r-o', label='Sinkhorn divergence')\n", (6633, 6682), True, 'import matplotlib.pyplot as plt\n'), ((6687, 6745), 'matplotlib.pyplot.plot', 'plt.plot', (['blurs', 'bwass', '"""b-o"""'], {'label': '"""Blurred Wasserstein"""'}), "(blurs, bwass, 'b-o', label='Blurred Wasserstein')\n", (6695, 6745), True, 'import matplotlib.pyplot as plt\n'), ((6750, 6841), 'matplotlib.pyplot.plot', 'plt.plot', (['[0.01, 10]', '[mean_diff, mean_diff]', '"""m"""'], {'label': '"""Squared difference of means"""'}), "([0.01, 10], [mean_diff, mean_diff], 'm', label=\n 'Squared difference of means')\n", (6758, 6841), True, 'import matplotlib.pyplot as plt\n'), ((6896, 6939), 'matplotlib.pyplot.axis', 'plt.axis', (['[0.01, 10.0, 0.0, 1.5 * bwass[0]]'], {}), '([0.01, 10.0, 0.0, 1.5 * bwass[0]])\n', (6904, 6939), True, 'import matplotlib.pyplot as plt\n'), ((6944, 6985), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""blur $\\\\sqrt{\\\\varepsilon}$"""'], {}), "('blur $\\\\sqrt{\\\\varepsilon}$')\n", (6954, 6985), True, 'import matplotlib.pyplot as plt\n'), ((6990, 7008), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7006, 7008), True, 'import matplotlib.pyplot as plt\n'), ((7013, 7023), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7021, 7023), True, 'import matplotlib.pyplot as plt\n'), ((5600, 5671), 'geomloss.SamplesLoss', 'SamplesLoss', (['"""sinkhorn"""'], {'p': '(2)', 'blur': 'blur', 'scaling': '(0.99)', 'backend': '"""online"""'}), "('sinkhorn', p=2, blur=blur, scaling=0.99, backend='online')\n", (5611, 5671), False, 'from geomloss import SamplesLoss\n'), ((5782, 5809), 'torch.cat', 'torch.cat', (['([X_i] * C)'], {'dim': '(0)'}), '([X_i] * C, dim=0)\n', (5791, 5809), False, 'import torch\n'), ((5824, 5851), 'torch.cat', 'torch.cat', (['([Y_j] * C)'], {'dim': '(0)'}), '([Y_j] * C, dim=0)\n', (5833, 5851), False, 'import torch\n'), ((6232, 6252), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (6243, 6252), True, 'import matplotlib.pyplot as plt\n'), ((6400, 6432), 'matplotlib.pyplot.axis', 'plt.axis', (['[-0.5, 1.5, -0.1, 5.5]'], {}), '([-0.5, 1.5, -0.1, 5.5])\n', (6408, 6432), True, 'import matplotlib.pyplot as plt\n'), ((6441, 6462), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""density"""'], {}), "('density')\n", (6451, 6462), True, 'import matplotlib.pyplot as plt\n'), ((6491, 6509), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6507, 6509), True, 'import matplotlib.pyplot as plt\n'), ((4097, 
4146), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {'kernel': '"""gaussian"""', 'bandwidth': '(0.005)'}), "(kernel='gaussian', bandwidth=0.005)\n", (4110, 4146), False, 'from sklearn.neighbors import KernelDensity\n'), ((4435, 4448), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (4445, 4448), False, 'import torch\n'), ((4750, 4778), 'torch.linspace', 'torch.linspace', (['(0)', '(1)', '(N // K)'], {}), '(0, 1, N // K)\n', (4764, 4778), False, 'import torch\n'), ((5885, 5907), 'torch.randn', 'torch.randn', (['x_i.shape'], {}), '(x_i.shape)\n', (5896, 5907), False, 'import torch\n'), ((5953, 5975), 'torch.randn', 'torch.randn', (['y_j.shape'], {}), '(y_j.shape)\n', (5964, 5975), False, 'import torch\n')]
|
#!/usr/bin/env python3
import cv2 as cv
import json
import math
import numpy as np
import os
import sys
from requests.utils import requote_uri
from geojson import FeatureCollection, Feature, Polygon, dumps
config = json.load(open("config.json","r"))
target = config.get('target')
tilesize = config.get('tilesize')
maxzoom = config.get('maxzoom')
spacing = config.get('spacing')
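# Expected config.json layout (hypothetical example, inferred from the keys
# read above and from the ten-column "sources" rows unpacked in the loop
# below):
# {
#   "target": "output/",
#   "tilesize": 256,
#   "maxzoom": 8,
#   "spacing": 0.01,
#   "sources": [
#     ["scan.png", 0, 0, 6, "Title", "Family", "1900", "Location", "Comment", "https://example.org"]
#   ]
# }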
tile_format = '.webp'
LLBOUNDS = [-180.0, 180.0, -180.0, 180.0]
match = None
if len(sys.argv)>=2:
match = sys.argv[1]
# pixel coordinates as x,y
# tile coordinates as t,u
def xy_to_latlon(x,y,zoom):
max_x = -float(math.pow(2,zoom-1) * tilesize)
lat = x / max_x * LLBOUNDS[1]
max_y = float(math.pow(2,zoom-1) * tilesize)
lon = y / max_y * LLBOUNDS[3]
return lat,lon
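# Worked example: at zoom 1 and assuming tilesize = 256 in config.json,
# xy_to_latlon(256, 256, 1) returns (-180.0, 180.0); the sign flip on the
# first coordinate comes from the negative max_x above.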
features = []
prev_x, prev_y, prev_zoom = None, None, None
ymax = -1e10
for source in config.get('sources',[]):
    if len(source)<10:  # a source row needs all ten columns unpacked below
continue
filename, xrel, yrel, imgzoom, title, family, date, location, comment, href = source[:10]
# auto-place after spacing
if xrel=="+":
xrel = prev_x + int((2**imgzoom) * spacing)
xrel = xrel * (2**(imgzoom-prev_zoom))
print("CALCULATED NEW X FROM", prev_x, " AS ", xrel)
if yrel=="+":
yrel = prev_y + int((2**imgzoom) * spacing)
yrel = yrel * (2**(imgzoom-prev_zoom))
print("CALCULATED NEW Y FROM", prev_y, " AS ", yrel)
print("Processing ",filename)
source_im = cv.imread(filename, cv.IMREAD_UNCHANGED)
w,h = source_im.shape[:2]
# auto-place centered
if yrel=="=":
yrel = prev_yc * (2**(imgzoom-prev_zoom)) - int(h/2)
print("CALCULATED NEW Y FROM CENTER", prev_yc, " AS ", yrel)
# auto-place right of previous column
elif yrel==">":
yrel = (ymax + 1.0/100) * (2**imgzoom)
print("CALCULATED NEW Y FROM YMAX", ymax, " AS ", yrel, imgzoom)
else:
ymax = yrel
    # might be off by a factor of two, to be verified.
if title:
print(title)
print("PIXEL COORDINATES ", xrel, yrel, xrel+w, yrel+h)
left, top = xy_to_latlon(xrel, yrel, imgzoom)
right, bottom = xy_to_latlon(xrel+w, yrel+h, imgzoom)
poly = Polygon([[(top, left), (top, right), (bottom, right), (bottom, left), (top, left)]])
feat = Feature(geometry=poly, properties = {
"title": title,
"family": family,
"date": date,
"loc": location,
"comment": comment,
"href": href
})
features.append(feat)
#if imgzoom < maxzoom:
# factor = math.pow(2, maxzoom-imgzoom)
# source_im = cv.resize(source_im, (0, 0), fx=factor, fy=factor)
# FIXME: memory issues when blowing up - add maxzoom (and minzoom) to define display range
# calculate outer borders of previous item to calculate relative positions
prev_x = xrel + w
prev_y = yrel + h
prev_yc = yrel + h/2
prev_yr = float(yrel + h) / (2**imgzoom)
if prev_yr > ymax:
ymax = prev_yr
print("NEW YMAX ", ymax, "FROM", yrel, h)
prev_zoom = imgzoom
if match and not match in filename:
continue
zoom = imgzoom
w = h = 256 # just to pass the first check
while zoom > 1 and w > 2 and h > 2:
if zoom <= maxzoom:
# relative zero (center) at the defined zoom level
x0 = math.floor(tilesize * math.pow(2, zoom-1))
y0 = math.floor(tilesize * math.pow(2, zoom-1))
# image coordinates at that zoom level
xi, yi = x0 + xrel, y0 + yrel
# image size
# NOTE: source images should always be transparent png, or overlaps will be covered
w,h = source_im.shape[:2]
wt = math.ceil(w / tilesize)
ht = math.ceil(h / tilesize)
# first tile to consider
t0 = math.floor(xi / tilesize)
u0 = math.floor(yi / tilesize)
# top left of the considered tile
xA = t0 * tilesize
yA = u0 * tilesize
# offset of the image to the first tile
off_x = xi - xA
off_y = yi - yA
off_t = math.floor(off_x / tilesize)
off_u = math.floor(off_y / tilesize)
# CHECK: adjust range to actually cover the location of the translated image
folders={}
for tx in range(0, wt+1): # TODO: try t0-t0+wt
for ty in range(0, ht+1):
# read current background tile
folder = target+"tiles/"+str(zoom)+"/"+str(u0+ty)
tile_url = folder +"/"+str(t0+tx)+tile_format
#print("Loading "+tile_url)
white_tile = np.zeros([tilesize, tilesize, 4],dtype=np.uint8)
#white_tile.fill(255)
bg = cv.imread(tile_url, cv.IMREAD_UNCHANGED)
if bg is None:
bg = white_tile.copy()
bg = cv.cvtColor(bg, cv.COLOR_BGR2BGRA)
# cut relevant section of source_im
from_x = max(0, tx * tilesize - off_x)
from_y = max(0, ty * tilesize - off_y)
to_x = min(w, (tx+1) * tilesize - off_x)
to_y = min(h, (ty+1) * tilesize - off_y)
cutout = source_im[from_x:to_x, from_y:to_y]
# correct location of background
dest_x = max(0, off_x - tx * tilesize)
dest_y = max(0, off_y - ty * tilesize)
dto_x = dest_x + to_x - from_x
dto_y = dest_y + to_y - from_y
# paste cutout onto background
# TODO: actually paste, not overwrite
# eg. overwrite white_tile, then merge with bg
try:
bg[dest_x:dto_x, dest_y:dto_y] = cutout
except:
continue
#print("SOMETHING FAILED")
#cv.imshow('BG',bg)
#print("CUTOUT SIZE:", (from_x, to_x, from_y, to_y))
#print("FROM Y:", (from_y))
#print("TO Y:", (to_y))
#print("H:", h)
#cv.waitKey(1)
#sys.exit(1)
# then write that tile to file
if not folder in folders:
#print("Writing ",folder)
try:
os.makedirs(folder)
folders[folder]=True
except:
pass
cv.imwrite(tile_url, bg)
zoom = zoom - 1
xrel = math.floor(xrel / 2)
yrel = math.floor(yrel / 2)
source_im = cv.resize(source_im, (0, 0), fx=0.5, fy=0.5)
w = math.floor(w / 2)
h = math.floor(h / 2)
fc = FeatureCollection(features)
fp = open(target+"features.geojson", "w")
fp.write(dumps(fc))
fp.close()
def species_link(s):
return '<li><a href="https://setzkasten.relet.net#?{}">{}</a></li>'.format(requote_uri(s),s)
species_list=map(lambda f:f.properties.get('title'), features)
species_links = "\n".join(map(species_link, sorted(species_list)))
fi = open(target+"species_index.html", "w")
fi.write("<html><body><ul>{}<ul></body><html>".format(species_links))
fi.close()
|
[
"cv2.resize",
"geojson.Polygon",
"os.makedirs",
"math.pow",
"geojson.dumps",
"math.ceil",
"cv2.cvtColor",
"cv2.imwrite",
"math.floor",
"geojson.Feature",
"numpy.zeros",
"cv2.imread",
"geojson.FeatureCollection",
"requests.utils.requote_uri"
] |
[((6720, 6747), 'geojson.FeatureCollection', 'FeatureCollection', (['features'], {}), '(features)\n', (6737, 6747), False, 'from geojson import FeatureCollection, Feature, Polygon, dumps\n'), ((1462, 1502), 'cv2.imread', 'cv.imread', (['filename', 'cv.IMREAD_UNCHANGED'], {}), '(filename, cv.IMREAD_UNCHANGED)\n', (1471, 1502), True, 'import cv2 as cv\n'), ((6799, 6808), 'geojson.dumps', 'dumps', (['fc'], {}), '(fc)\n', (6804, 6808), False, 'from geojson import FeatureCollection, Feature, Polygon, dumps\n'), ((2207, 2295), 'geojson.Polygon', 'Polygon', (['[[(top, left), (top, right), (bottom, right), (bottom, left), (top, left)]]'], {}), '([[(top, left), (top, right), (bottom, right), (bottom, left), (top,\n left)]])\n', (2214, 2295), False, 'from geojson import FeatureCollection, Feature, Polygon, dumps\n'), ((2307, 2445), 'geojson.Feature', 'Feature', ([], {'geometry': 'poly', 'properties': "{'title': title, 'family': family, 'date': date, 'loc': location, 'comment':\n comment, 'href': href}"}), "(geometry=poly, properties={'title': title, 'family': family, 'date':\n date, 'loc': location, 'comment': comment, 'href': href})\n", (2314, 2445), False, 'from geojson import FeatureCollection, Feature, Polygon, dumps\n'), ((6540, 6560), 'math.floor', 'math.floor', (['(xrel / 2)'], {}), '(xrel / 2)\n', (6550, 6560), False, 'import math\n'), ((6574, 6594), 'math.floor', 'math.floor', (['(yrel / 2)'], {}), '(yrel / 2)\n', (6584, 6594), False, 'import math\n'), ((6613, 6657), 'cv2.resize', 'cv.resize', (['source_im', '(0, 0)'], {'fx': '(0.5)', 'fy': '(0.5)'}), '(source_im, (0, 0), fx=0.5, fy=0.5)\n', (6622, 6657), True, 'import cv2 as cv\n'), ((6668, 6685), 'math.floor', 'math.floor', (['(w / 2)'], {}), '(w / 2)\n', (6678, 6685), False, 'import math\n'), ((6696, 6713), 'math.floor', 'math.floor', (['(h / 2)'], {}), '(h / 2)\n', (6706, 6713), False, 'import math\n'), ((6920, 6934), 'requests.utils.requote_uri', 'requote_uri', (['s'], {}), '(s)\n', (6931, 6934), False, 'from requests.utils import requote_uri\n'), ((692, 713), 'math.pow', 'math.pow', (['(2)', '(zoom - 1)'], {}), '(2, zoom - 1)\n', (700, 713), False, 'import math\n'), ((3735, 3758), 'math.ceil', 'math.ceil', (['(w / tilesize)'], {}), '(w / tilesize)\n', (3744, 3758), False, 'import math\n'), ((3772, 3795), 'math.ceil', 'math.ceil', (['(h / tilesize)'], {}), '(h / tilesize)\n', (3781, 3795), False, 'import math\n'), ((3843, 3868), 'math.floor', 'math.floor', (['(xi / tilesize)'], {}), '(xi / tilesize)\n', (3853, 3868), False, 'import math\n'), ((3882, 3907), 'math.floor', 'math.floor', (['(yi / tilesize)'], {}), '(yi / tilesize)\n', (3892, 3907), False, 'import math\n'), ((4119, 4147), 'math.floor', 'math.floor', (['(off_x / tilesize)'], {}), '(off_x / tilesize)\n', (4129, 4147), False, 'import math\n'), ((4164, 4192), 'math.floor', 'math.floor', (['(off_y / tilesize)'], {}), '(off_y / tilesize)\n', (4174, 4192), False, 'import math\n'), ((609, 630), 'math.pow', 'math.pow', (['(2)', '(zoom - 1)'], {}), '(2, zoom - 1)\n', (617, 630), False, 'import math\n'), ((3411, 3432), 'math.pow', 'math.pow', (['(2)', '(zoom - 1)'], {}), '(2, zoom - 1)\n', (3419, 3432), False, 'import math\n'), ((3467, 3488), 'math.pow', 'math.pow', (['(2)', '(zoom - 1)'], {}), '(2, zoom - 1)\n', (3475, 3488), False, 'import math\n'), ((4643, 4692), 'numpy.zeros', 'np.zeros', (['[tilesize, tilesize, 4]'], {'dtype': 'np.uint8'}), '([tilesize, tilesize, 4], dtype=np.uint8)\n', (4651, 4692), True, 'import numpy as np\n'), ((4752, 4792), 'cv2.imread', 'cv.imread', 
(['tile_url', 'cv.IMREAD_UNCHANGED'], {}), '(tile_url, cv.IMREAD_UNCHANGED)\n', (4761, 4792), True, 'import cv2 as cv\n'), ((4888, 4922), 'cv2.cvtColor', 'cv.cvtColor', (['bg', 'cv.COLOR_BGR2BGRA'], {}), '(bg, cv.COLOR_BGR2BGRA)\n', (4899, 4922), True, 'import cv2 as cv\n'), ((6479, 6503), 'cv2.imwrite', 'cv.imwrite', (['tile_url', 'bg'], {}), '(tile_url, bg)\n', (6489, 6503), True, 'import cv2 as cv\n'), ((6351, 6370), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (6362, 6370), False, 'import os\n')]
|
import math
import numpy as np
import basis.robot_math as rm
import visualization.panda.world as wd
import modeling.geometric_model as gm
import modeling.collision_model as cm
import robot_sim.end_effectors.gripper.robotiq85.robotiq85 as rtq85
import grasping.annotation.utils as gu
import pickle
base = wd.World(cam_pos=[.3, .3, .3], lookat_pos=[0, 0, 0])
gm.gen_frame(length=.05, thickness=.0021).attach_to(base)
# object
object_bunny = cm.CollisionModel("objects/bunnysim.stl")
object_bunny.set_rgba([.9, .75, .35, .3])
object_bunny.attach_to(base)
# hnd_s
# contact_pairs, contact_points = gpa.plan_contact_pairs(object_bunny,
# max_samples=10000,
# min_dist_between_sampled_contact_points=.014,
# angle_between_contact_normals=math.radians(160),
# toggle_sampled_points=True)
# for p in contact_points:
# gm.gen_sphere(p, radius=.002).attach_to(base)
# base.run()
# pickle.dump(contact_pairs, open( "save.p", "wb" ))
contact_pairs = pickle.load(open( "save.p", "rb" ))
for i, cp in enumerate(contact_pairs):
contact_p0, contact_n0 = cp[0]
contact_p1, contact_n1 = cp[1]
rgba = rm.get_rgba_from_cmap(i)
gm.gen_sphere(contact_p0, radius=.002, rgba=rgba).attach_to(base)
gm.gen_arrow(contact_p0, contact_p0+contact_n0*.01, thickness=.0012, rgba = rgba).attach_to(base)
# gm.gen_arrow(contact_p0, contact_p0-contact_n0*.1, thickness=.0012, rgba = rgba).attach_to(base)
gm.gen_sphere(contact_p1, radius=.002, rgba=rgba).attach_to(base)
# gm.gen_dashstick(contact_p0, contact_p1, thickness=.0012, rgba=rgba).attach_to(base)
gm.gen_arrow(contact_p1, contact_p1+contact_n1*.01, thickness=.0012, rgba=rgba).attach_to(base)
# gm.gen_dasharrow(contact_p1, contact_p1+contact_n1*.03, thickness=.0012, rgba=rgba).attach_to(base)
# base.run()
gripper_s = rtq85.Robotiq85()
contact_offset = .002
grasp_info_list = []
for i, cp in enumerate(contact_pairs):
print(f"{i} of {len(contact_pairs)} done!")
contact_p0, contact_n0 = cp[0]
contact_p1, contact_n1 = cp[1]
contact_center = (contact_p0 + contact_p1) / 2
jaw_width = np.linalg.norm(contact_p0 - contact_p1) + contact_offset * 2
if jaw_width > gripper_s.jawwidth_rng[1]:
continue
hndy = contact_n0
hndz = rm.orthogonal_vector(contact_n0)
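    # hndy (the contact normal) doubles as the jaw-closing y axis and as the
    # rotation axis for grasp sampling below; hndz is presumably just an
    # arbitrary orthogonal vector used as the initial approaching direction.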
grasp_info_list += gu.define_grasp_with_rotation(gripper_s,
object_bunny,
gl_jaw_center_pos=contact_center,
gl_jaw_center_z=hndz,
gl_jaw_center_y=hndy,
jaw_width=jaw_width,
gl_rotation_ax=hndy,
rotation_interval=math.radians(30),
toggle_flip=True)
for grasp_info in grasp_info_list:
aw_width, gl_jaw_center, hnd_pos, hnd_rotmat = grasp_info
gripper_s.fix_to(hnd_pos, hnd_rotmat)
gripper_s.jaw_to(aw_width)
gripper_s.gen_meshmodel().attach_to(base)
base.run()
|
[
"modeling.geometric_model.gen_arrow",
"math.radians",
"modeling.collision_model.CollisionModel",
"basis.robot_math.orthogonal_vector",
"modeling.geometric_model.gen_frame",
"robot_sim.end_effectors.gripper.robotiq85.robotiq85.Robotiq85",
"modeling.geometric_model.gen_sphere",
"numpy.linalg.norm",
"visualization.panda.world.World",
"basis.robot_math.get_rgba_from_cmap"
] |
[((305, 360), 'visualization.panda.world.World', 'wd.World', ([], {'cam_pos': '[0.3, 0.3, 0.3]', 'lookat_pos': '[0, 0, 0]'}), '(cam_pos=[0.3, 0.3, 0.3], lookat_pos=[0, 0, 0])\n', (313, 360), True, 'import visualization.panda.world as wd\n'), ((440, 481), 'modeling.collision_model.CollisionModel', 'cm.CollisionModel', (['"""objects/bunnysim.stl"""'], {}), "('objects/bunnysim.stl')\n", (457, 481), True, 'import modeling.collision_model as cm\n'), ((2011, 2028), 'robot_sim.end_effectors.gripper.robotiq85.robotiq85.Robotiq85', 'rtq85.Robotiq85', ([], {}), '()\n', (2026, 2028), True, 'import robot_sim.end_effectors.gripper.robotiq85.robotiq85 as rtq85\n'), ((1319, 1343), 'basis.robot_math.get_rgba_from_cmap', 'rm.get_rgba_from_cmap', (['i'], {}), '(i)\n', (1340, 1343), True, 'import basis.robot_math as rm\n'), ((2453, 2485), 'basis.robot_math.orthogonal_vector', 'rm.orthogonal_vector', (['contact_n0'], {}), '(contact_n0)\n', (2473, 2485), True, 'import basis.robot_math as rm\n'), ((358, 401), 'modeling.geometric_model.gen_frame', 'gm.gen_frame', ([], {'length': '(0.05)', 'thickness': '(0.0021)'}), '(length=0.05, thickness=0.0021)\n', (370, 401), True, 'import modeling.geometric_model as gm\n'), ((2296, 2335), 'numpy.linalg.norm', 'np.linalg.norm', (['(contact_p0 - contact_p1)'], {}), '(contact_p0 - contact_p1)\n', (2310, 2335), True, 'import numpy as np\n'), ((1348, 1398), 'modeling.geometric_model.gen_sphere', 'gm.gen_sphere', (['contact_p0'], {'radius': '(0.002)', 'rgba': 'rgba'}), '(contact_p0, radius=0.002, rgba=rgba)\n', (1361, 1398), True, 'import modeling.geometric_model as gm\n'), ((1418, 1507), 'modeling.geometric_model.gen_arrow', 'gm.gen_arrow', (['contact_p0', '(contact_p0 + contact_n0 * 0.01)'], {'thickness': '(0.0012)', 'rgba': 'rgba'}), '(contact_p0, contact_p0 + contact_n0 * 0.01, thickness=0.0012,\n rgba=rgba)\n', (1430, 1507), True, 'import modeling.geometric_model as gm\n'), ((1623, 1673), 'modeling.geometric_model.gen_sphere', 'gm.gen_sphere', (['contact_p1'], {'radius': '(0.002)', 'rgba': 'rgba'}), '(contact_p1, radius=0.002, rgba=rgba)\n', (1636, 1673), True, 'import modeling.geometric_model as gm\n'), ((1784, 1873), 'modeling.geometric_model.gen_arrow', 'gm.gen_arrow', (['contact_p1', '(contact_p1 + contact_n1 * 0.01)'], {'thickness': '(0.0012)', 'rgba': 'rgba'}), '(contact_p1, contact_p1 + contact_n1 * 0.01, thickness=0.0012,\n rgba=rgba)\n', (1796, 1873), True, 'import modeling.geometric_model as gm\n'), ((3073, 3089), 'math.radians', 'math.radians', (['(30)'], {}), '(30)\n', (3085, 3089), False, 'import math\n')]
|
# Copyright 2021 Adobe
# All Rights Reserved.
# NOTICE: Adobe permits you to use, modify, and distribute this file in
# accordance with the terms of the Adobe license agreement accompanying
# it.
import random
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib
# matplotlib.use('TkAgg')
import albumentations as A
from skimage import data
import os
from copy import deepcopy
import random
import time
from PIL import Image
from skimage.color import label2rgb
# import beacon_aug as BA
from . import properties
''' flatten the pipeline tree'''
def extract_single_operation(augPipeline):
def flatten(dict, flatten_ls=[]):
'''use DFS to unfold the operations'''
for operation in dict["transforms"]: # "OneOf" or "OneOrOther", etc
class_name = operation['__class_fullname__']
if "." in class_name:
if operation['__class_fullname__'].split(".")[-2] == "composition":
flatten(operation, flatten_ls)
continue
flatten_ls.append(operation)
return flatten_ls
transform_dict = A.to_dict(augPipeline)
flatten_ls = flatten(transform_dict["transform"])
return [{'__version__': transform_dict['__version__'], 'transform':opr} for opr in flatten_ls]
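# Hypothetical illustration (not from the original docs): calling
# extract_single_operation(A.Compose([A.OneOf([A.Blur(), A.Flip()])]))
# returns two single-transform pipeline dicts, one wrapping Blur and one
# wrapping Flip, each tagged with the albumentations version.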
def screenshot_pipeline(augPipeline, image, save_fig_path=None):
    ''' Visualize an augmentation pipeline by displaying the lower- and upper-limit case of every parameter
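    e.g.
    ----
    # hypothetical usage, mirroring the __main__ block at the end of this file
    pipeline = A.Compose([A.RandomCrop(256, 256), A.HorizontalFlip(p=0.5)])
    fig = screenshot_pipeline(pipeline, image, save_fig_path="tmp/")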
'''
# get the flattened operator sequence avoiding hierarchical structure
single_operation_ls = extract_single_operation(augPipeline)
numOfOperation = len(single_operation_ls)
fig, axs = plt.subplots(numOfOperation, 3,
figsize=(6, 2*numOfOperation),
constrained_layout=True)
axs[0, 1].set_title("Lower Limit")
axs[0, 2].set_title("Upper Limit")
for i, single_operation in enumerate(single_operation_ls):
# Extract the upper and lower limit
transform_name = single_operation["transform"]['__class_fullname__'].split(".")[-1]
# deep copy to avoid pointing save location in dict
lowerAndUpper = [single_operation, deepcopy(single_operation)]
limit_para_name = None
# Extract all the limit parameters
for para in single_operation["transform"]:
if para == "p": # change prob to 1 to make it always happen
lowerAndUpper[0]["transform"][para] = 1
lowerAndUpper[1]["transform"][para] = 1
if "limit" in para:
limit_para_name = para
original_values = list(single_operation["transform"][para])
lowerAndUpper[0]["transform"][para] = [original_values[0]]*2
lowerAndUpper[1]["transform"][para] = [original_values[1]]*2
# plot
for lu in range(2): # lower or upper limit
lu_transform = A.from_dict(lowerAndUpper[lu])
axs[i, lu+1].imshow(lu_transform(image=image)["image"])
axs[i, lu+1].axis("off")
if limit_para_name:
axs[i, 0].text(0.15, 0.5, transform_name+"\n" + limit_para_name+":" +
str(lowerAndUpper[0]["transform"][limit_para_name][0]) + "," +
str(lowerAndUpper[1]["transform"][limit_para_name][1]), dict(size=10))
else:
axs[i, 0].text(0.15, 0.5, transform_name, dict(size=10))
axs[i, 0].axis("off")
if save_fig_path:
figname = os.path.join(save_fig_path, "aug_pipeline-screenshot.png")
print("\n...screenshot figure save as : ", figname)
plt.savefig(figname)
return fig
def screenshot_library(BA_operator, image_data, save_fig_path=None, individual_fig=False, **kwargs):
    ''' Visualize the augmentation result in comparison to all available libraries
e.g.
----
import beacon_aug as BA
from beacon_aug import screenshot
fig, __ = BA.screenshot.screenshot_library(BA.Brightness(), image_data=image)
fig.show()
'''
avail_libraries = BA_operator(**kwargs).avail_libraries
numOfLibraries = len(avail_libraries)
fig, axs = plt.subplots(2, 1 + numOfLibraries,
figsize=(4*numOfLibraries, 4),
constrained_layout=True)
fig.suptitle("beacon_aug."+BA_operator.__name__ + " with " +
str(kwargs)) # or plt.suptitle('Main title')
axs[0][0].imshow(image_data)
axs[0][0].set_title("Raw")
axs[1][0].text(0.3, 0.5, "Difference to\n" + "raw")
axs[1][0].axis("off")
attributes_result = {"runtime": {}, "differentiable": {}}
# axs[1][0].text(0.3, 0.5, "Sanity Check:\n p=0 ->", dict(size=10))
for i, library in enumerate(avail_libraries):
t_before = time.time()
op = BA_operator(always_apply=False, p=1, library=library, **kwargs)
image_auged = op(image=image_data)["image"]
t_after = time.time()
runtime = t_after - t_before
image_auged_vis = image_auged
attributes_result["runtime"][library] = runtime
attributes_result["differentiable"][library] = properties.isOpDifferentiable(op)
axs[0][1+i].set_title(library + ":" + '{0:.1f}'.format(runtime*1000) + " (ms)")
axs[0][1+i].imshow(image_auged)
# display the difference of original to augmented images
if image_auged.shape == image_data.shape:
axs[1][1+i].imshow(image_auged - image_data)
if save_fig_path and individual_fig == True:
img_name = os.path.join(save_fig_path, BA_operator.__name__+"-" + library+".jpeg")
if os.path.isfile(img_name):
print("\n...screenshot individual figure already existed as : ", img_name)
else:
                if image_auged.min() < 0:  # normalized case: rescale to [0, 1] before saving
image_auged = image_auged - image_auged.min()
image_auged = image_auged/image_auged.max()
print("@@@@@@@", image_auged.min())
plt.imsave(img_name, image_auged)
print("\n...screenshot individual figure save as : ", img_name)
fig.subplots_adjust(wspace=0)
if save_fig_path and individual_fig == False:
fig_name = os.path.join(save_fig_path, BA_operator.__name__+"aug_library-screenshot.png")
print("\n...screenshot figure save as : ", fig_name)
plt.savefig(fig_name)
return fig, attributes_result
def visualize_bboxes(img, bboxes, color=(255, 0, 0), thickness=2, **kwargs):
'''
color = BOX_COLOR (BOX_COLOR = (255, 0, 0) # Red
'''
image = img.copy()
for bbox in bboxes:
# x_min, y_min, w, h = bbox
if len(bbox) == 5:
bbox = bbox[:4] # the last one is label
x_min, y_min, x_max, y_max = map(int, bbox) # need to make sure bbox is integer
# x_min, x_max, y_min, y_max = int(x_min), int(x_min + w), int(y_min), int(y_min + h)
img = cv2.rectangle(image, (x_min, y_min), (x_max, y_max), color=color, thickness=thickness)
return image
def visualize_kps(img, kps, color=(0, 255, 0), key_point_diameter=2, **kwargs):
'''
'''
image = img.copy()
for kp in kps:
x, y = kp
image = cv2.circle(image, (int(x), int(y)), key_point_diameter, color, -1)
return image
def visualize_titles(img, bbox, title, color=(255, 0, 0), thickness=2, font_thickness=2, font_scale=0.35, **kwargs):
x_min, y_min, x_max, y_max = map(int, bbox) # x_min, y_min, w, h = bbox
# x_min, x_max, y_min, y_max = int(x_min), int(x_min + w), int(y_min), int(y_min + h)
((text_width, text_height), _) = cv2.getTextSize(
title, cv2.FONT_HERSHEY_SIMPLEX, font_scale, font_thickness)
cv2.rectangle(img, (x_min, y_min - int(1.3 * text_height)),
(x_min + text_width, y_min), color=(255, 0, 0))
cv2.putText(img, title, (x_min, y_min - int(0.3 * text_height)), cv2.FONT_HERSHEY_SIMPLEX, font_scale, (255, 255, 255),
font_thickness, lineType=cv2.LINE_AA)
return img
def visualize_targets(image, mask=None, bboxes=None, keypoints=None, image0=None):
''' Stack all the targets '''
target_list = []
if image.ndim == 2:
image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
target_list.append(image.copy())
if image0 is not None:
if image0.ndim == 2:
image0 = cv2.cvtColor(image0, cv2.COLOR_GRAY2RGB)
target_list.append(image0)
if mask is not None:
target_list.append(cv2.cvtColor((mask*255).astype('uint8'), cv2.COLOR_GRAY2RGB))
if bboxes is not None:
target_list.append(visualize_bboxes(image, bboxes, thickness=10))
if keypoints is not None:
target_list.append(visualize_kps(image, keypoints, key_point_diameter=15))
return np.hstack(target_list)
def augment_and_show(aug, image, mask=None, bboxes=[], keypoints=[], categories=[], category_id_to_name=[], filename=None,
font_scale_orig=0.35, font_scale_aug=0.35, key_point_diameter=15,
show_title=True, **kwargs):
"""
Use from: https://albumentations.ai/docs/examples/showcase/
visualize the image,(mask), (bbox),(kp) superimposed result before and after augmentation
Args:
        aug: augmentation pipeline
image: single image
mask: original mask
bbox: original bounding boxes
keypoints: original keypoints
output:
augmented: augmented image components
        f: matplotlib figure with the visualization
"""
if mask is None:
augmented = aug(image=image, bboxes=bboxes,
keypoints=keypoints, category_id=categories)
else:
augmented = aug(image=image, mask=mask, bboxes=bboxes,
keypoints=keypoints, category_id=categories)
# image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# image_aug = cv2.cvtColor(augmented['image'], cv2.COLOR_BGR2RGB)
image_aug = augmented['image']
visualize_bboxes(image, bboxes, **kwargs)
visualize_bboxes(image_aug, augmented['bboxes'], **kwargs)
visualize_kps(image, keypoints, **kwargs)
visualize_kps(image, augmented["keypoints"], **kwargs)
if show_title:
for bbox, cat_id in zip(bboxes, categories):
visualize_titles(
image, bbox, category_id_to_name[cat_id], font_scale=font_scale_orig, **kwargs)
for bbox, cat_id in zip(augmented['bboxes'], augmented['category_id']):
visualize_titles(
image_aug, bbox, category_id_to_name[cat_id], font_scale=font_scale_aug, **kwargs)
if mask is None:
f, ax = plt.subplots(1, 2, figsize=(16, 8))
ax[0].imshow(image)
ax[0].set_title('Original image')
ax[1].imshow(image_aug)
ax[1].set_title('Augmented image')
else:
f, ax = plt.subplots(2, 2, figsize=(16, 16))
if len(mask.shape) != 3:
mask = label2rgb(mask, bg_label=0)
mask_aug = label2rgb(augmented['mask'], bg_label=0)
else:
import pdb
pdb.set_trace()
mask = cv2.cvtColor(mask, cv2.COLOR_BGR2RGB)
mask_aug = cv2.cvtColor(augmented['mask'], cv2.COLOR_BGR2RGB)
ax[0, 0].imshow(image)
ax[0, 0].set_title('Original image')
ax[0, 1].imshow(image_aug)
ax[0, 1].set_title('Augmented image')
ax[1, 0].imshow(mask, interpolation='nearest')
ax[1, 0].set_title('Original mask')
ax[1, 1].imshow(mask_aug, interpolation='nearest')
ax[1, 1].set_title('Augmented mask')
f.tight_layout()
if filename is not None:
f.savefig(filename)
return augmented, f
if __name__ == "__main__":
# Load an example image (uint8, 128x128x3).
image = data.astronaut()
# Example of an augmentation pipeline
augPipeline = A.Compose([
A.RandomCrop(256, 256),
A.OneOf([A.RGBShift(),
A.HueSaturationValue()])])
os.makedirs("tmp", exist_ok=True)
screenshot_pipeline(augPipeline, image, save_fig_path="tmp/")
|
[
"skimage.data.astronaut",
"os.path.isfile",
"matplotlib.pyplot.imsave",
"cv2.rectangle",
"os.path.join",
"albumentations.RGBShift",
"albumentations.from_dict",
"cv2.cvtColor",
"matplotlib.pyplot.subplots",
"albumentations.to_dict",
"copy.deepcopy",
"numpy.hstack",
"os.makedirs",
"skimage.color.label2rgb",
"cv2.getTextSize",
"albumentations.HueSaturationValue",
"time.time",
"pdb.set_trace",
"albumentations.RandomCrop",
"matplotlib.pyplot.savefig"
] |
[((1124, 1146), 'albumentations.to_dict', 'A.to_dict', (['augPipeline'], {}), '(augPipeline)\n', (1133, 1146), True, 'import albumentations as A\n'), ((1672, 1765), 'matplotlib.pyplot.subplots', 'plt.subplots', (['numOfOperation', '(3)'], {'figsize': '(6, 2 * numOfOperation)', 'constrained_layout': '(True)'}), '(numOfOperation, 3, figsize=(6, 2 * numOfOperation),\n constrained_layout=True)\n', (1684, 1765), True, 'import matplotlib.pyplot as plt\n'), ((4204, 4301), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1 + numOfLibraries)'], {'figsize': '(4 * numOfLibraries, 4)', 'constrained_layout': '(True)'}), '(2, 1 + numOfLibraries, figsize=(4 * numOfLibraries, 4),\n constrained_layout=True)\n', (4216, 4301), True, 'import matplotlib.pyplot as plt\n'), ((7725, 7801), 'cv2.getTextSize', 'cv2.getTextSize', (['title', 'cv2.FONT_HERSHEY_SIMPLEX', 'font_scale', 'font_thickness'], {}), '(title, cv2.FONT_HERSHEY_SIMPLEX, font_scale, font_thickness)\n', (7740, 7801), False, 'import cv2\n'), ((8886, 8908), 'numpy.hstack', 'np.hstack', (['target_list'], {}), '(target_list)\n', (8895, 8908), True, 'import numpy as np\n'), ((11857, 11873), 'skimage.data.astronaut', 'data.astronaut', ([], {}), '()\n', (11871, 11873), False, 'from skimage import data\n'), ((12058, 12091), 'os.makedirs', 'os.makedirs', (['"""tmp"""'], {'exist_ok': '(True)'}), "('tmp', exist_ok=True)\n", (12069, 12091), False, 'import os\n'), ((3546, 3604), 'os.path.join', 'os.path.join', (['save_fig_path', '"""aug_pipeline-screenshot.png"""'], {}), "(save_fig_path, 'aug_pipeline-screenshot.png')\n", (3558, 3604), False, 'import os\n'), ((3673, 3693), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figname'], {}), '(figname)\n', (3684, 3693), True, 'import matplotlib.pyplot as plt\n'), ((4832, 4843), 'time.time', 'time.time', ([], {}), '()\n', (4841, 4843), False, 'import time\n'), ((4992, 5003), 'time.time', 'time.time', ([], {}), '()\n', (5001, 5003), False, 'import time\n'), ((6321, 6406), 'os.path.join', 'os.path.join', (['save_fig_path', "(BA_operator.__name__ + 'aug_library-screenshot.png')"], {}), "(save_fig_path, BA_operator.__name__ + 'aug_library-screenshot.png'\n )\n", (6333, 6406), False, 'import os\n'), ((6469, 6490), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fig_name'], {}), '(fig_name)\n', (6480, 6490), True, 'import matplotlib.pyplot as plt\n'), ((7038, 7129), 'cv2.rectangle', 'cv2.rectangle', (['image', '(x_min, y_min)', '(x_max, y_max)'], {'color': 'color', 'thickness': 'thickness'}), '(image, (x_min, y_min), (x_max, y_max), color=color, thickness\n =thickness)\n', (7051, 7129), False, 'import cv2\n'), ((8315, 8354), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_GRAY2RGB'], {}), '(image, cv2.COLOR_GRAY2RGB)\n', (8327, 8354), False, 'import cv2\n'), ((10712, 10747), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(16, 8)'}), '(1, 2, figsize=(16, 8))\n', (10724, 10747), True, 'import matplotlib.pyplot as plt\n'), ((10921, 10957), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(16, 16)'}), '(2, 2, figsize=(16, 16))\n', (10933, 10957), True, 'import matplotlib.pyplot as plt\n'), ((2198, 2224), 'copy.deepcopy', 'deepcopy', (['single_operation'], {}), '(single_operation)\n', (2206, 2224), False, 'from copy import deepcopy\n'), ((2958, 2988), 'albumentations.from_dict', 'A.from_dict', (['lowerAndUpper[lu]'], {}), '(lowerAndUpper[lu])\n', (2969, 2988), True, 'import albumentations as A\n'), ((5607, 5682), 'os.path.join', 'os.path.join', 
(['save_fig_path', "(BA_operator.__name__ + '-' + library + '.jpeg')"], {}), "(save_fig_path, BA_operator.__name__ + '-' + library + '.jpeg')\n", (5619, 5682), False, 'import os\n'), ((5694, 5718), 'os.path.isfile', 'os.path.isfile', (['img_name'], {}), '(img_name)\n', (5708, 5718), False, 'import os\n'), ((8470, 8510), 'cv2.cvtColor', 'cv2.cvtColor', (['image0', 'cv2.COLOR_GRAY2RGB'], {}), '(image0, cv2.COLOR_GRAY2RGB)\n', (8482, 8510), False, 'import cv2\n'), ((11011, 11038), 'skimage.color.label2rgb', 'label2rgb', (['mask'], {'bg_label': '(0)'}), '(mask, bg_label=0)\n', (11020, 11038), False, 'from skimage.color import label2rgb\n'), ((11062, 11102), 'skimage.color.label2rgb', 'label2rgb', (["augmented['mask']"], {'bg_label': '(0)'}), "(augmented['mask'], bg_label=0)\n", (11071, 11102), False, 'from skimage.color import label2rgb\n'), ((11152, 11167), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (11165, 11167), False, 'import pdb\n'), ((11187, 11224), 'cv2.cvtColor', 'cv2.cvtColor', (['mask', 'cv2.COLOR_BGR2RGB'], {}), '(mask, cv2.COLOR_BGR2RGB)\n', (11199, 11224), False, 'import cv2\n'), ((11248, 11298), 'cv2.cvtColor', 'cv2.cvtColor', (["augmented['mask']", 'cv2.COLOR_BGR2RGB'], {}), "(augmented['mask'], cv2.COLOR_BGR2RGB)\n", (11260, 11298), False, 'import cv2\n'), ((11955, 11977), 'albumentations.RandomCrop', 'A.RandomCrop', (['(256)', '(256)'], {}), '(256, 256)\n', (11967, 11977), True, 'import albumentations as A\n'), ((6103, 6136), 'matplotlib.pyplot.imsave', 'plt.imsave', (['img_name', 'image_auged'], {}), '(img_name, image_auged)\n', (6113, 6136), True, 'import matplotlib.pyplot as plt\n'), ((11996, 12008), 'albumentations.RGBShift', 'A.RGBShift', ([], {}), '()\n', (12006, 12008), True, 'import albumentations as A\n'), ((12026, 12048), 'albumentations.HueSaturationValue', 'A.HueSaturationValue', ([], {}), '()\n', (12046, 12048), True, 'import albumentations as A\n')]
|
import streamlit as st
import numpy as np
from tensorflow.keras.models import load_model
import librosa
import time
import matplotlib.pyplot as plt
def wav2mfcc(wave, sr=22050,n_mfcc=20, max_len=170):
'''wave is a np array'''
wave = np.asfortranarray(wave)
mfcc = librosa.feature.mfcc(wave, sr=sr, n_mfcc=n_mfcc)
# If maximum length exceeds mfcc lengths then pad the remaining ones
if (max_len > mfcc.shape[1]):
pad_width = max_len - mfcc.shape[1]
mfcc = np.pad(mfcc, pad_width=((0, 0), (0, pad_width)), mode='constant')
# Else cutoff the remaining parts
else:
mfcc = mfcc[:, :max_len]
return mfcc
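# e.g. with the defaults, a 4 s clip at sr=22050 yields ~173 MFCC frames, so
# wav2mfcc always returns a (n_mfcc, max_len) = (20, 170) array, padded or
# truncated along the time axis.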
def updateplot(wave,txt_output):
"""
update the plot with the wave file
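    Relies on the module-level `line`, `the_plot` and `text` objects that are
    created further below, in the branch for the second sidebar option.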
"""
line.set_ydata(wave)
the_plot.pyplot(plt)
text.set_text(txt_output)
# load the model from disk
model_path="models/"
cnn_model=load_model(model_path+'bal_cnn_model_accuracy_98.2_alpha_0.0001.h5')
#-------------------------------------------------
st.title('Firearm Alarm')
st.header('Listening for Firearms in Your Home')
##-----------------------------------------------------------------------------
path="data/external/"
audio_clip1='5-195710-A-10.wav' # ?
audio_clip2='2-121978-A-29.wav' #?
audio_clip3='T_17P.wav'
audio_dict={
'Audio clip 1':audio_clip1,
'Audio clip 2': audio_clip2,
'Audio clip 3': audio_clip3}
#-----------------------------------------------
# select a sidebar to navigate between different options of the app
options=['Test with some sample clips', 'Test with a youtube video']
page=st.sidebar.radio('Select an option',options)
st.sidebar.header('Firearm-Alarm Options')
st.sidebar.markdown('The first option will allow you to test firearm-alarm with some pre-recorded sound clips.')
st.sidebar.markdown('The second option will enable you to have firearm-alarm listen to a youtube clip: https://www.youtube.com/watch?v=1N_m3tsPyP0.')
#-----------------------------------------------
if page==options[0]: #The first option is selected
    st.text('The following sample audio clips can be used as input to the model.')
st.audio(path+audio_clip1)
st.text('This is audio clip 1.')
st.audio(path+audio_clip2)
st.text('This is audio clip 2.')
st.audio(path+audio_clip3)
option = st.selectbox('Select the clip you would like the model to analyze.',('Audio clip 1', 'Audio clip 2', 'Audio clip 3'))
st.write('You selected:', option)
if st.button('Analyze '+option):
wave, sr = librosa.load(path+audio_dict[option], mono=True, sr=22050)
mfcc=wav2mfcc(wave,sr=sr)
X_test = np.reshape(mfcc,(1, 20, 170, 1))
Y_predict=cnn_model.predict(X_test)
print(Y_predict)
if Y_predict.round()[0][0]==1 :
st.write("This doesn't sound like a firearm.")
if Y_predict.round()[0][0]==0:
st.write("This is a firearm! Contacting local authorities...")
else:
st.write('Click the button to analyze the audio clip.')
###############################################----------------------------------
elif page==options[1]: #if the second page is selected
st.header('Firearm Alarm in Action')
x = np.arange(0, 4,1/22050)
fig, ax=plt.subplots()
ax.set_ylim(-1, 1)
line, = ax.plot(x, np.zeros(len(x)),color='m',linewidth=2)
plt.xlabel('Time (s)')
plt.ylabel('Sound Wave')
the_plot = st.pyplot(plt)
text=plt.text(0,.8,'',fontsize=14)
sample='data/external/Real_life_gunshot_sound_effects.wav'
if st.button('See an example with Firearm Alarm'):
with st.spinner("Listening..."):
array,sr=librosa.load(sample)
tiempo=librosa.get_duration(array) #time in seconds
for t in range(0,int(tiempo),4):
wave, sr = librosa.load(sample, mono=True,offset=t,duration=4)
## run it through the model
mfcc=wav2mfcc(wave)
X_test = np.reshape(mfcc,(1, 20, 170, 1))
Y_predict=cnn_model.predict(X_test)
if Y_predict.round()[0][0]==1 :
txt_output='No firearm sound(s) detected'
# text.set_text('No firearm sounds detected')
if Y_predict.round()[0][0]==0:
txt_output='Firearm sound(s) detected!'
# text.set_text('Firearm sounds detected!')
updateplot(wave,txt_output)
time.sleep(3)
plt.show()
else:
st.write('Click the button to start listening.')
#-----------------------------------
|
[
"streamlit.title",
"numpy.arange",
"streamlit.sidebar.radio",
"librosa.feature.mfcc",
"librosa.get_duration",
"numpy.pad",
"streamlit.audio",
"streamlit.spinner",
"streamlit.text",
"streamlit.sidebar.markdown",
"streamlit.button",
"numpy.reshape",
"matplotlib.pyplot.subplots",
"streamlit.sidebar.header",
"tensorflow.keras.models.load_model",
"matplotlib.pyplot.show",
"streamlit.header",
"numpy.asfortranarray",
"time.sleep",
"matplotlib.pyplot.text",
"librosa.load",
"streamlit.pyplot",
"matplotlib.pyplot.ylabel",
"streamlit.write",
"streamlit.selectbox",
"matplotlib.pyplot.xlabel"
] |
[((889, 959), 'tensorflow.keras.models.load_model', 'load_model', (["(model_path + 'bal_cnn_model_accuracy_98.2_alpha_0.0001.h5')"], {}), "(model_path + 'bal_cnn_model_accuracy_98.2_alpha_0.0001.h5')\n", (899, 959), False, 'from tensorflow.keras.models import load_model\n'), ((1011, 1036), 'streamlit.title', 'st.title', (['"""Firearm Alarm"""'], {}), "('Firearm Alarm')\n", (1019, 1036), True, 'import streamlit as st\n'), ((1037, 1085), 'streamlit.header', 'st.header', (['"""Listening for Firearms in Your Home"""'], {}), "('Listening for Firearms in Your Home')\n", (1046, 1085), True, 'import streamlit as st\n'), ((1574, 1619), 'streamlit.sidebar.radio', 'st.sidebar.radio', (['"""Select an option"""', 'options'], {}), "('Select an option', options)\n", (1590, 1619), True, 'import streamlit as st\n'), ((1620, 1662), 'streamlit.sidebar.header', 'st.sidebar.header', (['"""Firearm-Alarm Options"""'], {}), "('Firearm-Alarm Options')\n", (1637, 1662), True, 'import streamlit as st\n'), ((1663, 1785), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""The first option will allow you to test firearm-alarm with some pre-recorded sound clips."""'], {}), "(\n 'The first option will allow you to test firearm-alarm with some pre-recorded sound clips.'\n )\n", (1682, 1785), True, 'import streamlit as st\n'), ((1776, 1935), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""The second option will enable you to have firearm-alarm listen to a youtube clip: https://www.youtube.com/watch?v=1N_m3tsPyP0."""'], {}), "(\n 'The second option will enable you to have firearm-alarm listen to a youtube clip: https://www.youtube.com/watch?v=1N_m3tsPyP0.'\n )\n", (1795, 1935), True, 'import streamlit as st\n'), ((244, 267), 'numpy.asfortranarray', 'np.asfortranarray', (['wave'], {}), '(wave)\n', (261, 267), True, 'import numpy as np\n'), ((279, 327), 'librosa.feature.mfcc', 'librosa.feature.mfcc', (['wave'], {'sr': 'sr', 'n_mfcc': 'n_mfcc'}), '(wave, sr=sr, n_mfcc=n_mfcc)\n', (299, 327), False, 'import librosa\n'), ((2031, 2131), 'streamlit.text', 'st.text', (['"""The following are a set of sample audio clips that can be input into the model."""'], {}), "(\n 'The following are a set of sample audio clips that can be input into the model.'\n )\n", (2038, 2131), True, 'import streamlit as st\n'), ((2128, 2156), 'streamlit.audio', 'st.audio', (['(path + audio_clip1)'], {}), '(path + audio_clip1)\n', (2136, 2156), True, 'import streamlit as st\n'), ((2161, 2193), 'streamlit.text', 'st.text', (['"""This is audio clip 1."""'], {}), "('This is audio clip 1.')\n", (2168, 2193), True, 'import streamlit as st\n'), ((2199, 2227), 'streamlit.audio', 'st.audio', (['(path + audio_clip2)'], {}), '(path + audio_clip2)\n', (2207, 2227), True, 'import streamlit as st\n'), ((2231, 2263), 'streamlit.text', 'st.text', (['"""This is audio clip 2."""'], {}), "('This is audio clip 2.')\n", (2238, 2263), True, 'import streamlit as st\n'), ((2269, 2297), 'streamlit.audio', 'st.audio', (['(path + audio_clip3)'], {}), '(path + audio_clip3)\n', (2277, 2297), True, 'import streamlit as st\n'), ((2310, 2433), 'streamlit.selectbox', 'st.selectbox', (['"""Select the clip you would like the model to analyze."""', "('Audio clip 1', 'Audio clip 2', 'Audio clip 3')"], {}), "('Select the clip you would like the model to analyze.', (\n 'Audio clip 1', 'Audio clip 2', 'Audio clip 3'))\n", (2322, 2433), True, 'import streamlit as st\n'), ((2432, 2465), 'streamlit.write', 'st.write', (['"""You selected:"""', 'option'], {}), "('You selected:', 
option)\n", (2440, 2465), True, 'import streamlit as st\n'), ((2474, 2504), 'streamlit.button', 'st.button', (["('Analyze ' + option)"], {}), "('Analyze ' + option)\n", (2483, 2504), True, 'import streamlit as st\n'), ((495, 560), 'numpy.pad', 'np.pad', (['mfcc'], {'pad_width': '((0, 0), (0, pad_width))', 'mode': '"""constant"""'}), "(mfcc, pad_width=((0, 0), (0, pad_width)), mode='constant')\n", (501, 560), True, 'import numpy as np\n'), ((2523, 2583), 'librosa.load', 'librosa.load', (['(path + audio_dict[option])'], {'mono': '(True)', 'sr': '(22050)'}), '(path + audio_dict[option], mono=True, sr=22050)\n', (2535, 2583), False, 'import librosa\n'), ((2633, 2666), 'numpy.reshape', 'np.reshape', (['mfcc', '(1, 20, 170, 1)'], {}), '(mfcc, (1, 20, 170, 1))\n', (2643, 2666), True, 'import numpy as np\n'), ((2972, 3027), 'streamlit.write', 'st.write', (['"""Click the button to analyze the audio clip."""'], {}), "('Click the button to analyze the audio clip.')\n", (2980, 3027), True, 'import streamlit as st\n'), ((3171, 3207), 'streamlit.header', 'st.header', (['"""Firearm Alarm in Action"""'], {}), "('Firearm Alarm in Action')\n", (3180, 3207), True, 'import streamlit as st\n'), ((3217, 3243), 'numpy.arange', 'np.arange', (['(0)', '(4)', '(1 / 22050)'], {}), '(0, 4, 1 / 22050)\n', (3226, 3243), True, 'import numpy as np\n'), ((3253, 3267), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3265, 3267), True, 'import matplotlib.pyplot as plt\n'), ((3358, 3380), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (3368, 3380), True, 'import matplotlib.pyplot as plt\n'), ((3385, 3409), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Sound Wave"""'], {}), "('Sound Wave')\n", (3395, 3409), True, 'import matplotlib.pyplot as plt\n'), ((3425, 3439), 'streamlit.pyplot', 'st.pyplot', (['plt'], {}), '(plt)\n', (3434, 3439), True, 'import streamlit as st\n'), ((3449, 3482), 'matplotlib.pyplot.text', 'plt.text', (['(0)', '(0.8)', '""""""'], {'fontsize': '(14)'}), "(0, 0.8, '', fontsize=14)\n", (3457, 3482), True, 'import matplotlib.pyplot as plt\n'), ((3551, 3597), 'streamlit.button', 'st.button', (['"""See an example with Firearm Alarm"""'], {}), "('See an example with Firearm Alarm')\n", (3560, 3597), True, 'import streamlit as st\n'), ((2790, 2836), 'streamlit.write', 'st.write', (['"""This doesn\'t sound like a firearm."""'], {}), '("This doesn\'t sound like a firearm.")\n', (2798, 2836), True, 'import streamlit as st\n'), ((2890, 2952), 'streamlit.write', 'st.write', (['"""This is a firearm! Contacting local authorities..."""'], {}), "('This is a firearm! 
Contacting local authorities...')\n", (2898, 2952), True, 'import streamlit as st\n'), ((4532, 4580), 'streamlit.write', 'st.write', (['"""Click the button to start listening."""'], {}), "('Click the button to start listening.')\n", (4540, 4580), True, 'import streamlit as st\n'), ((3612, 3638), 'streamlit.spinner', 'st.spinner', (['"""Listening..."""'], {}), "('Listening...')\n", (3622, 3638), True, 'import streamlit as st\n'), ((3661, 3681), 'librosa.load', 'librosa.load', (['sample'], {}), '(sample)\n', (3673, 3681), False, 'import librosa\n'), ((3701, 3728), 'librosa.get_duration', 'librosa.get_duration', (['array'], {}), '(array)\n', (3721, 3728), False, 'import librosa\n'), ((3818, 3871), 'librosa.load', 'librosa.load', (['sample'], {'mono': '(True)', 'offset': 't', 'duration': '(4)'}), '(sample, mono=True, offset=t, duration=4)\n', (3830, 3871), False, 'import librosa\n'), ((3977, 4010), 'numpy.reshape', 'np.reshape', (['mfcc', '(1, 20, 170, 1)'], {}), '(mfcc, (1, 20, 170, 1))\n', (3987, 4010), True, 'import numpy as np\n'), ((4472, 4485), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (4482, 4485), False, 'import time\n'), ((4503, 4513), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4511, 4513), True, 'import matplotlib.pyplot as plt\n')]
|
"""
Extract MNI coordinates for all brain maps.
Created on Fri May 24 11:26:07 2019
@author: <NAME> <<EMAIL>>
"""
import mne
import numpy as np
from summarize_clusters_stc_AT import summarize_clusters_stc_AT
import csv
#%% for one-sample T-test whether ISCs are significant
results_path = '/media/cbru/SMEDY/results/ISCs_comp_against_0/'
fres = {'5.000000e-01-4Hz', '4-8Hz', '8-12Hz', '12-25Hz', '25-45Hz', '55-90Hz'}
condition = '_1' # 1 speech, 2 rest
win = '_613' #'_579' #
groups = {'con_', 'dys_'}
fsave_vertices = [np.arange(10242), np.arange(10242)]
for fre in fres:
for group in groups:
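        # each .npy holds the cluster-permutation output: T values, clusters, cluster p-values and H0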
T_obs, clusters, cluster_p_values, H0 = clu =\
np.load(results_path + 't_clu_' + group + fre + win + condition + '.npy')
stc_all_cluster_vis = summarize_clusters_stc_AT(clu,
vertices=fsave_vertices,
subject='fsaverage')
# find the max T value and vertex (clusters are all the same size)
max_T = stc_all_cluster_vis.data[:, 0].max()
max_vtx = np.where(stc_all_cluster_vis.data[:, 0] ==
stc_all_cluster_vis.data[:, 0].max())
p_cluster_threshold = 0.05
good_cluster_inds = np.where(cluster_p_values <
p_cluster_threshold)[0]
for ii in good_cluster_inds:
if np.isin(max_vtx, clusters[ii][1]):
clu_size = len(clusters[ii][1])
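        # vertices 0-10241 belong to the left hemisphere, 10242 and above to the right (fsaverage, ico-5)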
if max_vtx[0][0] > 10242:
hemi = 1 # rh
vtx = max_vtx[0][0] - 10242
else:
hemi = 0 # lh
vtx = max_vtx[0][0]
# transform to mni coordinates
mni = mne.vertex_to_mni(vtx, hemi, 'fsaverage')[0]
print(group, fre, clu_size, mni.astype(np.int64), round(max_T, 2))
#%% for ISC group differences
results_dir = '/media/cbru/SMEDY/results/dys_con_contrast/2020_02_redo_subject_perm/'
delta = (results_dir + 't_clu_tail1_5.000000e-01-4Hz_613_1.npy',
results_dir + 't_clu_tail-1_5.000000e-01-4Hz_613_1.npy')
theta = (results_dir + 't_clu_tail1_4-8Hz_613_1.npy',
results_dir + 't_clu_tail-1_4-8Hz_613_1.npy')
alpha = (results_dir + 't_clu_tail1_8-12Hz_613_1.npy',
results_dir + 't_clu_tail-1_8-12Hz_613_1.npy')
beta = (results_dir + 't_clu_tail1_12-25Hz_613_1.npy',
results_dir + 't_clu_tail-1_12-25Hz_613_1.npy')
gamma1 = (results_dir + 't_clu_tail1_25-45Hz_613_1.npy',
results_dir + 't_clu_tail-1_25-45Hz_613_1.npy')
gamma2 = (results_dir + 't_clu_tail1_55-90Hz_613_1.npy',
results_dir + 't_clu_tail-1_55-90Hz_613_1.npy')
all_bands = {delta, theta, alpha, beta, gamma1, gamma2}
#all_bands = {gamma1}
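# Bonferroni-correct the cluster threshold across the six frequency bands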
p_cluster_threshold = 0.05/6
with open(results_dir + 'mni_corrdinates_out.csv', mode='w') as file_out:
mni_out = csv.writer(file_out, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for band in all_bands:
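        # band[0] holds the positive-tail cluster results, band[1] the negative-tail results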
max_T = None
min_T = None
clu_size = None
stc_all_cluster_vis_pos = None
stc_all_cluster_vis_neg = None
stc_all_cluster_vis_both = None
clu = np.load(band[0])
T_obs_pos, clusters_pos, cluster_p_values_pos, H0_pos = clu
good_cluster_inds_pos = np.where(cluster_p_values_pos < p_cluster_threshold)[0]
if not good_cluster_inds_pos.any():
print('')
else:
stc_all_cluster_vis_pos = summarize_clusters_stc_AT(clu, p_thresh=p_cluster_threshold,
tstep=1e-3, tmin=0,
subject='fsaverage',
vertices=None)
clu = np.load(band[1])
T_obs_neg, clusters_neg, cluster_p_values_neg, H0_neg = clu
good_cluster_inds_neg = np.where(cluster_p_values_neg < p_cluster_threshold)[0]
if not good_cluster_inds_neg.any():
print('')
else:
stc_all_cluster_vis_neg = summarize_clusters_stc_AT(clu, p_thresh=p_cluster_threshold,
tstep=1e-3, tmin=0,
subject='fsaverage',
vertices=None)
# combine positive and negative clusters to one source estimate file
if stc_all_cluster_vis_pos is not None and stc_all_cluster_vis_neg is not None:
stc_all_cluster_vis_both = stc_all_cluster_vis_pos.copy()
stc_all_cluster_vis_both.data[:, 0] =\
stc_all_cluster_vis_pos.data[:, 0] + stc_all_cluster_vis_neg.data[:, 0]
elif stc_all_cluster_vis_pos is None and stc_all_cluster_vis_neg is not None:
stc_all_cluster_vis_both = stc_all_cluster_vis_neg.copy()
stc_all_cluster_vis_both.data[:, 0] = stc_all_cluster_vis_neg.data[:, 0]
elif stc_all_cluster_vis_neg is None and stc_all_cluster_vis_pos is not None:
stc_all_cluster_vis_both = stc_all_cluster_vis_pos.copy()
stc_all_cluster_vis_both.data[:, 0] = stc_all_cluster_vis_pos.data[:, 0]
else:
print('Error! There is no data for negative and positive contrasts.')
# find the max T value and vertex, extreme might be negative or positive
# find largest cluster first
# pos
out = []
if good_cluster_inds_pos.any():
for j in range(0, len(good_cluster_inds_pos)):
inds_t, inds_v = [(clusters_pos[cluster_ind]) for ii, cluster_ind in
enumerate(good_cluster_inds_pos)][j]
out.append(len(inds_v)) # max cluster is xxth
out2 = out.copy()
out2.sort(reverse=True)
id_max_pos = out.index(out2[0])
max_T = stc_all_cluster_vis_pos.data[:, id_max_pos+1].max()
# neg
out = []
if good_cluster_inds_neg.any():
for j in range(0, len(good_cluster_inds_neg)):
inds_t, inds_v = [(clusters_neg[cluster_ind]) for ii, cluster_ind in
enumerate(good_cluster_inds_neg)][j]
out.append(len(inds_v)) # max cluster is xxth
out2 = out.copy()
out2.sort(reverse=True)
id_max_neg = out.index(out2[0])
min_T = stc_all_cluster_vis_neg.data[:, id_max_neg+1].min()
if min_T is None and max_T is None:
print('No pos nor neg clusters')
elif min_T is None: # take only positive clusters
T = max_T
max_vtx = np.where(stc_all_cluster_vis_pos.data[:, id_max_pos+1] ==
stc_all_cluster_vis_pos.data[:, id_max_pos+1].max())
good_cluster_inds = np.where(cluster_p_values_pos < p_cluster_threshold)[0]
for ii in good_cluster_inds:
if np.isin(max_vtx, clusters_pos[ii][1]):
clu_size = len(clusters_pos[ii][1])
elif max_T is None: # take only negative clusters
T = min_T
max_vtx = np.where(stc_all_cluster_vis_neg.data[:, id_max_neg+1] ==
stc_all_cluster_vis_neg.data[:, id_max_neg+1].min())
good_cluster_inds = np.where(cluster_p_values_neg < p_cluster_threshold)[0]
for ii in good_cluster_inds:
if np.isin(max_vtx, clusters_neg[ii][1]):
clu_size = len(clusters_neg[ii][1])
elif abs(max_T) > abs(min_T): # take only positive clusters
T = max_T
max_vtx = np.where(stc_all_cluster_vis_pos.data[:, id_max_pos+1] ==
stc_all_cluster_vis_pos.data[:, id_max_pos+1].max())
good_cluster_inds = np.where(cluster_p_values_pos < p_cluster_threshold)[0]
for ii in good_cluster_inds:
if np.isin(max_vtx, clusters_pos[ii][1]):
clu_size = len(clusters_pos[ii][1])
elif abs(max_T) < abs(min_T): # take only negative clusters
T = min_T
max_vtx = np.where(stc_all_cluster_vis_neg.data[:, id_max_neg+1] ==
stc_all_cluster_vis_neg.data[:, id_max_neg+1].min())
good_cluster_inds = np.where(cluster_p_values_neg < p_cluster_threshold)[0]
for ii in good_cluster_inds:
if np.isin(max_vtx, clusters_neg[ii][1]):
clu_size = len(clusters_neg[ii][1])
else:
print('Something went wrong')
if max_vtx[0][0] > 10242:
hemi = 1 # rh
vtx = max_vtx[0][0] - 10242
else:
hemi = 0 # lh
vtx = max_vtx[0][0]
# transform to mni coordinates
mni = mne.vertex_to_mni(vtx, hemi, 'fsaverage')[0]
print(band, clu_size, mni.astype(np.int64), round(T, 2))
mni_out.writerow([band[0], clu_size, mni.astype(np.str), round(T, 2)])
#%% for Mantel regressions
results_path = '/media/cbru/SMEDY/results/mantel_correlations/2019_05_simple_model/'
clu_files = [
results_path + 'phon_clu_5.000000e-01-4Hz_613_1.npy',
results_path + 'phon_clu_4-8Hz_613_1.npy',
results_path + 'phon_clu_8-12Hz_613_1.npy',
results_path + 'phon_clu_12-25Hz_613_1.npy',
results_path + 'phon_clu_25-45Hz_613_1.npy',
results_path + 'phon_clu_55-90Hz_613_1.npy',
results_path + 'read_clu_5.000000e-01-4Hz_613_1.npy',
results_path + 'read_clu_4-8Hz_613_1.npy',
results_path + 'read_clu_8-12Hz_613_1.npy',
results_path + 'read_clu_12-25Hz_613_1.npy',
results_path + 'read_clu_25-45Hz_613_1.npy',
results_path + 'mem_clu_5.000000e-01-4Hz_613_1.npy',
results_path + 'iq_clu_5.000000e-01-4Hz_613_1.npy'
]
cutoff = 25
with open(results_path + 'mni_corrdinates_out.csv', mode='w') as file_out:
mni_out = csv.writer(file_out, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for file in clu_files:
print(file)
# load clu
clu = np.load(file)
r_obs, clusters = clu
fsave_vertices = [np.arange(10242), np.arange(10242)]
# thresholding by cluster length
good_cluster_inds = []
clusters2 = []
for ii in range(0, len(clusters)):
if len(clusters[ii][1]) > (cutoff-1):
good_cluster_inds.append(ii)
clusters2.append(clusters[ii])
        clu2 = r_obs, clusters2, np.zeros(len(clusters2)), None  # None fills the unused H0 slot of the clu tuple
if not clusters2:
print('All clusters are smaller than the minimal length.')
else:
# Investigating the significant effects / Find max cluster
out = []
for j in range(0, len(good_cluster_inds)):
inds_t, inds_v = [(clusters[cluster_ind]) for ii, cluster_ind in
enumerate(good_cluster_inds)][j]
out.append(len(inds_v)) # max cluster is xxth
out2 = out.copy()
out2.sort(reverse=True)
id_max = out.index(out2[0])
clusters[good_cluster_inds[id_max]]
stc_all_cluster_vis = summarize_clusters_stc_AT(clu2, p_thresh=0.05,
tstep=1e-3, tmin=0,
subject='fsaverage',
vertices=fsave_vertices)
max_R = np.absolute(stc_all_cluster_vis.data[:, id_max+1]).max()
R_max = stc_all_cluster_vis.data[:, id_max+1].max()
R_min = stc_all_cluster_vis.data[:, id_max+1].min()
if np.absolute(R_max)<np.absolute(R_min):
max_R = max_R*-1
max_vtx = np.where(np.absolute(stc_all_cluster_vis.data[:, id_max+1]) ==
np.absolute(stc_all_cluster_vis.data[:, id_max+1]).max())
for ii in good_cluster_inds:
if np.isin(max_vtx, clusters[ii][1]):
clu_size = len(clusters[ii][1])
if max_vtx[0][0] > 10242:
hemi = 1 # rh
vtx = max_vtx[0][0] - 10242
else:
hemi = 0 # lh
vtx = max_vtx[0][0]
# transform to mni coordinates
mni = mne.vertex_to_mni(vtx, hemi, 'fsaverage')[0]
print(file, clu_size, mni.astype(np.int64), round(max_R, 2))
mni_out.writerow([file, clu_size, mni.astype(np.str), round(max_R, 2)])
|
[
"numpy.isin",
"numpy.load",
"numpy.absolute",
"csv.writer",
"summarize_clusters_stc_AT.summarize_clusters_stc_AT",
"mne.vertex_to_mni",
"numpy.where",
"numpy.arange"
] |
[((524, 540), 'numpy.arange', 'np.arange', (['(10242)'], {}), '(10242)\n', (533, 540), True, 'import numpy as np\n'), ((542, 558), 'numpy.arange', 'np.arange', (['(10242)'], {}), '(10242)\n', (551, 558), True, 'import numpy as np\n'), ((2864, 2941), 'csv.writer', 'csv.writer', (['file_out'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(file_out, delimiter=\',\', quotechar=\'"\', quoting=csv.QUOTE_MINIMAL)\n', (2874, 2941), False, 'import csv\n'), ((9996, 10073), 'csv.writer', 'csv.writer', (['file_out'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(file_out, delimiter=\',\', quotechar=\'"\', quoting=csv.QUOTE_MINIMAL)\n', (10006, 10073), False, 'import csv\n'), ((670, 743), 'numpy.load', 'np.load', (["(results_path + 't_clu_' + group + fre + win + condition + '.npy')"], {}), "(results_path + 't_clu_' + group + fre + win + condition + '.npy')\n", (677, 743), True, 'import numpy as np\n'), ((783, 859), 'summarize_clusters_stc_AT.summarize_clusters_stc_AT', 'summarize_clusters_stc_AT', (['clu'], {'vertices': 'fsave_vertices', 'subject': '"""fsaverage"""'}), "(clu, vertices=fsave_vertices, subject='fsaverage')\n", (808, 859), False, 'from summarize_clusters_stc_AT import summarize_clusters_stc_AT\n'), ((3167, 3183), 'numpy.load', 'np.load', (['band[0]'], {}), '(band[0])\n', (3174, 3183), True, 'import numpy as np\n'), ((3781, 3797), 'numpy.load', 'np.load', (['band[1]'], {}), '(band[1])\n', (3788, 3797), True, 'import numpy as np\n'), ((10154, 10167), 'numpy.load', 'np.load', (['file'], {}), '(file)\n', (10161, 10167), True, 'import numpy as np\n'), ((1289, 1337), 'numpy.where', 'np.where', (['(cluster_p_values < p_cluster_threshold)'], {}), '(cluster_p_values < p_cluster_threshold)\n', (1297, 1337), True, 'import numpy as np\n'), ((1430, 1463), 'numpy.isin', 'np.isin', (['max_vtx', 'clusters[ii][1]'], {}), '(max_vtx, clusters[ii][1])\n', (1437, 1463), True, 'import numpy as np\n'), ((1739, 1780), 'mne.vertex_to_mni', 'mne.vertex_to_mni', (['vtx', 'hemi', '"""fsaverage"""'], {}), "(vtx, hemi, 'fsaverage')\n", (1756, 1780), False, 'import mne\n'), ((3284, 3336), 'numpy.where', 'np.where', (['(cluster_p_values_pos < p_cluster_threshold)'], {}), '(cluster_p_values_pos < p_cluster_threshold)\n', (3292, 3336), True, 'import numpy as np\n'), ((3458, 3579), 'summarize_clusters_stc_AT.summarize_clusters_stc_AT', 'summarize_clusters_stc_AT', (['clu'], {'p_thresh': 'p_cluster_threshold', 'tstep': '(0.001)', 'tmin': '(0)', 'subject': '"""fsaverage"""', 'vertices': 'None'}), "(clu, p_thresh=p_cluster_threshold, tstep=0.001,\n tmin=0, subject='fsaverage', vertices=None)\n", (3483, 3579), False, 'from summarize_clusters_stc_AT import summarize_clusters_stc_AT\n'), ((3898, 3950), 'numpy.where', 'np.where', (['(cluster_p_values_neg < p_cluster_threshold)'], {}), '(cluster_p_values_neg < p_cluster_threshold)\n', (3906, 3950), True, 'import numpy as np\n'), ((4072, 4193), 'summarize_clusters_stc_AT.summarize_clusters_stc_AT', 'summarize_clusters_stc_AT', (['clu'], {'p_thresh': 'p_cluster_threshold', 'tstep': '(0.001)', 'tmin': '(0)', 'subject': '"""fsaverage"""', 'vertices': 'None'}), "(clu, p_thresh=p_cluster_threshold, tstep=0.001,\n tmin=0, subject='fsaverage', vertices=None)\n", (4097, 4193), False, 'from summarize_clusters_stc_AT import summarize_clusters_stc_AT\n'), ((8908, 8949), 'mne.vertex_to_mni', 'mne.vertex_to_mni', (['vtx', 'hemi', '"""fsaverage"""'], {}), "(vtx, hemi, 'fsaverage')\n", (8925, 8949), False, 'import 
mne\n'), ((10224, 10240), 'numpy.arange', 'np.arange', (['(10242)'], {}), '(10242)\n', (10233, 10240), True, 'import numpy as np\n'), ((10242, 10258), 'numpy.arange', 'np.arange', (['(10242)'], {}), '(10242)\n', (10251, 10258), True, 'import numpy as np\n'), ((11294, 11412), 'summarize_clusters_stc_AT.summarize_clusters_stc_AT', 'summarize_clusters_stc_AT', (['clu2'], {'p_thresh': '(0.05)', 'tstep': '(0.001)', 'tmin': '(0)', 'subject': '"""fsaverage"""', 'vertices': 'fsave_vertices'}), "(clu2, p_thresh=0.05, tstep=0.001, tmin=0, subject\n ='fsaverage', vertices=fsave_vertices)\n", (11319, 11412), False, 'from summarize_clusters_stc_AT import summarize_clusters_stc_AT\n'), ((11820, 11838), 'numpy.absolute', 'np.absolute', (['R_max'], {}), '(R_max)\n', (11831, 11838), True, 'import numpy as np\n'), ((11839, 11857), 'numpy.absolute', 'np.absolute', (['R_min'], {}), '(R_min)\n', (11850, 11857), True, 'import numpy as np\n'), ((12139, 12172), 'numpy.isin', 'np.isin', (['max_vtx', 'clusters[ii][1]'], {}), '(max_vtx, clusters[ii][1])\n', (12146, 12172), True, 'import numpy as np\n'), ((12492, 12533), 'mne.vertex_to_mni', 'mne.vertex_to_mni', (['vtx', 'hemi', '"""fsaverage"""'], {}), "(vtx, hemi, 'fsaverage')\n", (12509, 12533), False, 'import mne\n'), ((6914, 6966), 'numpy.where', 'np.where', (['(cluster_p_values_pos < p_cluster_threshold)'], {}), '(cluster_p_values_pos < p_cluster_threshold)\n', (6922, 6966), True, 'import numpy as np\n'), ((7030, 7067), 'numpy.isin', 'np.isin', (['max_vtx', 'clusters_pos[ii][1]'], {}), '(max_vtx, clusters_pos[ii][1])\n', (7037, 7067), True, 'import numpy as np\n'), ((11620, 11672), 'numpy.absolute', 'np.absolute', (['stc_all_cluster_vis.data[:, id_max + 1]'], {}), '(stc_all_cluster_vis.data[:, id_max + 1])\n', (11631, 11672), True, 'import numpy as np\n'), ((11923, 11975), 'numpy.absolute', 'np.absolute', (['stc_all_cluster_vis.data[:, id_max + 1]'], {}), '(stc_all_cluster_vis.data[:, id_max + 1])\n', (11934, 11975), True, 'import numpy as np\n'), ((7401, 7453), 'numpy.where', 'np.where', (['(cluster_p_values_neg < p_cluster_threshold)'], {}), '(cluster_p_values_neg < p_cluster_threshold)\n', (7409, 7453), True, 'import numpy as np\n'), ((7517, 7554), 'numpy.isin', 'np.isin', (['max_vtx', 'clusters_neg[ii][1]'], {}), '(max_vtx, clusters_neg[ii][1])\n', (7524, 7554), True, 'import numpy as np\n'), ((7898, 7950), 'numpy.where', 'np.where', (['(cluster_p_values_pos < p_cluster_threshold)'], {}), '(cluster_p_values_pos < p_cluster_threshold)\n', (7906, 7950), True, 'import numpy as np\n'), ((8014, 8051), 'numpy.isin', 'np.isin', (['max_vtx', 'clusters_pos[ii][1]'], {}), '(max_vtx, clusters_pos[ii][1])\n', (8021, 8051), True, 'import numpy as np\n'), ((12008, 12060), 'numpy.absolute', 'np.absolute', (['stc_all_cluster_vis.data[:, id_max + 1]'], {}), '(stc_all_cluster_vis.data[:, id_max + 1])\n', (12019, 12060), True, 'import numpy as np\n'), ((8395, 8447), 'numpy.where', 'np.where', (['(cluster_p_values_neg < p_cluster_threshold)'], {}), '(cluster_p_values_neg < p_cluster_threshold)\n', (8403, 8447), True, 'import numpy as np\n'), ((8511, 8548), 'numpy.isin', 'np.isin', (['max_vtx', 'clusters_neg[ii][1]'], {}), '(max_vtx, clusters_neg[ii][1])\n', (8518, 8548), True, 'import numpy as np\n')]
|
from .dot_product import DotProduct
import os
from numpy import log2
filedir = os.path.dirname(os.path.realpath(__file__))
dot_product_tb_module_path = os.path.join(filedir, '..', 'src')
dot_product_module_path = os.path.join(filedir, '..', 'src', 'dot_prod_pip.v')
class DotProdPip(DotProduct):
"""
"""
def template_dict(self, inst_name=None):
t_dict = super(DotProdPip, self).template_dict(inst_name)
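        # width of the element counter derived from the vector length (log2; presumably self.length is a power of two)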
t_dict['length_counter_bits'] = int(log2(self.length))
return t_dict
|
[
"numpy.log2",
"os.path.realpath",
"os.path.join"
] |
[((153, 187), 'os.path.join', 'os.path.join', (['filedir', '""".."""', '"""src"""'], {}), "(filedir, '..', 'src')\n", (165, 187), False, 'import os\n'), ((214, 266), 'os.path.join', 'os.path.join', (['filedir', '""".."""', '"""src"""', '"""dot_prod_pip.v"""'], {}), "(filedir, '..', 'src', 'dot_prod_pip.v')\n", (226, 266), False, 'import os\n'), ((96, 122), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (112, 122), False, 'import os\n'), ((472, 489), 'numpy.log2', 'log2', (['self.length'], {}), '(self.length)\n', (476, 489), False, 'from numpy import log2\n')]
|
#%%
import numpy as np
import pandas as pd
import pickle
import matplotlib.pyplot as plt
TRAIN_UPDATE_FILE = "C:/kaggle/kaggle_keypoints/pickle/cleandata_updates_augment.pkl"
train = pickle.load(open(TRAIN_UPDATE_FILE, "rb")).reset_index()
print("Size of 'augmentation' set: %d" % train.shape[0])
# %%
fig = plt.figure(figsize=(20,20))
cols = [c for c in train.columns if not c.startswith('image')]
rng = np.clip(train.shape[0], 0, 60)
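# display at most 60 samples so they fit the 6x10 subplot grid below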
for i in range(rng):
img = train.iloc[i].image.reshape(96,96)
points = train.iloc[i][cols].values
ax = fig.add_subplot(6,10,i+1)
ax.imshow(img, cmap='gray')
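    # keypoint values alternate x, y, ... so even indices are x coordinates and odd indices are y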
ax.scatter(points[0::2], points[1::2], color = 'red', s = 20)
plt.axis('off')
plt.tight_layout()
plt.show()
# %%
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.axis",
"numpy.clip",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout"
] |
[((314, 342), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 20)'}), '(figsize=(20, 20))\n', (324, 342), True, 'import matplotlib.pyplot as plt\n'), ((411, 441), 'numpy.clip', 'np.clip', (['train.shape[0]', '(0)', '(60)'], {}), '(train.shape[0], 0, 60)\n', (418, 441), True, 'import numpy as np\n'), ((704, 722), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (720, 722), True, 'import matplotlib.pyplot as plt\n'), ((723, 733), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (731, 733), True, 'import matplotlib.pyplot as plt\n'), ((686, 701), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (694, 701), True, 'import matplotlib.pyplot as plt\n')]
|
# Standalone script so that Python multiprocessing (which relies on pickle) can be used #
import sys
from multiprocessing import Pool, cpu_count
import lasagne as nn
import numpy as np
import theano
import theano.tensor as T
from scipy import optimize
from python.util import get_model_params, model_path, model_epoch
from python.model.vae import build_vae
test_data_path, model, bound, output_path = sys.argv[1], sys.argv[2], int(sys.argv[3]), sys.argv[4]
n_channels, depth, z_dim, n_hid_first, lam, L = get_model_params(model)
test_data = np.load(test_data_path)
# load trained model
input_var = T.matrix('inputs')
z_var = T.vector()
l_z_mean, l_z_stddev, _, _, _, l_x = build_vae(input_var, n_channels=n_channels, depth=depth, z_dim=z_dim,
n_hid_first=n_hid_first, L=1)
with np.load(model_path(model) + str(model_epoch(model)) + '.npz') as f:
param_values = [f['arr_%d' % i] for i in range(len(f.files))]
nn.layers.set_all_param_values(l_x, param_values)
# create encoder function to find initial values for z
encoder = nn.layers.get_output([l_z_mean, l_z_stddev], deterministic=True)
encode = theano.function([input_var], encoder)
# create decoder function
generated_x = nn.layers.get_output(l_x, {l_z_mean: z_var}, deterministic=True)
gen_fn = theano.function([z_var], generated_x)
# create l2 loss to optimize over latent space
z_mean, z_stddev = encode(test_data)
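# use the encoder means as starting points for the latent-space optimisation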
z_0 = z_mean
def loss(z, voxel):
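    # reconstruction error: L2 distance between the decoded sample x and the target voxel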
x = gen_fn(z).reshape(n_channels)
return np.linalg.norm(voxel-x)
if bound == 0:
def minimize_voxel(args):
loss, z_0, voxel = args
optimize_result = optimize.minimize(loss, z_0, voxel)
return loss(optimize_result.x, voxel)
else:
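    # constrain every latent dimension to [-bound, bound] during the optimisation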
boundaries = ((-bound, bound),)
for _ in range(z_dim-1):
boundaries += ((-bound, bound),)
def minimize_voxel(args):
loss, z_0, voxel = args
optimize_result = optimize.minimize(loss, z_0, voxel, bounds=boundaries)
return loss(optimize_result.x, voxel)
args = [(loss, z_0[i], test_data[i]) for i in range(len(test_data))]
p = Pool(cpu_count())
novelty_score = np.array(p.map(minimize_voxel, args))
np.save(output_path, novelty_score)
|
[
"numpy.load",
"numpy.save",
"scipy.optimize.minimize",
"python.util.model_path",
"theano.function",
"multiprocessing.cpu_count",
"python.util.get_model_params",
"lasagne.layers.get_output",
"numpy.linalg.norm",
"python.model.vae.build_vae",
"theano.tensor.vector",
"python.util.model_epoch",
"lasagne.layers.set_all_param_values",
"theano.tensor.matrix"
] |
[((491, 514), 'python.util.get_model_params', 'get_model_params', (['model'], {}), '(model)\n', (507, 514), False, 'from python.util import get_model_params, model_path, model_epoch\n'), ((527, 550), 'numpy.load', 'np.load', (['test_data_path'], {}), '(test_data_path)\n', (534, 550), True, 'import numpy as np\n'), ((585, 603), 'theano.tensor.matrix', 'T.matrix', (['"""inputs"""'], {}), "('inputs')\n", (593, 603), True, 'import theano.tensor as T\n'), ((612, 622), 'theano.tensor.vector', 'T.vector', ([], {}), '()\n', (620, 622), True, 'import theano.tensor as T\n'), ((660, 763), 'python.model.vae.build_vae', 'build_vae', (['input_var'], {'n_channels': 'n_channels', 'depth': 'depth', 'z_dim': 'z_dim', 'n_hid_first': 'n_hid_first', 'L': '(1)'}), '(input_var, n_channels=n_channels, depth=depth, z_dim=z_dim,\n n_hid_first=n_hid_first, L=1)\n', (669, 763), False, 'from python.model.vae import build_vae\n'), ((939, 988), 'lasagne.layers.set_all_param_values', 'nn.layers.set_all_param_values', (['l_x', 'param_values'], {}), '(l_x, param_values)\n', (969, 988), True, 'import lasagne as nn\n'), ((1055, 1119), 'lasagne.layers.get_output', 'nn.layers.get_output', (['[l_z_mean, l_z_stddev]'], {'deterministic': '(True)'}), '([l_z_mean, l_z_stddev], deterministic=True)\n', (1075, 1119), True, 'import lasagne as nn\n'), ((1129, 1166), 'theano.function', 'theano.function', (['[input_var]', 'encoder'], {}), '([input_var], encoder)\n', (1144, 1166), False, 'import theano\n'), ((1208, 1272), 'lasagne.layers.get_output', 'nn.layers.get_output', (['l_x', '{l_z_mean: z_var}'], {'deterministic': '(True)'}), '(l_x, {l_z_mean: z_var}, deterministic=True)\n', (1228, 1272), True, 'import lasagne as nn\n'), ((1282, 1319), 'theano.function', 'theano.function', (['[z_var]', 'generated_x'], {}), '([z_var], generated_x)\n', (1297, 1319), False, 'import theano\n'), ((2147, 2182), 'numpy.save', 'np.save', (['output_path', 'novelty_score'], {}), '(output_path, novelty_score)\n', (2154, 2182), True, 'import numpy as np\n'), ((1489, 1514), 'numpy.linalg.norm', 'np.linalg.norm', (['(voxel - x)'], {}), '(voxel - x)\n', (1503, 1514), True, 'import numpy as np\n'), ((2080, 2091), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (2089, 2091), False, 'from multiprocessing import Pool, cpu_count\n'), ((1617, 1652), 'scipy.optimize.minimize', 'optimize.minimize', (['loss', 'z_0', 'voxel'], {}), '(loss, z_0, voxel)\n', (1634, 1652), False, 'from scipy import optimize\n'), ((1900, 1954), 'scipy.optimize.minimize', 'optimize.minimize', (['loss', 'z_0', 'voxel'], {'bounds': 'boundaries'}), '(loss, z_0, voxel, bounds=boundaries)\n', (1917, 1954), False, 'from scipy import optimize\n'), ((812, 829), 'python.util.model_path', 'model_path', (['model'], {}), '(model)\n', (822, 829), False, 'from python.util import get_model_params, model_path, model_epoch\n'), ((836, 854), 'python.util.model_epoch', 'model_epoch', (['model'], {}), '(model)\n', (847, 854), False, 'from python.util import get_model_params, model_path, model_epoch\n')]
|
#
# Created by: <NAME>, September 2002
#
from __future__ import division, print_function, absolute_import
import sys
import subprocess
import time
from numpy.testing import (assert_equal, assert_array_almost_equal, assert_,
assert_allclose, assert_almost_equal,
assert_array_equal)
import pytest
from pytest import raises as assert_raises
import numpy as np
from numpy.random import rand, seed
from scipy.linalg import _flapack as flapack
from scipy.linalg import inv
from scipy.linalg import svd
from scipy.linalg.lapack import _compute_lwork
try:
from scipy.linalg import _clapack as clapack
except ImportError:
clapack = None
from scipy.linalg.lapack import get_lapack_funcs
from scipy.linalg.blas import get_blas_funcs
REAL_DTYPES = [np.float32, np.float64]
COMPLEX_DTYPES = [np.complex64, np.complex128]
DTYPES = REAL_DTYPES + COMPLEX_DTYPES
class TestFlapackSimple(object):
def test_gebal(self):
a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
a1 = [[1, 0, 0, 3e-4],
[4, 0, 0, 2e-3],
[7, 1, 0, 0],
[0, 1, 0, 0]]
for p in 'sdzc':
f = getattr(flapack, p+'gebal', None)
if f is None:
continue
ba, lo, hi, pivscale, info = f(a)
assert_(not info, repr(info))
assert_array_almost_equal(ba, a)
assert_equal((lo, hi), (0, len(a[0])-1))
assert_array_almost_equal(pivscale, np.ones(len(a)))
ba, lo, hi, pivscale, info = f(a1, permute=1, scale=1)
assert_(not info, repr(info))
# print(a1)
# print(ba, lo, hi, pivscale)
def test_gehrd(self):
a = [[-149, -50, -154],
[537, 180, 546],
[-27, -9, -25]]
for p in 'd':
f = getattr(flapack, p+'gehrd', None)
if f is None:
continue
ht, tau, info = f(a)
assert_(not info, repr(info))
def test_trsyl(self):
a = np.array([[1, 2], [0, 4]])
b = np.array([[5, 6], [0, 8]])
c = np.array([[9, 10], [11, 12]])
trans = 'T'
# Test single and double implementations, including most
# of the options
for dtype in 'fdFD':
a1, b1, c1 = a.astype(dtype), b.astype(dtype), c.astype(dtype)
trsyl, = get_lapack_funcs(('trsyl',), (a1,))
if dtype.isupper(): # is complex dtype
a1[0] += 1j
trans = 'C'
x, scale, info = trsyl(a1, b1, c1)
assert_array_almost_equal(np.dot(a1, x) + np.dot(x, b1),
scale * c1)
x, scale, info = trsyl(a1, b1, c1, trana=trans, tranb=trans)
assert_array_almost_equal(
np.dot(a1.conjugate().T, x) + np.dot(x, b1.conjugate().T),
scale * c1, decimal=4)
x, scale, info = trsyl(a1, b1, c1, isgn=-1)
assert_array_almost_equal(np.dot(a1, x) - np.dot(x, b1),
scale * c1, decimal=4)
def test_lange(self):
a = np.array([
[-149, -50, -154],
[537, 180, 546],
[-27, -9, -25]])
for dtype in 'fdFD':
for norm in 'Mm1OoIiFfEe':
a1 = a.astype(dtype)
if dtype.isupper():
# is complex dtype
a1[0, 0] += 1j
lange, = get_lapack_funcs(('lange',), (a1,))
value = lange(norm, a1)
if norm in 'FfEe':
if dtype in 'Ff':
decimal = 3
else:
decimal = 7
ref = np.sqrt(np.sum(np.square(np.abs(a1))))
assert_almost_equal(value, ref, decimal)
else:
if norm in 'Mm':
ref = np.max(np.abs(a1))
elif norm in '1Oo':
ref = np.max(np.sum(np.abs(a1), axis=0))
elif norm in 'Ii':
ref = np.max(np.sum(np.abs(a1), axis=1))
assert_equal(value, ref)
class TestLapack(object):
def test_flapack(self):
if hasattr(flapack, 'empty_module'):
# flapack module is empty
pass
def test_clapack(self):
if hasattr(clapack, 'empty_module'):
# clapack module is empty
pass
class TestLeastSquaresSolvers(object):
def test_gels(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gels, gels_lwork, geqrf = get_lapack_funcs(
('gels', 'gels_lwork', 'geqrf'), (a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
lwork = _compute_lwork(gels_lwork, m, n, nrhs)
lqr, x, info = gels(a1, b1, lwork=lwork)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
lqr_truth, _, _, _ = geqrf(a1)
assert_array_equal(lqr, lqr_truth)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gels, gels_lwork, geqrf = get_lapack_funcs(
('gels', 'gels_lwork', 'geqrf'), (a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
lwork = _compute_lwork(gels_lwork, m, n, nrhs)
lqr, x, info = gels(a1, b1, lwork=lwork)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
lqr_truth, _, _, _ = geqrf(a1)
assert_array_equal(lqr, lqr_truth)
def test_gelsd(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, iwork, info = gelsd_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
iwork_size = iwork
x, s, rank, info = gelsd(a1, b1, lwork, iwork_size,
-1, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([12.596017180511966,
0.583396253199685], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, rwork, iwork, info = gelsd_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
rwork_size = int(rwork)
iwork_size = iwork
x, s, rank, info = gelsd(a1, b1, lwork, rwork_size, iwork_size,
-1, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
assert_allclose(s,
np.array([13.035514762572043, 4.337666985231382],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
def test_gelss(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelss_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([12.596017180511966,
0.583396253199685], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelss_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([13.035514762572043,
4.337666985231382], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
def test_gelsy(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps)
lwork = int(np.real(work))
jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
lwork, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps)
lwork = int(np.real(work))
jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
lwork, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
class TestRegression(object):
def test_ticket_1645(self):
# Check that RQ routines have correct lwork
for dtype in DTYPES:
a = np.zeros((300, 2), dtype=dtype)
gerqf, = get_lapack_funcs(['gerqf'], [a])
assert_raises(Exception, gerqf, a, lwork=2)
rq, tau, work, info = gerqf(a)
if dtype in REAL_DTYPES:
orgrq, = get_lapack_funcs(['orgrq'], [a])
assert_raises(Exception, orgrq, rq[-2:], tau, lwork=1)
orgrq(rq[-2:], tau, lwork=2)
elif dtype in COMPLEX_DTYPES:
ungrq, = get_lapack_funcs(['ungrq'], [a])
assert_raises(Exception, ungrq, rq[-2:], tau, lwork=1)
ungrq(rq[-2:], tau, lwork=2)
class TestDpotr(object):
def test_gh_2691(self):
        # 'lower' argument of dpotrf/dpotri
for lower in [True, False]:
for clean in [True, False]:
np.random.seed(42)
x = np.random.normal(size=(3, 3))
a = x.dot(x.T)
dpotrf, dpotri = get_lapack_funcs(("potrf", "potri"), (a, ))
c, info = dpotrf(a, lower, clean=clean)
dpt = dpotri(c, lower)[0]
if lower:
assert_allclose(np.tril(dpt), np.tril(inv(a)))
else:
assert_allclose(np.triu(dpt), np.triu(inv(a)))
class TestDlasd4(object):
def test_sing_val_update(self):
sigmas = np.array([4., 3., 2., 0])
m_vec = np.array([3.12, 5.7, -4.8, -2.2])
M = np.hstack((np.vstack((np.diag(sigmas[0:-1]),
np.zeros((1, len(m_vec) - 1)))), m_vec[:, np.newaxis]))
SM = svd(M, full_matrices=False, compute_uv=False, overwrite_a=False,
check_finite=False)
it_len = len(sigmas)
sgm = np.concatenate((sigmas[::-1], (sigmas[0] +
it_len*np.sqrt(np.sum(np.power(m_vec, 2))),)))
mvc = np.concatenate((m_vec[::-1], (0,)))
lasd4 = get_lapack_funcs('lasd4', (sigmas,))
roots = []
for i in range(0, it_len):
res = lasd4(i, sgm, mvc)
roots.append(res[1])
assert_((res[3] <= 0), "LAPACK root finding dlasd4 failed to find \
the singular value %i" % i)
roots = np.array(roots)[::-1]
assert_((not np.any(np.isnan(roots)), "There are NaN roots"))
assert_allclose(SM, roots, atol=100*np.finfo(np.float64).eps,
rtol=100*np.finfo(np.float64).eps)
def test_lartg():
for dtype in 'fdFD':
lartg = get_lapack_funcs('lartg', dtype=dtype)
f = np.array(3, dtype)
g = np.array(4, dtype)
if np.iscomplexobj(g):
g *= 1j
cs, sn, r = lartg(f, g)
assert_allclose(cs, 3.0/5.0)
assert_allclose(r, 5.0)
if np.iscomplexobj(g):
assert_allclose(sn, -4.0j/5.0)
assert_(type(r) == complex)
assert_(type(cs) == float)
else:
assert_allclose(sn, 4.0/5.0)
def test_rot():
# srot, drot from blas and crot and zrot from lapack.
for dtype in 'fdFD':
c = 0.6
s = 0.8
u = np.ones(4, dtype) * 3
v = np.ones(4, dtype) * 4
atol = 10**-(np.finfo(dtype).precision-1)
if dtype in 'fd':
rot = get_blas_funcs('rot', dtype=dtype)
f = 4
else:
rot = get_lapack_funcs('rot', dtype=dtype)
s *= -1j
v *= 1j
f = 4j
assert_allclose(rot(u, v, c, s), [[5, 5, 5, 5],
[0, 0, 0, 0]], atol=atol)
assert_allclose(rot(u, v, c, s, n=2), [[5, 5, 3, 3],
[0, 0, f, f]], atol=atol)
assert_allclose(rot(u, v, c, s, offx=2, offy=2),
[[3, 3, 5, 5], [f, f, 0, 0]], atol=atol)
assert_allclose(rot(u, v, c, s, incx=2, offy=2, n=2),
[[5, 3, 5, 3], [f, f, 0, 0]], atol=atol)
assert_allclose(rot(u, v, c, s, offx=2, incy=2, n=2),
[[3, 3, 5, 5], [0, f, 0, f]], atol=atol)
assert_allclose(rot(u, v, c, s, offx=2, incx=2, offy=2, incy=2, n=1),
[[3, 3, 5, 3], [f, f, 0, f]], atol=atol)
assert_allclose(rot(u, v, c, s, incx=-2, incy=-2, n=2),
[[5, 3, 5, 3], [0, f, 0, f]], atol=atol)
a, b = rot(u, v, c, s, overwrite_x=1, overwrite_y=1)
assert_(a is u)
assert_(b is v)
assert_allclose(a, [5, 5, 5, 5], atol=atol)
assert_allclose(b, [0, 0, 0, 0], atol=atol)
def test_larfg_larf():
np.random.seed(1234)
a0 = np.random.random((4, 4))
a0 = a0.T.dot(a0)
a0j = np.random.random((4, 4)) + 1j*np.random.random((4, 4))
a0j = a0j.T.conj().dot(a0j)
    # our test here will be to do one step of reducing a hermitian matrix to
    # tridiagonal form using Householder transforms.
for dtype in 'fdFD':
larfg, larf = get_lapack_funcs(['larfg', 'larf'], dtype=dtype)
if dtype in 'FD':
a = a0j.copy()
else:
a = a0.copy()
# generate a householder transform to clear a[2:,0]
alpha, x, tau = larfg(a.shape[0]-1, a[1, 0], a[2:, 0])
# create expected output
expected = np.zeros_like(a[:, 0])
expected[0] = a[0, 0]
expected[1] = alpha
# assemble householder vector
v = np.zeros_like(a[1:, 0])
v[0] = 1.0
v[1:] = x
# apply transform from the left
a[1:, :] = larf(v, tau.conjugate(), a[1:, :], np.zeros(a.shape[1]))
# apply transform from the right
a[:, 1:] = larf(v, tau, a[:,1:], np.zeros(a.shape[0]), side='R')
assert_allclose(a[:, 0], expected, atol=1e-5)
assert_allclose(a[0, :], expected, atol=1e-5)
@pytest.mark.xslow
def test_sgesdd_lwork_bug_workaround():
# Test that SGESDD lwork is sufficiently large for LAPACK.
#
# This checks that workaround around an apparent LAPACK bug
# actually works. cf. gh-5401
#
# xslow: requires 1GB+ of memory
p = subprocess.Popen([sys.executable, '-c',
'import numpy as np; '
'from scipy.linalg import svd; '
'a = np.zeros([9537, 9537], dtype=np.float32); '
'svd(a)'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
    # Check if an error occurred within 5 sec; the computation can
# take substantially longer, and we will not wait for it to finish
for j in range(50):
time.sleep(0.1)
if p.poll() is not None:
returncode = p.returncode
break
else:
# Didn't exit in time -- probably entered computation. The
# error is raised before entering computation, so things are
# probably OK.
returncode = 0
p.terminate()
assert_equal(returncode, 0,
"Code apparently failed: " + p.stdout.read())
class TestSytrd(object):
def test_sytrd(self):
for dtype in REAL_DTYPES:
# Assert that a 0x0 matrix raises an error
A = np.zeros((0, 0), dtype=dtype)
sytrd, sytrd_lwork = \
get_lapack_funcs(('sytrd', 'sytrd_lwork'), (A,))
assert_raises(ValueError, sytrd, A)
# Tests for n = 1 currently fail with
# ```
# ValueError: failed to create intent(cache|hide)|optional array--
# must have defined dimensions but got (0,)
# ```
# This is a NumPy issue
# <https://github.com/numpy/numpy/issues/9617>.
# TODO once the issue has been resolved, test for n=1
# some upper triangular array
n = 3
A = np.zeros((n, n), dtype=dtype)
A[np.triu_indices_from(A)] = \
np.arange(1, n*(n+1)//2+1, dtype=dtype)
# query lwork
lwork, info = sytrd_lwork(n)
assert_equal(info, 0)
# check lower=1 behavior (shouldn't do much since the matrix is
# upper triangular)
data, d, e, tau, info = sytrd(A, lower=1, lwork=lwork)
assert_equal(info, 0)
assert_allclose(data, A, atol=5*np.finfo(dtype).eps, rtol=1.0)
assert_allclose(d, np.diag(A))
assert_allclose(e, 0.0)
assert_allclose(tau, 0.0)
# and now for the proper test (lower=0 is the default)
data, d, e, tau, info = sytrd(A, lwork=lwork)
assert_equal(info, 0)
# assert Q^T*A*Q = tridiag(e, d, e)
# build tridiagonal matrix
T = np.zeros_like(A, dtype=dtype)
k = np.arange(A.shape[0])
T[k, k] = d
k2 = np.arange(A.shape[0]-1)
T[k2+1, k2] = e
T[k2, k2+1] = e
# build Q
Q = np.eye(n, n, dtype=dtype)
for i in range(n-1):
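                # rebuild Q by accumulating the Householder reflectors returned by sytrd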
v = np.zeros(n, dtype=dtype)
v[:i] = data[:i, i+1]
v[i] = 1.0
H = np.eye(n, n, dtype=dtype) - tau[i] * np.outer(v, v)
Q = np.dot(H, Q)
# Make matrix fully symmetric
i_lower = np.tril_indices(n, -1)
A[i_lower] = A.T[i_lower]
QTAQ = np.dot(Q.T, np.dot(A, Q))
# disable rtol here since some values in QTAQ and T are very close
# to 0.
assert_allclose(QTAQ, T, atol=5*np.finfo(dtype).eps, rtol=1.0)
class TestHetrd(object):
def test_hetrd(self):
for real_dtype, complex_dtype in zip(REAL_DTYPES, COMPLEX_DTYPES):
# Assert that a 0x0 matrix raises an error
A = np.zeros((0, 0), dtype=complex_dtype)
hetrd, hetrd_lwork = \
get_lapack_funcs(('hetrd', 'hetrd_lwork'), (A,))
assert_raises(ValueError, hetrd, A)
# Tests for n = 1 currently fail with
# ```
# ValueError: failed to create intent(cache|hide)|optional array--
# must have defined dimensions but got (0,)
# ```
# This is a NumPy issue
# <https://github.com/numpy/numpy/issues/9617>.
# TODO once the issue has been resolved, test for n=1
# some upper triangular array
n = 3
A = np.zeros((n, n), dtype=complex_dtype)
A[np.triu_indices_from(A)] = (
np.arange(1, n*(n+1)//2+1, dtype=real_dtype)
+ 1j * np.arange(1, n*(n+1)//2+1, dtype=real_dtype)
)
np.fill_diagonal(A, np.real(np.diag(A)))
# query lwork
lwork, info = hetrd_lwork(n)
assert_equal(info, 0)
# check lower=1 behavior (shouldn't do much since the matrix is
# upper triangular)
data, d, e, tau, info = hetrd(A, lower=1, lwork=lwork)
assert_equal(info, 0)
assert_allclose(data, A, atol=5*np.finfo(real_dtype).eps, rtol=1.0)
assert_allclose(d, np.real(np.diag(A)))
assert_allclose(e, 0.0)
assert_allclose(tau, 0.0)
# and now for the proper test (lower=0 is the default)
data, d, e, tau, info = hetrd(A, lwork=lwork)
assert_equal(info, 0)
# assert Q^T*A*Q = tridiag(e, d, e)
# build tridiagonal matrix
T = np.zeros_like(A, dtype=real_dtype)
k = np.arange(A.shape[0], dtype=int)
T[k, k] = d
k2 = np.arange(A.shape[0]-1, dtype=int)
T[k2+1, k2] = e
T[k2, k2+1] = e
# build Q
Q = np.eye(n, n, dtype=complex_dtype)
for i in range(n-1):
v = np.zeros(n, dtype=complex_dtype)
v[:i] = data[:i, i+1]
v[i] = 1.0
H = np.eye(n, n, dtype=complex_dtype) \
- tau[i] * np.outer(v, np.conj(v))
Q = np.dot(H, Q)
            # Make matrix fully Hermitian
i_lower = np.tril_indices(n, -1)
A[i_lower] = np.conj(A.T[i_lower])
QHAQ = np.dot(np.conj(Q.T), np.dot(A, Q))
# disable rtol here since some values in QTAQ and T are very close
# to 0.
assert_allclose(
QHAQ, T, atol=10*np.finfo(real_dtype).eps, rtol=1.0
)
def test_gglse():
# Example data taken from NAG manual
for ind, dtype in enumerate(DTYPES):
# DTYPES = <s,d,c,z> gglse
func, func_lwork = get_lapack_funcs(('gglse', 'gglse_lwork'),
dtype=dtype)
lwork = _compute_lwork(func_lwork, m=6, n=4, p=2)
# For <s,d>gglse
if ind < 2:
a = np.array([[-0.57, -1.28, -0.39, 0.25],
[-1.93, 1.08, -0.31, -2.14],
[2.30, 0.24, 0.40, -0.35],
[-1.93, 0.64, -0.66, 0.08],
[0.15, 0.30, 0.15, -2.13],
[-0.02, 1.03, -1.43, 0.50]], dtype=dtype)
c = np.array([-1.50, -2.14, 1.23, -0.54, -1.68, 0.82], dtype=dtype)
d = np.array([0., 0.], dtype=dtype)
        # For <c,z>gglse
else:
a = np.array([[0.96-0.81j, -0.03+0.96j, -0.91+2.06j, -0.05+0.41j],
[-0.98+1.98j, -1.20+0.19j, -0.66+0.42j, -0.81+0.56j],
[0.62-0.46j, 1.01+0.02j, 0.63-0.17j, -1.11+0.60j],
[0.37+0.38j, 0.19-0.54j, -0.98-0.36j, 0.22-0.20j],
[0.83+0.51j, 0.20+0.01j, -0.17-0.46j, 1.47+1.59j],
[1.08-0.28j, 0.20-0.12j, -0.07+1.23j, 0.26+0.26j]])
c = np.array([[-2.54+0.09j],
[1.65-2.26j],
[-2.11-3.96j],
[1.82+3.30j],
[-6.41+3.77j],
[2.07+0.66j]])
d = np.zeros(2, dtype=dtype)
b = np.array([[1., 0., -1., 0.], [0., 1., 0., -1.]], dtype=dtype)
_, _, _, result, _ = func(a, b, c, d, lwork=lwork)
if ind < 2:
expected = np.array([0.48904455,
0.99754786,
0.48904455,
0.99754786])
else:
expected = np.array([1.08742917-1.96205783j,
-0.74093902+3.72973919j,
1.08742917-1.96205759j,
-0.74093896+3.72973895j])
assert_array_almost_equal(result, expected, decimal=4)
def test_sycon_hecon():
seed(1234)
for ind, dtype in enumerate(DTYPES+COMPLEX_DTYPES):
# DTYPES + COMPLEX DTYPES = <s,d,c,z> sycon + <c,z>hecon
n = 10
# For <s,d,c,z>sycon
if ind < 4:
func_lwork = get_lapack_funcs('sytrf_lwork', dtype=dtype)
funcon, functrf = get_lapack_funcs(('sycon', 'sytrf'), dtype=dtype)
A = (rand(n, n)).astype(dtype)
# For <c,z>hecon
else:
func_lwork = get_lapack_funcs('hetrf_lwork', dtype=dtype)
funcon, functrf = get_lapack_funcs(('hecon', 'hetrf'), dtype=dtype)
A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
# Since sycon only refers to upper/lower part, conj() is safe here.
A = (A + A.conj().T)/2 + 2*np.eye(n, dtype=dtype)
anorm = np.linalg.norm(A, 1)
lwork = _compute_lwork(func_lwork, n)
ldu, ipiv, _ = functrf(A, lwork=lwork, lower=1)
rcond, _ = funcon(a=ldu, ipiv=ipiv, anorm=anorm, lower=1)
# The error is at most 1-fold
assert_(abs(1/rcond - np.linalg.cond(A, p=1))*rcond < 1)
|
[
"numpy.random.seed",
"numpy.triu",
"numpy.abs",
"numpy.ones",
"numpy.isnan",
"numpy.linalg.cond",
"scipy.linalg.svd",
"numpy.linalg.norm",
"scipy.linalg.blas.get_blas_funcs",
"numpy.arange",
"numpy.random.normal",
"numpy.diag",
"numpy.testing.assert_array_almost_equal",
"numpy.zeros_like",
"numpy.testing.assert_almost_equal",
"numpy.power",
"scipy.linalg.inv",
"numpy.finfo",
"pytest.raises",
"numpy.testing.assert_equal",
"numpy.real",
"numpy.triu_indices_from",
"numpy.testing.assert_allclose",
"scipy.linalg.lapack.get_lapack_funcs",
"numpy.tril_indices",
"numpy.conj",
"subprocess.Popen",
"numpy.testing.assert_array_equal",
"scipy.linalg.lapack._compute_lwork",
"time.sleep",
"numpy.testing.assert_",
"numpy.dot",
"numpy.concatenate",
"numpy.outer",
"numpy.iscomplexobj",
"numpy.tril",
"numpy.zeros",
"numpy.random.random",
"numpy.array",
"numpy.random.rand",
"numpy.eye"
] |
[((18500, 18520), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (18514, 18520), True, 'import numpy as np\n'), ((18530, 18554), 'numpy.random.random', 'np.random.random', (['(4, 4)'], {}), '((4, 4))\n', (18546, 18554), True, 'import numpy as np\n'), ((19988, 20193), 'subprocess.Popen', 'subprocess.Popen', (["[sys.executable, '-c',\n 'import numpy as np; from scipy.linalg import svd; a = np.zeros([9537, 9537], dtype=np.float32); svd(a)'\n ]"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), "([sys.executable, '-c',\n 'import numpy as np; from scipy.linalg import svd; a = np.zeros([9537, 9537], dtype=np.float32); svd(a)'\n ], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n", (20004, 20193), False, 'import subprocess\n'), ((28683, 28693), 'numpy.random.seed', 'seed', (['(1234)'], {}), '(1234)\n', (28687, 28693), False, 'from numpy.random import rand, seed\n'), ((2050, 2076), 'numpy.array', 'np.array', (['[[1, 2], [0, 4]]'], {}), '([[1, 2], [0, 4]])\n', (2058, 2076), True, 'import numpy as np\n'), ((2089, 2115), 'numpy.array', 'np.array', (['[[5, 6], [0, 8]]'], {}), '([[5, 6], [0, 8]])\n', (2097, 2115), True, 'import numpy as np\n'), ((2128, 2157), 'numpy.array', 'np.array', (['[[9, 10], [11, 12]]'], {}), '([[9, 10], [11, 12]])\n', (2136, 2157), True, 'import numpy as np\n'), ((3166, 3228), 'numpy.array', 'np.array', (['[[-149, -50, -154], [537, 180, 546], [-27, -9, -25]]'], {}), '([[-149, -50, -154], [537, 180, 546], [-27, -9, -25]])\n', (3174, 3228), True, 'import numpy as np\n'), ((15239, 15267), 'numpy.array', 'np.array', (['[4.0, 3.0, 2.0, 0]'], {}), '([4.0, 3.0, 2.0, 0])\n', (15247, 15267), True, 'import numpy as np\n'), ((15281, 15314), 'numpy.array', 'np.array', (['[3.12, 5.7, -4.8, -2.2]'], {}), '([3.12, 5.7, -4.8, -2.2])\n', (15289, 15314), True, 'import numpy as np\n'), ((15465, 15553), 'scipy.linalg.svd', 'svd', (['M'], {'full_matrices': '(False)', 'compute_uv': '(False)', 'overwrite_a': '(False)', 'check_finite': '(False)'}), '(M, full_matrices=False, compute_uv=False, overwrite_a=False,\n check_finite=False)\n', (15468, 15553), False, 'from scipy.linalg import svd\n'), ((15745, 15780), 'numpy.concatenate', 'np.concatenate', (['(m_vec[::-1], (0,))'], {}), '((m_vec[::-1], (0,)))\n', (15759, 15780), True, 'import numpy as np\n'), ((15798, 15834), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (['"""lasd4"""', '(sigmas,)'], {}), "('lasd4', (sigmas,))\n", (15814, 15834), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((16404, 16442), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (['"""lartg"""'], {'dtype': 'dtype'}), "('lartg', dtype=dtype)\n", (16420, 16442), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((16456, 16474), 'numpy.array', 'np.array', (['(3)', 'dtype'], {}), '(3, dtype)\n', (16464, 16474), True, 'import numpy as np\n'), ((16487, 16505), 'numpy.array', 'np.array', (['(4)', 'dtype'], {}), '(4, dtype)\n', (16495, 16505), True, 'import numpy as np\n'), ((16518, 16536), 'numpy.iscomplexobj', 'np.iscomplexobj', (['g'], {}), '(g)\n', (16533, 16536), True, 'import numpy as np\n'), ((16600, 16630), 'numpy.testing.assert_allclose', 'assert_allclose', (['cs', '(3.0 / 5.0)'], {}), '(cs, 3.0 / 5.0)\n', (16615, 16630), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((16637, 16660), 'numpy.testing.assert_allclose', 'assert_allclose', (['r', '(5.0)'], {}), '(r, 5.0)\n', (16652, 16660), 
False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((16673, 16691), 'numpy.iscomplexobj', 'np.iscomplexobj', (['g'], {}), '(g)\n', (16688, 16691), True, 'import numpy as np\n'), ((18327, 18342), 'numpy.testing.assert_', 'assert_', (['(a is u)'], {}), '(a is u)\n', (18334, 18342), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((18351, 18366), 'numpy.testing.assert_', 'assert_', (['(b is v)'], {}), '(b is v)\n', (18358, 18366), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((18375, 18418), 'numpy.testing.assert_allclose', 'assert_allclose', (['a', '[5, 5, 5, 5]'], {'atol': 'atol'}), '(a, [5, 5, 5, 5], atol=atol)\n', (18390, 18418), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((18427, 18470), 'numpy.testing.assert_allclose', 'assert_allclose', (['b', '[0, 0, 0, 0]'], {'atol': 'atol'}), '(b, [0, 0, 0, 0], atol=atol)\n', (18442, 18470), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((18588, 18612), 'numpy.random.random', 'np.random.random', (['(4, 4)'], {}), '((4, 4))\n', (18604, 18612), True, 'import numpy as np\n'), ((18854, 18902), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["['larfg', 'larf']"], {'dtype': 'dtype'}), "(['larfg', 'larf'], dtype=dtype)\n", (18870, 18902), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((19174, 19196), 'numpy.zeros_like', 'np.zeros_like', (['a[:, 0]'], {}), '(a[:, 0])\n', (19187, 19196), True, 'import numpy as np\n'), ((19306, 19329), 'numpy.zeros_like', 'np.zeros_like', (['a[1:, 0]'], {}), '(a[1:, 0])\n', (19319, 19329), True, 'import numpy as np\n'), ((19608, 19654), 'numpy.testing.assert_allclose', 'assert_allclose', (['a[:, 0]', 'expected'], {'atol': '(1e-05)'}), '(a[:, 0], expected, atol=1e-05)\n', (19623, 19654), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((19662, 19708), 'numpy.testing.assert_allclose', 'assert_allclose', (['a[0, :]', 'expected'], {'atol': '(1e-05)'}), '(a[0, :], expected, atol=1e-05)\n', (19677, 19708), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((20522, 20537), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (20532, 20537), False, 'import time\n'), ((26548, 26603), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["('gglse', 'gglse_lwork')"], {'dtype': 'dtype'}), "(('gglse', 'gglse_lwork'), dtype=dtype)\n", (26564, 26603), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((26664, 26705), 'scipy.linalg.lapack._compute_lwork', '_compute_lwork', (['func_lwork'], {'m': '(6)', 'n': '(4)', 'p': '(2)'}), '(func_lwork, m=6, n=4, p=2)\n', (26678, 26705), False, 'from scipy.linalg.lapack import _compute_lwork\n'), ((28022, 28091), 'numpy.array', 'np.array', (['[[1.0, 0.0, -1.0, 0.0], [0.0, 1.0, 0.0, -1.0]]'], {'dtype': 'dtype'}), '([[1.0, 0.0, -1.0, 0.0], [0.0, 1.0, 0.0, -1.0]], dtype=dtype)\n', (28030, 28091), True, 'import numpy as np\n'), ((28598, 28652), 
'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['result', 'expected'], {'decimal': '(4)'}), '(result, expected, decimal=4)\n', (28623, 28652), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((29472, 29492), 'numpy.linalg.norm', 'np.linalg.norm', (['A', '(1)'], {}), '(A, 1)\n', (29486, 29492), True, 'import numpy as np\n'), ((29509, 29538), 'scipy.linalg.lapack._compute_lwork', '_compute_lwork', (['func_lwork', 'n'], {}), '(func_lwork, n)\n', (29523, 29538), False, 'from scipy.linalg.lapack import _compute_lwork\n'), ((1368, 1400), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['ba', 'a'], {}), '(ba, a)\n', (1393, 1400), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((2394, 2429), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["('trsyl',)", '(a1,)'], {}), "(('trsyl',), (a1,))\n", (2410, 2429), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((4649, 4708), 'numpy.array', 'np.array', (['[[1.0, 2.0], [4.0, 5.0], [7.0, 8.0]]'], {'dtype': 'dtype'}), '([[1.0, 2.0], [4.0, 5.0], [7.0, 8.0]], dtype=dtype)\n', (4657, 4708), True, 'import numpy as np\n'), ((4778, 4819), 'numpy.array', 'np.array', (['[16.0, 17.0, 20.0]'], {'dtype': 'dtype'}), '([16.0, 17.0, 20.0], dtype=dtype)\n', (4786, 4819), True, 'import numpy as np\n'), ((4858, 4917), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["('gels', 'gels_lwork', 'geqrf')", '(a1, b1)'], {}), "(('gels', 'gels_lwork', 'geqrf'), (a1, b1))\n", (4874, 4917), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((5133, 5171), 'scipy.linalg.lapack._compute_lwork', '_compute_lwork', (['gels_lwork', 'm', 'n', 'nrhs'], {}), '(gels_lwork, m, n, nrhs)\n', (5147, 5171), False, 'from scipy.linalg.lapack import _compute_lwork\n'), ((5531, 5565), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['lqr', 'lqr_truth'], {}), '(lqr, lqr_truth)\n', (5549, 5565), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((5621, 5720), 'numpy.array', 'np.array', (['[[1.0 + 4.0j, 2.0], [4.0 + 0.5j, 5.0 - 3.0j], [7.0 - 2.0j, 8.0 + 0.7j]]'], {'dtype': 'dtype'}), '([[1.0 + 4.0j, 2.0], [4.0 + 0.5j, 5.0 - 3.0j], [7.0 - 2.0j, 8.0 + \n 0.7j]], dtype=dtype)\n', (5629, 5720), True, 'import numpy as np\n'), ((5775, 5830), 'numpy.array', 'np.array', (['[16.0, 17.0 + 2.0j, 20.0 - 4.0j]'], {'dtype': 'dtype'}), '([16.0, 17.0 + 2.0j, 20.0 - 4.0j], dtype=dtype)\n', (5783, 5830), True, 'import numpy as np\n'), ((5865, 5924), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["('gels', 'gels_lwork', 'geqrf')", '(a1, b1)'], {}), "(('gels', 'gels_lwork', 'geqrf'), (a1, b1))\n", (5881, 5924), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((6140, 6178), 'scipy.linalg.lapack._compute_lwork', '_compute_lwork', (['gels_lwork', 'm', 'n', 'nrhs'], {}), '(gels_lwork, m, n, nrhs)\n', (6154, 6178), False, 'from scipy.linalg.lapack import _compute_lwork\n'), ((6548, 6582), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['lqr', 'lqr_truth'], {}), '(lqr, lqr_truth)\n', (6566, 6582), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((6661, 6720), 'numpy.array', 'np.array', (['[[1.0, 
2.0], [4.0, 5.0], [7.0, 8.0]]'], {'dtype': 'dtype'}), '([[1.0, 2.0], [4.0, 5.0], [7.0, 8.0]], dtype=dtype)\n', (6669, 6720), True, 'import numpy as np\n'), ((6790, 6831), 'numpy.array', 'np.array', (['[16.0, 17.0, 20.0]'], {'dtype': 'dtype'}), '([16.0, 17.0, 20.0], dtype=dtype)\n', (6798, 6831), True, 'import numpy as np\n'), ((6865, 6917), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["('gelsd', 'gelsd_lwork')", '(a1, b1)'], {}), "(('gelsd', 'gelsd_lwork'), (a1, b1))\n", (6881, 6917), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((7844, 7943), 'numpy.array', 'np.array', (['[[1.0 + 4.0j, 2.0], [4.0 + 0.5j, 5.0 - 3.0j], [7.0 - 2.0j, 8.0 + 0.7j]]'], {'dtype': 'dtype'}), '([[1.0 + 4.0j, 2.0], [4.0 + 0.5j, 5.0 - 3.0j], [7.0 - 2.0j, 8.0 + \n 0.7j]], dtype=dtype)\n', (7852, 7943), True, 'import numpy as np\n'), ((7998, 8053), 'numpy.array', 'np.array', (['[16.0, 17.0 + 2.0j, 20.0 - 4.0j]'], {'dtype': 'dtype'}), '([16.0, 17.0 + 2.0j, 20.0 - 4.0j], dtype=dtype)\n', (8006, 8053), True, 'import numpy as np\n'), ((8083, 8135), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["('gelsd', 'gelsd_lwork')", '(a1, b1)'], {}), "(('gelsd', 'gelsd_lwork'), (a1, b1))\n", (8099, 8135), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((9193, 9252), 'numpy.array', 'np.array', (['[[1.0, 2.0], [4.0, 5.0], [7.0, 8.0]]'], {'dtype': 'dtype'}), '([[1.0, 2.0], [4.0, 5.0], [7.0, 8.0]], dtype=dtype)\n', (9201, 9252), True, 'import numpy as np\n'), ((9322, 9363), 'numpy.array', 'np.array', (['[16.0, 17.0, 20.0]'], {'dtype': 'dtype'}), '([16.0, 17.0, 20.0], dtype=dtype)\n', (9330, 9363), True, 'import numpy as np\n'), ((9397, 9449), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["('gelss', 'gelss_lwork')", '(a1, b1)'], {}), "(('gelss', 'gelss_lwork'), (a1, b1))\n", (9413, 9449), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((10298, 10397), 'numpy.array', 'np.array', (['[[1.0 + 4.0j, 2.0], [4.0 + 0.5j, 5.0 - 3.0j], [7.0 - 2.0j, 8.0 + 0.7j]]'], {'dtype': 'dtype'}), '([[1.0 + 4.0j, 2.0], [4.0 + 0.5j, 5.0 - 3.0j], [7.0 - 2.0j, 8.0 + \n 0.7j]], dtype=dtype)\n', (10306, 10397), True, 'import numpy as np\n'), ((10452, 10507), 'numpy.array', 'np.array', (['[16.0, 17.0 + 2.0j, 20.0 - 4.0j]'], {'dtype': 'dtype'}), '([16.0, 17.0 + 2.0j, 20.0 - 4.0j], dtype=dtype)\n', (10460, 10507), True, 'import numpy as np\n'), ((10537, 10589), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["('gelss', 'gelss_lwork')", '(a1, b1)'], {}), "(('gelss', 'gelss_lwork'), (a1, b1))\n", (10553, 10589), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((11555, 11614), 'numpy.array', 'np.array', (['[[1.0, 2.0], [4.0, 5.0], [7.0, 8.0]]'], {'dtype': 'dtype'}), '([[1.0, 2.0], [4.0, 5.0], [7.0, 8.0]], dtype=dtype)\n', (11563, 11614), True, 'import numpy as np\n'), ((11684, 11725), 'numpy.array', 'np.array', (['[16.0, 17.0, 20.0]'], {'dtype': 'dtype'}), '([16.0, 17.0, 20.0], dtype=dtype)\n', (11692, 11725), True, 'import numpy as np\n'), ((11759, 11811), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["('gelsy', 'gelss_lwork')", '(a1, b1)'], {}), "(('gelsy', 'gelss_lwork'), (a1, b1))\n", (11775, 11811), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((12168, 12210), 'numpy.zeros', 'np.zeros', (['(a1.shape[1], 1)'], {'dtype': 'np.int32'}), '((a1.shape[1], 1), dtype=np.int32)\n', (12176, 12210), True, 'import numpy as np\n'), ((12606, 12705), 'numpy.array', 'np.array', (['[[1.0 + 4.0j, 2.0], [4.0 + 0.5j, 5.0 - 3.0j], [7.0 
- 2.0j, 8.0 + 0.7j]]'], {'dtype': 'dtype'}), '([[1.0 + 4.0j, 2.0], [4.0 + 0.5j, 5.0 - 3.0j], [7.0 - 2.0j, 8.0 + \n 0.7j]], dtype=dtype)\n', (12614, 12705), True, 'import numpy as np\n'), ((12760, 12815), 'numpy.array', 'np.array', (['[16.0, 17.0 + 2.0j, 20.0 - 4.0j]'], {'dtype': 'dtype'}), '([16.0, 17.0 + 2.0j, 20.0 - 4.0j], dtype=dtype)\n', (12768, 12815), True, 'import numpy as np\n'), ((12845, 12897), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["('gelsy', 'gelss_lwork')", '(a1, b1)'], {}), "(('gelsy', 'gelss_lwork'), (a1, b1))\n", (12861, 12897), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((13254, 13296), 'numpy.zeros', 'np.zeros', (['(a1.shape[1], 1)'], {'dtype': 'np.int32'}), '((a1.shape[1], 1), dtype=np.int32)\n', (13262, 13296), True, 'import numpy as np\n'), ((13892, 13923), 'numpy.zeros', 'np.zeros', (['(300, 2)'], {'dtype': 'dtype'}), '((300, 2), dtype=dtype)\n', (13900, 13923), True, 'import numpy as np\n'), ((13946, 13978), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["['gerqf']", '[a]'], {}), "(['gerqf'], [a])\n", (13962, 13978), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((13991, 14034), 'pytest.raises', 'assert_raises', (['Exception', 'gerqf', 'a'], {'lwork': '(2)'}), '(Exception, gerqf, a, lwork=2)\n', (14004, 14034), True, 'from pytest import raises as assert_raises\n'), ((15973, 16110), 'numpy.testing.assert_', 'assert_', (['(res[3] <= 0)', "('LAPACK root finding dlasd4 failed to find the singular value %i'\n % i)"], {}), "(res[3] <= 0, \n 'LAPACK root finding dlasd4 failed to find the singular value %i'\n % i)\n", (15980, 16110), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((16121, 16136), 'numpy.array', 'np.array', (['roots'], {}), '(roots)\n', (16129, 16136), True, 'import numpy as np\n'), ((16705, 16737), 'numpy.testing.assert_allclose', 'assert_allclose', (['sn', '(-4.0j / 5.0)'], {}), '(sn, -4.0j / 5.0)\n', (16720, 16737), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((16841, 16871), 'numpy.testing.assert_allclose', 'assert_allclose', (['sn', '(4.0 / 5.0)'], {}), '(sn, 4.0 / 5.0)\n', (16856, 16871), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((17017, 17034), 'numpy.ones', 'np.ones', (['(4)', 'dtype'], {}), '(4, dtype)\n', (17024, 17034), True, 'import numpy as np\n'), ((17051, 17068), 'numpy.ones', 'np.ones', (['(4)', 'dtype'], {}), '(4, dtype)\n', (17058, 17068), True, 'import numpy as np\n'), ((17168, 17202), 'scipy.linalg.blas.get_blas_funcs', 'get_blas_funcs', (['"""rot"""'], {'dtype': 'dtype'}), "('rot', dtype=dtype)\n", (17182, 17202), False, 'from scipy.linalg.blas import get_blas_funcs\n'), ((17253, 17289), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (['"""rot"""'], {'dtype': 'dtype'}), "('rot', dtype=dtype)\n", (17269, 17289), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((18618, 18642), 'numpy.random.random', 'np.random.random', (['(4, 4)'], {}), '((4, 4))\n', (18634, 18642), True, 'import numpy as np\n'), ((19462, 19482), 'numpy.zeros', 'np.zeros', (['a.shape[1]'], {}), '(a.shape[1])\n', (19470, 19482), True, 'import numpy as np\n'), ((19567, 19587), 'numpy.zeros', 'np.zeros', (['a.shape[0]'], {}), '(a.shape[0])\n', (19575, 19587), True, 'import numpy 
as np\n'), ((21096, 21125), 'numpy.zeros', 'np.zeros', (['(0, 0)'], {'dtype': 'dtype'}), '((0, 0), dtype=dtype)\n', (21104, 21125), True, 'import numpy as np\n'), ((21177, 21225), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["('sytrd', 'sytrd_lwork')", '(A,)'], {}), "(('sytrd', 'sytrd_lwork'), (A,))\n", (21193, 21225), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((21238, 21273), 'pytest.raises', 'assert_raises', (['ValueError', 'sytrd', 'A'], {}), '(ValueError, sytrd, A)\n', (21251, 21273), True, 'from pytest import raises as assert_raises\n'), ((21735, 21764), 'numpy.zeros', 'np.zeros', (['(n, n)'], {'dtype': 'dtype'}), '((n, n), dtype=dtype)\n', (21743, 21764), True, 'import numpy as np\n'), ((21824, 21871), 'numpy.arange', 'np.arange', (['(1)', '(n * (n + 1) // 2 + 1)'], {'dtype': 'dtype'}), '(1, n * (n + 1) // 2 + 1, dtype=dtype)\n', (21833, 21871), True, 'import numpy as np\n'), ((21944, 21965), 'numpy.testing.assert_equal', 'assert_equal', (['info', '(0)'], {}), '(info, 0)\n', (21956, 21965), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((22154, 22175), 'numpy.testing.assert_equal', 'assert_equal', (['info', '(0)'], {}), '(info, 0)\n', (22166, 22175), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((22307, 22330), 'numpy.testing.assert_allclose', 'assert_allclose', (['e', '(0.0)'], {}), '(e, 0.0)\n', (22322, 22330), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((22343, 22368), 'numpy.testing.assert_allclose', 'assert_allclose', (['tau', '(0.0)'], {}), '(tau, 0.0)\n', (22358, 22368), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((22507, 22528), 'numpy.testing.assert_equal', 'assert_equal', (['info', '(0)'], {}), '(info, 0)\n', (22519, 22528), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((22634, 22663), 'numpy.zeros_like', 'np.zeros_like', (['A'], {'dtype': 'dtype'}), '(A, dtype=dtype)\n', (22647, 22663), True, 'import numpy as np\n'), ((22680, 22701), 'numpy.arange', 'np.arange', (['A.shape[0]'], {}), '(A.shape[0])\n', (22689, 22701), True, 'import numpy as np\n'), ((22743, 22768), 'numpy.arange', 'np.arange', (['(A.shape[0] - 1)'], {}), '(A.shape[0] - 1)\n', (22752, 22768), True, 'import numpy as np\n'), ((22862, 22887), 'numpy.eye', 'np.eye', (['n', 'n'], {'dtype': 'dtype'}), '(n, n, dtype=dtype)\n', (22868, 22887), True, 'import numpy as np\n'), ((23201, 23223), 'numpy.tril_indices', 'np.tril_indices', (['n', '(-1)'], {}), '(n, -1)\n', (23216, 23223), True, 'import numpy as np\n'), ((23682, 23719), 'numpy.zeros', 'np.zeros', (['(0, 0)'], {'dtype': 'complex_dtype'}), '((0, 0), dtype=complex_dtype)\n', (23690, 23719), True, 'import numpy as np\n'), ((23771, 23819), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["('hetrd', 'hetrd_lwork')", '(A,)'], {}), "(('hetrd', 'hetrd_lwork'), (A,))\n", (23787, 23819), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((23832, 23867), 'pytest.raises', 'assert_raises', (['ValueError', 'hetrd', 'A'], {}), '(ValueError, hetrd, A)\n', (23845, 23867), True, 'from pytest import raises as 
assert_raises\n'), ((24329, 24366), 'numpy.zeros', 'np.zeros', (['(n, n)'], {'dtype': 'complex_dtype'}), '((n, n), dtype=complex_dtype)\n', (24337, 24366), True, 'import numpy as np\n'), ((24690, 24711), 'numpy.testing.assert_equal', 'assert_equal', (['info', '(0)'], {}), '(info, 0)\n', (24702, 24711), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((24900, 24921), 'numpy.testing.assert_equal', 'assert_equal', (['info', '(0)'], {}), '(info, 0)\n', (24912, 24921), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((25068, 25091), 'numpy.testing.assert_allclose', 'assert_allclose', (['e', '(0.0)'], {}), '(e, 0.0)\n', (25083, 25091), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((25104, 25129), 'numpy.testing.assert_allclose', 'assert_allclose', (['tau', '(0.0)'], {}), '(tau, 0.0)\n', (25119, 25129), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((25268, 25289), 'numpy.testing.assert_equal', 'assert_equal', (['info', '(0)'], {}), '(info, 0)\n', (25280, 25289), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((25395, 25429), 'numpy.zeros_like', 'np.zeros_like', (['A'], {'dtype': 'real_dtype'}), '(A, dtype=real_dtype)\n', (25408, 25429), True, 'import numpy as np\n'), ((25446, 25478), 'numpy.arange', 'np.arange', (['A.shape[0]'], {'dtype': 'int'}), '(A.shape[0], dtype=int)\n', (25455, 25478), True, 'import numpy as np\n'), ((25520, 25556), 'numpy.arange', 'np.arange', (['(A.shape[0] - 1)'], {'dtype': 'int'}), '(A.shape[0] - 1, dtype=int)\n', (25529, 25556), True, 'import numpy as np\n'), ((25650, 25683), 'numpy.eye', 'np.eye', (['n', 'n'], {'dtype': 'complex_dtype'}), '(n, n, dtype=complex_dtype)\n', (25656, 25683), True, 'import numpy as np\n'), ((26044, 26066), 'numpy.tril_indices', 'np.tril_indices', (['n', '(-1)'], {}), '(n, -1)\n', (26059, 26066), True, 'import numpy as np\n'), ((26092, 26113), 'numpy.conj', 'np.conj', (['A.T[i_lower]'], {}), '(A.T[i_lower])\n', (26099, 26113), True, 'import numpy as np\n'), ((26767, 26963), 'numpy.array', 'np.array', (['[[-0.57, -1.28, -0.39, 0.25], [-1.93, 1.08, -0.31, -2.14], [2.3, 0.24, 0.4,\n -0.35], [-1.93, 0.64, -0.66, 0.08], [0.15, 0.3, 0.15, -2.13], [-0.02, \n 1.03, -1.43, 0.5]]'], {'dtype': 'dtype'}), '([[-0.57, -1.28, -0.39, 0.25], [-1.93, 1.08, -0.31, -2.14], [2.3, \n 0.24, 0.4, -0.35], [-1.93, 0.64, -0.66, 0.08], [0.15, 0.3, 0.15, -2.13],\n [-0.02, 1.03, -1.43, 0.5]], dtype=dtype)\n', (26775, 26963), True, 'import numpy as np\n'), ((27105, 27167), 'numpy.array', 'np.array', (['[-1.5, -2.14, 1.23, -0.54, -1.68, 0.82]'], {'dtype': 'dtype'}), '([-1.5, -2.14, 1.23, -0.54, -1.68, 0.82], dtype=dtype)\n', (27113, 27167), True, 'import numpy as np\n'), ((27185, 27218), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {'dtype': 'dtype'}), '([0.0, 0.0], dtype=dtype)\n', (27193, 27218), True, 'import numpy as np\n'), ((27272, 27658), 'numpy.array', 'np.array', (['[[0.96 - 0.81j, -0.03 + 0.96j, -0.91 + 2.06j, -0.05 + 0.41j], [-0.98 + \n 1.98j, -1.2 + 0.19j, -0.66 + 0.42j, -0.81 + 0.56j], [0.62 - 0.46j, 1.01 +\n 0.02j, 0.63 - 0.17j, -1.11 + 0.6j], [0.37 + 0.38j, 0.19 - 0.54j, 
-0.98 -\n 0.36j, 0.22 - 0.2j], [0.83 + 0.51j, 0.2 + 0.01j, -0.17 - 0.46j, 1.47 + \n 1.59j], [1.08 - 0.28j, 0.2 - 0.12j, -0.07 + 1.23j, 0.26 + 0.26j]]'], {}), '([[0.96 - 0.81j, -0.03 + 0.96j, -0.91 + 2.06j, -0.05 + 0.41j], [-\n 0.98 + 1.98j, -1.2 + 0.19j, -0.66 + 0.42j, -0.81 + 0.56j], [0.62 - \n 0.46j, 1.01 + 0.02j, 0.63 - 0.17j, -1.11 + 0.6j], [0.37 + 0.38j, 0.19 -\n 0.54j, -0.98 - 0.36j, 0.22 - 0.2j], [0.83 + 0.51j, 0.2 + 0.01j, -0.17 -\n 0.46j, 1.47 + 1.59j], [1.08 - 0.28j, 0.2 - 0.12j, -0.07 + 1.23j, 0.26 +\n 0.26j]])\n', (27280, 27658), True, 'import numpy as np\n'), ((27740, 27852), 'numpy.array', 'np.array', (['[[-2.54 + 0.09j], [1.65 - 2.26j], [-2.11 - 3.96j], [1.82 + 3.3j], [-6.41 + \n 3.77j], [2.07 + 0.66j]]'], {}), '([[-2.54 + 0.09j], [1.65 - 2.26j], [-2.11 - 3.96j], [1.82 + 3.3j],\n [-6.41 + 3.77j], [2.07 + 0.66j]])\n', (27748, 27852), True, 'import numpy as np\n'), ((27984, 28008), 'numpy.zeros', 'np.zeros', (['(2)'], {'dtype': 'dtype'}), '(2, dtype=dtype)\n', (27992, 28008), True, 'import numpy as np\n'), ((28187, 28245), 'numpy.array', 'np.array', (['[0.48904455, 0.99754786, 0.48904455, 0.99754786]'], {}), '([0.48904455, 0.99754786, 0.48904455, 0.99754786])\n', (28195, 28245), True, 'import numpy as np\n'), ((28382, 28502), 'numpy.array', 'np.array', (['[1.08742917 - 1.96205783j, -0.74093902 + 3.72973919j, 1.08742917 - \n 1.96205759j, -0.74093896 + 3.72973895j]'], {}), '([1.08742917 - 1.96205783j, -0.74093902 + 3.72973919j, 1.08742917 -\n 1.96205759j, -0.74093896 + 3.72973895j])\n', (28390, 28502), True, 'import numpy as np\n'), ((28904, 28948), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (['"""sytrf_lwork"""'], {'dtype': 'dtype'}), "('sytrf_lwork', dtype=dtype)\n", (28920, 28948), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((28979, 29028), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["('sycon', 'sytrf')"], {'dtype': 'dtype'}), "(('sycon', 'sytrf'), dtype=dtype)\n", (28995, 29028), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((29136, 29180), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (['"""hetrf_lwork"""'], {'dtype': 'dtype'}), "('hetrf_lwork', dtype=dtype)\n", (29152, 29180), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((29211, 29260), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["('hecon', 'hetrf')"], {'dtype': 'dtype'}), "(('hecon', 'hetrf'), dtype=dtype)\n", (29227, 29260), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((3508, 3543), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["('lange',)", '(a1,)'], {}), "(('lange',), (a1,))\n", (3524, 3543), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((5262, 5326), 'numpy.array', 'np.array', (['[-14.333333333333323, 14.999999999999991]'], {'dtype': 'dtype'}), '([-14.333333333333323, 14.999999999999991], dtype=dtype)\n', (5270, 5326), True, 'import numpy as np\n'), ((6297, 6405), 'numpy.array', 'np.array', (['[1.161753632288328 - 1.901075709391912j, 1.735882340522193 + 1.521240901196909j\n ]'], {'dtype': 'dtype'}), '([1.161753632288328 - 1.901075709391912j, 1.735882340522193 + \n 1.521240901196909j], dtype=dtype)\n', (6305, 6405), True, 'import numpy as np\n'), ((7226, 7239), 'numpy.real', 'np.real', (['work'], {}), '(work)\n', (7233, 7239), True, 'import numpy as np\n'), ((7428, 7492), 'numpy.array', 'np.array', (['[-14.333333333333323, 14.999999999999991]'], {'dtype': 'dtype'}), '([-14.333333333333323, 14.999999999999991], dtype=dtype)\n', (7436, 7492), True, 
'import numpy as np\n'), ((7627, 7689), 'numpy.array', 'np.array', (['[12.596017180511966, 0.583396253199685]'], {'dtype': 'dtype'}), '([12.596017180511966, 0.583396253199685], dtype=dtype)\n', (7635, 7689), True, 'import numpy as np\n'), ((8451, 8464), 'numpy.real', 'np.real', (['work'], {}), '(work)\n', (8458, 8464), True, 'import numpy as np\n'), ((8729, 8837), 'numpy.array', 'np.array', (['[1.161753632288328 - 1.901075709391912j, 1.735882340522193 + 1.521240901196909j\n ]'], {'dtype': 'dtype'}), '([1.161753632288328 - 1.901075709391912j, 1.735882340522193 + \n 1.521240901196909j], dtype=dtype)\n', (8737, 8837), True, 'import numpy as np\n'), ((8984, 9046), 'numpy.array', 'np.array', (['[13.035514762572044, 4.337666985231382]'], {'dtype': 'dtype'}), '([13.035514762572044, 4.337666985231382], dtype=dtype)\n', (8992, 9046), True, 'import numpy as np\n'), ((9751, 9764), 'numpy.real', 'np.real', (['work'], {}), '(work)\n', (9758, 9764), True, 'import numpy as np\n'), ((9882, 9946), 'numpy.array', 'np.array', (['[-14.333333333333323, 14.999999999999991]'], {'dtype': 'dtype'}), '([-14.333333333333323, 14.999999999999991], dtype=dtype)\n', (9890, 9946), True, 'import numpy as np\n'), ((10081, 10143), 'numpy.array', 'np.array', (['[12.596017180511966, 0.583396253199685]'], {'dtype': 'dtype'}), '([12.596017180511966, 0.583396253199685], dtype=dtype)\n', (10089, 10143), True, 'import numpy as np\n'), ((10891, 10904), 'numpy.real', 'np.real', (['work'], {}), '(work)\n', (10898, 10904), True, 'import numpy as np\n'), ((11050, 11158), 'numpy.array', 'np.array', (['[1.161753632288328 - 1.901075709391912j, 1.735882340522193 + 1.521240901196909j\n ]'], {'dtype': 'dtype'}), '([1.161753632288328 - 1.901075709391912j, 1.735882340522193 + \n 1.521240901196909j], dtype=dtype)\n', (11058, 11158), True, 'import numpy as np\n'), ((11314, 11376), 'numpy.array', 'np.array', (['[13.035514762572044, 4.337666985231382]'], {'dtype': 'dtype'}), '([13.035514762572044, 4.337666985231382], dtype=dtype)\n', (11322, 11376), True, 'import numpy as np\n'), ((12133, 12146), 'numpy.real', 'np.real', (['work'], {}), '(work)\n', (12140, 12146), True, 'import numpy as np\n'), ((12383, 12447), 'numpy.array', 'np.array', (['[-14.333333333333323, 14.999999999999991]'], {'dtype': 'dtype'}), '([-14.333333333333323, 14.999999999999991], dtype=dtype)\n', (12391, 12447), True, 'import numpy as np\n'), ((13219, 13232), 'numpy.real', 'np.real', (['work'], {}), '(work)\n', (13226, 13232), True, 'import numpy as np\n'), ((13497, 13605), 'numpy.array', 'np.array', (['[1.161753632288328 - 1.901075709391912j, 1.735882340522193 + 1.521240901196909j\n ]'], {'dtype': 'dtype'}), '([1.161753632288328 - 1.901075709391912j, 1.735882340522193 + \n 1.521240901196909j], dtype=dtype)\n', (13505, 13605), True, 'import numpy as np\n'), ((14141, 14173), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["['orgrq']", '[a]'], {}), "(['orgrq'], [a])\n", (14157, 14173), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((14190, 14244), 'pytest.raises', 'assert_raises', (['Exception', 'orgrq', 'rq[-2:]', 'tau'], {'lwork': '(1)'}), '(Exception, orgrq, rq[-2:], tau, lwork=1)\n', (14203, 14244), True, 'from pytest import raises as assert_raises\n'), ((14697, 14715), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (14711, 14715), True, 'import numpy as np\n'), ((14736, 14765), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3, 3)'}), '(size=(3, 3))\n', (14752, 14765), True, 'import numpy as np\n'), ((14831, 14873), 
'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["('potrf', 'potri')", '(a,)'], {}), "(('potrf', 'potri'), (a,))\n", (14847, 14873), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((21779, 21802), 'numpy.triu_indices_from', 'np.triu_indices_from', (['A'], {}), '(A)\n', (21799, 21802), True, 'import numpy as np\n'), ((22283, 22293), 'numpy.diag', 'np.diag', (['A'], {}), '(A)\n', (22290, 22293), True, 'import numpy as np\n'), ((22941, 22965), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'dtype'}), '(n, dtype=dtype)\n', (22949, 22965), True, 'import numpy as np\n'), ((23123, 23135), 'numpy.dot', 'np.dot', (['H', 'Q'], {}), '(H, Q)\n', (23129, 23135), True, 'import numpy as np\n'), ((23294, 23306), 'numpy.dot', 'np.dot', (['A', 'Q'], {}), '(A, Q)\n', (23300, 23306), True, 'import numpy as np\n'), ((24381, 24404), 'numpy.triu_indices_from', 'np.triu_indices_from', (['A'], {}), '(A)\n', (24401, 24404), True, 'import numpy as np\n'), ((24426, 24478), 'numpy.arange', 'np.arange', (['(1)', '(n * (n + 1) // 2 + 1)'], {'dtype': 'real_dtype'}), '(1, n * (n + 1) // 2 + 1, dtype=real_dtype)\n', (24435, 24478), True, 'import numpy as np\n'), ((25737, 25769), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'complex_dtype'}), '(n, dtype=complex_dtype)\n', (25745, 25769), True, 'import numpy as np\n'), ((25966, 25978), 'numpy.dot', 'np.dot', (['H', 'Q'], {}), '(H, Q)\n', (25972, 25978), True, 'import numpy as np\n'), ((26141, 26153), 'numpy.conj', 'np.conj', (['Q.T'], {}), '(Q.T)\n', (26148, 26153), True, 'import numpy as np\n'), ((26155, 26167), 'numpy.dot', 'np.dot', (['A', 'Q'], {}), '(A, Q)\n', (26161, 26167), True, 'import numpy as np\n'), ((29432, 29454), 'numpy.eye', 'np.eye', (['n'], {'dtype': 'dtype'}), '(n, dtype=dtype)\n', (29438, 29454), True, 'import numpy as np\n'), ((2624, 2637), 'numpy.dot', 'np.dot', (['a1', 'x'], {}), '(a1, x)\n', (2630, 2637), True, 'import numpy as np\n'), ((2640, 2653), 'numpy.dot', 'np.dot', (['x', 'b1'], {}), '(x, b1)\n', (2646, 2653), True, 'import numpy as np\n'), ((3035, 3048), 'numpy.dot', 'np.dot', (['a1', 'x'], {}), '(a1, x)\n', (3041, 3048), True, 'import numpy as np\n'), ((3051, 3064), 'numpy.dot', 'np.dot', (['x', 'b1'], {}), '(x, b1)\n', (3057, 3064), True, 'import numpy as np\n'), ((3841, 3881), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['value', 'ref', 'decimal'], {}), '(value, ref, decimal)\n', (3860, 3881), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((4220, 4244), 'numpy.testing.assert_equal', 'assert_equal', (['value', 'ref'], {}), '(value, ref)\n', (4232, 4244), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((12265, 12280), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (12273, 12280), True, 'import numpy as np\n'), ((13351, 13366), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (13359, 13366), True, 'import numpy as np\n'), ((14357, 14389), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["['ungrq']", '[a]'], {}), "(['ungrq'], [a])\n", (14373, 14389), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((14406, 14460), 'pytest.raises', 'assert_raises', (['Exception', 'ungrq', 'rq[-2:]', 'tau'], {'lwork': '(1)'}), '(Exception, ungrq, rq[-2:], tau, lwork=1)\n', (14419, 14460), True, 'from pytest import raises as assert_raises\n'), ((23051, 23076), 'numpy.eye', 
'np.eye', (['n', 'n'], {'dtype': 'dtype'}), '(n, n, dtype=dtype)\n', (23057, 23076), True, 'import numpy as np\n'), ((24494, 24546), 'numpy.arange', 'np.arange', (['(1)', '(n * (n + 1) // 2 + 1)'], {'dtype': 'real_dtype'}), '(1, n * (n + 1) // 2 + 1, dtype=real_dtype)\n', (24503, 24546), True, 'import numpy as np\n'), ((24597, 24607), 'numpy.diag', 'np.diag', (['A'], {}), '(A)\n', (24604, 24607), True, 'import numpy as np\n'), ((25043, 25053), 'numpy.diag', 'np.diag', (['A'], {}), '(A)\n', (25050, 25053), True, 'import numpy as np\n'), ((25855, 25888), 'numpy.eye', 'np.eye', (['n', 'n'], {'dtype': 'complex_dtype'}), '(n, n, dtype=complex_dtype)\n', (25861, 25888), True, 'import numpy as np\n'), ((29046, 29056), 'numpy.random.rand', 'rand', (['n', 'n'], {}), '(n, n)\n', (29050, 29056), False, 'from numpy.random import rand, seed\n'), ((12088, 12103), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (12096, 12103), True, 'import numpy as np\n'), ((13174, 13189), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (13182, 13189), True, 'import numpy as np\n'), ((15037, 15049), 'numpy.tril', 'np.tril', (['dpt'], {}), '(dpt)\n', (15044, 15049), True, 'import numpy as np\n'), ((15126, 15138), 'numpy.triu', 'np.triu', (['dpt'], {}), '(dpt)\n', (15133, 15138), True, 'import numpy as np\n'), ((15350, 15371), 'numpy.diag', 'np.diag', (['sigmas[0:-1]'], {}), '(sigmas[0:-1])\n', (15357, 15371), True, 'import numpy as np\n'), ((16172, 16187), 'numpy.isnan', 'np.isnan', (['roots'], {}), '(roots)\n', (16180, 16187), True, 'import numpy as np\n'), ((16258, 16278), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (16266, 16278), True, 'import numpy as np\n'), ((16317, 16337), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (16325, 16337), True, 'import numpy as np\n'), ((17094, 17109), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (17102, 17109), True, 'import numpy as np\n'), ((23088, 23102), 'numpy.outer', 'np.outer', (['v', 'v'], {}), '(v, v)\n', (23096, 23102), True, 'import numpy as np\n'), ((29278, 29288), 'numpy.random.rand', 'rand', (['n', 'n'], {}), '(n, n)\n', (29282, 29288), False, 'from numpy.random import rand, seed\n'), ((3978, 3988), 'numpy.abs', 'np.abs', (['a1'], {}), '(a1)\n', (3984, 3988), True, 'import numpy as np\n'), ((5455, 5470), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (5463, 5470), True, 'import numpy as np\n'), ((6472, 6487), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (6480, 6487), True, 'import numpy as np\n'), ((7575, 7590), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (7583, 7590), True, 'import numpy as np\n'), ((7768, 7783), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (7776, 7783), True, 'import numpy as np\n'), ((8904, 8919), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (8912, 8919), True, 'import numpy as np\n'), ((9093, 9108), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (9101, 9108), True, 'import numpy as np\n'), ((10029, 10044), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (10037, 10044), True, 'import numpy as np\n'), ((10222, 10237), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (10230, 10237), True, 'import numpy as np\n'), ((11262, 11277), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (11270, 11277), True, 'import numpy as np\n'), ((11455, 11470), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (11463, 11470), True, 'import numpy as np\n'), ((12530, 12545), 'numpy.finfo', 'np.finfo', 
(['dtype'], {}), '(dtype)\n', (12538, 12545), True, 'import numpy as np\n'), ((13709, 13724), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (13717, 13724), True, 'import numpy as np\n'), ((15059, 15065), 'scipy.linalg.inv', 'inv', (['a'], {}), '(a)\n', (15062, 15065), False, 'from scipy.linalg import inv\n'), ((15148, 15154), 'scipy.linalg.inv', 'inv', (['a'], {}), '(a)\n', (15151, 15154), False, 'from scipy.linalg import inv\n'), ((22221, 22236), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (22229, 22236), True, 'import numpy as np\n'), ((23452, 23467), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (23460, 23467), True, 'import numpy as np\n'), ((24967, 24987), 'numpy.finfo', 'np.finfo', (['real_dtype'], {}), '(real_dtype)\n', (24975, 24987), True, 'import numpy as np\n'), ((25934, 25944), 'numpy.conj', 'np.conj', (['v'], {}), '(v)\n', (25941, 25944), True, 'import numpy as np\n'), ((26331, 26351), 'numpy.finfo', 'np.finfo', (['real_dtype'], {}), '(real_dtype)\n', (26339, 26351), True, 'import numpy as np\n'), ((29291, 29301), 'numpy.random.rand', 'rand', (['n', 'n'], {}), '(n, n)\n', (29295, 29301), False, 'from numpy.random import rand, seed\n'), ((29729, 29751), 'numpy.linalg.cond', 'np.linalg.cond', (['A'], {'p': '(1)'}), '(A, p=1)\n', (29743, 29751), True, 'import numpy as np\n'), ((3807, 3817), 'numpy.abs', 'np.abs', (['a1'], {}), '(a1)\n', (3813, 3817), True, 'import numpy as np\n'), ((4074, 4084), 'numpy.abs', 'np.abs', (['a1'], {}), '(a1)\n', (4080, 4084), True, 'import numpy as np\n'), ((15706, 15724), 'numpy.power', 'np.power', (['m_vec', '(2)'], {}), '(m_vec, 2)\n', (15714, 15724), True, 'import numpy as np\n'), ((4178, 4188), 'numpy.abs', 'np.abs', (['a1'], {}), '(a1)\n', (4184, 4188), True, 'import numpy as np\n')]
|
import os
from os.path import expanduser
import altair as alt
import numpy as np
import pandas as pd
from scipy.stats.stats import pearsonr
import sqlite3
from util import to_day, to_month, to_year, to_local, allocate_ys, save_plot
from config import dummy_start_date, dummy_end_date, cutoff_date
# %matplotlib inline
plot_start_date = dummy_start_date
plot_end_date = dummy_end_date
if cutoff_date is not None:
plot_start_date = cutoff_date
day = np.timedelta64(1, 'D')
fiction_scale = alt.Scale(domain=[True, False])
def get_data(library_paths=[expanduser('~/books/non-fiction/')]):
db_path = library_paths[0] + 'metadata.db'
conn = sqlite3.connect(db_path)
custom_column_index = dict(pd.read_sql_query("""
SELECT label, id FROM custom_columns
""", conn).to_dict(orient='split')['data'])
def tbl(name):
return 'custom_column_' + str(custom_column_index[name])
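    # Illustration with a hypothetical id: if this Calibre library defines a custom
    # column labelled 'words' whose id happens to be 7, tbl('words') returns
    # 'custom_column_7', the table in which Calibre stores that column's values.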
df = pd.read_sql_query(f"""
SELECT
title,
author_sort AS author,
series.name AS series,
series_index,
pubdate,
timestamp,
last_modified,
languages.lang_code AS language,
{tbl('started')}.value AS start,
{tbl('finished')}.value AS end,
{tbl('words')}.value AS words,
{tbl('pages')}.value AS pages,
{tbl('fre')}.value AS fre,
{tbl('fkg')}.value AS fkg,
{tbl('gfi')}.value AS gfi,
({tbl('shelf')}.value = 'Fiction') AS is_fiction,
ifnull({tbl('read')}.value, 0) AS is_read
FROM books
LEFT OUTER JOIN books_series_link
ON books.id = books_series_link.book
LEFT OUTER JOIN series
ON books_series_link.series = series.id
JOIN books_languages_link
ON books.id = books_languages_link.book
JOIN languages
ON books_languages_link.lang_code = languages.id
LEFT OUTER JOIN {tbl('pages')}
ON {tbl('pages')}.book = books.id
LEFT OUTER JOIN {tbl('words')}
ON {tbl('words')}.book = books.id
LEFT OUTER JOIN {tbl('fre')}
ON {tbl('fre')}.book = books.id
LEFT OUTER JOIN {tbl('fkg')}
ON {tbl('fkg')}.book = books.id
LEFT OUTER JOIN {tbl('gfi')}
ON {tbl('gfi')}.book = books.id
JOIN books_{tbl('shelf')}_link
ON books_{tbl('shelf')}_link.book = books.id
JOIN {tbl('shelf')}
ON {tbl('shelf')}.id = books_{tbl('shelf')}_link.value
LEFT OUTER JOIN {tbl('started')}
ON {tbl('started')}.book = books.id
LEFT OUTER JOIN {tbl('finished')}
ON {tbl('finished')}.book = books.id
LEFT OUTER JOIN {tbl('read')} ON {tbl('read')}.book = books.id
WHERE
{tbl('shelf')}.value = 'Fiction'
OR {tbl('shelf')}.value = 'Nonfiction'
""", conn, parse_dates=['start', 'end', 'pubdate', 'timestamp',
'last_modified'])
# Books with no page count are either simply placeholders, not a
# proper part of the library, or have just been added. In both
# cases, it is OK to ignore them.
df = df.loc[df.pages.notna()]
# Fix data types
df.language = df.language.astype('category')
df.pages = df.pages.astype('int64')
# We cannot make df.words an int64 column, as some PDF files have
# no word count associated with them and int64 columns cannot
# contain NAs.
df.is_fiction = df.is_fiction.astype(bool)
df.is_read = df.is_read.astype(bool)
# Compute intermediate columns
df.pubdate = df.pubdate.map(to_local)
df = df.assign(words_per_page=df.words / df.pages,
words_per_day=df.words / ((df.end - df.start) / day))
def to_numeric(x):
return pd.to_numeric(x, errors='coerce', downcast='integer')
df = df.assign(finished_year=to_numeric(df.end.map(to_year)),
finished_month=to_numeric(df.end.map(to_month)),
finished_day=to_numeric(df.end.map(to_day)))
df = df.assign(pubyear=to_numeric(df.pubdate.map(to_year)),
pubmonth=to_numeric(df.pubdate.map(to_month)),
pubday=to_numeric(df.pubdate.map(to_day)))
df.sort_values('start', inplace=True)
return df
def plot_ranges(df, output='ranges.html'):
"""Print date ranges in which the books have been is_read, how many
books have been is_read at any given point in time and how many words
have been is_read per day.
"""
if cutoff_date is not None:
# df = df[(df.start >= cutoff_date) & (df.end >= cutoff_date)]
df = df[df.end.isna() | (df.end >= cutoff_date)]
        df.end = df.end.fillna(dummy_end_date)  # treat unfinished books as running until the dummy end date
df = df[df.start.notna()].assign(ys=-allocate_ys(df[df.start.notna()]))
bars = alt.Chart(df) \
.mark_bar(clip=True) \
.encode(
x=alt.X('start', axis=alt.Axis(labelAngle=45, title='Date')),
x2='end',
y=alt.Y('ys:N', axis=None),
color=alt.Color('is_fiction', scale=fiction_scale, legend=None),
tooltip='title'
)
bars.width = 1600
overlapped = alt.Chart(df[df.start.notna()]) \
.mark_bar(clip=True, opacity=0.1) \
.encode(
x=alt.X('start', axis=None),
x2='end',
y=alt.Y('is_fiction', axis=None),
color=alt.Color('is_fiction', scale=fiction_scale, legend=None)
)
overlapped.width = bars.width
baz = df[df.series.notna()]
if cutoff_date is not None:
baz = baz[baz.start.notna() & (baz.end.isna() |
(baz.end >= cutoff_date))]
else:
baz = baz[df.start.notna()]
by_series = alt.Chart(baz) \
.mark_bar(clip=True, opacity=0.7) \
.encode(
x=alt.X('start', axis=alt.Axis(labelAngle=45, title='Date')),
x2='end',
y=alt.Y('series', title='Series'),
tooltip='title'
)
by_series.width = bars.width
baz = df[df.author.notna()]
if cutoff_date is not None:
baz = baz[baz.start.notna() & (baz.end.isna() |
(baz.end >= cutoff_date))]
else:
baz = baz[df.start.notna()]
    baz = baz.assign(ys=-allocate_ys(baz[baz.start.notna()]))
by_author = alt.Chart(baz) \
.mark_bar(clip=True, opacity=0.7) \
.encode(
x=alt.X('start', axis=alt.Axis(labelAngle=45, title='Date')),
x2='end',
y=alt.Y('author', title='Author'),
color='series',
tooltip='title'
)
by_author.width = bars.width
save_plot(overlapped & bars & by_series, output)
save_plot(by_author, 'by_author.html')
def plot_yearly(df, y='count()', output='finished.html'):
    chart = alt.Chart(df[df.is_read & df.end.notna()]) \
.mark_bar() \
.encode(
x='finished_year:O',
y=y,
color=alt.Color('is_fiction', scale=fiction_scale),
)
save_plot(chart, output)
def number_of_books_per_author(df, output='books_per_author.html'):
df = df[df.is_read]
x = df.author.value_counts()
foo = pd.DataFrame(data={'author': x.index,
'count': x.values})
foo.sort_values('count', ascending=False, inplace=True)
chart = alt.Chart(foo) \
.mark_bar() \
.encode(y=alt.Y('author', sort=None), x='count')
save_plot(chart, output)
def plot_pubdate(df, output='pubdate.html'):
df = df[df.pubdate.notna()]
years = alt.Chart(df).mark_bar().encode(x='pubyear:O', y='count(year):N')
years_nonfiction = alt.Chart(df[~df.is_fiction]) \
.mark_bar(color='orange') \
.encode(x='pubyear:O', y='count(year):N')
months = alt.Chart(df).mark_bar().encode(x='pubmonth:O',
y='count(pubmonth):N')
days = alt.Chart(df).mark_bar().encode(x='pubday:O', y='count(pubday):N')
years.width = 965
save_plot((years + years_nonfiction) & (months | days), output)
def reading_ease(df):
df = df[df.fre.notna() & df.fkg.notna() & df.gfi.notna()]
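    # fre/fkg/gfi presumably hold per-book Flesch Reading Ease, Flesch-Kincaid Grade
    # and Gunning Fog Index scores (inferred from the column labels, not confirmed here).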
opacity = 0.2
color = alt.Color('is_fiction', scale=fiction_scale)
a = alt.Chart(df).mark_point(opacity=opacity) \
.encode(x='fre', y='fkg', color=color)
b = alt.Chart(df).mark_point(opacity=opacity) \
.encode(x='fre', y='gfi', color=color)
save_plot(a | b, 'reading_ease.html')
# blue_patch = mpatches.Patch(label='Fiction')
# orange_patch = mpatches.Patch(label='Nonfiction', color='orange')
#
# def plot_histogram(df):
#     "Plot a histogram of how many days I needed to read a book."
# fig = plt.figure(figsize=(8, 6), dpi=dpi)
# ax = fig.add_subplot(111)
#
# ax.hist([np.array(df[df.is_fiction].duration
# .map(lambda x: x.days).dropna(),
# dtype='float64'),
# np.array(df[~df.is_fiction].duration
# .map(lambda x: x.days).dropna(),
# dtype='float64')],
# histtype='barstacked',
# bins=list(range(-7, 1764, 14)))
#
# plt.title('Number of days spent reading a book')
# plt.legend(handles=[blue_patch, orange_patch])
# plt.xlabel("Number of days spent reading")
# plt.ylabel("Number of books")
#
# plt.savefig('histogram.png')
# return plt.show()
#
#
# def scatter_length_duration(df):
# fig = plt.figure(figsize=(8, 6), dpi=dpi)
# ax = fig.add_subplot(111)
# df = df[df.words > 0]
# fiction = df[df.is_fiction]
# nonfiction = df[~df.is_fiction]
#
# duration = np.array(fiction.duration.map(lambda x: x.days),
# dtype='float64')
# ax.scatter(fiction.words.values, duration)
#
# duration = np.array(nonfiction.duration.map(lambda x: x.days),
# dtype='float64')
# ax.scatter(nonfiction.words.values, duration)
#
# plt.title("Number of words vs. days of reading")
# plt.xlabel("Number of words")
# plt.ylabel("Days spent reading")
# plt.legend(handles=[blue_patch, orange_patch])
#
# plt.savefig('scatter.png')
# return plt.show()
#
#
# def scatter_words_vs_words_per_day(df):
# fig = plt.figure()
# ax = fig.gca()
# ax.set_xscale('log')
# ax.set_yscale('log')
# ax.set_xlabel('Words')
# ax.set_ylabel('Words per day')
# ax.plot(df.words, df.words_per_day, 'o')
os.makedirs('output', exist_ok=True)
df = get_data()
avg_words_per_page = df.words.sum() / df.pages[df.words.notna()].sum()
plot_ranges(df)
number_of_books_per_author(df)
plot_yearly(df, output='books_finished.html')
plot_yearly(df, y='sum(pages)', output='pages_finished.html')
plot_yearly(df, y='sum(words)', output='words_finished.html')
plot_pubdate(df)
values = ('words', 'pages')
table = df.pivot_table(values=values,
index=('is_read', 'is_fiction', 'language'),
aggfunc=np.sum).reset_index()
table = table.assign(combined=list(zip(table.is_fiction, table.is_read)))
chart = alt.Chart(table) \
.mark_bar() \
.encode(column='language',
x='is_read',
y='words',
color='language')
ease_df = df[df.fre.notna() & df.fkg.notna() & df.gfi.notna()]
cor_fre_fkg = pearsonr(ease_df.fre, ease_df.fkg)
cor_fre_gfi = pearsonr(ease_df.fre, ease_df.gfi)
cor_fkg_gfi = pearsonr(ease_df.fkg, ease_df.gfi)
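# Each pearsonr call returns a (correlation coefficient, two-sided p-value) pair,
# e.g. cor_fre_fkg[0] is the Pearson correlation between the FRE and FKG scores.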
reading_ease(df)
|
[
"pandas.DataFrame",
"os.makedirs",
"altair.Y",
"scipy.stats.stats.pearsonr",
"util.save_plot",
"altair.Chart",
"altair.Axis",
"altair.X",
"numpy.timedelta64",
"sqlite3.connect",
"pandas.read_sql_query",
"altair.Scale",
"os.path.expanduser",
"pandas.to_numeric",
"altair.Color"
] |
[((458, 480), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""D"""'], {}), "(1, 'D')\n", (472, 480), True, 'import numpy as np\n'), ((497, 528), 'altair.Scale', 'alt.Scale', ([], {'domain': '[True, False]'}), '(domain=[True, False])\n', (506, 528), True, 'import altair as alt\n'), ((10515, 10551), 'os.makedirs', 'os.makedirs', (['"""output"""'], {'exist_ok': '(True)'}), "('output', exist_ok=True)\n", (10526, 10551), False, 'import os\n'), ((11372, 11406), 'scipy.stats.stats.pearsonr', 'pearsonr', (['ease_df.fre', 'ease_df.fkg'], {}), '(ease_df.fre, ease_df.fkg)\n', (11380, 11406), False, 'from scipy.stats.stats import pearsonr\n'), ((11421, 11455), 'scipy.stats.stats.pearsonr', 'pearsonr', (['ease_df.fre', 'ease_df.gfi'], {}), '(ease_df.fre, ease_df.gfi)\n', (11429, 11455), False, 'from scipy.stats.stats import pearsonr\n'), ((11470, 11504), 'scipy.stats.stats.pearsonr', 'pearsonr', (['ease_df.fkg', 'ease_df.gfi'], {}), '(ease_df.fkg, ease_df.gfi)\n', (11478, 11504), False, 'from scipy.stats.stats import pearsonr\n'), ((655, 679), 'sqlite3.connect', 'sqlite3.connect', (['db_path'], {}), '(db_path)\n', (670, 679), False, 'import sqlite3\n'), ((6715, 6763), 'util.save_plot', 'save_plot', (['(overlapped & bars & by_series)', 'output'], {}), '(overlapped & bars & by_series, output)\n', (6724, 6763), False, 'from util import to_day, to_month, to_year, to_local, allocate_ys, save_plot\n'), ((6768, 6806), 'util.save_plot', 'save_plot', (['by_author', '"""by_author.html"""'], {}), "(by_author, 'by_author.html')\n", (6777, 6806), False, 'from util import to_day, to_month, to_year, to_local, allocate_ys, save_plot\n'), ((7083, 7107), 'util.save_plot', 'save_plot', (['chart', 'output'], {}), '(chart, output)\n', (7092, 7107), False, 'from util import to_day, to_month, to_year, to_local, allocate_ys, save_plot\n'), ((7245, 7302), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'author': x.index, 'count': x.values}"}), "(data={'author': x.index, 'count': x.values})\n", (7257, 7302), True, 'import pandas as pd\n'), ((7505, 7529), 'util.save_plot', 'save_plot', (['chart', 'output'], {}), '(chart, output)\n', (7514, 7529), False, 'from util import to_day, to_month, to_year, to_local, allocate_ys, save_plot\n'), ((8062, 8123), 'util.save_plot', 'save_plot', (['(years + years_nonfiction & (months | days))', 'output'], {}), '(years + years_nonfiction & (months | days), output)\n', (8071, 8123), False, 'from util import to_day, to_month, to_year, to_local, allocate_ys, save_plot\n'), ((8242, 8286), 'altair.Color', 'alt.Color', (['"""is_fiction"""'], {'scale': 'fiction_scale'}), "('is_fiction', scale=fiction_scale)\n", (8251, 8286), True, 'import altair as alt\n'), ((8491, 8528), 'util.save_plot', 'save_plot', (['(a | b)', '"""reading_ease.html"""'], {}), "(a | b, 'reading_ease.html')\n", (8500, 8528), False, 'from util import to_day, to_month, to_year, to_local, allocate_ys, save_plot\n'), ((559, 593), 'os.path.expanduser', 'expanduser', (['"""~/books/non-fiction/"""'], {}), "('~/books/non-fiction/')\n", (569, 593), False, 'from os.path import expanduser\n'), ((3857, 3910), 'pandas.to_numeric', 'pd.to_numeric', (['x'], {'errors': '"""coerce"""', 'downcast': '"""integer"""'}), "(x, errors='coerce', downcast='integer')\n", (3870, 3910), True, 'import pandas as pd\n'), ((5045, 5069), 'altair.Y', 'alt.Y', (['"""ys:N"""'], {'axis': 'None'}), "('ys:N', axis=None)\n", (5050, 5069), True, 'import altair as alt\n'), ((5089, 5146), 'altair.Color', 'alt.Color', (['"""is_fiction"""'], {'scale': 'fiction_scale', 
'legend': 'None'}), "('is_fiction', scale=fiction_scale, legend=None)\n", (5098, 5146), True, 'import altair as alt\n'), ((5335, 5360), 'altair.X', 'alt.X', (['"""start"""'], {'axis': 'None'}), "('start', axis=None)\n", (5340, 5360), True, 'import altair as alt\n'), ((5398, 5428), 'altair.Y', 'alt.Y', (['"""is_fiction"""'], {'axis': 'None'}), "('is_fiction', axis=None)\n", (5403, 5428), True, 'import altair as alt\n'), ((5448, 5505), 'altair.Color', 'alt.Color', (['"""is_fiction"""'], {'scale': 'fiction_scale', 'legend': 'None'}), "('is_fiction', scale=fiction_scale, legend=None)\n", (5457, 5505), True, 'import altair as alt\n'), ((5987, 6018), 'altair.Y', 'alt.Y', (['"""series"""'], {'title': '"""Series"""'}), "('series', title='Series')\n", (5992, 6018), True, 'import altair as alt\n'), ((6578, 6609), 'altair.Y', 'alt.Y', (['"""author"""'], {'title': '"""Author"""'}), "('author', title='Author')\n", (6583, 6609), True, 'import altair as alt\n'), ((7023, 7067), 'altair.Color', 'alt.Color', (['"""is_fiction"""'], {'scale': 'fiction_scale'}), "('is_fiction', scale=fiction_scale)\n", (7032, 7067), True, 'import altair as alt\n'), ((7462, 7488), 'altair.Y', 'alt.Y', (['"""author"""'], {'sort': 'None'}), "('author', sort=None)\n", (7467, 7488), True, 'import altair as alt\n'), ((11148, 11164), 'altair.Chart', 'alt.Chart', (['table'], {}), '(table)\n', (11157, 11164), True, 'import altair as alt\n'), ((712, 797), 'pandas.read_sql_query', 'pd.read_sql_query', (['"""\n SELECT label, id FROM custom_columns\n """', 'conn'], {}), '("""\n SELECT label, id FROM custom_columns\n """,\n conn)\n', (729, 797), True, 'import pandas as pd\n'), ((4871, 4884), 'altair.Chart', 'alt.Chart', (['df'], {}), '(df)\n', (4880, 4884), True, 'import altair as alt\n'), ((4969, 5006), 'altair.Axis', 'alt.Axis', ([], {'labelAngle': '(45)', 'title': '"""Date"""'}), "(labelAngle=45, title='Date')\n", (4977, 5006), True, 'import altair as alt\n'), ((5799, 5813), 'altair.Chart', 'alt.Chart', (['baz'], {}), '(baz)\n', (5808, 5813), True, 'import altair as alt\n'), ((5911, 5948), 'altair.Axis', 'alt.Axis', ([], {'labelAngle': '(45)', 'title': '"""Date"""'}), "(labelAngle=45, title='Date')\n", (5919, 5948), True, 'import altair as alt\n'), ((6390, 6404), 'altair.Chart', 'alt.Chart', (['baz'], {}), '(baz)\n', (6399, 6404), True, 'import altair as alt\n'), ((6502, 6539), 'altair.Axis', 'alt.Axis', ([], {'labelAngle': '(45)', 'title': '"""Date"""'}), "(labelAngle=45, title='Date')\n", (6510, 6539), True, 'import altair as alt\n'), ((6879, 6913), 'altair.Chart', 'alt.Chart', (['df[df.is_read & df.end]'], {}), '(df[df.is_read & df.end])\n', (6888, 6913), True, 'import altair as alt\n'), ((7405, 7419), 'altair.Chart', 'alt.Chart', (['foo'], {}), '(foo)\n', (7414, 7419), True, 'import altair as alt\n'), ((7622, 7635), 'altair.Chart', 'alt.Chart', (['df'], {}), '(df)\n', (7631, 7635), True, 'import altair as alt\n'), ((7711, 7740), 'altair.Chart', 'alt.Chart', (['df[~df.is_fiction]'], {}), '(df[~df.is_fiction])\n', (7720, 7740), True, 'import altair as alt\n'), ((7842, 7855), 'altair.Chart', 'alt.Chart', (['df'], {}), '(df)\n', (7851, 7855), True, 'import altair as alt\n'), ((7969, 7982), 'altair.Chart', 'alt.Chart', (['df'], {}), '(df)\n', (7978, 7982), True, 'import altair as alt\n'), ((8296, 8309), 'altair.Chart', 'alt.Chart', (['df'], {}), '(df)\n', (8305, 8309), True, 'import altair as alt\n'), ((8395, 8408), 'altair.Chart', 'alt.Chart', (['df'], {}), '(df)\n', (8404, 8408), True, 'import altair as alt\n')]
|
#!/usr/bin/env python
"""
Aiida interface for twinpy.
"""
import warnings
import numpy as np
from aiida.cmdline.utils.decorators import with_dbenv
from aiida.orm import (load_node,
Node,
QueryBuilder,
)
from aiida.plugins import WorkflowFactory
from aiida_twinpy.common.utils import get_create_node
from twinpy.interfaces.aiida.base import (check_process_class,
_WorkChain)
from twinpy.interfaces.aiida.vasp import (AiidaRelaxWorkChain)
from twinpy.interfaces.aiida.twinboundary \
import AiidaTwinBoudnaryRelaxWorkChain
@with_dbenv()
class AiidaTwinBoudnaryShearWorkChain(_WorkChain):
"""
TwinBoundaryShear work chain class.
"""
def __init__(
self,
node:Node,
):
"""
Args:
node: TwinBoundaryShearWorkChain node.
"""
process_class = 'TwinBoundaryShearWorkChain'
check_process_class(node, process_class)
super().__init__(node=node)
self._shear_strain_ratios = None
self._set_shear_strain_ratios()
self._shear_aiida_relaxes = None
self._set_shear_aiida_relaxes()
self._structure_pks = None
self._set_structure_pks()
self._aiida_twinboundary_relax = None
self._set_aiida_twinboundary_relax()
self._additional_relax_pks = None
self._set_additional_relax_pks()
self._twinboundary_analyzer = None
def _set_shear_strain_ratios(self):
"""
Set shear strain ratios.
"""
conf = self._node.inputs.twinboundary_shear_conf.get_dict()
self._shear_strain_ratios = conf['shear_strain_ratios']
@property
def shear_strain_ratios(self):
"""
Shear strain ratios.
"""
return self._shear_strain_ratios
def _set_structure_pks(self):
"""
Set structure pks.
"""
qb = QueryBuilder()
qb.append(Node, filters={'id':{'==': self._pk}}, tag='wf')
qb.append(
Node,
filters={'label': {'==': 'get_twinboundary_shear_structure'}},
project=['id'],
with_incoming='wf')
cf_pks = [ q[0] for q in qb.all() ]
shear_ratios = [ load_node(q[0]).inputs.shear_strain_ratio.value for q in qb.all() ]
orders = list(np.argsort(shear_ratios))
orig_pks = []
input_pks = []
for ix in orders:
cf = load_node(cf_pks[ix])
orig_pks.append(cf.outputs.twinboundary_shear_structure_orig.pk)
input_pks.append(cf.outputs.twinboundary_shear_structure.pk)
rlx_pks = []
for aiida_rlx, i_struct_pk in zip(self._shear_aiida_relaxes, input_pks):
pks = aiida_rlx.get_pks()
assert pks['initial_structure_pk'] == i_struct_pk, \
"Input structure does not match."
rlx_pks.append(pks['final_structure_pk'])
self._structure_pks = {
'original_structures': orig_pks,
'input_structures': input_pks,
'relax_structures': rlx_pks,
}
@property
def structure_pks(self):
"""
Structure pks.
"""
return self._structure_pks
def _set_aiida_twinboundary_relax(self):
"""
Set twinboundary relax pk.
"""
tb_rlx_wf = WorkflowFactory('twinpy.twinboundary_relax')
tb_rlx_struct_pk = self._node.inputs.twinboundary_relax_structure.pk
tb_rlx = get_create_node(tb_rlx_struct_pk, tb_rlx_wf)
self._aiida_twinboundary_relax \
= AiidaTwinBoudnaryRelaxWorkChain(tb_rlx)
def _set_shear_aiida_relaxes(self):
"""
Set list of AiidaRelaxWorkChain objects.
"""
rlx_wf = WorkflowFactory('vasp.relax')
qb = QueryBuilder()
qb.append(Node, filters={'id':{'==': self._pk}}, tag='wf')
qb.append(rlx_wf, with_incoming='wf', project=['id', 'label'])
qb_all = qb.all()
qb_all.sort(key=lambda qb_all: qb_all[1])
rlx_pks = [ q[0] for q in qb_all ]
self._shear_aiida_relaxes = [ AiidaRelaxWorkChain(load_node(pk))
for pk in rlx_pks ]
def _set_additional_relax_pks(self):
"""
Set additional relax pks.
"""
addi_struct_pks = [ self._node.inputs.__getattr__(key).pk
for key in dir(self._node.inputs)
if 'additional_relax__structure' in key ]
        rlx_wf = WorkflowFactory('vasp.relax')
        self._additional_relax_pks = \
                [ get_create_node(pk, rlx_wf).pk for pk in addi_struct_pks ]
@property
def shear_aiida_relaxes(self):
"""
List of AiidaRelaxWorkChain class objects.
"""
return self._shear_aiida_relaxes
def set_twinboundary_analyzer(self,
twinboundary_phonon_pk:int=None,
hexagonal_relax_pk:int=None,
hexagonal_phonon_pk:int=None,
):
"""
Set twinboundary analyzer.
Args:
            twinboundary_phonon_pk: Twinboundary phonon calculation pk.
hexagonal_relax_pk: Hexagonal relax calculation pk.
hexagonal_phonon_pk: Hexagonal phonon calculation pk.
"""
tb_rlx_pk = self._aiida_twinboundary_relax.pk
addi_rlx_pks = self._additional_relax_pks
aiida_tb = AiidaTwinBoudnaryRelaxWorkChain(load_node(tb_rlx_pk))
self._twinboundary_analyzer = aiida_tb.get_twinboundary_analyzer(
twinboundary_phonon_pk=twinboundary_phonon_pk,
additional_relax_pks=addi_rlx_pks,
hexagonal_relax_pk=hexagonal_relax_pk,
hexagonal_phonon_pk=hexagonal_phonon_pk,
)
@property
def twinboundary_analyzer(self):
"""
TwinBoundaryAnalyzer class object.
"""
return self._twinboundary_analyzer
def get_twinboundary_shear_analyzer(self,
shear_phonon_pks:list,
):
"""
Get twinboundary shear analyzer.
Args:
            shear_phonon_pks: List of phonon pks.
Raises:
RuntimeError: Property twinboundary_analyzer is not set.
Note:
            Length of the shear_phonon_pks list must be the same as that of the
            shear strain ratios. If there is no phonon result, please set None.
"""
if self._twinboundary_analyzer is None:
raise RuntimeError("Please set twinboundary_analyzer before.")
assert len(self._shear_strain_ratios) == len(shear_phonon_pks), \
"Length of shear_phonon_pks does not match with shear_strain_ratios."
tb_anal = self._twinboundary_analyzer
shr_rlx_pks = \
[ aiida_rlx.pk for aiida_rlx in self._shear_aiida_relaxes ]
ratios = self._shear_strain_ratios
if len(shr_rlx_pks) != len(ratios):
warnings.warn("Some RelaxWorkChain has not finished normally. "
+"They are ignored.")
tb_shear_analyzer = \
tb_anal.get_twinboundary_shear_analyzer_from_relax_pks(
shear_relax_pks=shr_rlx_pks,
shear_strain_ratios=ratios[:len(shr_rlx_pks)],
shear_phonon_pks=shear_phonon_pks[:len(shr_rlx_pks)],
)
return tb_shear_analyzer
def get_pks(self):
"""
Get workflow pks.
Returns:
dict: Workflow pks.
"""
wf_pks = {
'twinboundary_relax_pk': self._aiida_twinboundary_relax.pk,
'additional_relax_pks': self._additional_relax_pks,
'shear_aiida_relax_pks': [ shr_rlx.pk for shr_rlx
in self._shear_aiida_relaxes ],
}
return wf_pks
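# Illustrative usage sketch (the pk below is a placeholder and a configured AiiDA
# profile/database is assumed); it only exercises the public methods defined above:
#
#     aiida_shear = AiidaTwinBoudnaryShearWorkChain(load_node(12345))
#     aiida_shear.set_twinboundary_analyzer()
#     shear_analyzer = aiida_shear.get_twinboundary_shear_analyzer(
#         shear_phonon_pks=[None] * len(aiida_shear.shear_strain_ratios))
#     print(aiida_shear.get_pks())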
|
[
"aiida.orm.QueryBuilder",
"twinpy.interfaces.aiida.base.check_process_class",
"aiida.orm.load_node",
"aiida.plugins.WorkflowFactory",
"twinpy.interfaces.aiida.twinboundary.AiidaTwinBoudnaryRelaxWorkChain",
"numpy.argsort",
"aiida_twinpy.common.utils.get_create_node",
"warnings.warn",
"aiida.cmdline.utils.decorators.with_dbenv"
] |
[((646, 658), 'aiida.cmdline.utils.decorators.with_dbenv', 'with_dbenv', ([], {}), '()\n', (656, 658), False, 'from aiida.cmdline.utils.decorators import with_dbenv\n'), ((991, 1031), 'twinpy.interfaces.aiida.base.check_process_class', 'check_process_class', (['node', 'process_class'], {}), '(node, process_class)\n', (1010, 1031), False, 'from twinpy.interfaces.aiida.base import check_process_class, _WorkChain\n'), ((1989, 2003), 'aiida.orm.QueryBuilder', 'QueryBuilder', ([], {}), '()\n', (2001, 2003), False, 'from aiida.orm import load_node, Node, QueryBuilder\n'), ((3445, 3489), 'aiida.plugins.WorkflowFactory', 'WorkflowFactory', (['"""twinpy.twinboundary_relax"""'], {}), "('twinpy.twinboundary_relax')\n", (3460, 3489), False, 'from aiida.plugins import WorkflowFactory\n'), ((3584, 3628), 'aiida_twinpy.common.utils.get_create_node', 'get_create_node', (['tb_rlx_struct_pk', 'tb_rlx_wf'], {}), '(tb_rlx_struct_pk, tb_rlx_wf)\n', (3599, 3628), False, 'from aiida_twinpy.common.utils import get_create_node\n'), ((3688, 3727), 'twinpy.interfaces.aiida.twinboundary.AiidaTwinBoudnaryRelaxWorkChain', 'AiidaTwinBoudnaryRelaxWorkChain', (['tb_rlx'], {}), '(tb_rlx)\n', (3719, 3727), False, 'from twinpy.interfaces.aiida.twinboundary import AiidaTwinBoudnaryRelaxWorkChain\n'), ((3859, 3888), 'aiida.plugins.WorkflowFactory', 'WorkflowFactory', (['"""vasp.relax"""'], {}), "('vasp.relax')\n", (3874, 3888), False, 'from aiida.plugins import WorkflowFactory\n'), ((3902, 3916), 'aiida.orm.QueryBuilder', 'QueryBuilder', ([], {}), '()\n', (3914, 3916), False, 'from aiida.orm import load_node, Node, QueryBuilder\n'), ((2402, 2426), 'numpy.argsort', 'np.argsort', (['shear_ratios'], {}), '(shear_ratios)\n', (2412, 2426), True, 'import numpy as np\n'), ((2516, 2537), 'aiida.orm.load_node', 'load_node', (['cf_pks[ix]'], {}), '(cf_pks[ix])\n', (2525, 2537), False, 'from aiida.orm import load_node, Node, QueryBuilder\n'), ((5598, 5618), 'aiida.orm.load_node', 'load_node', (['tb_rlx_pk'], {}), '(tb_rlx_pk)\n', (5607, 5618), False, 'from aiida.orm import load_node, Node, QueryBuilder\n'), ((7158, 7248), 'warnings.warn', 'warnings.warn', (["('Some RelaxWorkChain has not finished normally. ' + 'They are ignored.')"], {}), "('Some RelaxWorkChain has not finished normally. ' +\n 'They are ignored.')\n", (7171, 7248), False, 'import warnings\n'), ((4232, 4245), 'aiida.orm.load_node', 'load_node', (['pk'], {}), '(pk)\n', (4241, 4245), False, 'from aiida.orm import load_node, Node, QueryBuilder\n'), ((4669, 4696), 'aiida_twinpy.common.utils.get_create_node', 'get_create_node', (['pk', 'rlx_wf'], {}), '(pk, rlx_wf)\n', (4684, 4696), False, 'from aiida_twinpy.common.utils import get_create_node\n'), ((2312, 2327), 'aiida.orm.load_node', 'load_node', (['q[0]'], {}), '(q[0])\n', (2321, 2327), False, 'from aiida.orm import load_node, Node, QueryBuilder\n')]
|
import gym
import pybullet as p
import numpy as np
from gym_delta_robot_trampoline.resources.delta_robot_trampoline import Omnid_Simulator
import matplotlib.pyplot as plt
import os
import pybullet_data
"""
Action space (1,3) : [theta_1_torque, theta_2_torque, theta_3_torque]
Observation space (1,18) : [3 joint positions, 3 joint velocities, 3 eef positions, 3 eef velocities,
                        3 ball positions, 3 ball velocities]
"""
FAIL_ALTITUDE = 0.20
BONUS_ALTITUDE_DIFF = 0.16
MAX_STEP_NUM = 800
class DeltaRobotTrampolineEnv(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self):
self.step_counter = 0
#TODO
# self.client = p.connect(p.DIRECT)
self.client = p.connect(p.GUI)
p.resetDebugVisualizerCamera(cameraDistance=1.5, cameraYaw=0, cameraPitch=-40, cameraTargetPosition=[0.05,-0.35,0.2])
self.action_space = gym.spaces.box.Box(
low=np.array([-100] * 3),
high=np.array([100] * 3))
self.observation_space = gym.spaces.box.Box(
low=np.array([-np.pi/4, -np.pi/4, -np.pi/4, -100, -100, -100, \
-5, -5, -5, -50, -50, -50, \
-20, -20, 0, -50, -50, -50]),
high=np.array([np.pi/2, np.pi/2, np.pi/2, 100, 100, 100, \
5, 5, 5, 50, 50, 50, \
20, 20, 20, 50, 50, 50]))
self.np_random, _ = gym.utils.seeding.np_random()
#enable visualization
#TODO
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING,1)
def reset(self):
p.resetSimulation()
# episode params
self.step_counter = 0
self.above_BONUS_ALTITUDE_DIFF = False
p.loadURDF(os.path.join(pybullet_data.getDataPath(), "plane.urdf")) #loads from the root pybullet library
p.setGravity(0,0,-10)
p.setRealTimeSimulation(0)
#set up the robot and the ball
self.omnid_simulator = Omnid_Simulator()
initialized = False
        self.omnid_simulator.attachBallToRobot() # we want the ball to land safely onto the robot.
while not initialized:
self.omnid_simulator.updateStates()
if self.omnid_simulator.ballonRobot():
self.omnid_simulator.detachBallFromRobot() #now we can let the ball move freely!
initialized = True
p.stepSimulation()
self.observation = self.omnid_simulator.updateStates().astype(np.float32)
return self.observation
def step(self, action):
self.omnid_simulator.applyJointTorque({"theta_1": action[0], \
"theta_2": action[1], \
"theta_3": action[2]})
p.stepSimulation()
self.step_counter += 1
self.observation = self.omnid_simulator.updateStates()
        # Falling below FAIL_ALTITUDE ends the episode with a penalty; rising more than
        # BONUS_ALTITUDE_DIFF above the end effector earns a one-time bonus.
        z = self.observation[14]
if z < FAIL_ALTITUDE:
reward = -25
done = True
else:
height_diff = z - self.observation[8]
if height_diff >= BONUS_ALTITUDE_DIFF:
done = False
if not self.above_BONUS_ALTITUDE_DIFF:
reward = 50
self.above_BONUS_ALTITUDE_DIFF = True
self.step_counter = 0
else:
reward = 0
else: #ball is above the platform but lower than the relative height threshold
if self.above_BONUS_ALTITUDE_DIFF:
self.above_BONUS_ALTITUDE_DIFF = False
reward = -0.1
done = False
if self.step_counter >= MAX_STEP_NUM:
done = True
info = {"eef position: ": self.observation[6:9], \
"ball position: ": self.observation[12:15]}
return self.observation.astype(np.float32), reward, done, info
def render(self, mode='human'):
""" Render is an interface function. Since we are using GUI, we do not need this.
We use GUI because computing view matrices and projection matrices is much slower. """
pass
def close(self):
p.disconnect(self.client)
def seed(self, seed=None):
self.np_random, seed = gym.utils.seeding.np_random(seed)
return [seed]
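# Illustrative usage sketch (assumes a working pybullet install and a display for the
# GUI connection): a minimal random-torque rollout exercising the action and
# observation spaces described in the docstring near the top of this file.
if __name__ == '__main__':
    env = DeltaRobotTrampolineEnv()
    observation = env.reset()                  # (18,) vector: joints, eef, ball
    episode_return, done = 0.0, False
    while not done:
        action = env.action_space.sample()     # 3 random joint torques in [-100, 100]
        observation, reward, done, info = env.step(action)
        episode_return += reward
    print('episode return:', episode_return,
          '| final ball position:', info['ball position: '])
    env.close()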
|
[
"pybullet.setRealTimeSimulation",
"pybullet.resetSimulation",
"pybullet.stepSimulation",
"pybullet.setGravity",
"pybullet.configureDebugVisualizer",
"gym_delta_robot_trampoline.resources.delta_robot_trampoline.Omnid_Simulator",
"pybullet.resetDebugVisualizerCamera",
"pybullet.disconnect",
"numpy.array",
"pybullet_data.getDataPath",
"pybullet.connect",
"gym.utils.seeding.np_random"
] |
[((716, 732), 'pybullet.connect', 'p.connect', (['p.GUI'], {}), '(p.GUI)\n', (725, 732), True, 'import pybullet as p\n'), ((741, 865), 'pybullet.resetDebugVisualizerCamera', 'p.resetDebugVisualizerCamera', ([], {'cameraDistance': '(1.5)', 'cameraYaw': '(0)', 'cameraPitch': '(-40)', 'cameraTargetPosition': '[0.05, -0.35, 0.2]'}), '(cameraDistance=1.5, cameraYaw=0, cameraPitch=-\n 40, cameraTargetPosition=[0.05, -0.35, 0.2])\n', (769, 865), True, 'import pybullet as p\n'), ((1427, 1456), 'gym.utils.seeding.np_random', 'gym.utils.seeding.np_random', ([], {}), '()\n', (1454, 1456), False, 'import gym\n'), ((1510, 1563), 'pybullet.configureDebugVisualizer', 'p.configureDebugVisualizer', (['p.COV_ENABLE_RENDERING', '(1)'], {}), '(p.COV_ENABLE_RENDERING, 1)\n', (1536, 1563), True, 'import pybullet as p\n'), ((1593, 1612), 'pybullet.resetSimulation', 'p.resetSimulation', ([], {}), '()\n', (1610, 1612), True, 'import pybullet as p\n'), ((1840, 1863), 'pybullet.setGravity', 'p.setGravity', (['(0)', '(0)', '(-10)'], {}), '(0, 0, -10)\n', (1852, 1863), True, 'import pybullet as p\n'), ((1870, 1896), 'pybullet.setRealTimeSimulation', 'p.setRealTimeSimulation', (['(0)'], {}), '(0)\n', (1893, 1896), True, 'import pybullet as p\n'), ((1968, 1985), 'gym_delta_robot_trampoline.resources.delta_robot_trampoline.Omnid_Simulator', 'Omnid_Simulator', ([], {}), '()\n', (1983, 1985), False, 'from gym_delta_robot_trampoline.resources.delta_robot_trampoline import Omnid_Simulator\n'), ((2771, 2789), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (2787, 2789), True, 'import pybullet as p\n'), ((4244, 4269), 'pybullet.disconnect', 'p.disconnect', (['self.client'], {}), '(self.client)\n', (4256, 4269), True, 'import pybullet as p\n'), ((4333, 4366), 'gym.utils.seeding.np_random', 'gym.utils.seeding.np_random', (['seed'], {}), '(seed)\n', (4360, 4366), False, 'import gym\n'), ((2388, 2406), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (2404, 2406), True, 'import pybullet as p\n'), ((924, 944), 'numpy.array', 'np.array', (['([-100] * 3)'], {}), '([-100] * 3)\n', (932, 944), True, 'import numpy as np\n'), ((963, 982), 'numpy.array', 'np.array', (['([100] * 3)'], {}), '([100] * 3)\n', (971, 982), True, 'import numpy as np\n'), ((1054, 1177), 'numpy.array', 'np.array', (['[-np.pi / 4, -np.pi / 4, -np.pi / 4, -100, -100, -100, -5, -5, -5, -50, -50,\n -50, -20, -20, 0, -50, -50, -50]'], {}), '([-np.pi / 4, -np.pi / 4, -np.pi / 4, -100, -100, -100, -5, -5, -5,\n -50, -50, -50, -20, -20, 0, -50, -50, -50])\n', (1062, 1177), True, 'import numpy as np\n'), ((1242, 1350), 'numpy.array', 'np.array', (['[np.pi / 2, np.pi / 2, np.pi / 2, 100, 100, 100, 5, 5, 5, 50, 50, 50, 20, \n 20, 20, 50, 50, 50]'], {}), '([np.pi / 2, np.pi / 2, np.pi / 2, 100, 100, 100, 5, 5, 5, 50, 50, \n 50, 20, 20, 20, 50, 50, 50])\n', (1250, 1350), True, 'import numpy as np\n'), ((1749, 1776), 'pybullet_data.getDataPath', 'pybullet_data.getDataPath', ([], {}), '()\n', (1774, 1776), False, 'import pybullet_data\n')]
|
""" Default Runner & Worker components
Local Runner
Memmap Interface (numpy)
Template Preprocessor
JSON Postprocessor
NumpytxtPostprocessor
HDF5Postprocessor
"""
from .runner import Runner, RunnerInterface
from .worker import Interface, Preprocessor, Postprocessor, Worker
import subprocess
from multiprocessing import Process
from time import sleep
import logging
import numpy as np
import os
from shutil import rmtree
# === Local Runner === #
@Runner.register('local')
class LocalRunner(Runner):
""" Runner for executing simulations locally
- forks the worker, thereby having less overhead (especially with a custom python Worker)
- per default uses all available CPUs
"""
def spawn_run(self, params=None, wait=False):
super().spawn_run(params, wait)
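        # either spawn the worker as a separate subprocess (custom command or forking disabled) or fork it via multiprocessing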
if self.run_config['custom'] or not self.config['fork']:
env = self.env.copy()
env['PROFIT_RUN_ID'] = str(self.next_run_id)
if self.run_config['custom']:
cmd = self.run_config['command']
else:
cmd = 'profit-worker'
self.runs[self.next_run_id] = subprocess.Popen(cmd, shell=True, env=env, cwd=self.base_config['run_dir'])
if wait:
self.runs[self.next_run_id].wait()
del self.runs[self.next_run_id]
else:
def work():
worker = Worker.from_config(self.run_config, self.next_run_id)
worker.main()
os.chdir(self.base_config['run_dir'])
process = Process(target=work)
self.runs[self.next_run_id] = process
process.start()
if wait:
process.join()
del self.runs[self.next_run_id]
os.chdir(self.base_config['base_dir'])
self.next_run_id += 1
def spawn_array(self, params_array, blocking=True):
""" spawn an array of runs, maximum 'parallel' at the same time, blocking until all are done """
if not blocking:
raise NotImplementedError
for params in params_array:
self.spawn_run(params)
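            # throttle: wait while the maximum number of parallel runs is active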
while len(self.runs) >= self.config['parallel']:
sleep(self.config['sleep'])
self.check_runs(poll=True)
while len(self.runs):
sleep(self.config['sleep'])
self.check_runs(poll=True)
def check_runs(self, poll=False):
""" check the status of runs via the interface """
self.interface.poll()
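        # subprocess runs are checked via wait()/poll(), forked Process runs via join()/exitcode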
if self.run_config['custom'] or not self.config['fork']:
for run_id, process in list(self.runs.items()): # preserve state before deletions
if self.interface.internal['DONE'][run_id]:
process.wait() # just to make sure
del self.runs[run_id]
elif poll and process.poll() is not None:
del self.runs[run_id]
else:
for run_id, process in list(self.runs.items()): # preserve state before deletions
if self.interface.internal['DONE'][run_id]:
process.join() # just to make sure
del self.runs[run_id]
elif poll and process.exitcode is not None:
process.terminate()
del self.runs[run_id]
def cancel_all(self):
if self.run_config['custom'] or not self.config['fork']:
for process in self.runs.values():
process.terminate()
else:
for process in self.runs.values():
process.terminate()
self.runs = {}
# === Numpy Memmap Interface === #
@RunnerInterface.register('memmap')
class MemmapRunnerInterface(RunnerInterface):
""" Runner-Worker Interface using a memory mapped numpy array
- expected to be very fast with the *local* Runner as each Worker can access the array directly (unverified)
- expected to be inefficient if used on a cluster with a shared filesystem (unverified)
- reliable
- known issue: resizing the array (to add more runs) is dangerous, needs a workaround
(e.g. several arrays in the same file)
"""
def __init__(self, config, size, input_config, output_config, *, logger_parent: logging.Logger = None):
super().__init__(config, size, input_config, output_config, logger_parent=logger_parent)
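        # allocate a structured array holding all input, internal and output variables, save it as .npy and memory-map it read-write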
init_data = np.zeros(size, dtype=self.input_vars + self.internal_vars + self.output_vars)
np.save(self.config['path'], init_data)
try:
self._memmap = np.load(self.config['path'], mmap_mode='r+')
except FileNotFoundError:
self.runner.logger.error(
f'{self.__class__.__name__} could not load {self.config["path"]} (cwd: {os.getcwd()})')
raise
# should return views on memmap
self.input = self._memmap[[v[0] for v in self.input_vars]]
self.output = self._memmap[[v[0] for v in self.output_vars]]
self.internal = self._memmap[[v[0] for v in self.internal_vars]]
def resize(self, size):
""" Resizing Memmap Runner Interfac
Attention: this is dangerous and may lead to unexpected errors!
The problem is that the memory mapped file is overwritten.
Any Workers which have this file mapped will run into severe problems.
Possible future workarounds: multiple files or multiple headers in one file.
"""
if size <= self.size:
self.logger.warning('shrinking RunnerInterface is not supported')
return
self.logger.warning('resizing MemmapRunnerInterface is dangerous')
self.clean()
init_data = np.zeros(size, dtype=self.input_vars + self.internal_vars + self.output_vars)
np.save(self.config['path'], init_data)
try:
self._memmap = np.load(self.config['path'], mmap_mode='r+')
except FileNotFoundError:
self.runner.logger.error(
f'{self.__class__.__name__} could not load {self.config["path"]} (cwd: {os.getcwd()})')
raise
self.input = self._memmap[[v[0] for v in self.input_vars]]
self.output = self._memmap[[v[0] for v in self.output_vars]]
self.internal = self._memmap[[v[0] for v in self.internal_vars]]
def clean(self):
if os.path.exists(self.config['path']):
            os.remove(self.config['path'])
@Interface.register('memmap')
class MemmapInterface(Interface):
""" Runner-Worker Interface using a memory mapped numpy array
counterpart to :py:class:`MemmapRunnerInterface`
"""
def __init__(self, config, run_id: int, *, logger_parent: logging.Logger = None):
super().__init__(config, run_id, logger_parent=logger_parent)
# ToDo: multiple arrays after another to allow extending the file dynamically
try:
self._memmap = np.load(self.config['path'], mmap_mode='r+')
except FileNotFoundError:
self.worker.logger.error(
f'{self.__class__.__name__} could not load {self.config["path"]} (cwd: {os.getcwd()})')
raise
# should return views on memmap
inputs, outputs = [], []
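        # the memmap's fields are ordered: input variables first, then the internal fields (starting at 'DONE'), then outputs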
k = 0
for k, key in enumerate(self._memmap.dtype.names):
if key == 'DONE':
break
inputs.append(key)
for key in self._memmap.dtype.names[k:]:
if key not in ['DONE', 'TIME']:
outputs.append(key)
self.input = self._memmap[inputs][run_id]
self.output = self._memmap[outputs][run_id]
self._data = self._memmap[run_id]
def done(self):
self._memmap['TIME'] = self.time
self._memmap['DONE'] = True
self._memmap.flush()
def clean(self):
if os.path.exists(self.config['path']):
os.remove(self.config['path'])
# === Template Preprocessor === #
@Preprocessor.register('template')
class TemplatePreprocessor(Preprocessor):
""" Preprocessor which substitutes the variables with a given template
- copies the given template directory to the target run directory
- searches all files for variables templates of the form {name} and replaces them with their values
- for file formats which use curly braces (e.g. json) the template identifier is {{name}}
- substitution can be restricted to certain files by specifying `param_files`
- relative symbolic links are converted to absolute symbolic links on copying
- linked files are ignored with `param_files: all`, but if specified explicitly the link target is copied to the run
directory and then substituted
"""
def pre(self, data, run_dir):
# No call to super()! replaces the default preprocessing
from profit.pre import fill_run_dir_single
if os.path.exists(run_dir):
rmtree(run_dir)
fill_run_dir_single(data, self.config['path'], run_dir, ignore_path_exists=True,
param_files=self.config['param_files'])
os.chdir(run_dir)
# === JSON Postprocessor === #
@Postprocessor.register('json')
class JSONPostprocessor(Postprocessor):
""" Postprocessor to read output from a JSON file
- variables are assumed to be stored with the correct key and able to be converted immediately
- not extensively tested
"""
def post(self, data):
import json
with open(self.config['path']) as f:
output = json.load(f)
for key, value in output.items():
data[key] = value
# === Numpy Text Postprocessor === #
@Postprocessor.register('numpytxt')
class NumpytxtPostprocessor(Postprocessor):
""" Postprocessor to read output from a tabular text file (e.g. csv, tsv) with numpy ``genfromtxt``
- the data is assumed to be row oriented
- vector variables are spread across the row and have to be in the right order, only the name of the variable should
be specified once in ``names``
- ``names`` which are not specified as output variables are ignored
    - additional options are passed directly to ``numpy.genfromtxt()``
"""
def post(self, data):
dtype = [(name, float, data.dtype[name].shape if name in data.dtype.names else ())
for name in self.config['names']]
try:
raw = np.genfromtxt(self.config['path'], dtype=dtype, **self.config['options'])
except OSError:
self.logger.error(f'output file {self.config["path"]} not found')
self.logger.info(f'cwd = {os.getcwd()}')
dirname = os.path.dirname(self.config['path']) or '.'
self.logger.info(f'ls {dirname} = {os.listdir(dirname)}')
raise
for key in self.config['names']:
if key in data.dtype.names:
data[key] = raw[key]
# === HDF5 Postprocessor === #
@Postprocessor.register('hdf5')
class HDF5Postprocessor(Postprocessor):
""" Postprocessor to read output from a HDF5 file
- variables are assumed to be stored with the correct key and able to be converted immediately
- not extensively tested
"""
def post(self, data):
import h5py
with h5py.File(self.config['path'], 'r') as f:
for key in f.keys():
data[key] = f[key]
|
[
"os.listdir",
"subprocess.Popen",
"numpy.save",
"numpy.load",
"os.remove",
"json.load",
"h5py.File",
"profit.pre.fill_run_dir_single",
"os.getcwd",
"os.path.dirname",
"numpy.zeros",
"os.path.exists",
"numpy.genfromtxt",
"time.sleep",
"shutil.rmtree",
"multiprocessing.Process",
"os.chdir"
] |
[((4409, 4486), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': '(self.input_vars + self.internal_vars + self.output_vars)'}), '(size, dtype=self.input_vars + self.internal_vars + self.output_vars)\n', (4417, 4486), True, 'import numpy as np\n'), ((4495, 4534), 'numpy.save', 'np.save', (["self.config['path']", 'init_data'], {}), "(self.config['path'], init_data)\n", (4502, 4534), True, 'import numpy as np\n'), ((5718, 5795), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': '(self.input_vars + self.internal_vars + self.output_vars)'}), '(size, dtype=self.input_vars + self.internal_vars + self.output_vars)\n', (5726, 5795), True, 'import numpy as np\n'), ((5804, 5843), 'numpy.save', 'np.save', (["self.config['path']", 'init_data'], {}), "(self.config['path'], init_data)\n", (5811, 5843), True, 'import numpy as np\n'), ((6367, 6402), 'os.path.exists', 'os.path.exists', (["self.config['path']"], {}), "(self.config['path'])\n", (6381, 6402), False, 'import os\n'), ((7826, 7861), 'os.path.exists', 'os.path.exists', (["self.config['path']"], {}), "(self.config['path'])\n", (7840, 7861), False, 'import os\n'), ((8856, 8879), 'os.path.exists', 'os.path.exists', (['run_dir'], {}), '(run_dir)\n', (8870, 8879), False, 'import os\n'), ((8917, 9042), 'profit.pre.fill_run_dir_single', 'fill_run_dir_single', (['data', "self.config['path']", 'run_dir'], {'ignore_path_exists': '(True)', 'param_files': "self.config['param_files']"}), "(data, self.config['path'], run_dir, ignore_path_exists=\n True, param_files=self.config['param_files'])\n", (8936, 9042), False, 'from profit.pre import fill_run_dir_single\n'), ((9074, 9091), 'os.chdir', 'os.chdir', (['run_dir'], {}), '(run_dir)\n', (9082, 9091), False, 'import os\n'), ((1134, 1209), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'shell': '(True)', 'env': 'env', 'cwd': "self.base_config['run_dir']"}), "(cmd, shell=True, env=env, cwd=self.base_config['run_dir'])\n", (1150, 1209), False, 'import subprocess\n'), ((1489, 1526), 'os.chdir', 'os.chdir', (["self.base_config['run_dir']"], {}), "(self.base_config['run_dir'])\n", (1497, 1526), False, 'import os\n'), ((1549, 1569), 'multiprocessing.Process', 'Process', ([], {'target': 'work'}), '(target=work)\n', (1556, 1569), False, 'from multiprocessing import Process\n'), ((1760, 1798), 'os.chdir', 'os.chdir', (["self.base_config['base_dir']"], {}), "(self.base_config['base_dir'])\n", (1768, 1798), False, 'import os\n'), ((2316, 2343), 'time.sleep', 'sleep', (["self.config['sleep']"], {}), "(self.config['sleep'])\n", (2321, 2343), False, 'from time import sleep\n'), ((4576, 4620), 'numpy.load', 'np.load', (["self.config['path']"], {'mmap_mode': '"""r+"""'}), "(self.config['path'], mmap_mode='r+')\n", (4583, 4620), True, 'import numpy as np\n'), ((5885, 5929), 'numpy.load', 'np.load', (["self.config['path']"], {'mmap_mode': '"""r+"""'}), "(self.config['path'], mmap_mode='r+')\n", (5892, 5929), True, 'import numpy as np\n'), ((6416, 6446), 'os.remove', 'os.remove', (["self.config['path']"], {}), "(self.config['path'])\n", (6425, 6446), False, 'import os\n'), ((6924, 6968), 'numpy.load', 'np.load', (["self.config['path']"], {'mmap_mode': '"""r+"""'}), "(self.config['path'], mmap_mode='r+')\n", (6931, 6968), True, 'import numpy as np\n'), ((7875, 7905), 'os.remove', 'os.remove', (["self.config['path']"], {}), "(self.config['path'])\n", (7884, 7905), False, 'import os\n'), ((8893, 8908), 'shutil.rmtree', 'rmtree', (['run_dir'], {}), '(run_dir)\n', (8899, 8908), False, 'from shutil import rmtree\n'), ((9502, 9514), 
'json.load', 'json.load', (['f'], {}), '(f)\n', (9511, 9514), False, 'import json\n'), ((10365, 10438), 'numpy.genfromtxt', 'np.genfromtxt', (["self.config['path']"], {'dtype': 'dtype'}), "(self.config['path'], dtype=dtype, **self.config['options'])\n", (10378, 10438), True, 'import numpy as np\n'), ((11231, 11266), 'h5py.File', 'h5py.File', (["self.config['path']", '"""r"""'], {}), "(self.config['path'], 'r')\n", (11240, 11266), False, 'import h5py\n'), ((2203, 2230), 'time.sleep', 'sleep', (["self.config['sleep']"], {}), "(self.config['sleep'])\n", (2208, 2230), False, 'from time import sleep\n'), ((10616, 10652), 'os.path.dirname', 'os.path.dirname', (["self.config['path']"], {}), "(self.config['path'])\n", (10631, 10652), False, 'import os\n'), ((4781, 4792), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4790, 4792), False, 'import os\n'), ((6090, 6101), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6099, 6101), False, 'import os\n'), ((7129, 7140), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7138, 7140), False, 'import os\n'), ((10579, 10590), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (10588, 10590), False, 'import os\n'), ((10707, 10726), 'os.listdir', 'os.listdir', (['dirname'], {}), '(dirname)\n', (10717, 10726), False, 'import os\n')]
|
import numpy as np
import tensorflow as tf
from tools.tf_tools import binary_entropy, repeat_axis
class EntropyTest(tf.test.TestCase):
def test_binary_entropy_logits(self):
H1 = binary_entropy(logits=[0., 0.]) # i.e. sigmoid(logits) = 0.5
H0 = binary_entropy(logits=[100., -100.])
with self.test_session():
self.assertAllEqual(H1.eval(), [1., 1.])
self.assertAllClose(H0.eval(), [0., 0.])
def test_binary_entropy_probs(self):
H1 = binary_entropy(probs=tf.constant([0.5, 0.5]))
H0 = binary_entropy(probs=tf.constant([0., 1.]))
with self.test_session():
self.assertAllEqual(H1.eval(), [1., 1.])
self.assertAllEqual(H0.eval(), [0., 0.])
class RepeatsTest(tf.test.TestCase):
def test_repeat_axis(self):
x = np.random.rand(10, 10)
x1 = np.repeat(x, repeats=5, axis=1)
x2 = repeat_axis(tf.constant(x), axis=1, repeats=5)
with self.test_session():
self.assertAllEqual(x1, x2.eval())
if __name__ == '__main__':
tf.test.main()
|
[
"tensorflow.test.main",
"numpy.random.rand",
"tensorflow.constant",
"tools.tf_tools.binary_entropy",
"numpy.repeat"
] |
[((1072, 1086), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (1084, 1086), True, 'import tensorflow as tf\n'), ((193, 226), 'tools.tf_tools.binary_entropy', 'binary_entropy', ([], {'logits': '[0.0, 0.0]'}), '(logits=[0.0, 0.0])\n', (207, 226), False, 'from tools.tf_tools import binary_entropy, repeat_axis\n'), ((268, 306), 'tools.tf_tools.binary_entropy', 'binary_entropy', ([], {'logits': '[100.0, -100.0]'}), '(logits=[100.0, -100.0])\n', (282, 306), False, 'from tools.tf_tools import binary_entropy, repeat_axis\n'), ((828, 850), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)'], {}), '(10, 10)\n', (842, 850), True, 'import numpy as np\n'), ((865, 896), 'numpy.repeat', 'np.repeat', (['x'], {'repeats': '(5)', 'axis': '(1)'}), '(x, repeats=5, axis=1)\n', (874, 896), True, 'import numpy as np\n'), ((922, 936), 'tensorflow.constant', 'tf.constant', (['x'], {}), '(x)\n', (933, 936), True, 'import tensorflow as tf\n'), ((522, 545), 'tensorflow.constant', 'tf.constant', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (533, 545), True, 'import tensorflow as tf\n'), ((581, 604), 'tensorflow.constant', 'tf.constant', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (592, 604), True, 'import tensorflow as tf\n')]
|
# -*- coding: utf-8 -*-
"""fileio module."""
import pandas as pd # create_conf_file
import csv # write_conf_header, create_conf_file
import json # create_conf_file
import os # read_c3d_file
import btk # C3D class
import bmch # C3D class
import numpy as np # C3D class
def write_conf_header(metadata_path):
"""Create and write header in the csv configuration files.
:param metadata_path: path to the metadata folder
:type metadata_path: str
Example::
result = write_conf_header('/home/romain/Downloads/irsst/metadata/')
"""
files = ['emg', 'markers', 'force', 'participants', 'trials']
headers = {
'emg': ['labels', 'publication_name'],
'markers': ['labels'],
'force': ['labels'],
'participants': ['pseudo', 'process', 'laterality', 'group', 'mass', 'height', 'date'],
'trials': ['folder', 'emg', 'markers', 'force']
}
for ifile in files:
with open('{}{}.csv'.format(metadata_path, ifile), 'w') as out:
writer = csv.DictWriter(out, fieldnames=headers[ifile])
writer.writeheader()
def create_conf_file(metadata_path):
"""Create a json conf file based on the csv conf files.
:param metadata_path: path to the metadata folder
:type metadata_path: str
Example::
        result = create_conf_file('/home/romain/Downloads/irsst/metadata/')
"""
files = ['emg', 'markers', 'force', 'participants', 'trials']
# read each csv files into dict
csv_dict = {ifile: pd.read_csv('{}{}.csv'.format(metadata_path, ifile)) for ifile in files}
# merge dicts into json files
json_file = {key: json.loads(csv_dict[key].to_json()) for key in csv_dict}
# export json file
json_path = '{}config.json'.format(metadata_path)
with open(json_path, 'w') as json_data:
json_data.write(json.dumps(json_file, indent=4))
def load_conf_file(metadata_path):
"""Load the json configuration file create with the function `create_conf_file`.
:param metadata_path: path to the metadata folder
:type metadata_path: str
Example::
result = load_conf_file('/home/romain/Downloads/irsst/metadata/')
"""
json_path = '{}config.json'.format(metadata_path)
with open(json_path, 'r') as json_data:
return json.load(json_data)
def save_conf_file(metadata_path, json_file):
json_path = '{}config.json'.format(metadata_path)
with open(json_path, 'w') as json_data:
json_data.write(json.dumps(json_file, indent=4))
class C3D:
"""C3D class read c3d files and return data.
    :param data_folders: dict with path to the data folder(s) as key and type (*markers and/or force and/or emg*) as value
:type data_folders: dict
Example::
data_folders = {'/home/romain/Downloads/irsst/inputs/DapO/mvc/': ['emg'],
'/home/romain/Downloads/irsst/inputs/DapO/score/': ['markers']}
        c3d = C3D(data_folders, conf_file)
c3d.read_data()
"""
def __init__(self, data_folders, conf_file):
"""Constructor for C3D"""
print('import c3d files from:')
self.folders = data_folders
self.conf_file = conf_file
self.assign = []
def read_data(self):
# todo complete return docstring
"""Read data from `self.folders`
:return
"""
for ifolder, kind in self.folders.items():
print('\t{}'.format(ifolder))
c3d_files = [f for f in os.listdir(ifolder) if f.endswith('.c3d')]
for ifile in c3d_files:
print('\t\t{}'.format(ifile))
file = os.path.join(ifolder, ifile)
metadata, markers, analogs = self._open_file(file, kind)
                # TODO: save the channel assignment for this file
def _open_file(self, file, kind):
"""Open c3d acquisition (*private function*).
:param file: path to the c3d file
:type file: str
        :param kind: type (*markers and/or force and/or emg*)
:type kind: list
"""
reader = btk.btkAcquisitionFileReader()
reader.SetFilename(file)
reader.Update()
acq = reader.GetOutput()
metadata = {'first_frame': acq.GetFirstFrame(), 'last_frame': acq.GetLastFrame()}
data = {}
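        # markers are sampled at the point rate; force and emg are analog channels sampled at the analog rate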
for i in ['markers', 'force', 'emg']:
if i in kind:
                if i == 'markers':
metadata.update({'point_rate': acq.GetPointFrequency(), 'point_used': acq.GetPointNumber()})
data_temp = self._iterate(acq=acq, kind='markers')
n = metadata['last_frame']
else:
metadata.update({'analog_rate': acq.GetAnalogFrequency(), 'analog_used': acq.GetAnalogNumber()})
data_temp = self._iterate(acq=acq, kind='analogs')
n = (metadata['last_frame'] * metadata['analog_rate']) / acq.GetPointFrequency()
data[i] = self._attribute_channels(data_temp, kind=i, frames=n)
else:
data[i] = None
def _attribute_channels(self, data_temp, kind, frames):
fields = list(data_temp.keys())
targets = list(self.conf_file[kind]['labels'].values())
# TODELETE:
# targets[-1] = 'Voltage.1'
# gui = bmch.util.GuiC3D(targets, fields)
gui = ['Delt_ant.EMG1',
'Delt_med.EMG2',
'Delt_post.EMG3',
'Biceps.EMG4',
'Triceps.EMG5',
'Trap_sup.EMG6',
'Pec.IM EMG12',
'Supra.EMG9',
'Infra.EMG10']
output = np.zeros((int(frames), len(targets)))
for i, iassign in enumerate(gui):
output[:, i] = np.squeeze(data_temp[iassign])
itarget = 'Delt_ant.EMG1'
# check if all target are in fields
# check if all previous assign are in fields
# GUI
gui = bmch.util.GuiC3D(targets, fields)
self.assign.append(gui.assign)
# save assign
return output
@staticmethod
def _iterate(acq, kind='markers'):
"""Iterate through a btkCollection object (*private function*) and return data as dict.
:param acq: btkAcquisition object
:type acq: btk.btkAcquisition
:param kind: type of the data (*markers or analogs*)
:type kind: str
"""
out = {}
if kind == 'markers':
iterator = btk.Iterate(acq.GetPoints())
elif kind == 'analogs':
iterator = btk.Iterate(acq.GetAnalogs())
else:
iterator = []
for it in iterator:
data_temp = it.GetValues()
if data_temp.any():
out.update({it.GetLabel(): data_temp})
return out
|
[
"json.load",
"json.dumps",
"bmch.util.GuiC3D",
"btk.btkAcquisitionFileReader",
"numpy.squeeze",
"os.path.join",
"os.listdir",
"csv.DictWriter"
] |
[((2284, 2304), 'json.load', 'json.load', (['json_data'], {}), '(json_data)\n', (2293, 2304), False, 'import json\n'), ((4011, 4041), 'btk.btkAcquisitionFileReader', 'btk.btkAcquisitionFileReader', ([], {}), '()\n', (4039, 4041), False, 'import btk\n'), ((5891, 5924), 'bmch.util.GuiC3D', 'bmch.util.GuiC3D', (['targets', 'fields'], {}), '(targets, fields)\n', (5907, 5924), False, 'import bmch\n'), ((1022, 1068), 'csv.DictWriter', 'csv.DictWriter', (['out'], {'fieldnames': 'headers[ifile]'}), '(out, fieldnames=headers[ifile])\n', (1036, 1068), False, 'import csv\n'), ((1839, 1870), 'json.dumps', 'json.dumps', (['json_file'], {'indent': '(4)'}), '(json_file, indent=4)\n', (1849, 1870), False, 'import json\n'), ((2475, 2506), 'json.dumps', 'json.dumps', (['json_file'], {'indent': '(4)'}), '(json_file, indent=4)\n', (2485, 2506), False, 'import json\n'), ((5697, 5727), 'numpy.squeeze', 'np.squeeze', (['data_temp[iassign]'], {}), '(data_temp[iassign])\n', (5707, 5727), True, 'import numpy as np\n'), ((3615, 3643), 'os.path.join', 'os.path.join', (['ifolder', 'ifile'], {}), '(ifolder, ifile)\n', (3627, 3643), False, 'import os\n'), ((3467, 3486), 'os.listdir', 'os.listdir', (['ifolder'], {}), '(ifolder)\n', (3477, 3486), False, 'import os\n')]
|
# #################################################################
# Python code of PENN for caching
# Codes have been tested successfully on Python 3.6.0 with TensorFlow 1.14.0.
# #################################################################
import scipy.io as sio
import numpy as np
import runner
import math
import sys
K = 10 # number of files
num_H = 1000 # number of training samples, 10000 for K=10 and 20, 15000 for K=30
num_val = math.ceil(0.1*num_H) # number of validation samples
training_epochs = 3000 # number of training epochs
N_mont = 10 # number of Monte Carlo simulations
LR = 0.01 # initial learning rate
batch_size = min(num_H, 1000) # batch size
# load data
Xtrain = sio.loadmat('../Data/Sup_WFpol_Nf'+str(K)+'.mat')['X_train']
Ytrain = sio.loadmat('../Data/Sup_WFpol_Nf'+str(K)+'.mat')['pol_tr']
X = sio.loadmat('../Data/Sup_WFpol_Nf'+str(K)+'.mat')['X_test']
Y = sio.loadmat('../Data/Sup_WFpol_Nf'+str(K)+'.mat')['pol_te']
pf_test = sio.loadmat('../Data/Sup_WFpol_Nf'+str(K)+'.mat')['pf_test']
num_tr = Xtrain.shape[2]
num_te = X.shape[2]
d_past= Xtrain.shape[1]
layernum = [d_past*K, 10*K,K] # layer size
Xtrain = np.reshape(Xtrain,(d_past*K,num_tr))
X = np.reshape(X,(d_past*K,num_te))
# training
Ratio,Time = runner.run(Xtrain, Ytrain,X,Y,pf_test,num_H,num_val,N_mont, training_epochs=training_epochs, LR=LR,
batch_size=batch_size, K=K, layernum=layernum)
# performance
Sort_Ratio = np.sort(Ratio)
print('The second worst ratio is: %f ' % Sort_Ratio[1] )
print('Average time for each training is: %f s' % (np.mean(Time)) )
|
[
"math.ceil",
"runner.run",
"numpy.sort",
"numpy.mean",
"numpy.reshape"
] |
[((538, 560), 'math.ceil', 'math.ceil', (['(0.1 * num_H)'], {}), '(0.1 * num_H)\n', (547, 560), False, 'import math\n'), ((1300, 1340), 'numpy.reshape', 'np.reshape', (['Xtrain', '(d_past * K, num_tr)'], {}), '(Xtrain, (d_past * K, num_tr))\n', (1310, 1340), True, 'import numpy as np\n'), ((1341, 1376), 'numpy.reshape', 'np.reshape', (['X', '(d_past * K, num_te)'], {}), '(X, (d_past * K, num_te))\n', (1351, 1376), True, 'import numpy as np\n'), ((1401, 1561), 'runner.run', 'runner.run', (['Xtrain', 'Ytrain', 'X', 'Y', 'pf_test', 'num_H', 'num_val', 'N_mont'], {'training_epochs': 'training_epochs', 'LR': 'LR', 'batch_size': 'batch_size', 'K': 'K', 'layernum': 'layernum'}), '(Xtrain, Ytrain, X, Y, pf_test, num_H, num_val, N_mont,\n training_epochs=training_epochs, LR=LR, batch_size=batch_size, K=K,\n layernum=layernum)\n', (1411, 1561), False, 'import runner\n'), ((1612, 1626), 'numpy.sort', 'np.sort', (['Ratio'], {}), '(Ratio)\n', (1619, 1626), True, 'import numpy as np\n'), ((1738, 1751), 'numpy.mean', 'np.mean', (['Time'], {}), '(Time)\n', (1745, 1751), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
__author__ = "Yuchen"
__aim__ = 'rank top sentences in one topic'
__testCase__ = "../test/test_rankingTFIDF.py"
from sklearn.feature_extraction.text import CountVectorizer,TfidfTransformer
import sys
import argparse
import numpy as np
from termcolor import colored
from sklearn.metrics.pairwise import cosine_similarity
import operator
sys.path.append(r"../..")
from pushkin_gs.sum import tfidf_contentWords
class TFIDF(object):
def __init__(self, train_data, contentWords, topN, targCorpus):
"""
        :param train_data: the processed 'targData' dataset produced by 'tfidf_contentWords.py'; used to compute the sentence tfidf scores
        :param contentWords: the content words of the topic, as extracted in 'tfidf_contentWords.py'
        :param topN: number of top sentences used to summarize the document
        :param targCorpus: the initial corpus from which the top N sentences are returned
"""
self.train_data = train_data
self.contentWords = contentWords
self.topN = topN
self.targCorpus = targCorpus
def SentRankTFIDF(self):
"""
:return: tfidfArray: [[0.12, 0.99, 0.24]
[0.4, 0.3, 0.4, 0.33, ..]...]
"""
"""#tfidf
        #based on the bag-of-words model, compute the term-frequency matrix of the corpus; treat each sentence (each row of the matrix) as a vector and compute its tfidf values over the whole corpus; a sentence's tfidf values form the corresponding row vector of the matrix
"""
print ("func: SentRankTFIDF")
# convert corpus to term(word)_vectors
vectorizer = CountVectorizer()
# calculate appear times for each word
term_freq_matrix = vectorizer.fit_transform(self.train_data)
# get all terms(words) from corpus
termList = vectorizer.get_feature_names()
        # convert the term-frequency matrix term_freq_matrix into TF-IDF values
# calculate tfidf value for each sentence using term_freq_matrix
transformer = TfidfTransformer()
tfidf = transformer.fit_transform(term_freq_matrix)
# tfidf[i][j] is sentence[i]'s tfidf value
        # data structure: tfidf[i][j] is the tf-idf weight of term j in text i
tfidfArray = tfidf.toarray()
# print (tfidf.toarray())
"""#claculate sentence score
##only summing tfidf values where the words belong to contentWords##
根据上面求得的sentence tfidf矩阵(tfidfArray),加和求每一行(每个句子)的tfidf value,
不是全部相加,只是把代表content words的值加起来
Finally, 每个句子的tfidf分数除以整个文章tfidf总分数,即是该句子的ranking(sentRanking[i] = sentValueList[i]/docTfidfScore)
"""
# content words in each sentence
contWoEachSent = [[w for w in self.contentWords if w in sent.lower().split()]
for sent in self.train_data]
# content words index(termList) in each sentence
contWoIndex = [[[termList.index(w)] for w in self.contentWords if w in sent.lower().split()]
for sent in self.train_data]
print (' content words in each sentence',contWoEachSent,'\n','content words index in each sent',contWoIndex)
# calculate tfidf value for each sentence, return a score list for all sentence(sentValueList)
sentValueList = []
for i,index in enumerate(contWoIndex):
sentValue = sum(tfidfArray[i,index])
sentValueList.append(float(sentValue))
print (' sentValueList',sentValueList)
# sentence ranking #normalization
sentRanking = [value/max(sentValueList) for value in sentValueList]
sentRanking = np.array(sentRanking)
# print ("sentRanking",sentRanking[np.argsort(-sentRanking)])
topNSent = [self.targCorpus[rank] for rank in np.argsort(-sentRanking)[:-1]]
topNProcess = [self.train_data[rank] for rank in np.argsort(-sentRanking)[:-1]]
dicTop = np.c_[sentRanking[np.argsort(-sentRanking)[:-1]],topNProcess,topNSent]
print (' sent with score',dicTop[:2])
print ("....")
print ('-'*200)
self.dicTop = dicTop
return dicTop
# calculate Similarity score each sentence with whole documents
def calculateSimilarity(self, sentence, doc):
if doc == []:
return 0
vocab = {}
for word in sentence[:-1].split():
vocab[word] = 0
docInOneSentence = ''
for t in doc:
docInOneSentence += (t + ' ')
for word in t[:-1].split():
vocab[word] = 0
cv = CountVectorizer(vocabulary=vocab.keys())
docVector = cv.fit_transform([docInOneSentence])
sentenceVector = cv.fit_transform([sentence])
return cosine_similarity(docVector, sentenceVector)[0][0]
def MMR(self, dicTopSentence):
print("func: MMR")
        ## penalty factor
##score = a * i[2] + (1 - a) * similarity(i[sentence], (i - 1)[sentence])
n = 20 * len(self.targCorpus) / 100
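        # select roughly 20% of the original sentences for the summary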
alpha = 0.5
summarySet = []
temset = []
while n > 0:
mmr = {}
for sentence in dicTopSentence:
if not sentence[1] in temset:
# print (self.calculateSimilarity(sentence[1],summarySet))
mmr[sentence[1]] = alpha * float(sentence[0]) - (1 - alpha) * self.calculateSimilarity(sentence[1], temset)
selected = max(mmr.items(), key=operator.itemgetter(1))[0]
# print (selected)
temset.append(selected)
n -= 1
for temsents in temset:
summarySet.append(''.join([sent[2] for sent in self.dicTop if sent[1] == temsents]))
print ('\nTotal Sentences', colored(len(self.train_data),'red'))
print ('Top', colored(len(summarySet),'red') ,'sentences:')
for sent in enumerate(summarySet):
print (sent)
print ("**"*100)
return summarySet
def main():
"""
python rankingTFIDF.py --topic bmt_2.txt --contentWordNumber 100
:predefine:
:--allData: X.txt file, which contain (target1 polarity1\tsent1\ntarget2 polarity2\tsent2\n )
:--topic: bmt_0.txt, which contain (sent1 sent2 ... sentn)
"""
parser = argparse.ArgumentParser()
parser.add_argument('--topic', default='', help="target topic")
parser.add_argument('--contentWordNumber', default='', help="threshold for content Word Number")
parser.add_argument('--returnNSents', default='', help="top N sentences")
args = parser.parse_args()
targetTweets, targData, contentWords = tfidf_contentWords.main()
for key in targData:
trainData = targData[key].split(".")
# init corpus: finally return top N sentence from init corpus
for key in targetTweets:
initCorpus = targetTweets[key].split('\n')
instance = TFIDF(trainData, contentWords, args.returnNSents, initCorpus)
topSent = instance.SentRankTFIDF()
instance.MMR(topSent)
if __name__ == '__main__':
"""
python rankingTFIDF.py --topic bmt_2.txt --contentWordNumber 100 (--returnNSents 2)
"""
main()
|
[
"sys.path.append",
"pushkin_gs.sum.tfidf_contentWords.main",
"sklearn.feature_extraction.text.CountVectorizer",
"sklearn.metrics.pairwise.cosine_similarity",
"argparse.ArgumentParser",
"numpy.argsort",
"numpy.array",
"operator.itemgetter",
"sklearn.feature_extraction.text.TfidfTransformer"
] |
[((361, 385), 'sys.path.append', 'sys.path.append', (['"""../.."""'], {}), "('../..')\n", (376, 385), False, 'import sys\n'), ((6010, 6035), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6033, 6035), False, 'import argparse\n'), ((6357, 6382), 'pushkin_gs.sum.tfidf_contentWords.main', 'tfidf_contentWords.main', ([], {}), '()\n', (6380, 6382), False, 'from pushkin_gs.sum import tfidf_contentWords\n'), ((1469, 1486), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {}), '()\n', (1484, 1486), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\n'), ((1833, 1851), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {}), '()\n', (1849, 1851), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\n'), ((3398, 3419), 'numpy.array', 'np.array', (['sentRanking'], {}), '(sentRanking)\n', (3406, 3419), True, 'import numpy as np\n'), ((4497, 4541), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['docVector', 'sentenceVector'], {}), '(docVector, sentenceVector)\n', (4514, 4541), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((3545, 3569), 'numpy.argsort', 'np.argsort', (['(-sentRanking)'], {}), '(-sentRanking)\n', (3555, 3569), True, 'import numpy as np\n'), ((3633, 3657), 'numpy.argsort', 'np.argsort', (['(-sentRanking)'], {}), '(-sentRanking)\n', (3643, 3657), True, 'import numpy as np\n'), ((3700, 3724), 'numpy.argsort', 'np.argsort', (['(-sentRanking)'], {}), '(-sentRanking)\n', (3710, 3724), True, 'import numpy as np\n'), ((5201, 5223), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (5220, 5223), False, 'import operator\n')]
|