code | apis | extract_api
---|---|---|
#!/usr/bin/env python
#
# Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
from __future__ import absolute_import, division, print_function
import argparse
import os
import glog as log
import numpy as np
import cv2
from lxml import etree
from tqdm import tqdm
def parse_args():
"""Parse arguments of command line"""
parser = argparse.ArgumentParser(
fromfile_prefix_chars='@',
description='Convert CVAT XML annotations to masks'
)
parser.add_argument(
'--cvat-xml', metavar='FILE', required=True,
help='input file with CVAT annotation in xml format'
)
parser.add_argument(
'--background-color', metavar='COLOR_BGR', default="0,0,0",
help='specify background color (by default: 0,0,0)'
)
parser.add_argument(
'--label-color', metavar='LABEL:COLOR_BGR', action='append',
default=[],
help="specify a label's color (e.g. 255 or 255,0,0). The color will " +
"be interpreted in accordance with the mask format."
)
parser.add_argument(
'--mask-bitness', type=int, choices=[8, 24], default=8,
help='choose bitness for masks'
)
parser.add_argument(
'--output-dir', metavar='DIRECTORY', required=True,
help='directory for output masks'
)
return parser.parse_args()
def parse_anno_file(cvat_xml):
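"""Parse a CVAT XML file into a list of per-image dicts.
Each dict carries the <image> tag's attributes (e.g. 'name', 'width', 'height')
plus a 'shapes' list of polygon/box dicts whose 'points' field is an
"x1,y1;x2,y2;..." string; shapes are sorted by z_order."""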
root = etree.parse(cvat_xml).getroot()
anno = []
for image_tag in root.iter('image'):
image = {}
for key, value in image_tag.items():
image[key] = value
image['shapes'] = []
for poly_tag in image_tag.iter('polygon'):
polygon = {'type': 'polygon'}
for key, value in poly_tag.items():
polygon[key] = value
image['shapes'].append(polygon)
for box_tag in image_tag.iter('box'):
box = {'type': 'box'}
for key, value in box_tag.items():
box[key] = value
box['points'] = "{0},{1};{2},{1};{2},{3};{0},{3}".format(
box['xtl'], box['ytl'], box['xbr'], box['ybr'])
image['shapes'].append(box)
image['shapes'].sort(key=lambda x: int(x.get('z_order', 0)))
anno.append(image)
return anno
def create_mask_file(mask_path, width, height, bitness, color_map, background, shapes):
mask = np.zeros((height, width, bitness // 8), dtype=np.uint8)
for shape in shapes:
color = color_map.get(shape['label'], background)
points = [tuple(map(float, p.split(','))) for p in shape['points'].split(';')]
points = np.array([(int(p[0]), int(p[1])) for p in points])
mask = cv2.fillPoly(mask, [points], color=color)
cv2.imwrite(mask_path, mask)
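# Expand a comma-separated color string to `dim` components, e.g. for dim=3:
# to_scalar("255", 3) -> (255, 255, 255), to_scalar("255,0,0", 3) -> (255, 0, 0).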
def to_scalar(str, dim):
scalar = list(map(int, str.split(',')))
if len(scalar) < dim:
scalar.extend([scalar[-1]] * dim)
return tuple(scalar[0:dim])
def main():
args = parse_args()
anno = parse_anno_file(args.cvat_xml)
color_map = {}
dim = args.mask_bitness // 8
for item in args.label_color:
label, color = item.split(':')
color_map[label] = to_scalar(color, dim)
background = to_scalar(args.background_color, dim)
for image in tqdm(anno, desc='Generate masks'):
mask_path = os.path.join(args.output_dir, os.path.splitext(image['name'])[0] + '.png')
mask_dir = os.path.dirname(mask_path)
if mask_dir:
os.makedirs(mask_dir, exist_ok=True)
create_mask_file(mask_path, int(image['width']), int(image['height']),
args.mask_bitness, color_map, background, image['shapes'])
if __name__ == "__main__":
main()
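# Example invocation (a sketch only; the script filename, input file and label
# names are hypothetical):
#   python cvat_xml_to_masks.py --cvat-xml annotations.xml --output-dir masks/ \
#       --mask-bitness 24 --label-color car:0,0,255 --label-color person:0,255,0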
|
[
"cv2.imwrite",
"cv2.fillPoly",
"argparse.ArgumentParser",
"os.makedirs",
"tqdm.tqdm",
"lxml.etree.parse",
"os.path.splitext",
"os.path.dirname",
"numpy.zeros"
] |
[((358, 466), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'fromfile_prefix_chars': '"""@"""', 'description': '"""Convert CVAT XML annotations to masks"""'}), "(fromfile_prefix_chars='@', description=\n 'Convert CVAT XML annotations to masks')\n", (381, 466), False, 'import argparse\n'), ((2382, 2437), 'numpy.zeros', 'np.zeros', (['(height, width, bitness // 8)'], {'dtype': 'np.uint8'}), '((height, width, bitness // 8), dtype=np.uint8)\n', (2390, 2437), True, 'import numpy as np\n'), ((2738, 2766), 'cv2.imwrite', 'cv2.imwrite', (['mask_path', 'mask'], {}), '(mask_path, mask)\n', (2749, 2766), False, 'import cv2\n'), ((3264, 3297), 'tqdm.tqdm', 'tqdm', (['anno'], {'desc': '"""Generate masks"""'}), "(anno, desc='Generate masks')\n", (3268, 3297), False, 'from tqdm import tqdm\n'), ((2692, 2733), 'cv2.fillPoly', 'cv2.fillPoly', (['mask', '[points]'], {'color': 'color'}), '(mask, [points], color=color)\n', (2704, 2733), False, 'import cv2\n'), ((3413, 3439), 'os.path.dirname', 'os.path.dirname', (['mask_path'], {}), '(mask_path)\n', (3428, 3439), False, 'import os\n'), ((1401, 1422), 'lxml.etree.parse', 'etree.parse', (['cvat_xml'], {}), '(cvat_xml)\n', (1412, 1422), False, 'from lxml import etree\n'), ((3473, 3509), 'os.makedirs', 'os.makedirs', (['mask_dir'], {'exist_ok': '(True)'}), '(mask_dir, exist_ok=True)\n', (3484, 3509), False, 'import os\n'), ((3349, 3380), 'os.path.splitext', 'os.path.splitext', (["image['name']"], {}), "(image['name'])\n", (3365, 3380), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
"""Tests of GLSAR and diagnostics against Gretl
Created on Thu Feb 02 21:15:47 2012
Author: <NAME>
License: BSD-3
"""
import os
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal,
assert_allclose, assert_array_less)
from statsmodels.regression.linear_model import OLS, GLSAR
from statsmodels.tools.tools import add_constant
from statsmodels.datasets import macrodata
import statsmodels.stats.sandwich_covariance as sw
import statsmodels.stats.diagnostic as smsdia
import statsmodels.stats.outliers_influence as oi
def compare_ftest(contrast_res, other, decimal=(5,4)):
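# `other` holds the Gretl reference result in the order
# [fvalue, pvalue, df_num, df_denom, "f"], e.g. [7.268492, 0.00762, 1, 198, "f"].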
assert_almost_equal(contrast_res.fvalue, other[0], decimal=decimal[0])
assert_almost_equal(contrast_res.pvalue, other[1], decimal=decimal[1])
assert_equal(contrast_res.df_num, other[2])
assert_equal(contrast_res.df_denom, other[3])
assert_equal("f", other[4])
class TestGLSARGretl:
def test_all(self):
d = macrodata.load_pandas().data
#import datasetswsm.greene as g
#d = g.load('5-1')
#growth rates
gs_l_realinv = 400 * np.diff(np.log(d['realinv'].values))
gs_l_realgdp = 400 * np.diff(np.log(d['realgdp'].values))
#simple diff, not growthrate, I want heteroscedasticity later for testing
endogd = np.diff(d['realinv'])
exogd = add_constant(np.c_[np.diff(d['realgdp'].values), d['realint'][:-1].values])
endogg = gs_l_realinv
exogg = add_constant(np.c_[gs_l_realgdp, d['realint'][:-1].values])
res_ols = OLS(endogg, exogg).fit()
#print res_ols.params
mod_g1 = GLSAR(endogg, exogg, rho=-0.108136)
res_g1 = mod_g1.fit()
#print res_g1.params
mod_g2 = GLSAR(endogg, exogg, rho=-0.108136) #-0.1335859) from R
res_g2 = mod_g2.iterative_fit(maxiter=5)
#print res_g2.params
rho = -0.108136
# coefficient std. error t-ratio p-value 95% CONFIDENCE INTERVAL
partable = np.array([
[-9.50990, 0.990456, -9.602, 3.65e-018, -11.4631, -7.55670], # ***
[ 4.37040, 0.208146, 21.00, 2.93e-052, 3.95993, 4.78086], # ***
[-0.579253, 0.268009, -2.161, 0.0319, -1.10777, -0.0507346]]) # **
#Statistics based on the rho-differenced data:
result_gretl_g1 = dict(
endog_mean = ("Mean dependent var", 3.113973),
endog_std = ("S.D. dependent var", 18.67447),
ssr = ("Sum squared resid", 22530.90),
mse_resid_sqrt = ("S.E. of regression", 10.66735),
rsquared = ("R-squared", 0.676973),
rsquared_adj = ("Adjusted R-squared", 0.673710),
fvalue = ("F(2, 198)", 221.0475),
f_pvalue = ("P-value(F)", 3.56e-51),
resid_acf1 = ("rho", -0.003481),
dw = ("Durbin-Watson", 1.993858))
#fstatistic, p-value, df1, df2
reset_2_3 = [5.219019, 0.00619, 2, 197, "f"]
reset_2 = [7.268492, 0.00762, 1, 198, "f"]
reset_3 = [5.248951, 0.023, 1, 198, "f"]
#LM-statistic, p-value, df
arch_4 = [7.30776, 0.120491, 4, "chi2"]
#multicollinearity
vif = [1.002, 1.002]
cond_1norm = 6862.0664
determinant = 1.0296049e+009
reciprocal_condition_number = 0.013819244
#Chi-square(2): test-statistic, pvalue, df
normality = [20.2792, 3.94837e-005, 2]
#tests
res = res_g1 #with rho from Gretl
#basic
assert_almost_equal(res.params, partable[:,0], 4)
assert_almost_equal(res.bse, partable[:,1], 6)
assert_almost_equal(res.tvalues, partable[:,2], 2)
assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
#assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=7) #not in gretl
#assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=7) #FAIL
#assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=7) #FAIL
assert_almost_equal(np.sqrt(res.mse_resid), result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
assert_almost_equal(res.fvalue, result_gretl_g1['fvalue'][1], decimal=4)
assert_allclose(res.f_pvalue,
result_gretl_g1['f_pvalue'][1],
rtol=1e-2)
#assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO
#arch
#sm_arch = smsdia.acorr_lm(res.wresid**2, maxlag=4, autolag=None)
sm_arch = smsdia.het_arch(res.wresid, nlags=4)
assert_almost_equal(sm_arch[0], arch_4[0], decimal=4)
assert_almost_equal(sm_arch[1], arch_4[1], decimal=6)
#tests
res = res_g2 #with estimated rho
#estimated lag coefficient
assert_almost_equal(res.model.rho, rho, decimal=3)
#basic
assert_almost_equal(res.params, partable[:,0], 4)
assert_almost_equal(res.bse, partable[:,1], 3)
assert_almost_equal(res.tvalues, partable[:,2], 2)
assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
#assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=7) #not in gretl
#assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=7) #FAIL
#assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=7) #FAIL
assert_almost_equal(np.sqrt(res.mse_resid), result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
assert_almost_equal(res.fvalue, result_gretl_g1['fvalue'][1], decimal=0)
assert_almost_equal(res.f_pvalue, result_gretl_g1['f_pvalue'][1], decimal=6)
#assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO
c = oi.reset_ramsey(res, degree=2)
compare_ftest(c, reset_2, decimal=(2,4))
c = oi.reset_ramsey(res, degree=3)
compare_ftest(c, reset_2_3, decimal=(2,4))
#arch
#sm_arch = smsdia.acorr_lm(res.wresid**2, maxlag=4, autolag=None)
sm_arch = smsdia.het_arch(res.wresid, nlags=4)
assert_almost_equal(sm_arch[0], arch_4[0], decimal=1)
assert_almost_equal(sm_arch[1], arch_4[1], decimal=2)
'''
Performing iterative calculation of rho...
ITER RHO ESS
1 -0.10734 22530.9
2 -0.10814 22530.9
Model 4: Cochrane-Orcutt, using observations 1959:3-2009:3 (T = 201)
Dependent variable: ds_l_realinv
rho = -0.108136
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const -9.50990 0.990456 -9.602 3.65e-018 ***
ds_l_realgdp 4.37040 0.208146 21.00 2.93e-052 ***
realint_1 -0.579253 0.268009 -2.161 0.0319 **
Statistics based on the rho-differenced data:
Mean dependent var 3.113973 S.D. dependent var 18.67447
Sum squared resid 22530.90 S.E. of regression 10.66735
R-squared 0.676973 Adjusted R-squared 0.673710
F(2, 198) 221.0475 P-value(F) 3.56e-51
rho -0.003481 Durbin-Watson 1.993858
'''
'''
RESET test for specification (squares and cubes)
Test statistic: F = 5.219019,
with p-value = P(F(2,197) > 5.21902) = 0.00619
RESET test for specification (squares only)
Test statistic: F = 7.268492,
with p-value = P(F(1,198) > 7.26849) = 0.00762
RESET test for specification (cubes only)
Test statistic: F = 5.248951,
with p-value = P(F(1,198) > 5.24895) = 0.023:
'''
'''
Test for ARCH of order 4
coefficient std. error t-ratio p-value
--------------------------------------------------------
alpha(0) 97.0386 20.3234 4.775 3.56e-06 ***
alpha(1) 0.176114 0.0714698 2.464 0.0146 **
alpha(2) -0.0488339 0.0724981 -0.6736 0.5014
alpha(3) -0.0705413 0.0737058 -0.9571 0.3397
alpha(4) 0.0384531 0.0725763 0.5298 0.5968
Null hypothesis: no ARCH effect is present
Test statistic: LM = 7.30776
with p-value = P(Chi-square(4) > 7.30776) = 0.120491:
'''
'''
Variance Inflation Factors
Minimum possible value = 1.0
Values > 10.0 may indicate a collinearity problem
ds_l_realgdp 1.002
realint_1 1.002
VIF(j) = 1/(1 - R(j)^2), where R(j) is the multiple correlation coefficient
between variable j and the other independent variables
Properties of matrix X'X:
1-norm = 6862.0664
Determinant = 1.0296049e+009
Reciprocal condition number = 0.013819244
'''
'''
Test for ARCH of order 4 -
Null hypothesis: no ARCH effect is present
Test statistic: LM = 7.30776
with p-value = P(Chi-square(4) > 7.30776) = 0.120491
Test of common factor restriction -
Null hypothesis: restriction is acceptable
Test statistic: F(2, 195) = 0.426391
with p-value = P(F(2, 195) > 0.426391) = 0.653468
Test for normality of residual -
Null hypothesis: error is normally distributed
Test statistic: Chi-square(2) = 20.2792
with p-value = 3.94837e-005:
'''
#no idea what this is
'''
Augmented regression for common factor test
OLS, using observations 1959:3-2009:3 (T = 201)
Dependent variable: ds_l_realinv
coefficient std. error t-ratio p-value
---------------------------------------------------------------
const -10.9481 1.35807 -8.062 7.44e-014 ***
ds_l_realgdp 4.28893 0.229459 18.69 2.40e-045 ***
realint_1 -0.662644 0.334872 -1.979 0.0492 **
ds_l_realinv_1 -0.108892 0.0715042 -1.523 0.1294
ds_l_realgdp_1 0.660443 0.390372 1.692 0.0923 *
realint_2 0.0769695 0.341527 0.2254 0.8219
Sum of squared residuals = 22432.8
Test of common factor restriction
Test statistic: F(2, 195) = 0.426391, with p-value = 0.653468
'''
################ with OLS, HAC errors
#Model 5: OLS, using observations 1959:2-2009:3 (T = 202)
#Dependent variable: ds_l_realinv
#HAC standard errors, bandwidth 4 (Bartlett kernel)
#coefficient std. error t-ratio p-value 95% CONFIDENCE INTERVAL
#for confidence interval t(199, 0.025) = 1.972
partable = np.array([
[-9.48167, 1.17709, -8.055, 7.17e-014, -11.8029, -7.16049], # ***
[4.37422, 0.328787, 13.30, 2.62e-029, 3.72587, 5.02258], #***
[-0.613997, 0.293619, -2.091, 0.0378, -1.19300, -0.0349939]]) # **
result_gretl_g1 = dict(
endog_mean = ("Mean dependent var", 3.257395),
endog_std = ("S.D. dependent var", 18.73915),
ssr = ("Sum squared resid", 22799.68),
mse_resid_sqrt = ("S.E. of regression", 10.70380),
rsquared = ("R-squared", 0.676978),
rsquared_adj = ("Adjusted R-squared", 0.673731),
fvalue = ("F(2, 199)", 90.79971),
f_pvalue = ("P-value(F)", 9.53e-29),
llf = ("Log-likelihood", -763.9752),
aic = ("Akaike criterion", 1533.950),
bic = ("Schwarz criterion", 1543.875),
hqic = ("Hannan-Quinn", 1537.966),
resid_acf1 = ("rho", -0.107341),
dw = ("Durbin-Watson", 2.213805))
linear_logs = [1.68351, 0.430953, 2, "chi2"]
#for logs: dropping 70 nan or incomplete observations, T=133
#(res_ols.model.exog <=0).any(1).sum() = 69 ?not 70
linear_squares = [7.52477, 0.0232283, 2, "chi2"]
#Autocorrelation, Breusch-Godfrey test for autocorrelation up to order 4
lm_acorr4 = [1.17928, 0.321197, 4, 195, "F"]
lm2_acorr4 = [4.771043, 0.312, 4, "chi2"]
acorr_ljungbox4 = [5.23587, 0.264, 4, "chi2"]
#break
cusum_Harvey_Collier = [0.494432, 0.621549, 198, "t"] #stats.t.sf(0.494432, 198)*2
#see cusum results in files
break_qlr = [3.01985, 0.1, 3, 196, "maxF"] #TODO check this, max at 2001:4
break_chow = [13.1897, 0.00424384, 3, "chi2"] # break at 1984:1
arch_4 = [3.43473, 0.487871, 4, "chi2"]
normality = [23.962, 0.00001, 2, "chi2"]
het_white = [33.503723, 0.000003, 5, "chi2"]
het_breusch_pagan = [1.302014, 0.521520, 2, "chi2"] #TODO: not available
het_breusch_pagan_konker = [0.709924, 0.701200, 2, "chi2"]
reset_2_3 = [5.219019, 0.00619, 2, 197, "f"]
reset_2 = [7.268492, 0.00762, 1, 198, "f"]
reset_3 = [5.248951, 0.023, 1, 198, "f"] #not available
cond_1norm = 5984.0525
determinant = 7.1087467e+008
reciprocal_condition_number = 0.013826504
vif = [1.001, 1.001]
names = 'date residual leverage influence DFFITS'.split()
cur_dir = os.path.abspath(os.path.dirname(__file__))
fpath = os.path.join(cur_dir, 'results/leverage_influence_ols_nostars.txt')
lev = np.genfromtxt(fpath, skip_header=3, skip_footer=1,
converters={0:lambda s: s})
#either numpy 1.6 or python 3.2 changed behavior
if np.isnan(lev[-1]['f1']):
lev = np.genfromtxt(fpath, skip_header=3, skip_footer=2,
converters={0:lambda s: s})
lev.dtype.names = names
res = res_ols #for easier copying
cov_hac = sw.cov_hac_simple(res, nlags=4, use_correction=False)
bse_hac = sw.se_cov(cov_hac)
assert_almost_equal(res.params, partable[:,0], 5)
assert_almost_equal(bse_hac, partable[:,1], 5)
#TODO
assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=4) #not in gretl
assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=6) #FAIL
assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=6) #FAIL
assert_almost_equal(np.sqrt(res.mse_resid), result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
#f-value is based on cov_hac I guess
#res2 = res.get_robustcov_results(cov_type='HC1')
# TODO: fvalue differs from Gretl, trying any of the HCx
#assert_almost_equal(res2.fvalue, result_gretl_g1['fvalue'][1], decimal=0) #FAIL
#assert_approx_equal(res.f_pvalue, result_gretl_g1['f_pvalue'][1], significant=1) #FAIL
#assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO
c = oi.reset_ramsey(res, degree=2)
compare_ftest(c, reset_2, decimal=(6,5))
c = oi.reset_ramsey(res, degree=3)
compare_ftest(c, reset_2_3, decimal=(6,5))
linear_sq = smsdia.linear_lm(res.resid, res.model.exog)
assert_almost_equal(linear_sq[0], linear_squares[0], decimal=6)
assert_almost_equal(linear_sq[1], linear_squares[1], decimal=7)
hbpk = smsdia.het_breuschpagan(res.resid, res.model.exog)
assert_almost_equal(hbpk[0], het_breusch_pagan_konker[0], decimal=6)
assert_almost_equal(hbpk[1], het_breusch_pagan_konker[1], decimal=6)
hw = smsdia.het_white(res.resid, res.model.exog)
assert_almost_equal(hw[:2], het_white[:2], 6)
#arch
#sm_arch = smsdia.acorr_lm(res.resid**2, maxlag=4, autolag=None)
sm_arch = smsdia.het_arch(res.resid, nlags=4)
assert_almost_equal(sm_arch[0], arch_4[0], decimal=5)
assert_almost_equal(sm_arch[1], arch_4[1], decimal=6)
vif2 = [oi.variance_inflation_factor(res.model.exog, k) for k in [1,2]]
infl = oi.OLSInfluence(res_ols)
#print np.max(np.abs(lev['DFFITS'] - infl.dffits[0]))
#print np.max(np.abs(lev['leverage'] - infl.hat_matrix_diag))
#print np.max(np.abs(lev['influence'] - infl.influence)) #just added this based on Gretl
#just rough test, low decimal in Gretl output,
assert_almost_equal(lev['residual'], res.resid, decimal=3)
assert_almost_equal(lev['DFFITS'], infl.dffits[0], decimal=3)
assert_almost_equal(lev['leverage'], infl.hat_matrix_diag, decimal=3)
assert_almost_equal(lev['influence'], infl.influence, decimal=4)
def test_GLSARlag():
#test that results for lag>1 is close to lag=1, and smaller ssr
from statsmodels.datasets import macrodata
d2 = macrodata.load_pandas().data
g_gdp = 400*np.diff(np.log(d2['realgdp'].values))
g_inv = 400*np.diff(np.log(d2['realinv'].values))
exogg = add_constant(np.c_[g_gdp, d2['realint'][:-1].values], prepend=False)
mod1 = GLSAR(g_inv, exogg, 1)
res1 = mod1.iterative_fit(5)
mod4 = GLSAR(g_inv, exogg, 4)
res4 = mod4.iterative_fit(10)
assert_array_less(np.abs(res1.params / res4.params - 1), 0.03)
assert_array_less(res4.ssr, res1.ssr)
assert_array_less(np.abs(res4.bse / res1.bse) - 1, 0.015)
assert_array_less(np.abs((res4.fittedvalues / res1.fittedvalues - 1).mean()),
0.015)
assert_equal(len(mod4.rho), 4)
if __name__ == '__main__':
t = TestGLSARGretl()
t.test_all()
'''
Model 5: OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: ds_l_realinv
HAC standard errors, bandwidth 4 (Bartlett kernel)
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const -9.48167 1.17709 -8.055 7.17e-014 ***
ds_l_realgdp 4.37422 0.328787 13.30 2.62e-029 ***
realint_1 -0.613997 0.293619 -2.091 0.0378 **
Mean dependent var 3.257395 S.D. dependent var 18.73915
Sum squared resid 22799.68 S.E. of regression 10.70380
R-squared 0.676978 Adjusted R-squared 0.673731
F(2, 199) 90.79971 P-value(F) 9.53e-29
Log-likelihood -763.9752 Akaike criterion 1533.950
Schwarz criterion 1543.875 Hannan-Quinn 1537.966
rho -0.107341 Durbin-Watson 2.213805
QLR test for structural break -
Null hypothesis: no structural break
Test statistic: max F(3, 196) = 3.01985 at observation 2001:4
(10 percent critical value = 4.09)
Non-linearity test (logs) -
Null hypothesis: relationship is linear
Test statistic: LM = 1.68351
with p-value = P(Chi-square(2) > 1.68351) = 0.430953
Non-linearity test (squares) -
Null hypothesis: relationship is linear
Test statistic: LM = 7.52477
with p-value = P(Chi-square(2) > 7.52477) = 0.0232283
LM test for autocorrelation up to order 4 -
Null hypothesis: no autocorrelation
Test statistic: LMF = 1.17928
with p-value = P(F(4,195) > 1.17928) = 0.321197
CUSUM test for parameter stability -
Null hypothesis: no change in parameters
Test statistic: Harvey-Collier t(198) = 0.494432
with p-value = P(t(198) > 0.494432) = 0.621549
Chow test for structural break at observation 1984:1 -
Null hypothesis: no structural break
Asymptotic test statistic: Chi-square(3) = 13.1897
with p-value = 0.00424384
Test for ARCH of order 4 -
Null hypothesis: no ARCH effect is present
Test statistic: LM = 3.43473
with p-value = P(Chi-square(4) > 3.43473) = 0.487871:
#ANOVA
Analysis of Variance:
Sum of squares df Mean square
Regression 47782.7 2 23891.3
Residual 22799.7 199 114.571
Total 70582.3 201 351.156
R^2 = 47782.7 / 70582.3 = 0.676978
F(2, 199) = 23891.3 / 114.571 = 208.528 [p-value 1.47e-049]
#LM-test autocorrelation
Breusch-Godfrey test for autocorrelation up to order 4
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: uhat
coefficient std. error t-ratio p-value
------------------------------------------------------------
const 0.0640964 1.06719 0.06006 0.9522
ds_l_realgdp -0.0456010 0.217377 -0.2098 0.8341
realint_1 0.0511769 0.293136 0.1746 0.8616
uhat_1 -0.104707 0.0719948 -1.454 0.1475
uhat_2 -0.00898483 0.0742817 -0.1210 0.9039
uhat_3 0.0837332 0.0735015 1.139 0.2560
uhat_4 -0.0636242 0.0737363 -0.8629 0.3893
Unadjusted R-squared = 0.023619
Test statistic: LMF = 1.179281,
with p-value = P(F(4,195) > 1.17928) = 0.321
Alternative statistic: TR^2 = 4.771043,
with p-value = P(Chi-square(4) > 4.77104) = 0.312
Ljung-Box Q' = 5.23587,
with p-value = P(Chi-square(4) > 5.23587) = 0.264:
RESET test for specification (squares and cubes)
Test statistic: F = 5.219019,
with p-value = P(F(2,197) > 5.21902) = 0.00619
RESET test for specification (squares only)
Test statistic: F = 7.268492,
with p-value = P(F(1,198) > 7.26849) = 0.00762
RESET test for specification (cubes only)
Test statistic: F = 5.248951,
with p-value = P(F(1,198) > 5.24895) = 0.023
#heteroscedasticity White
White's test for heteroskedasticity
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: uhat^2
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const 104.920 21.5848 4.861 2.39e-06 ***
ds_l_realgdp -29.7040 6.24983 -4.753 3.88e-06 ***
realint_1 -6.93102 6.95607 -0.9964 0.3203
sq_ds_l_realg 4.12054 0.684920 6.016 8.62e-09 ***
X2_X3 2.89685 1.38571 2.091 0.0379 **
sq_realint_1 0.662135 1.10919 0.5970 0.5512
Unadjusted R-squared = 0.165860
Test statistic: TR^2 = 33.503723,
with p-value = P(Chi-square(5) > 33.503723) = 0.000003:
#heteroscedasticity Breusch-Pagan (original)
Breusch-Pagan test for heteroskedasticity
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: scaled uhat^2
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const 1.09468 0.192281 5.693 4.43e-08 ***
ds_l_realgdp -0.0323119 0.0386353 -0.8363 0.4040
realint_1 0.00410778 0.0512274 0.08019 0.9362
Explained sum of squares = 2.60403
Test statistic: LM = 1.302014,
with p-value = P(Chi-square(2) > 1.302014) = 0.521520
#heteroscedasticity Breusch-Pagan Koenker
Breusch-Pagan test for heteroskedasticity
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: scaled uhat^2 (Koenker robust variant)
coefficient std. error t-ratio p-value
------------------------------------------------------------
const 10.6870 21.7027 0.4924 0.6230
ds_l_realgdp -3.64704 4.36075 -0.8363 0.4040
realint_1 0.463643 5.78202 0.08019 0.9362
Explained sum of squares = 33174.2
Test statistic: LM = 0.709924,
with p-value = P(Chi-square(2) > 0.709924) = 0.701200
########## forecast
#forecast mean y
For 95% confidence intervals, t(199, 0.025) = 1.972
Obs ds_l_realinv prediction std. error 95% interval
2008:3 -7.134492 -17.177905 2.946312 -22.987904 - -11.367905
2008:4 -27.665860 -36.294434 3.036851 -42.282972 - -30.305896
2009:1 -70.239280 -44.018178 4.007017 -51.919841 - -36.116516
2009:2 -27.024588 -12.284842 1.427414 -15.099640 - -9.470044
2009:3 8.078897 4.483669 1.315876 1.888819 - 7.078520
Forecast evaluation statistics
Mean Error -3.7387
Mean Squared Error 218.61
Root Mean Squared Error 14.785
Mean Absolute Error 12.646
Mean Percentage Error -7.1173
Mean Absolute Percentage Error -43.867
Theil's U 0.4365
Bias proportion, UM 0.06394
Regression proportion, UR 0.13557
Disturbance proportion, UD 0.80049
#forecast actual y
For 95% confidence intervals, t(199, 0.025) = 1.972
Obs ds_l_realinv prediction std. error 95% interval
2008:3 -7.134492 -17.177905 11.101892 -39.070353 - 4.714544
2008:4 -27.665860 -36.294434 11.126262 -58.234939 - -14.353928
2009:1 -70.239280 -44.018178 11.429236 -66.556135 - -21.480222
2009:2 -27.024588 -12.284842 10.798554 -33.579120 - 9.009436
2009:3 8.078897 4.483669 10.784377 -16.782652 - 25.749991
Forecast evaluation statistics
Mean Error -3.7387
Mean Squared Error 218.61
Root Mean Squared Error 14.785
Mean Absolute Error 12.646
Mean Percentage Error -7.1173
Mean Absolute Percentage Error -43.867
Theil's U 0.4365
Bias proportion, UM 0.06394
Regression proportion, UR 0.13557
Disturbance proportion, UD 0.80049
'''
|
[
"numpy.sqrt",
"numpy.testing.assert_equal",
"numpy.log",
"statsmodels.stats.diagnostic.het_white",
"numpy.array",
"numpy.genfromtxt",
"statsmodels.stats.diagnostic.het_breuschpagan",
"numpy.testing.assert_array_less",
"statsmodels.stats.outliers_influence.OLSInfluence",
"numpy.testing.assert_allclose",
"statsmodels.stats.outliers_influence.reset_ramsey",
"numpy.diff",
"numpy.testing.assert_almost_equal",
"statsmodels.regression.linear_model.OLS",
"statsmodels.regression.linear_model.GLSAR",
"statsmodels.tools.tools.add_constant",
"numpy.abs",
"statsmodels.stats.sandwich_covariance.cov_hac_simple",
"os.path.dirname",
"statsmodels.stats.outliers_influence.variance_inflation_factor",
"numpy.isnan",
"statsmodels.datasets.macrodata.load_pandas",
"statsmodels.stats.diagnostic.linear_lm",
"statsmodels.stats.diagnostic.het_arch",
"os.path.join",
"statsmodels.stats.sandwich_covariance.se_cov"
] |
[((662, 732), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['contrast_res.fvalue', 'other[0]'], {'decimal': 'decimal[0]'}), '(contrast_res.fvalue, other[0], decimal=decimal[0])\n', (681, 732), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((737, 807), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['contrast_res.pvalue', 'other[1]'], {'decimal': 'decimal[1]'}), '(contrast_res.pvalue, other[1], decimal=decimal[1])\n', (756, 807), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((812, 855), 'numpy.testing.assert_equal', 'assert_equal', (['contrast_res.df_num', 'other[2]'], {}), '(contrast_res.df_num, other[2])\n', (824, 855), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((860, 905), 'numpy.testing.assert_equal', 'assert_equal', (['contrast_res.df_denom', 'other[3]'], {}), '(contrast_res.df_denom, other[3])\n', (872, 905), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((910, 937), 'numpy.testing.assert_equal', 'assert_equal', (['"""f"""', 'other[4]'], {}), "('f', other[4])\n", (922, 937), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((17494, 17562), 'statsmodels.tools.tools.add_constant', 'add_constant', (["np.c_[g_gdp, d2['realint'][:-1].values]"], {'prepend': '(False)'}), "(np.c_[g_gdp, d2['realint'][:-1].values], prepend=False)\n", (17506, 17562), False, 'from statsmodels.tools.tools import add_constant\n'), ((17575, 17597), 'statsmodels.regression.linear_model.GLSAR', 'GLSAR', (['g_inv', 'exogg', '(1)'], {}), '(g_inv, exogg, 1)\n', (17580, 17597), False, 'from statsmodels.regression.linear_model import OLS, GLSAR\n'), ((17643, 17665), 'statsmodels.regression.linear_model.GLSAR', 'GLSAR', (['g_inv', 'exogg', '(4)'], {}), '(g_inv, exogg, 4)\n', (17648, 17665), False, 'from statsmodels.regression.linear_model import OLS, GLSAR\n'), ((17772, 17809), 'numpy.testing.assert_array_less', 'assert_array_less', (['res4.ssr', 'res1.ssr'], {}), '(res4.ssr, res1.ssr)\n', (17789, 17809), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((1351, 1372), 'numpy.diff', 'np.diff', (["d['realinv']"], {}), "(d['realinv'])\n", (1358, 1372), True, 'import numpy as np\n'), ((1512, 1571), 'statsmodels.tools.tools.add_constant', 'add_constant', (["np.c_[gs_l_realgdp, d['realint'][:-1].values]"], {}), "(np.c_[gs_l_realgdp, d['realint'][:-1].values])\n", (1524, 1571), False, 'from statsmodels.tools.tools import add_constant\n'), ((1664, 1699), 'statsmodels.regression.linear_model.GLSAR', 'GLSAR', (['endogg', 'exogg'], {'rho': '(-0.108136)'}), '(endogg, exogg, rho=-0.108136)\n', (1669, 1699), False, 'from statsmodels.regression.linear_model import OLS, GLSAR\n'), ((1777, 1812), 'statsmodels.regression.linear_model.GLSAR', 'GLSAR', (['endogg', 'exogg'], {'rho': '(-0.108136)'}), '(endogg, exogg, rho=-0.108136)\n', (1782, 1812), False, 'from statsmodels.regression.linear_model import OLS, GLSAR\n'), ((2055, 2247), 'numpy.array', 'np.array', (['[[-9.5099, 0.990456, -9.602, 3.65e-18, -11.4631, -7.5567], [4.3704, \n 0.208146, 21.0, 2.93e-52, 3.95993, 4.78086], [-0.579253, 0.268009, -\n 2.161, 0.0319, -1.10777, -0.0507346]]'], {}), '([[-9.5099, 0.990456, -9.602, 3.65e-18, -11.4631, -7.5567], [4.3704,\n 0.208146, 21.0, 2.93e-52, 
3.95993, 4.78086], [-0.579253, 0.268009, -\n 2.161, 0.0319, -1.10777, -0.0507346]])\n', (2063, 2247), True, 'import numpy as np\n'), ((3619, 3669), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.params', 'partable[:, 0]', '(4)'], {}), '(res.params, partable[:, 0], 4)\n', (3638, 3669), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((3677, 3724), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.bse', 'partable[:, 1]', '(6)'], {}), '(res.bse, partable[:, 1], 6)\n', (3696, 3724), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((3732, 3783), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.tvalues', 'partable[:, 2]', '(2)'], {}), '(res.tvalues, partable[:, 2], 2)\n', (3751, 3783), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((3792, 3858), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.ssr', "result_gretl_g1['ssr'][1]"], {'decimal': '(2)'}), "(res.ssr, result_gretl_g1['ssr'][1], decimal=2)\n", (3811, 3858), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((4250, 4322), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.fvalue', "result_gretl_g1['fvalue'][1]"], {'decimal': '(4)'}), "(res.fvalue, result_gretl_g1['fvalue'][1], decimal=4)\n", (4269, 4322), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((4331, 4403), 'numpy.testing.assert_allclose', 'assert_allclose', (['res.f_pvalue', "result_gretl_g1['f_pvalue'][1]"], {'rtol': '(0.01)'}), "(res.f_pvalue, result_gretl_g1['f_pvalue'][1], rtol=0.01)\n", (4346, 4403), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((4650, 4686), 'statsmodels.stats.diagnostic.het_arch', 'smsdia.het_arch', (['res.wresid'], {'nlags': '(4)'}), '(res.wresid, nlags=4)\n', (4665, 4686), True, 'import statsmodels.stats.diagnostic as smsdia\n'), ((4695, 4748), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sm_arch[0]', 'arch_4[0]'], {'decimal': '(4)'}), '(sm_arch[0], arch_4[0], decimal=4)\n', (4714, 4748), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((4757, 4810), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sm_arch[1]', 'arch_4[1]'], {'decimal': '(6)'}), '(sm_arch[1], arch_4[1], decimal=6)\n', (4776, 4810), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((4912, 4962), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.model.rho', 'rho'], {'decimal': '(3)'}), '(res.model.rho, rho, decimal=3)\n', (4931, 4962), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((4987, 5037), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.params', 'partable[:, 0]', '(4)'], {}), '(res.params, partable[:, 0], 4)\n', (5006, 5037), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((5045, 5092), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.bse', 'partable[:, 1]', '(3)'], {}), '(res.bse, partable[:, 1], 3)\n', (5064, 5092), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, 
assert_array_less\n'), ((5100, 5151), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.tvalues', 'partable[:, 2]', '(2)'], {}), '(res.tvalues, partable[:, 2], 2)\n', (5119, 5151), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((5160, 5226), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.ssr', "result_gretl_g1['ssr'][1]"], {'decimal': '(2)'}), "(res.ssr, result_gretl_g1['ssr'][1], decimal=2)\n", (5179, 5226), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((5618, 5690), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.fvalue', "result_gretl_g1['fvalue'][1]"], {'decimal': '(0)'}), "(res.fvalue, result_gretl_g1['fvalue'][1], decimal=0)\n", (5637, 5690), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((5699, 5775), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.f_pvalue', "result_gretl_g1['f_pvalue'][1]"], {'decimal': '(6)'}), "(res.f_pvalue, result_gretl_g1['f_pvalue'][1], decimal=6)\n", (5718, 5775), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((5882, 5912), 'statsmodels.stats.outliers_influence.reset_ramsey', 'oi.reset_ramsey', (['res'], {'degree': '(2)'}), '(res, degree=2)\n', (5897, 5912), True, 'import statsmodels.stats.outliers_influence as oi\n'), ((5974, 6004), 'statsmodels.stats.outliers_influence.reset_ramsey', 'oi.reset_ramsey', (['res'], {'degree': '(3)'}), '(res, degree=3)\n', (5989, 6004), True, 'import statsmodels.stats.outliers_influence as oi\n'), ((6163, 6199), 'statsmodels.stats.diagnostic.het_arch', 'smsdia.het_arch', (['res.wresid'], {'nlags': '(4)'}), '(res.wresid, nlags=4)\n', (6178, 6199), True, 'import statsmodels.stats.diagnostic as smsdia\n'), ((6208, 6261), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sm_arch[0]', 'arch_4[0]'], {'decimal': '(1)'}), '(sm_arch[0], arch_4[0], decimal=1)\n', (6227, 6261), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((6270, 6323), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sm_arch[1]', 'arch_4[1]'], {'decimal': '(2)'}), '(sm_arch[1], arch_4[1], decimal=2)\n', (6289, 6323), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((11095, 11288), 'numpy.array', 'np.array', (['[[-9.48167, 1.17709, -8.055, 7.17e-14, -11.8029, -7.16049], [4.37422, \n 0.328787, 13.3, 2.62e-29, 3.72587, 5.02258], [-0.613997, 0.293619, -\n 2.091, 0.0378, -1.193, -0.0349939]]'], {}), '([[-9.48167, 1.17709, -8.055, 7.17e-14, -11.8029, -7.16049], [\n 4.37422, 0.328787, 13.3, 2.62e-29, 3.72587, 5.02258], [-0.613997, \n 0.293619, -2.091, 0.0378, -1.193, -0.0349939]])\n', (11103, 11288), True, 'import numpy as np\n'), ((13880, 13947), 'os.path.join', 'os.path.join', (['cur_dir', '"""results/leverage_influence_ols_nostars.txt"""'], {}), "(cur_dir, 'results/leverage_influence_ols_nostars.txt')\n", (13892, 13947), False, 'import os\n'), ((13962, 14047), 'numpy.genfromtxt', 'np.genfromtxt', (['fpath'], {'skip_header': '(3)', 'skip_footer': '(1)', 'converters': '{(0): lambda s: s}'}), '(fpath, skip_header=3, skip_footer=1, converters={(0): lambda\n s: s})\n', (13975, 14047), True, 'import numpy as np\n'), ((14137, 14160), 'numpy.isnan', 'np.isnan', (["lev[-1]['f1']"], {}), "(lev[-1]['f1'])\n", 
(14145, 14160), True, 'import numpy as np\n'), ((14386, 14439), 'statsmodels.stats.sandwich_covariance.cov_hac_simple', 'sw.cov_hac_simple', (['res'], {'nlags': '(4)', 'use_correction': '(False)'}), '(res, nlags=4, use_correction=False)\n', (14403, 14439), True, 'import statsmodels.stats.sandwich_covariance as sw\n'), ((14459, 14477), 'statsmodels.stats.sandwich_covariance.se_cov', 'sw.se_cov', (['cov_hac'], {}), '(cov_hac)\n', (14468, 14477), True, 'import statsmodels.stats.sandwich_covariance as sw\n'), ((14487, 14537), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.params', 'partable[:, 0]', '(5)'], {}), '(res.params, partable[:, 0], 5)\n', (14506, 14537), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((14545, 14592), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['bse_hac', 'partable[:, 1]', '(5)'], {}), '(bse_hac, partable[:, 1], 5)\n', (14564, 14592), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((14615, 14681), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.ssr', "result_gretl_g1['ssr'][1]"], {'decimal': '(2)'}), "(res.ssr, result_gretl_g1['ssr'][1], decimal=2)\n", (14634, 14681), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((14690, 14756), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.llf', "result_gretl_g1['llf'][1]"], {'decimal': '(4)'}), "(res.llf, result_gretl_g1['llf'][1], decimal=4)\n", (14709, 14756), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((14779, 14855), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.rsquared', "result_gretl_g1['rsquared'][1]"], {'decimal': '(6)'}), "(res.rsquared, result_gretl_g1['rsquared'][1], decimal=6)\n", (14798, 14855), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((14870, 14958), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.rsquared_adj', "result_gretl_g1['rsquared_adj'][1]"], {'decimal': '(6)'}), "(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1],\n decimal=6)\n", (14889, 14958), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((15520, 15550), 'statsmodels.stats.outliers_influence.reset_ramsey', 'oi.reset_ramsey', (['res'], {'degree': '(2)'}), '(res, degree=2)\n', (15535, 15550), True, 'import statsmodels.stats.outliers_influence as oi\n'), ((15612, 15642), 'statsmodels.stats.outliers_influence.reset_ramsey', 'oi.reset_ramsey', (['res'], {'degree': '(3)'}), '(res, degree=3)\n', (15627, 15642), True, 'import statsmodels.stats.outliers_influence as oi\n'), ((15715, 15758), 'statsmodels.stats.diagnostic.linear_lm', 'smsdia.linear_lm', (['res.resid', 'res.model.exog'], {}), '(res.resid, res.model.exog)\n', (15731, 15758), True, 'import statsmodels.stats.diagnostic as smsdia\n'), ((15767, 15830), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['linear_sq[0]', 'linear_squares[0]'], {'decimal': '(6)'}), '(linear_sq[0], linear_squares[0], decimal=6)\n', (15786, 15830), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((15839, 15902), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['linear_sq[1]', 'linear_squares[1]'], {'decimal': '(7)'}), '(linear_sq[1], 
linear_squares[1], decimal=7)\n', (15858, 15902), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((15919, 15969), 'statsmodels.stats.diagnostic.het_breuschpagan', 'smsdia.het_breuschpagan', (['res.resid', 'res.model.exog'], {}), '(res.resid, res.model.exog)\n', (15942, 15969), True, 'import statsmodels.stats.diagnostic as smsdia\n'), ((15978, 16046), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['hbpk[0]', 'het_breusch_pagan_konker[0]'], {'decimal': '(6)'}), '(hbpk[0], het_breusch_pagan_konker[0], decimal=6)\n', (15997, 16046), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((16055, 16123), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['hbpk[1]', 'het_breusch_pagan_konker[1]'], {'decimal': '(6)'}), '(hbpk[1], het_breusch_pagan_konker[1], decimal=6)\n', (16074, 16123), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((16138, 16181), 'statsmodels.stats.diagnostic.het_white', 'smsdia.het_white', (['res.resid', 'res.model.exog'], {}), '(res.resid, res.model.exog)\n', (16154, 16181), True, 'import statsmodels.stats.diagnostic as smsdia\n'), ((16190, 16235), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['hw[:2]', 'het_white[:2]', '(6)'], {}), '(hw[:2], het_white[:2], 6)\n', (16209, 16235), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((16342, 16377), 'statsmodels.stats.diagnostic.het_arch', 'smsdia.het_arch', (['res.resid'], {'nlags': '(4)'}), '(res.resid, nlags=4)\n', (16357, 16377), True, 'import statsmodels.stats.diagnostic as smsdia\n'), ((16386, 16439), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sm_arch[0]', 'arch_4[0]'], {'decimal': '(5)'}), '(sm_arch[0], arch_4[0], decimal=5)\n', (16405, 16439), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((16448, 16501), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sm_arch[1]', 'arch_4[1]'], {'decimal': '(6)'}), '(sm_arch[1], arch_4[1], decimal=6)\n', (16467, 16501), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((16599, 16623), 'statsmodels.stats.outliers_influence.OLSInfluence', 'oi.OLSInfluence', (['res_ols'], {}), '(res_ols)\n', (16614, 16623), True, 'import statsmodels.stats.outliers_influence as oi\n'), ((16918, 16976), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (["lev['residual']", 'res.resid'], {'decimal': '(3)'}), "(lev['residual'], res.resid, decimal=3)\n", (16937, 16976), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((16985, 17046), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (["lev['DFFITS']", 'infl.dffits[0]'], {'decimal': '(3)'}), "(lev['DFFITS'], infl.dffits[0], decimal=3)\n", (17004, 17046), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((17055, 17124), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (["lev['leverage']", 'infl.hat_matrix_diag'], {'decimal': '(3)'}), "(lev['leverage'], infl.hat_matrix_diag, decimal=3)\n", (17074, 17124), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((17133, 17197), 'numpy.testing.assert_almost_equal', 
'assert_almost_equal', (["lev['influence']", 'infl.influence'], {'decimal': '(4)'}), "(lev['influence'], infl.influence, decimal=4)\n", (17152, 17197), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((17345, 17368), 'statsmodels.datasets.macrodata.load_pandas', 'macrodata.load_pandas', ([], {}), '()\n', (17366, 17368), False, 'from statsmodels.datasets import macrodata\n'), ((17723, 17760), 'numpy.abs', 'np.abs', (['(res1.params / res4.params - 1)'], {}), '(res1.params / res4.params - 1)\n', (17729, 17760), True, 'import numpy as np\n'), ((1000, 1023), 'statsmodels.datasets.macrodata.load_pandas', 'macrodata.load_pandas', ([], {}), '()\n', (1021, 1023), False, 'from statsmodels.datasets import macrodata\n'), ((4169, 4191), 'numpy.sqrt', 'np.sqrt', (['res.mse_resid'], {}), '(res.mse_resid)\n', (4176, 4191), True, 'import numpy as np\n'), ((5537, 5559), 'numpy.sqrt', 'np.sqrt', (['res.mse_resid'], {}), '(res.mse_resid)\n', (5544, 5559), True, 'import numpy as np\n'), ((13837, 13862), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (13852, 13862), False, 'import os\n'), ((14180, 14265), 'numpy.genfromtxt', 'np.genfromtxt', (['fpath'], {'skip_header': '(3)', 'skip_footer': '(2)', 'converters': '{(0): lambda s: s}'}), '(fpath, skip_header=3, skip_footer=2, converters={(0): lambda\n s: s})\n', (14193, 14265), True, 'import numpy as np\n'), ((14989, 15011), 'numpy.sqrt', 'np.sqrt', (['res.mse_resid'], {}), '(res.mse_resid)\n', (14996, 15011), True, 'import numpy as np\n'), ((16519, 16566), 'statsmodels.stats.outliers_influence.variance_inflation_factor', 'oi.variance_inflation_factor', (['res.model.exog', 'k'], {}), '(res.model.exog, k)\n', (16547, 16566), True, 'import statsmodels.stats.outliers_influence as oi\n'), ((17398, 17426), 'numpy.log', 'np.log', (["d2['realgdp'].values"], {}), "(d2['realgdp'].values)\n", (17404, 17426), True, 'import numpy as np\n'), ((17452, 17480), 'numpy.log', 'np.log', (["d2['realinv'].values"], {}), "(d2['realinv'].values)\n", (17458, 17480), True, 'import numpy as np\n'), ((17832, 17859), 'numpy.abs', 'np.abs', (['(res4.bse / res1.bse)'], {}), '(res4.bse / res1.bse)\n', (17838, 17859), True, 'import numpy as np\n'), ((1156, 1183), 'numpy.log', 'np.log', (["d['realinv'].values"], {}), "(d['realinv'].values)\n", (1162, 1183), True, 'import numpy as np\n'), ((1222, 1249), 'numpy.log', 'np.log', (["d['realgdp'].values"], {}), "(d['realgdp'].values)\n", (1228, 1249), True, 'import numpy as np\n'), ((1591, 1609), 'statsmodels.regression.linear_model.OLS', 'OLS', (['endogg', 'exogg'], {}), '(endogg, exogg)\n', (1594, 1609), False, 'from statsmodels.regression.linear_model import OLS, GLSAR\n'), ((1408, 1436), 'numpy.diff', 'np.diff', (["d['realgdp'].values"], {}), "(d['realgdp'].values)\n", (1415, 1436), True, 'import numpy as np\n')]
|
import warnings
import numba
import numpy as np
import strax
import straxen
DEFAULT_MAX_SAMPLES = 20_000
@straxen.mini_analysis(requires=('records',),
warn_beyond_sec=10,
default_time_selection='touching')
def records_matrix(records, time_range, seconds_range, config, to_pe,
max_samples=DEFAULT_MAX_SAMPLES,
ignore_max_sample_warning=False):
"""Return (wv_matrix, times, pms)
- wv_matrix: (n_samples, n_pmt) array with per-PMT waveform intensity in PE/ns
- times: time labels in seconds (corr. to rows)
- pmts: PMT numbers (corr. to columns)
Both times and pmts have one extra element.
:param max_samples: Maximum number of time samples. If window and dt
conspire to exceed this, waveforms will be downsampled.
:param ignore_max_sample_warning: If True, suppress warning when this happens.
Example:
wvm, ts, ys = st.records_matrix(run_id, seconds_range=(1., 1.00001))
plt.pcolormesh(ts, ys, wvm.T,
norm=matplotlib.colors.LogNorm())
plt.colorbar(label='Intensity [PE / ns]')
"""
if len(records):
dt = records[0]['dt']
samples_per_record = len(records[0]['data'])
else:
# Defaults here do not matter, nothing will be plotted anyway
dt, samples_per_record = 10, 110
record_duration = samples_per_record * dt
window = time_range[1] - time_range[0]
if window / dt > max_samples:
with np.errstate(divide='ignore', invalid='ignore'):
# Downsample. New dt must be
# a) multiple of old dt
dts = np.arange(0, record_duration + dt, dt).astype(np.int64)
# b) divisor of record duration
dts = dts[record_duration / dts % 1 == 0]
# c) total samples < max_samples
dts = dts[window / dts < max_samples]
if len(dts):
# Pick lowest dt that satisfies criteria
dt = dts.min()
else:
# Records will be downsampled to single points
dt = max(record_duration, window // max_samples)
if not ignore_max_sample_warning:
warnings.warn(f"Matrix would exceed max_samples {max_samples}, "
f"downsampling to dt = {dt} ns.")
wvm = _records_to_matrix(
records,
t0=time_range[0],
n_channels=config['n_tpc_pmts'],
dt=dt,
window=window)
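# Convert the summed integer samples to PE/ns: scale each PMT column by its
# to_pe factor and divide by the (possibly downsampled) dt.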
wvm = wvm.astype(np.float32) * to_pe.reshape(1, -1) / dt
# Note + 1, so data for sample 0 will range from 0-1 in plot
ts = (np.arange(wvm.shape[0] + 1) * dt / int(1e9) + seconds_range[0])
ys = np.arange(wvm.shape[1] + 1)
return wvm, ts, ys
@straxen.mini_analysis(requires=('raw_records',),
warn_beyond_sec=3e-3,
default_time_selection='touching')
def raw_records_matrix(context, run_id, raw_records, time_range,
ignore_max_sample_warning=False,
max_samples=DEFAULT_MAX_SAMPLES,
**kwargs):
# Convert raw to records. We may not be able to baseline correctly
# at the start of the range due to missing zeroth fragments
records = strax.raw_to_records(raw_records)
strax.baseline(records, allow_sloppy_chunking=True)
strax.zero_out_of_bounds(records)
return context.records_matrix(run_id=run_id,
records=records,
time_range=time_range,
max_samples=max_samples,
ignore_max_sample_warning=ignore_max_sample_warning,
**kwargs)
@numba.njit
def _records_to_matrix(records, t0, window, n_channels, dt=10):
n_samples = (window // dt) + 1
# Use 32-bit integers, so downsampling saturated samples doesn't
# cause wraparounds
# TODO: amplitude bit shift!
y = np.zeros((n_samples, n_channels),
dtype=np.int32)
if not len(records):
return y
samples_per_record = len(records[0]['data'])
for r in records:
if r['channel'] >= n_channels:
continue
if dt >= samples_per_record * r['dt']:
# Downsample to single sample -> store area
idx = (r['time'] - t0) // dt
if idx >= len(y):
print(len(y), idx)
raise IndexError('Despite n_samples = window // dt + 1, our '
'idx is too high?!')
y[idx, r['channel']] += r['area']
continue
# Assume out-of-bounds data has been zeroed, so we do not
# need to do r['data'][:r['length']] here.
# This simplifies downsampling.
w = r['data'].astype(np.int32)
if dt > r['dt']:
# Downsample
duration = samples_per_record * r['dt']
assert duration % dt == 0, "Cannot downsample fractionally"
# .astype here keeps numba happy ... ??
w = w.reshape(duration // dt, -1).sum(axis=1).astype(np.int32)
elif dt < r['dt']:
raise ValueError("Upsampling not yet implemented")
(r_start, r_end), (y_start, y_end) = strax.overlap_indices(
r['time'] // dt, len(w),
t0 // dt, n_samples)
# += is paranoid, data in individual channels should not overlap
# but... https://github.com/AxFoundation/strax/issues/119
y[y_start:y_end, r['channel']] += w[r_start:r_end]
return y
|
[
"strax.raw_to_records",
"numpy.errstate",
"numpy.zeros",
"strax.baseline",
"strax.zero_out_of_bounds",
"straxen.mini_analysis",
"warnings.warn",
"numpy.arange"
] |
[((111, 214), 'straxen.mini_analysis', 'straxen.mini_analysis', ([], {'requires': "('records',)", 'warn_beyond_sec': '(10)', 'default_time_selection': '"""touching"""'}), "(requires=('records',), warn_beyond_sec=10,\n default_time_selection='touching')\n", (132, 214), False, 'import straxen\n'), ((2754, 2864), 'straxen.mini_analysis', 'straxen.mini_analysis', ([], {'requires': "('raw_records',)", 'warn_beyond_sec': '(0.003)', 'default_time_selection': '"""touching"""'}), "(requires=('raw_records',), warn_beyond_sec=0.003,\n default_time_selection='touching')\n", (2775, 2864), False, 'import straxen\n'), ((2699, 2726), 'numpy.arange', 'np.arange', (['(wvm.shape[1] + 1)'], {}), '(wvm.shape[1] + 1)\n', (2708, 2726), True, 'import numpy as np\n'), ((3266, 3299), 'strax.raw_to_records', 'strax.raw_to_records', (['raw_records'], {}), '(raw_records)\n', (3286, 3299), False, 'import strax\n'), ((3304, 3355), 'strax.baseline', 'strax.baseline', (['records'], {'allow_sloppy_chunking': '(True)'}), '(records, allow_sloppy_chunking=True)\n', (3318, 3355), False, 'import strax\n'), ((3360, 3393), 'strax.zero_out_of_bounds', 'strax.zero_out_of_bounds', (['records'], {}), '(records)\n', (3384, 3393), False, 'import strax\n'), ((3989, 4038), 'numpy.zeros', 'np.zeros', (['(n_samples, n_channels)'], {'dtype': 'np.int32'}), '((n_samples, n_channels), dtype=np.int32)\n', (3997, 4038), True, 'import numpy as np\n'), ((1505, 1551), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (1516, 1551), True, 'import numpy as np\n'), ((2211, 2315), 'warnings.warn', 'warnings.warn', (['f"""Matrix would exceed max_samples {max_samples}, downsampling to dt = {dt} ns."""'], {}), "(\n f'Matrix would exceed max_samples {max_samples}, downsampling to dt = {dt} ns.'\n )\n", (2224, 2315), False, 'import warnings\n'), ((2626, 2653), 'numpy.arange', 'np.arange', (['(wvm.shape[0] + 1)'], {}), '(wvm.shape[0] + 1)\n', (2635, 2653), True, 'import numpy as np\n'), ((1649, 1687), 'numpy.arange', 'np.arange', (['(0)', '(record_duration + dt)', 'dt'], {}), '(0, record_duration + dt, dt)\n', (1658, 1687), True, 'import numpy as np\n')]
|
import numpy as np
from openpnm.algorithms import ReactiveTransport
from openpnm.models.physics import generic_source_term as gst
from openpnm.utils import logging
logger = logging.getLogger(__name__)
class ChargeConservation(ReactiveTransport):
r"""
A class to enforce charge conservation in ionic transport simulations.
Parameters
----------
network : OpenPNM Network object
The network on which this algorithm operates
project : OpenPNM Project object
Either a network or a project must be specified
name : string, optional
A unique name to give the object for easier identification. If not
given, one is generated.
"""
def __init__(self, settings={}, phase=None, **kwargs):
def_set = {'phase': None,
'quantity': 'pore.potential',
'conductance': 'throat.ionic_conductance',
'charge_conservation': 'electroneutrality',
'gui': {'setup': {'phase': None,
'quantity': '',
'conductance': '',
'charge_conservation': ''},
'set_rate_BC': {'pores': None,
'values': None},
'set_value_BC': {'pores': None,
'values': None},
'set_source': {'pores': None,
'propname': ''}
}
}
super().__init__(**kwargs)
self.settings.update(def_set)
self.settings.update(settings)
if phase is not None:
self.setup(phase=phase)
def setup(self, phase=None, quantity='', conductance='',
charge_conservation=None, **kwargs):
r"""
This method takes several arguments that are essential to running the
algorithm and adds them to the settings.
Parameters
----------
phase : OpenPNM Phase object
The phase on which the algorithm is to be run.
quantity : string
(default is ``'pore.mole_fraction'``) The name of the physical
quantity to be calculated.
conductance : string
(default is ``'throat.diffusive_conductance'``) The name of the
pore-scale transport conductance values. These are typically
calculated by a model attached to a *Physics* object associated
with the given *Phase*.
charge_conservation : string
The assumption adopted to enforce charge conservation when
performing ions transport simulations (default is
"electroneutrality").
Notes
-----
Any additional arguments are added to the ``settings`` dictionary of
the object.
"""
if phase:
self.settings['phase'] = phase.name
if quantity:
self.settings['quantity'] = quantity
if conductance:
self.settings['conductance'] = conductance
if charge_conservation:
self.settings['charge_conservation'] = charge_conservation
super().setup(**kwargs)
def _charge_conservation_eq_source_term(self, e_alg):
# Source term for Poisson or charge conservation (electroneutrality) eq
phase = self.project.phases()[self.settings['phase']]
Ps = (self['pore.all'] * np.isnan(self['pore.bc_value']) *
np.isnan(self['pore.bc_rate']))
mod = gst.charge_conservation
phys = self.project.find_physics(phase=phase)
phys[0].add_model(propname='pore.charge_conservation', model=mod,
phase=phase, p_alg=self, e_alg=e_alg,
assumption=self.settings['charge_conservation'])
self.set_source(propname='pore.charge_conservation', pores=Ps)
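if __name__ == '__main__':
    # Illustrative usage sketch, not part of the original module. It assumes an
    # OpenPNM 2.x style workflow; the network size, boundary values and the
    # constant 'throat.ionic_conductance' below are placeholders chosen purely
    # for demonstration. Run standalone like this, the algorithm reduces to a
    # plain conduction solve; in practice it is coupled to ionic-transport
    # algorithms, which supply the charge-conservation source term.
    import openpnm as op
    pn = op.network.Cubic(shape=[10, 10, 10], spacing=1e-4)
    geo = op.geometry.StickAndBall(network=pn, pores=pn.Ps, throats=pn.Ts)
    water = op.phases.Water(network=pn)
    phys = op.physics.GenericPhysics(network=pn, phase=water, geometry=geo)
    phys['throat.ionic_conductance'] = 1.0  # placeholder conductance values
    alg = ChargeConservation(network=pn, phase=water)
    alg.set_value_BC(pores=pn.pores('left'), values=0.05)
    alg.set_value_BC(pores=pn.pores('right'), values=0.00)
    alg.run()
    print(alg['pore.potential'])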
|
[
"numpy.isnan",
"openpnm.utils.logging.getLogger"
] |
[((173, 200), 'openpnm.utils.logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (190, 200), False, 'from openpnm.utils import logging\n'), ((3600, 3630), 'numpy.isnan', 'np.isnan', (["self['pore.bc_rate']"], {}), "(self['pore.bc_rate'])\n", (3608, 3630), True, 'import numpy as np\n'), ((3552, 3583), 'numpy.isnan', 'np.isnan', (["self['pore.bc_value']"], {}), "(self['pore.bc_value'])\n", (3560, 3583), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2019 <NAME>. All Rights Reserved.
#
# Licensed under the MIT License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/MIT
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
import multiprocessing
import numpy as np
import os
import random
import torch
import torch.backends.cudnn as cudnn
from kerosene.configs.configs import RunConfiguration, DatasetConfiguration
from kerosene.configs.parsers import YamlConfigurationParser
from kerosene.loggers.visdom import PlotType, PlotFrequency
from kerosene.loggers.visdom.config import VisdomConfiguration
from kerosene.loggers.visdom.visdom import VisdomLogger, VisdomData
from kerosene.training.trainers import ModelTrainerFactory
from samitorch.inputs.utils import augmented_sample_collate
from torch.utils.data.dataloader import DataLoader
from deepNormalize.config.parsers import ArgsParserFactory, ArgsParserType
from deepNormalize.factories.customModelFactory import CustomModelFactory
from deepNormalize.factories.customTrainerFactory import TrainerFactory
from deepNormalize.inputs.datasets import iSEGSliceDatasetFactory, MRBrainSSliceDatasetFactory, ABIDESliceDatasetFactory
from deepNormalize.nn.criterions import CustomCriterionFactory
from deepNormalize.utils.constants import *
from deepNormalize.utils.image_slicer import ImageReconstructor
cudnn.benchmark = True
cudnn.enabled = True
np.random.seed(42)
random.seed(42)
if __name__ == '__main__':
# Basic settings
logging.basicConfig(level=logging.INFO)
torch.set_num_threads(multiprocessing.cpu_count())
torch.set_num_interop_threads(multiprocessing.cpu_count())
args = ArgsParserFactory.create_parser(ArgsParserType.MODEL_TRAINING).parse_args()
# Create configurations.
run_config = RunConfiguration(use_amp=args.use_amp, local_rank=args.local_rank, amp_opt_level=args.amp_opt_level)
model_trainer_configs, training_config = YamlConfigurationParser.parse(args.config_file)
if not isinstance(model_trainer_configs, list):
model_trainer_configs = [model_trainer_configs]
dataset_configs = YamlConfigurationParser.parse_section(args.config_file, "dataset")
dataset_configs = {k: DatasetConfiguration(v) for k, v, in dataset_configs.items()}
data_augmentation_config = YamlConfigurationParser.parse_section(args.config_file, "data_augmentation")
config_html = [training_config.to_html(), list(map(lambda config: config.to_html(), dataset_configs.values())),
list(map(lambda config: config.to_html(), model_trainer_configs))]
# Prepare the data.
train_datasets = list()
valid_datasets = list()
test_datasets = list()
reconstruction_datasets = list()
iSEG_train = None
iSEG_CSV = None
MRBrainS_train = None
MRBrainS_CSV = None
ABIDE_train = None
ABIDE_CSV = None
iSEG_augmentation_strategy = None
MRBrainS_augmentation_strategy = None
ABIDE_augmentation_strategy = None
# Initialize the model trainers
model_trainer_factory = ModelTrainerFactory(model_factory=CustomModelFactory(),
criterion_factory=CustomCriterionFactory())
model_trainers = model_trainer_factory.create(model_trainer_configs)
if not isinstance(model_trainers, list):
model_trainers = [model_trainers]
# Create datasets
if dataset_configs.get("iSEG", None) is not None:
iSEG_train, iSEG_valid, iSEG_test, iSEG_reconstruction = iSEGSliceDatasetFactory.create_train_valid_test(
source_dir=dataset_configs["iSEG"].path,
modalities=dataset_configs["iSEG"].modalities,
dataset_id=ISEG_ID,
test_size=dataset_configs["iSEG"].validation_split,
max_subjects=dataset_configs["iSEG"].max_subjects,
max_num_patches=dataset_configs["iSEG"].max_num_patches,
augment=dataset_configs["iSEG"].augment,
patch_size=dataset_configs["iSEG"].patch_size,
step=dataset_configs["iSEG"].step,
test_patch_size=dataset_configs["iSEG"].test_patch_size,
test_step=dataset_configs["iSEG"].test_step,
data_augmentation_config=data_augmentation_config)
train_datasets.append(iSEG_train)
valid_datasets.append(iSEG_valid)
reconstruction_datasets.append(iSEG_reconstruction)
if dataset_configs.get("MRBrainS", None) is not None:
MRBrainS_train, MRBrainS_valid, MRBrainS_test, MRBrainS_reconstruction = MRBrainSSliceDatasetFactory.create_train_valid_test(
source_dir=dataset_configs["MRBrainS"].path,
modalities=dataset_configs["MRBrainS"].modalities,
dataset_id=MRBRAINS_ID,
test_size=dataset_configs["MRBrainS"].validation_split,
max_subjects=dataset_configs["MRBrainS"].max_subjects,
max_num_patches=dataset_configs["MRBrainS"].max_num_patches,
augment=dataset_configs["MRBrainS"].augment,
patch_size=dataset_configs["MRBrainS"].patch_size,
step=dataset_configs["MRBrainS"].step,
test_patch_size=dataset_configs["MRBrainS"].test_patch_size,
test_step=dataset_configs["MRBrainS"].test_step,
data_augmentation_config=data_augmentation_config)
test_datasets.append(MRBrainS_test)
reconstruction_datasets.append(MRBrainS_reconstruction)
if dataset_configs.get("ABIDE", None) is not None:
ABIDE_train, ABIDE_valid, ABIDE_test, ABIDE_reconstruction = ABIDESliceDatasetFactory.create_train_valid_test(
source_dir=dataset_configs["ABIDE"].path,
modalities=dataset_configs["ABIDE"].modalities,
dataset_id=ABIDE_ID,
sites=dataset_configs["ABIDE"].sites,
max_subjects=dataset_configs["ABIDE"].max_subjects,
test_size=dataset_configs["ABIDE"].validation_split,
max_num_patches=dataset_configs["ABIDE"].max_num_patches,
augment=dataset_configs["ABIDE"].augment,
patch_size=dataset_configs["ABIDE"].patch_size,
step=dataset_configs["ABIDE"].step,
test_patch_size=dataset_configs["ABIDE"].test_patch_size,
test_step=dataset_configs["ABIDE"].test_step,
data_augmentation_config=data_augmentation_config)
train_datasets.append(ABIDE_train)
valid_datasets.append(ABIDE_valid)
test_datasets.append(ABIDE_test)
reconstruction_datasets.append(ABIDE_reconstruction)
if len(list(dataset_configs.keys())) == 2:
segmentation_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]],
patch_size=dataset_configs["iSEG"].test_patch_size,
reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs["iSEG"].test_step,
models=[model_trainers[0]],
segment=True,
batch_size=8)
input_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]],
patch_size=dataset_configs["iSEG"].test_patch_size,
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50)
gt_reconstructor = ImageReconstructor(
[iSEG_reconstruction._target_images[0], MRBrainS_reconstruction._target_images[0]],
patch_size=dataset_configs["iSEG"].test_patch_size,
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
is_ground_truth=True,
batch_size=50)
if dataset_configs["iSEG"].augment:
augmented_input_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]],
patch_size=dataset_configs["iSEG"].test_patch_size,
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50,
alpha=data_augmentation_config["test"]["bias_field"]["alpha"][0],
prob_bias=data_augmentation_config["test"]["bias_field"]["prob_bias"],
snr=data_augmentation_config["test"]["noise"]["snr"],
prob_noise=data_augmentation_config["test"]["noise"]["prob_noise"])
else:
augmented_input_reconstructor = None
augmented_normalized_input_reconstructor = None
else:
segmentation_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0],
ABIDE_reconstruction._source_images[0]],
patch_size=(1, 32, 32, 32),
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
models=[model_trainers[0]],
normalize_and_segment=True,
batch_size=4)
input_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0],
ABIDE_reconstruction._source_images[0]],
patch_size=(1, 32, 32, 32),
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50)
gt_reconstructor = ImageReconstructor(
[iSEG_reconstruction._target_images[0], MRBrainS_reconstruction._target_images[0],
ABIDE_reconstruction._target_images[0]],
patch_size=(1, 32, 32, 32),
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50,
is_ground_truth=True)
if dataset_configs["iSEG"].augment:
augmented_input_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0],
ABIDE_reconstruction._source_images[0]],
patch_size=(1, 32, 32, 32),
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50,
alpha=data_augmentation_config["test"]["bias_field"]["alpha"][0],
prob_bias=data_augmentation_config["test"]["bias_field"]["prob_bias"],
snr=data_augmentation_config["test"]["noise"]["snr"],
prob_noise=data_augmentation_config["test"]["noise"]["prob_noise"])
else:
augmented_input_reconstructor = None
augmented_normalized_input_reconstructor = None
# Concat datasets.
if len(dataset_configs) > 1:
train_dataset = torch.utils.data.ConcatDataset(train_datasets)
valid_dataset = torch.utils.data.ConcatDataset(valid_datasets)
test_dataset = torch.utils.data.ConcatDataset(test_datasets)
else:
train_dataset = train_datasets[0]
valid_dataset = valid_datasets[0]
test_dataset = test_datasets[0]
# Create loaders.
dataloaders = list(map(lambda dataset: DataLoader(dataset,
training_config.batch_size,
sampler=None,
shuffle=True,
num_workers=args.num_workers,
collate_fn=augmented_sample_collate,
drop_last=True,
pin_memory=True),
[train_dataset, valid_dataset, test_dataset]))
# Initialize the loggers.
visdom_config = VisdomConfiguration.from_yml(args.config_file, "visdom")
exp = args.config_file.split("/")[-3:]
if visdom_config.save_destination is not None:
save_folder = visdom_config.save_destination + os.path.join(exp[0], exp[1],
os.path.basename(
os.path.normpath(visdom_config.env)))
else:
save_folder = "saves/{}".format(os.path.basename(os.path.normpath(visdom_config.env)))
[os.makedirs("{}/{}".format(save_folder, model), exist_ok=True)
for model in
["Discriminator", "Generator", "Segmenter"]]
visdom_logger = VisdomLogger(visdom_config)
visdom_logger(VisdomData("Experiment", "Experiment Config", PlotType.TEXT_PLOT, PlotFrequency.EVERY_EPOCH, None,
config_html))
visdom_logger(VisdomData("Experiment", "Patch count", PlotType.BAR_PLOT, PlotFrequency.EVERY_EPOCH,
x=[len(iSEG_train) if iSEG_train is not None else 0,
len(MRBrainS_train) if MRBrainS_train is not None else 0,
len(ABIDE_train) if ABIDE_train is not None else 0],
y=["iSEG", "MRBrainS", "ABIDE"], params={"opts": {"title": "Patch count"}}))
trainer = TrainerFactory(training_config.trainer).create(training_config,
model_trainers,
dataloaders,
reconstruction_datasets,
None,
input_reconstructor,
segmentation_reconstructor,
augmented_input_reconstructor,
None,
gt_reconstructor,
run_config,
dataset_configs,
save_folder,
visdom_logger)
trainer.train(training_config.nb_epochs)
|
[
"torch.utils.data.ConcatDataset",
"deepNormalize.factories.customTrainerFactory.TrainerFactory",
"deepNormalize.utils.image_slicer.ImageReconstructor",
"multiprocessing.cpu_count",
"kerosene.configs.configs.DatasetConfiguration",
"kerosene.loggers.visdom.visdom.VisdomLogger",
"deepNormalize.inputs.datasets.MRBrainSSliceDatasetFactory.create_train_valid_test",
"kerosene.configs.configs.RunConfiguration",
"deepNormalize.inputs.datasets.iSEGSliceDatasetFactory.create_train_valid_test",
"deepNormalize.config.parsers.ArgsParserFactory.create_parser",
"deepNormalize.nn.criterions.CustomCriterionFactory",
"torch.utils.data.dataloader.DataLoader",
"os.path.normpath",
"numpy.random.seed",
"kerosene.loggers.visdom.visdom.VisdomData",
"kerosene.loggers.visdom.config.VisdomConfiguration.from_yml",
"deepNormalize.factories.customModelFactory.CustomModelFactory",
"deepNormalize.inputs.datasets.ABIDESliceDatasetFactory.create_train_valid_test",
"logging.basicConfig",
"kerosene.configs.parsers.YamlConfigurationParser.parse",
"kerosene.configs.parsers.YamlConfigurationParser.parse_section",
"random.seed"
] |
[((1889, 1907), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1903, 1907), True, 'import numpy as np\n'), ((1908, 1923), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (1919, 1923), False, 'import random\n'), ((1977, 2016), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (1996, 2016), False, 'import logging\n'), ((2269, 2373), 'kerosene.configs.configs.RunConfiguration', 'RunConfiguration', ([], {'use_amp': 'args.use_amp', 'local_rank': 'args.local_rank', 'amp_opt_level': 'args.amp_opt_level'}), '(use_amp=args.use_amp, local_rank=args.local_rank,\n amp_opt_level=args.amp_opt_level)\n', (2285, 2373), False, 'from kerosene.configs.configs import RunConfiguration, DatasetConfiguration\n'), ((2415, 2462), 'kerosene.configs.parsers.YamlConfigurationParser.parse', 'YamlConfigurationParser.parse', (['args.config_file'], {}), '(args.config_file)\n', (2444, 2462), False, 'from kerosene.configs.parsers import YamlConfigurationParser\n'), ((2593, 2659), 'kerosene.configs.parsers.YamlConfigurationParser.parse_section', 'YamlConfigurationParser.parse_section', (['args.config_file', '"""dataset"""'], {}), "(args.config_file, 'dataset')\n", (2630, 2659), False, 'from kerosene.configs.parsers import YamlConfigurationParser\n'), ((2779, 2855), 'kerosene.configs.parsers.YamlConfigurationParser.parse_section', 'YamlConfigurationParser.parse_section', (['args.config_file', '"""data_augmentation"""'], {}), "(args.config_file, 'data_augmentation')\n", (2816, 2855), False, 'from kerosene.configs.parsers import YamlConfigurationParser\n'), ((12352, 12408), 'kerosene.loggers.visdom.config.VisdomConfiguration.from_yml', 'VisdomConfiguration.from_yml', (['args.config_file', '"""visdom"""'], {}), "(args.config_file, 'visdom')\n", (12380, 12408), False, 'from kerosene.loggers.visdom.config import VisdomConfiguration\n'), ((13045, 13072), 'kerosene.loggers.visdom.visdom.VisdomLogger', 'VisdomLogger', (['visdom_config'], {}), '(visdom_config)\n', (13057, 13072), False, 'from kerosene.loggers.visdom.visdom import VisdomLogger, VisdomData\n'), ((2043, 2070), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (2068, 2070), False, 'import multiprocessing\n'), ((2106, 2133), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (2131, 2133), False, 'import multiprocessing\n'), ((2686, 2709), 'kerosene.configs.configs.DatasetConfiguration', 'DatasetConfiguration', (['v'], {}), '(v)\n', (2706, 2709), False, 'from kerosene.configs.configs import RunConfiguration, DatasetConfiguration\n'), ((3975, 4604), 'deepNormalize.inputs.datasets.iSEGSliceDatasetFactory.create_train_valid_test', 'iSEGSliceDatasetFactory.create_train_valid_test', ([], {'source_dir': "dataset_configs['iSEG'].path", 'modalities': "dataset_configs['iSEG'].modalities", 'dataset_id': 'ISEG_ID', 'test_size': "dataset_configs['iSEG'].validation_split", 'max_subjects': "dataset_configs['iSEG'].max_subjects", 'max_num_patches': "dataset_configs['iSEG'].max_num_patches", 'augment': "dataset_configs['iSEG'].augment", 'patch_size': "dataset_configs['iSEG'].patch_size", 'step': "dataset_configs['iSEG'].step", 'test_patch_size': "dataset_configs['iSEG'].test_patch_size", 'test_step': "dataset_configs['iSEG'].test_step", 'data_augmentation_config': 'data_augmentation_config'}), "(source_dir=dataset_configs[\n 'iSEG'].path, modalities=dataset_configs['iSEG'].modalities, dataset_id\n =ISEG_ID, 
test_size=dataset_configs['iSEG'].validation_split,\n max_subjects=dataset_configs['iSEG'].max_subjects, max_num_patches=\n dataset_configs['iSEG'].max_num_patches, augment=dataset_configs['iSEG'\n ].augment, patch_size=dataset_configs['iSEG'].patch_size, step=\n dataset_configs['iSEG'].step, test_patch_size=dataset_configs['iSEG'].\n test_patch_size, test_step=dataset_configs['iSEG'].test_step,\n data_augmentation_config=data_augmentation_config)\n", (4022, 4604), False, 'from deepNormalize.inputs.datasets import iSEGSliceDatasetFactory, MRBrainSSliceDatasetFactory, ABIDESliceDatasetFactory\n'), ((4996, 5678), 'deepNormalize.inputs.datasets.MRBrainSSliceDatasetFactory.create_train_valid_test', 'MRBrainSSliceDatasetFactory.create_train_valid_test', ([], {'source_dir': "dataset_configs['MRBrainS'].path", 'modalities': "dataset_configs['MRBrainS'].modalities", 'dataset_id': 'MRBRAINS_ID', 'test_size': "dataset_configs['MRBrainS'].validation_split", 'max_subjects': "dataset_configs['MRBrainS'].max_subjects", 'max_num_patches': "dataset_configs['MRBrainS'].max_num_patches", 'augment': "dataset_configs['MRBrainS'].augment", 'patch_size': "dataset_configs['MRBrainS'].patch_size", 'step': "dataset_configs['MRBrainS'].step", 'test_patch_size': "dataset_configs['MRBrainS'].test_patch_size", 'test_step': "dataset_configs['MRBrainS'].test_step", 'data_augmentation_config': 'data_augmentation_config'}), "(source_dir=\n dataset_configs['MRBrainS'].path, modalities=dataset_configs['MRBrainS'\n ].modalities, dataset_id=MRBRAINS_ID, test_size=dataset_configs[\n 'MRBrainS'].validation_split, max_subjects=dataset_configs['MRBrainS'].\n max_subjects, max_num_patches=dataset_configs['MRBrainS'].\n max_num_patches, augment=dataset_configs['MRBrainS'].augment,\n patch_size=dataset_configs['MRBrainS'].patch_size, step=dataset_configs\n ['MRBrainS'].step, test_patch_size=dataset_configs['MRBrainS'].\n test_patch_size, test_step=dataset_configs['MRBrainS'].test_step,\n data_augmentation_config=data_augmentation_config)\n", (5047, 5678), False, 'from deepNormalize.inputs.datasets import iSEGSliceDatasetFactory, MRBrainSSliceDatasetFactory, ABIDESliceDatasetFactory\n'), ((6014, 6698), 'deepNormalize.inputs.datasets.ABIDESliceDatasetFactory.create_train_valid_test', 'ABIDESliceDatasetFactory.create_train_valid_test', ([], {'source_dir': "dataset_configs['ABIDE'].path", 'modalities': "dataset_configs['ABIDE'].modalities", 'dataset_id': 'ABIDE_ID', 'sites': "dataset_configs['ABIDE'].sites", 'max_subjects': "dataset_configs['ABIDE'].max_subjects", 'test_size': "dataset_configs['ABIDE'].validation_split", 'max_num_patches': "dataset_configs['ABIDE'].max_num_patches", 'augment': "dataset_configs['ABIDE'].augment", 'patch_size': "dataset_configs['ABIDE'].patch_size", 'step': "dataset_configs['ABIDE'].step", 'test_patch_size': "dataset_configs['ABIDE'].test_patch_size", 'test_step': "dataset_configs['ABIDE'].test_step", 'data_augmentation_config': 'data_augmentation_config'}), "(source_dir=dataset_configs\n ['ABIDE'].path, modalities=dataset_configs['ABIDE'].modalities,\n dataset_id=ABIDE_ID, sites=dataset_configs['ABIDE'].sites, max_subjects\n =dataset_configs['ABIDE'].max_subjects, test_size=dataset_configs[\n 'ABIDE'].validation_split, max_num_patches=dataset_configs['ABIDE'].\n max_num_patches, augment=dataset_configs['ABIDE'].augment, patch_size=\n dataset_configs['ABIDE'].patch_size, step=dataset_configs['ABIDE'].step,\n test_patch_size=dataset_configs['ABIDE'].test_patch_size, test_step=\n 
dataset_configs['ABIDE'].test_step, data_augmentation_config=\n data_augmentation_config)\n", (6062, 6698), False, 'from deepNormalize.inputs.datasets import iSEGSliceDatasetFactory, MRBrainSSliceDatasetFactory, ABIDESliceDatasetFactory\n'), ((7086, 7398), 'deepNormalize.utils.image_slicer.ImageReconstructor', 'ImageReconstructor', (['[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction.\n _source_images[0]]'], {'patch_size': "dataset_configs['iSEG'].test_patch_size", 'reconstructed_image_size': '(1, 256, 256, 192)', 'step': "dataset_configs['iSEG'].test_step", 'models': '[model_trainers[0]]', 'segment': '(True)', 'batch_size': '(8)'}), "([iSEG_reconstruction._source_images[0],\n MRBrainS_reconstruction._source_images[0]], patch_size=dataset_configs[\n 'iSEG'].test_patch_size, reconstructed_image_size=(1, 256, 256, 192),\n step=dataset_configs['iSEG'].test_step, models=[model_trainers[0]],\n segment=True, batch_size=8)\n", (7104, 7398), False, 'from deepNormalize.utils.image_slicer import ImageReconstructor\n'), ((7485, 7752), 'deepNormalize.utils.image_slicer.ImageReconstructor', 'ImageReconstructor', (['[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction.\n _source_images[0]]'], {'patch_size': "dataset_configs['iSEG'].test_patch_size", 'reconstructed_image_size': '(1, 256, 256, 192)', 'step': "dataset_configs['iSEG'].test_step", 'batch_size': '(50)'}), "([iSEG_reconstruction._source_images[0],\n MRBrainS_reconstruction._source_images[0]], patch_size=dataset_configs[\n 'iSEG'].test_patch_size, reconstructed_image_size=(1, 256, 256, 192),\n step=dataset_configs['iSEG'].test_step, batch_size=50)\n", (7503, 7752), False, 'from deepNormalize.utils.image_slicer import ImageReconstructor\n'), ((7828, 8122), 'deepNormalize.utils.image_slicer.ImageReconstructor', 'ImageReconstructor', (['[iSEG_reconstruction._target_images[0], MRBrainS_reconstruction.\n _target_images[0]]'], {'patch_size': "dataset_configs['iSEG'].test_patch_size", 'reconstructed_image_size': '(1, 256, 256, 192)', 'step': "dataset_configs['iSEG'].test_step", 'is_ground_truth': '(True)', 'batch_size': '(50)'}), "([iSEG_reconstruction._target_images[0],\n MRBrainS_reconstruction._target_images[0]], patch_size=dataset_configs[\n 'iSEG'].test_patch_size, reconstructed_image_size=(1, 256, 256, 192),\n step=dataset_configs['iSEG'].test_step, is_ground_truth=True, batch_size=50\n )\n", (7846, 8122), False, 'from deepNormalize.utils.image_slicer import ImageReconstructor\n'), ((9095, 9443), 'deepNormalize.utils.image_slicer.ImageReconstructor', 'ImageReconstructor', (['[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction.\n _source_images[0], ABIDE_reconstruction._source_images[0]]'], {'patch_size': '(1, 32, 32, 32)', 'reconstructed_image_size': '(1, 256, 256, 192)', 'step': "dataset_configs['iSEG'].test_step", 'models': '[model_trainers[0]]', 'normalize_and_segment': '(True)', 'batch_size': '(4)'}), "([iSEG_reconstruction._source_images[0],\n MRBrainS_reconstruction._source_images[0], ABIDE_reconstruction.\n _source_images[0]], patch_size=(1, 32, 32, 32),\n reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\n 'iSEG'].test_step, models=[model_trainers[0]], normalize_and_segment=\n True, batch_size=4)\n", (9113, 9443), False, 'from deepNormalize.utils.image_slicer import ImageReconstructor\n'), ((9549, 9837), 'deepNormalize.utils.image_slicer.ImageReconstructor', 'ImageReconstructor', (['[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction.\n _source_images[0], 
ABIDE_reconstruction._source_images[0]]'], {'patch_size': '(1, 32, 32, 32)', 'reconstructed_image_size': '(1, 256, 256, 192)', 'step': "dataset_configs['iSEG'].test_step", 'batch_size': '(50)'}), "([iSEG_reconstruction._source_images[0],\n MRBrainS_reconstruction._source_images[0], ABIDE_reconstruction.\n _source_images[0]], patch_size=(1, 32, 32, 32),\n reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\n 'iSEG'].test_step, batch_size=50)\n", (9567, 9837), False, 'from deepNormalize.utils.image_slicer import ImageReconstructor\n'), ((9921, 10231), 'deepNormalize.utils.image_slicer.ImageReconstructor', 'ImageReconstructor', (['[iSEG_reconstruction._target_images[0], MRBrainS_reconstruction.\n _target_images[0], ABIDE_reconstruction._target_images[0]]'], {'patch_size': '(1, 32, 32, 32)', 'reconstructed_image_size': '(1, 256, 256, 192)', 'step': "dataset_configs['iSEG'].test_step", 'batch_size': '(50)', 'is_ground_truth': '(True)'}), "([iSEG_reconstruction._target_images[0],\n MRBrainS_reconstruction._target_images[0], ABIDE_reconstruction.\n _target_images[0]], patch_size=(1, 32, 32, 32),\n reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\n 'iSEG'].test_step, batch_size=50, is_ground_truth=True)\n", (9939, 10231), False, 'from deepNormalize.utils.image_slicer import ImageReconstructor\n'), ((11285, 11331), 'torch.utils.data.ConcatDataset', 'torch.utils.data.ConcatDataset', (['train_datasets'], {}), '(train_datasets)\n', (11315, 11331), False, 'import torch\n'), ((11356, 11402), 'torch.utils.data.ConcatDataset', 'torch.utils.data.ConcatDataset', (['valid_datasets'], {}), '(valid_datasets)\n', (11386, 11402), False, 'import torch\n'), ((11426, 11471), 'torch.utils.data.ConcatDataset', 'torch.utils.data.ConcatDataset', (['test_datasets'], {}), '(test_datasets)\n', (11456, 11471), False, 'import torch\n'), ((13092, 13207), 'kerosene.loggers.visdom.visdom.VisdomData', 'VisdomData', (['"""Experiment"""', '"""Experiment Config"""', 'PlotType.TEXT_PLOT', 'PlotFrequency.EVERY_EPOCH', 'None', 'config_html'], {}), "('Experiment', 'Experiment Config', PlotType.TEXT_PLOT,\n PlotFrequency.EVERY_EPOCH, None, config_html)\n", (13102, 13207), False, 'from kerosene.loggers.visdom.visdom import VisdomLogger, VisdomData\n'), ((2146, 2208), 'deepNormalize.config.parsers.ArgsParserFactory.create_parser', 'ArgsParserFactory.create_parser', (['ArgsParserType.MODEL_TRAINING'], {}), '(ArgsParserType.MODEL_TRAINING)\n', (2177, 2208), False, 'from deepNormalize.config.parsers import ArgsParserFactory, ArgsParserType\n'), ((3559, 3579), 'deepNormalize.factories.customModelFactory.CustomModelFactory', 'CustomModelFactory', ([], {}), '()\n', (3577, 3579), False, 'from deepNormalize.factories.customModelFactory import CustomModelFactory\n'), ((3647, 3671), 'deepNormalize.nn.criterions.CustomCriterionFactory', 'CustomCriterionFactory', ([], {}), '()\n', (3669, 3671), False, 'from deepNormalize.nn.criterions import CustomCriterionFactory\n'), ((8266, 8812), 'deepNormalize.utils.image_slicer.ImageReconstructor', 'ImageReconstructor', (['[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction.\n _source_images[0]]'], {'patch_size': "dataset_configs['iSEG'].test_patch_size", 'reconstructed_image_size': '(1, 256, 256, 192)', 'step': "dataset_configs['iSEG'].test_step", 'batch_size': '(50)', 'alpha': "data_augmentation_config['test']['bias_field']['alpha'][0]", 'prob_bias': "data_augmentation_config['test']['bias_field']['prob_bias']", 'snr': 
"data_augmentation_config['test']['noise']['snr']", 'prob_noise': "data_augmentation_config['test']['noise']['prob_noise']"}), "([iSEG_reconstruction._source_images[0],\n MRBrainS_reconstruction._source_images[0]], patch_size=dataset_configs[\n 'iSEG'].test_patch_size, reconstructed_image_size=(1, 256, 256, 192),\n step=dataset_configs['iSEG'].test_step, batch_size=50, alpha=\n data_augmentation_config['test']['bias_field']['alpha'][0], prob_bias=\n data_augmentation_config['test']['bias_field']['prob_bias'], snr=\n data_augmentation_config['test']['noise']['snr'], prob_noise=\n data_augmentation_config['test']['noise']['prob_noise'])\n", (8284, 8812), False, 'from deepNormalize.utils.image_slicer import ImageReconstructor\n'), ((10388, 10955), 'deepNormalize.utils.image_slicer.ImageReconstructor', 'ImageReconstructor', (['[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction.\n _source_images[0], ABIDE_reconstruction._source_images[0]]'], {'patch_size': '(1, 32, 32, 32)', 'reconstructed_image_size': '(1, 256, 256, 192)', 'step': "dataset_configs['iSEG'].test_step", 'batch_size': '(50)', 'alpha': "data_augmentation_config['test']['bias_field']['alpha'][0]", 'prob_bias': "data_augmentation_config['test']['bias_field']['prob_bias']", 'snr': "data_augmentation_config['test']['noise']['snr']", 'prob_noise': "data_augmentation_config['test']['noise']['prob_noise']"}), "([iSEG_reconstruction._source_images[0],\n MRBrainS_reconstruction._source_images[0], ABIDE_reconstruction.\n _source_images[0]], patch_size=(1, 32, 32, 32),\n reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\n 'iSEG'].test_step, batch_size=50, alpha=data_augmentation_config['test'\n ]['bias_field']['alpha'][0], prob_bias=data_augmentation_config['test']\n ['bias_field']['prob_bias'], snr=data_augmentation_config['test'][\n 'noise']['snr'], prob_noise=data_augmentation_config['test']['noise'][\n 'prob_noise'])\n", (10406, 10955), False, 'from deepNormalize.utils.image_slicer import ImageReconstructor\n'), ((13716, 13755), 'deepNormalize.factories.customTrainerFactory.TrainerFactory', 'TrainerFactory', (['training_config.trainer'], {}), '(training_config.trainer)\n', (13730, 13755), False, 'from deepNormalize.factories.customTrainerFactory import TrainerFactory\n'), ((11672, 11855), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', (['dataset', 'training_config.batch_size'], {'sampler': 'None', 'shuffle': '(True)', 'num_workers': 'args.num_workers', 'collate_fn': 'augmented_sample_collate', 'drop_last': '(True)', 'pin_memory': '(True)'}), '(dataset, training_config.batch_size, sampler=None, shuffle=True,\n num_workers=args.num_workers, collate_fn=augmented_sample_collate,\n drop_last=True, pin_memory=True)\n', (11682, 11855), False, 'from torch.utils.data.dataloader import DataLoader\n'), ((12850, 12885), 'os.path.normpath', 'os.path.normpath', (['visdom_config.env'], {}), '(visdom_config.env)\n', (12866, 12885), False, 'import os\n'), ((12745, 12780), 'os.path.normpath', 'os.path.normpath', (['visdom_config.env'], {}), '(visdom_config.env)\n', (12761, 12780), False, 'import os\n')]
|
import sys
import unittest
import os
import tempfile
from netCDF4 import Dataset
import numpy as np
from numpy.testing import assert_array_equal
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
VL_NAME = 'vlen_type'
VL_BASETYPE = np.int16
DIM1_NAME = 'lon'
DIM2_NAME = 'lat'
nlons = 5; nlats = 5
VAR1_NAME = 'ragged'
VAR2_NAME = 'strings'
VAR3_NAME = 'strings_alt'
VAR4_NAME = 'string_scalar'
VAR5_NAME = 'vlen_scalar'
data = np.empty(nlats*nlons,object)
datas = np.empty(nlats*nlons,object)
nn = 0
for n in range(nlats*nlons):
nn = nn + 1
data[n] = np.arange(nn,dtype=VL_BASETYPE)
datas[n] = ''.join([chr(i) for i in range(97,97+nn+1)])
data = np.reshape(data,(nlats,nlons))
datas = np.reshape(datas,(nlats,nlons))
class VariablesTestCase(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
f = Dataset(self.file,'w')
vlen_t = f.createVLType(VL_BASETYPE, VL_NAME)
f.createDimension(DIM1_NAME,nlons)
f.createDimension(DIM2_NAME,nlats)
ragged = f.createVariable(VAR1_NAME, vlen_t,\
(DIM2_NAME,DIM1_NAME))
strings = f.createVariable(VAR2_NAME, str,
(DIM2_NAME,DIM1_NAME))
strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype,
(DIM2_NAME, DIM1_NAME))
string_scalar = f.createVariable(VAR4_NAME,str,())
vlen_scalar = f.createVariable(VAR5_NAME,vlen_t,())
ragged[:] = data
ragged[-1,-1] = data[-1,-1]
strings[:] = datas
strings[-2,-2] = datas[-2,-2]
strings_alt[:] = datas.astype(str)
string_scalar[...] = 'foo' #issue458
vlen_scalar[...] = np.array([1,2,3],np.int16)
f.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing vlen variables"""
f = Dataset(self.file, 'r')
v = f.variables[VAR1_NAME]
vs = f.variables[VAR2_NAME]
vs_alt = f.variables[VAR3_NAME]
assert list(f.vltypes.keys()) == [VL_NAME]
assert f.vltypes[VL_NAME].dtype == VL_BASETYPE
assert f.variables['string_scalar'][...] == 'foo'
assert_array_equal(f.variables['vlen_scalar'][...],np.array([1,2,3],np.int16))
data2 = v[:]
data2s = vs[:]
for i in range(nlons):
for j in range(nlats):
assert_array_equal(data2[j,i], data[j,i])
assert datas[j,i] == data2s[j,i]
assert_array_equal(datas, vs_alt[:])
f.close()
class TestInvalidDataType(unittest.TestCase):
def runTest(self):
f = Dataset(FILE_NAME, 'w', format='NETCDF3_CLASSIC')
f.createDimension('x', 1)
# using assertRaisesRegext as a context manager
# only works with python >= 2.7 (issue #497)
#with self.assertRaisesRegexp(ValueError, 'strings are only supported'):
# f.createVariable('foo', str, ('x',))
try:
f.createVariable('foo', str, ('x',))
except ValueError:
pass
f.close()
os.remove(FILE_NAME)
class TestScalarVlenString(unittest.TestCase):
# issue 333
def runTest(self):
f = Dataset(FILE_NAME, 'w', format='NETCDF4')
teststring = f.createVariable('teststring', str)
stringout = "yyyymmdd_hhmmss"
teststring[()] = stringout
f.close()
f = Dataset(FILE_NAME)
assert f.variables['teststring'][:] == stringout
f.close()
os.remove(FILE_NAME)
class TestIntegerIndex(unittest.TestCase):
# issue 526
def runTest(self):
strtest = Dataset(FILE_NAME, 'w', format='NETCDF4')
strtest.createDimension('tenstrings', 10)
strtest.createVariable('tenstrings', str, ['tenstrings'])
strtest['tenstrings'][np.int32(5)] = 'asdf'
strtest['tenstrings'][6.0] = 'asdf'
strtest.close()
f = Dataset(FILE_NAME)
assert f.variables['tenstrings'][np.int32(5)] == 'asdf'
assert f.variables['tenstrings'][6.0] == 'asdf'
f.close()
os.remove(FILE_NAME)
class TestObjectArrayIndexing(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
f = Dataset(self.file,'w')
vlen_t = f.createVLType(VL_BASETYPE, VL_NAME)
f.createDimension(DIM1_NAME,nlons)
f.createDimension(DIM2_NAME,nlats)
strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype,
(DIM2_NAME, DIM1_NAME))
strings_alt[:] = datas.astype(str)
f.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing vlen variables"""
f = Dataset(self.file, 'r')
vs_alt = f.variables[VAR3_NAME]
unicode_strings = vs_alt[:]
fancy_indexed = unicode_strings[0][[1,2,4]]
assert fancy_indexed[0] == 'abc'
assert fancy_indexed[1] == 'abcd'
assert fancy_indexed[2] == 'abcdef'
f.close()
class VlenAppendTestCase(unittest.TestCase):
def setUp(self):
import netCDF4
if netCDF4.__netcdf4libversion__ < "4.4.1":
self.skip = True
try:
self.skipTest("This test requires NetCDF 4.4.1 or later.")
except AttributeError:
# workaround for Python 2.6 (skipTest(reason) is new
# in Python 2.7)
pass
else:
self.skip = False
self.file = FILE_NAME
f = Dataset(self.file, 'w')
vlen_type = f.createVLType(np.float64, 'vltest')
f.createDimension('x', None)
v = f.createVariable('vl', vlen_type, 'x')
w = f.createVariable('vl2', np.float64, 'x')
f.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing appending to vlen variables (issue #527)."""
# workaround for Python 2.6
if self.skip:
return
f = Dataset(self.file, 'a')
w = f.variables["vl2"]
v = f.variables["vl"]
w[0:3] = np.arange(3, dtype=np.float64)
v[0] # sometimes crashes
v[0].tolist() # sometimes crashes
v[0].size # BOOM!
f.close()
class Vlen_ScaledInts(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
nc = Dataset(self.file, 'w')
vlen_type = nc.createVLType(np.uint8, 'vltest')
nc.createDimension('x', None)
v = nc.createVariable('vl', vlen_type, 'x')
v.scale_factor = 1./254.
v.missing_value=np.array(255,np.uint8)
        # random lengths between 1 and 999 (np.random.randint's upper bound is exclusive)
ilen = np.random.randint(1,1000,size=100)
n = 0
for nlen in ilen:
data = np.random.uniform(low=0.0, high=1.0, size=nlen)
v[n] = data
if n==99: self.data = data
n += 1
nc.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing packing float vlens as scaled integers (issue #1003)."""
nc = Dataset(self.file)
data = nc['vl'][-1]
# check max error of compression
err = np.abs(data - self.data)
assert(err.max() < nc['vl'].scale_factor)
# turn off auto-scaling
nc.set_auto_maskandscale(False)
data = nc['vl'][-1]
assert(data[-1] == np.around(self.data[-1]/nc['vl'].scale_factor))
nc.close()
if __name__ == '__main__':
unittest.main()
|
[
"numpy.abs",
"numpy.reshape",
"numpy.testing.assert_array_equal",
"netCDF4.Dataset",
"numpy.int32",
"numpy.array",
"numpy.random.randint",
"numpy.random.uniform",
"numpy.empty",
"numpy.around",
"tempfile.NamedTemporaryFile",
"unittest.main",
"numpy.arange",
"os.remove"
] |
[((451, 482), 'numpy.empty', 'np.empty', (['(nlats * nlons)', 'object'], {}), '(nlats * nlons, object)\n', (459, 482), True, 'import numpy as np\n'), ((488, 519), 'numpy.empty', 'np.empty', (['(nlats * nlons)', 'object'], {}), '(nlats * nlons, object)\n', (496, 519), True, 'import numpy as np\n'), ((682, 714), 'numpy.reshape', 'np.reshape', (['data', '(nlats, nlons)'], {}), '(data, (nlats, nlons))\n', (692, 714), True, 'import numpy as np\n'), ((721, 754), 'numpy.reshape', 'np.reshape', (['datas', '(nlats, nlons)'], {}), '(datas, (nlats, nlons))\n', (731, 754), True, 'import numpy as np\n'), ((158, 213), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".nc"""', 'delete': '(False)'}), "(suffix='.nc', delete=False)\n", (185, 213), False, 'import tempfile\n'), ((583, 615), 'numpy.arange', 'np.arange', (['nn'], {'dtype': 'VL_BASETYPE'}), '(nn, dtype=VL_BASETYPE)\n', (592, 615), True, 'import numpy as np\n'), ((7659, 7674), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7672, 7674), False, 'import unittest\n'), ((863, 886), 'netCDF4.Dataset', 'Dataset', (['self.file', '"""w"""'], {}), "(self.file, 'w')\n", (870, 886), False, 'from netCDF4 import Dataset\n'), ((1708, 1737), 'numpy.array', 'np.array', (['[1, 2, 3]', 'np.int16'], {}), '([1, 2, 3], np.int16)\n', (1716, 1737), True, 'import numpy as np\n'), ((1823, 1843), 'os.remove', 'os.remove', (['self.file'], {}), '(self.file)\n', (1832, 1843), False, 'import os\n'), ((1917, 1940), 'netCDF4.Dataset', 'Dataset', (['self.file', '"""r"""'], {}), "(self.file, 'r')\n", (1924, 1940), False, 'from netCDF4 import Dataset\n'), ((2528, 2564), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['datas', 'vs_alt[:]'], {}), '(datas, vs_alt[:])\n', (2546, 2564), False, 'from numpy.testing import assert_array_equal\n'), ((2666, 2715), 'netCDF4.Dataset', 'Dataset', (['FILE_NAME', '"""w"""'], {'format': '"""NETCDF3_CLASSIC"""'}), "(FILE_NAME, 'w', format='NETCDF3_CLASSIC')\n", (2673, 2715), False, 'from netCDF4 import Dataset\n'), ((3122, 3142), 'os.remove', 'os.remove', (['FILE_NAME'], {}), '(FILE_NAME)\n', (3131, 3142), False, 'import os\n'), ((3242, 3283), 'netCDF4.Dataset', 'Dataset', (['FILE_NAME', '"""w"""'], {'format': '"""NETCDF4"""'}), "(FILE_NAME, 'w', format='NETCDF4')\n", (3249, 3283), False, 'from netCDF4 import Dataset\n'), ((3444, 3462), 'netCDF4.Dataset', 'Dataset', (['FILE_NAME'], {}), '(FILE_NAME)\n', (3451, 3462), False, 'from netCDF4 import Dataset\n'), ((3546, 3566), 'os.remove', 'os.remove', (['FILE_NAME'], {}), '(FILE_NAME)\n', (3555, 3566), False, 'import os\n'), ((3668, 3709), 'netCDF4.Dataset', 'Dataset', (['FILE_NAME', '"""w"""'], {'format': '"""NETCDF4"""'}), "(FILE_NAME, 'w', format='NETCDF4')\n", (3675, 3709), False, 'from netCDF4 import Dataset\n'), ((3958, 3976), 'netCDF4.Dataset', 'Dataset', (['FILE_NAME'], {}), '(FILE_NAME)\n', (3965, 3976), False, 'from netCDF4 import Dataset\n'), ((4123, 4143), 'os.remove', 'os.remove', (['FILE_NAME'], {}), '(FILE_NAME)\n', (4132, 4143), False, 'import os\n'), ((4259, 4282), 'netCDF4.Dataset', 'Dataset', (['self.file', '"""w"""'], {}), "(self.file, 'w')\n", (4266, 4282), False, 'from netCDF4 import Dataset\n'), ((4691, 4711), 'os.remove', 'os.remove', (['self.file'], {}), '(self.file)\n', (4700, 4711), False, 'import os\n'), ((4785, 4808), 'netCDF4.Dataset', 'Dataset', (['self.file', '"""r"""'], {}), "(self.file, 'r')\n", (4792, 4808), False, 'from netCDF4 import Dataset\n'), ((5591, 5614), 'netCDF4.Dataset', 'Dataset', (['self.file', '"""w"""'], 
{}), "(self.file, 'w')\n", (5598, 5614), False, 'from netCDF4 import Dataset\n'), ((5901, 5921), 'os.remove', 'os.remove', (['self.file'], {}), '(self.file)\n', (5910, 5921), False, 'import os\n'), ((6100, 6123), 'netCDF4.Dataset', 'Dataset', (['self.file', '"""a"""'], {}), "(self.file, 'a')\n", (6107, 6123), False, 'from netCDF4 import Dataset\n'), ((6202, 6232), 'numpy.arange', 'np.arange', (['(3)'], {'dtype': 'np.float64'}), '(3, dtype=np.float64)\n', (6211, 6232), True, 'import numpy as np\n'), ((6502, 6525), 'netCDF4.Dataset', 'Dataset', (['self.file', '"""w"""'], {}), "(self.file, 'w')\n", (6509, 6525), False, 'from netCDF4 import Dataset\n'), ((6729, 6752), 'numpy.array', 'np.array', (['(255)', 'np.uint8'], {}), '(255, np.uint8)\n', (6737, 6752), True, 'import numpy as np\n'), ((6811, 6847), 'numpy.random.randint', 'np.random.randint', (['(1)', '(1000)'], {'size': '(100)'}), '(1, 1000, size=100)\n', (6828, 6847), True, 'import numpy as np\n'), ((7123, 7143), 'os.remove', 'os.remove', (['self.file'], {}), '(self.file)\n', (7132, 7143), False, 'import os\n'), ((7256, 7274), 'netCDF4.Dataset', 'Dataset', (['self.file'], {}), '(self.file)\n', (7263, 7274), False, 'from netCDF4 import Dataset\n'), ((7358, 7382), 'numpy.abs', 'np.abs', (['(data - self.data)'], {}), '(data - self.data)\n', (7364, 7382), True, 'import numpy as np\n'), ((2275, 2304), 'numpy.array', 'np.array', (['[1, 2, 3]', 'np.int16'], {}), '([1, 2, 3], np.int16)\n', (2283, 2304), True, 'import numpy as np\n'), ((3856, 3867), 'numpy.int32', 'np.int32', (['(5)'], {}), '(5)\n', (3864, 3867), True, 'import numpy as np\n'), ((6905, 6952), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.0)', 'high': '(1.0)', 'size': 'nlen'}), '(low=0.0, high=1.0, size=nlen)\n', (6922, 6952), True, 'import numpy as np\n'), ((7560, 7608), 'numpy.around', 'np.around', (["(self.data[-1] / nc['vl'].scale_factor)"], {}), "(self.data[-1] / nc['vl'].scale_factor)\n", (7569, 7608), True, 'import numpy as np\n'), ((2429, 2472), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['data2[j, i]', 'data[j, i]'], {}), '(data2[j, i], data[j, i])\n', (2447, 2472), False, 'from numpy.testing import assert_array_equal\n'), ((4018, 4029), 'numpy.int32', 'np.int32', (['(5)'], {}), '(5)\n', (4026, 4029), True, 'import numpy as np\n')]
|
import numpy as np
if __name__ == '__main__':
    # First line of input: the grid dimensions h (rows) and w (columns).
    h, w = map(int, input().split())
    row_list = []
    for i in range(h):
        # Each of the next h lines holds one row of w integers.
        single_row = list(map(int, input().split()))
        row_list.append(np.array(single_row))
    # Minimum of every row (axis=1), then the maximum of those minima.
    min_of_each_row = np.min(row_list, axis=1)
    max_of_min = np.max(min_of_each_row)
    print(max_of_min)
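# Worked example (made-up input, purely for illustration): for h = 2, w = 2 and
# the grid
#     2 5
#     3 7
# the row minima are [2, 3], so the program prints 3.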
|
[
"numpy.max",
"numpy.array",
"numpy.min"
] |
[((288, 312), 'numpy.min', 'np.min', (['row_list'], {'axis': '(1)'}), '(row_list, axis=1)\n', (294, 312), True, 'import numpy as np\n'), ((334, 357), 'numpy.max', 'np.max', (['min_of_each_row'], {}), '(min_of_each_row)\n', (340, 357), True, 'import numpy as np\n'), ((206, 226), 'numpy.array', 'np.array', (['single_row'], {}), '(single_row)\n', (214, 226), True, 'import numpy as np\n')]
|
from .._BlackJack import BlackJackCPP
import gym
import ctypes
import numpy as np
from gym import spaces
class BlackJack(gym.Env):
    """Gym front-end for the C++ ``BlackJackCPP`` backend.

    The spaces use the same layout as Gym's built-in Blackjack environment:
    two discrete actions and an observation triple conventionally read as
    (player sum, dealer's visible card, usable-ace flag).
    """
    def __init__(self, natural=False):
        self.env = BlackJackCPP(natural)
        self.action_space = spaces.Discrete(2)
        self.observation_space = spaces.Tuple((
            spaces.Discrete(32),
            spaces.Discrete(11),
            spaces.Discrete(2)
        ))
        self.state = None
        self.natural = natural
def seed(self, seed=None):
if seed is None:
return [self.env.get_seed()]
else:
if not isinstance(seed, ctypes.c_uint32):
seed = ctypes.c_uint32(seed).value
self.env.set_seed(seed)
return [seed]
def step(self, action):
assert self.action_space.contains(action)
state, reward, done = self.env.step(action)
self.state = np.array(state)
return self.state, reward, done, {}
def render(self, mode='human'):
return None
def reset(self):
self.state = np.array(self.env.reset())
return self.state
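# Illustrative interaction sketch, not part of the module. It assumes the
# compiled BlackJackCPP extension is importable, and it uses a random policy so
# nothing is assumed about the backend's action encoding:
#
#     env = BlackJack(natural=False)
#     env.seed(0)
#     obs = env.reset()
#     done = False
#     while not done:
#         action = env.action_space.sample()
#         obs, reward, done, info = env.step(action)
#     print(obs, reward)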
|
[
"numpy.array",
"ctypes.c_uint32",
"gym.spaces.Discrete"
] |
[((242, 260), 'gym.spaces.Discrete', 'spaces.Discrete', (['(2)'], {}), '(2)\n', (257, 260), False, 'from gym import spaces\n'), ((905, 920), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (913, 920), True, 'import numpy as np\n'), ((321, 340), 'gym.spaces.Discrete', 'spaces.Discrete', (['(32)'], {}), '(32)\n', (336, 340), False, 'from gym import spaces\n'), ((354, 373), 'gym.spaces.Discrete', 'spaces.Discrete', (['(11)'], {}), '(11)\n', (369, 373), False, 'from gym import spaces\n'), ((387, 405), 'gym.spaces.Discrete', 'spaces.Discrete', (['(2)'], {}), '(2)\n', (402, 405), False, 'from gym import spaces\n'), ((663, 684), 'ctypes.c_uint32', 'ctypes.c_uint32', (['seed'], {}), '(seed)\n', (678, 684), False, 'import ctypes\n')]
|
import numpy as np
from . import _version
__version__ = _version.get_versions()['version']
HXR_COLORS = ("#000000", "#02004a", "#030069", "#04008f", "#0500b3", "#0700ff")
SXR_COLORS = ("#000000", "#330000", "#520000", "#850000", "#ad0000", "#ff0000")
HXR_AREAS = {
"GUN" : [2017.911, 2018.712],
"L0" : [2018.712, 2024.791],
"DL1_1": [2024.791, 2031.992],
"DL1_2": [2031.992, 2035.035],
"L1": [2035.035, 2044.167],
"BC1": [2044.167, 2059.733],
"L2": [2059.733, 2410.698],
"BC2": [2410.698, 2438.400],
"L3": [2438.400, 3042.005],
"CLTH_0": [3042.005, 3050.512],
"CLTH_1": [3050.512, 3058.457],
"CLTH_2": [3058.457, 3110.961],
"BSYH_1": [3110.961, 3117.409],
"BSYH_2": [3117.409, 3224.022],
"LTUH": [3224.022, 3562.739],
"UNDH": [3562.739, 3718.483],
"DMPH_1": [3718.483, 3734.407],
"DMPH_2": [3734.407, 3765.481]
}
# Re-key each hard x-ray region by the midpoint of its [start, end] span so
# that a region name can be looked up from a position value.
HXR_AREAS = {np.mean(value): key for key, value in HXR_AREAS.items()}
SXR_AREAS = {
"GUN" : [2017.911, 2017.911],
"L0" : [2018.712, 2024.791],
"DL1_1": [2024.791, 2031.992],
"DL1_2": [2031.992, 2035.035],
"L1": [2035.035, 2044.167],
"BC1": [2044.167, 2059.733],
"L2": [2059.733, 2410.698],
"BC2": [2410.698, 2438.400],
"L3": [2438.400, 3042.005],
"CLTH_0": [3042.005, 3050.512],
"CLTH_1": [3050.512, 3058.457],
"CLTS": [3177.650, 3224.022],
"BSYS": [3224.022, 3565.656],
"LTUS": [3565.656, 3718.483],
"UNDS": [3718.483, 3734.407],
"DMPS_1": [3734.407, 3734.407],
"DMPS_2": [3734.407, 3765.481]
}
# Same re-keying for the soft x-ray regions: midpoint of each span -> name.
SXR_AREAS = {np.mean(value): key for key, value in SXR_AREAS.items()}
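# Illustrative lookup, with a hypothetical helper that is not part of this
# module: because the dictionaries above are keyed by the midpoint of each
# [start, end] span, the region closest to a given position can be found by
# minimising the distance to those keys.
#
#     def nearest_region(z, areas=HXR_AREAS):
#         midpoint = min(areas, key=lambda m: abs(m - z))
#         return areas[midpoint]
#
#     nearest_region(3600.0)  # -> 'UNDH' (midpoint is roughly 3640.6)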
|
[
"numpy.mean"
] |
[((903, 917), 'numpy.mean', 'np.mean', (['value'], {}), '(value)\n', (910, 917), True, 'import numpy as np\n'), ((1569, 1583), 'numpy.mean', 'np.mean', (['value'], {}), '(value)\n', (1576, 1583), True, 'import numpy as np\n')]
|
import os
import tempfile
import numpy as np
import tensorflow as tf
from time import time
from termcolor import cprint
from unittest import TestCase
from .. import K
from .. import Input, Dense, GRU, Bidirectional, Embedding
from .. import Model, load_model
from .. import l2
from .. import maxnorm
from .. import Adam, Nadam, SGD
from .. import AdamW, NadamW, SGDW
from .. import get_weight_decays, fill_dict_in_order, reset_seeds, K_eval
print("TF version: %s" % tf.__version__)
tf_eager = bool(os.environ["TF_EAGER"] == "True")
if tf_eager:
print("TF running eagerly")
else:
tf.compat.v1.disable_eager_execution()
print("TF running in graph mode")
class TestOptimizers(TestCase):
def test_all(self): # Save/Load, Warm Restarts (w/ cosine annealing)
for optimizer_name in ['AdamW', 'NadamW', 'SGDW']:
cprint("<< TESTING {} OPTIMIZER >>".format(optimizer_name), 'blue')
reset_seeds()
num_batches, num_epochs = 25, 4
batch_size, timesteps, num_channels = 16, 8, 4
batch_shape = (batch_size, timesteps, num_channels)
total_iterations = num_batches # due to warm restarts
self.model = self._make_model(batch_shape, total_iterations)
optimizer = self._make_optimizer(optimizer_name, self.model,
total_iterations)
self.model.compile(optimizer, loss='binary_crossentropy')
self.assertTrue(self._valid_weight_decays(self.model))
self.model._make_train_function() # else K.eval before train may fail
X, Y = self._make_data(num_batches, *batch_shape)
self.eta_history = [] # for stop-introspection
self.t_cur_history = [] # for stop-introspection
for epoch in range(num_epochs):
for batch_num in range(num_batches):
self.t_cur_history += [K_eval(self.model.optimizer.t_cur, K)]
self.eta_history += [K_eval(self.model.optimizer.eta_t, K)]
self.model.train_on_batch(X[batch_num], Y[batch_num])
self.eta_history += [K_eval(self.model.optimizer.eta_t, K)]
self.eta_history.pop(-(1 + int(tf_eager)))
K.set_value(self.model.optimizer.t_cur, 0)
self.assertTrue(self._valid_cosine_annealing(self.eta_history,
total_iterations, num_epochs))
self._test_save_load(self.model, X, optimizer_name, optimizer)
# cleanup
del self.model, optimizer
reset_seeds(reset_graph_with_backend=K)
cprint("\n<< {} MAIN TEST PASSED >>\n".format(optimizer_name), 'green')
cprint("\n<< ALL MAIN TESTS PASSED >>\n", 'green')
def test_misc(self): # tests of non-main features to improve coverage
for optimizer_name in ['AdamW', 'NadamW', 'SGDW']:
cprint("<< TESTING {} OPTIMIZER >>".format(optimizer_name), 'blue')
reset_seeds()
optimizer_kw = {'total_iterations': 0, 'decay': 1e-3,
'amsgrad': optimizer_name == 'AdamW',
'nesterov': optimizer_name == 'SGDW'}
num_batches = 4
batch_size, timesteps = 16, 8
batch_shape = (batch_size, timesteps)
embed_input_dim = 5
total_iterations = 0
self.model = self._make_model(batch_shape, total_iterations,
embed_input_dim=embed_input_dim,
dense_constraint=1, l2_reg=1e-4,
bidirectional=False, sparse=True)
optimizer = self._make_optimizer(optimizer_name, self.model,
**optimizer_kw)
self.model.compile(optimizer, loss='sparse_categorical_crossentropy')
X, Y = self._make_data(num_batches, *batch_shape,
embed_input_dim=embed_input_dim, sparse=True)
for batch_num in range(num_batches):
self.model.train_on_batch(X[batch_num], Y[batch_num])
self._test_save_load(self.model, X, optimizer_name, optimizer)
# cleanup
del self.model, optimizer
reset_seeds(reset_graph_with_backend=K)
cprint("\n<< {} MISC TEST PASSED >>\n".format(optimizer_name), 'green')
cprint("\n<< ALL MISC TESTS PASSED >>\n", 'green')
def test_control(self): # tests losses against original optimizers'
for optimizer_name in ['AdamW', 'NadamW', 'SGDW']:
cprint("<< TESTING {} OPTIMIZER >>".format(optimizer_name), 'blue')
pass_txt = "Control Test Passed"
if optimizer_name == 'AdamW':
for amsgrad in [True, False]:
self._test_control(optimizer_name, amsgrad=amsgrad)
print("\n>> AdamW amsgrad={} {}".format(amsgrad, pass_txt))
elif optimizer_name == 'NadamW':
self._test_control(optimizer_name)
elif optimizer_name == 'SGDW':
for nesterov in [True, False]:
self._test_control(optimizer_name, nesterov=nesterov)
print("\n>> SGDW nesterov={} {}".format(nesterov, pass_txt))
o_name = optimizer_name
cprint("\n<< {} {} >>\n".format(o_name, pass_txt.upper()), 'green')
cprint("\n<< ALL CONTROL TESTS PASSED >>\n", 'green')
def _test_control(self, optimizer_name, amsgrad=False, nesterov=False):
optimizer_kw = dict(total_iterations=0, decay=1e-3,
amsgrad=amsgrad, nesterov=nesterov,
control_mode=True)
num_batches = 100
batch_size, timesteps = 16, 32
batch_shape = (batch_size, timesteps)
embed_input_dim = 5
total_iterations = 0
model_kw = dict(batch_shape=batch_shape, dense_constraint=1,
total_iterations=total_iterations,
embed_input_dim=embed_input_dim, l2_reg=0,
bidirectional=False, sparse=True)
loss_name = 'sparse_categorical_crossentropy'
reset_seeds(verbose=0)
X, Y = self._make_data(num_batches, *batch_shape,
embed_input_dim=embed_input_dim, sparse=True)
reset_seeds(reset_graph_with_backend=K, verbose=0)
self.model_custom = self._make_model(**model_kw)
optimizer_custom = self._make_optimizer(optimizer_name,
self.model_custom,
**optimizer_kw)
self.model_custom.compile(optimizer_custom, loss=loss_name)
self.loss_custom = [] # for introspection
t0 = time()
for batch_num in range(num_batches):
self.loss_custom += [self.model_custom.train_on_batch(
X[batch_num], Y[batch_num])]
print("model_custom -- %s batches -- time: %.2f sec" % (num_batches,
time() - t0))
reset_seeds(reset_graph_with_backend=K, verbose=0)
self.model_control = self._make_model(**model_kw)
optimizer_control = self._make_optimizer(optimizer_name[:-1],
self.model_control,
**optimizer_kw)
self.model_control.compile(optimizer_control, loss=loss_name)
self.loss_control = [] # for introspection
t0 = time()
for batch_num in range(num_batches):
self.loss_control += [self.model_control.train_on_batch(
X[batch_num], Y[batch_num])]
print("model_control -- %s batches -- time: %.2f sec" % (num_batches,
time() - t0))
loss_diff = np.abs(np.array(self.loss_custom) -
np.array(self.loss_control))
print("%s max loss diff: %e" % (optimizer_name, np.max(loss_diff)))
self.assertTrue(np.allclose(self.loss_custom, self.loss_control,
rtol=0, atol=1e-3))
# cleanup
del self.model_custom, self.model_control
del optimizer_custom, optimizer_control
reset_seeds(reset_graph_with_backend=K, verbose=0)
def _test_save_load(self, model, X, optimizer_name, optimizer):
saved_model_preds = model.predict(X[0])
saved_model_weights = K.batch_get_value(model.trainable_weights)
saved_optim_weights = K.batch_get_value(model.optimizer.weights)
test_name = 'test__%f{}.h5'.format(np.random.random())
modelpath = os.path.join(tempfile.gettempdir(), test_name)
model.save(modelpath)
del model
model = load_model(modelpath, custom_objects={optimizer_name: optimizer})
loaded_model_preds = model.predict(X[0])
loaded_model_weights = K.batch_get_value(model.trainable_weights)
loaded_optim_weights = K.batch_get_value(model.optimizer.weights)
self.assertTrue(np.allclose(saved_model_preds, loaded_model_preds,
rtol=0, atol=1e-8))
for smw, lmw in zip(saved_model_weights, loaded_model_weights):
self.assertTrue(np.allclose(smw, lmw, rtol=0, atol=1e-8))
for sow, low in zip(saved_optim_weights, loaded_optim_weights):
self.assertTrue(np.allclose(sow, low, rtol=0, atol=1e-8))
@staticmethod
def _make_data(num_batches, batch_size, timesteps, num_channels=None,
embed_input_dim=None, sparse=False):
if sparse:
X = np.random.randint(0, embed_input_dim,
(num_batches, batch_size, timesteps))
else:
X = np.random.randn(num_batches, batch_size, timesteps, num_channels)
Y = np.random.randint(0, 2, (num_batches, batch_size))
return X, Y
@staticmethod
def _make_model(batch_shape, total_iterations, l2_reg=0, bidirectional=True,
dense_constraint=None, embed_input_dim=None, sparse=False):
if dense_constraint is not None:
dense_constraint = maxnorm(dense_constraint)
ipt = Input(batch_shape=batch_shape)
if sparse:
x = Embedding(embed_input_dim, embed_input_dim*3 + 1,
mask_zero=True)(ipt)
else:
x = ipt
gru = GRU(4, recurrent_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg))
if bidirectional:
x = Bidirectional(gru)(x)
else:
x = gru(x)
x = Dense(2, kernel_regularizer=l2(l2_reg),
kernel_constraint=dense_constraint)(x)
if sparse:
out = Dense(2, activation='softmax')(x)
else:
out = Dense(1, activation='sigmoid')(x)
return Model(ipt, out)
@staticmethod
def _make_optimizer(optimizer_name, model, total_iterations, decay=0,
amsgrad=False, nesterov=False, control_mode=False):
optimizer_dict = {'AdamW': AdamW, 'NadamW': NadamW, 'SGDW': SGDW,
'Adam': Adam, 'Nadam': Nadam, 'SGD': SGD}
optimizer = optimizer_dict[optimizer_name]
optimizer_kw = {}
if 'Adam' in optimizer_name:
optimizer_kw = {'amsgrad': amsgrad}
elif 'SGD' in optimizer_name:
optimizer_kw = {'nesterov': nesterov, 'momentum': .9}
if 'Nadam' not in optimizer_name:
optimizer_kw.update({'decay': decay})
if not control_mode:
wd_dict = get_weight_decays(model)
l2_extra = [2e-5]*(len(wd_dict) - 3)
wd = fill_dict_in_order(wd_dict, [1e-5, 1e-5, 1e-6] + l2_extra)
lr_m = {'gru': 0.5}
use_cosine_annealing = True
else:
wd, lr_m = None, None
use_cosine_annealing = False
if not any([optimizer_name == name for name in ('Adam', 'Nadam', 'SGD')]):
return optimizer(lr=1e-4, weight_decays=wd, lr_multipliers=lr_m,
use_cosine_annealing=use_cosine_annealing, t_cur=0,
total_iterations=total_iterations, **optimizer_kw)
else:
return optimizer(lr=1e-4, **optimizer_kw)
@staticmethod
def _valid_weight_decays(model):
weight_decays = get_weight_decays(model)
trues = 0
for wd in weight_decays.values():
trues += (wd != 0)
return (trues == 0)
@staticmethod
def _valid_cosine_annealing(eta_history, total_iterations, num_epochs):
eta_history_simul = []
for epoch in range(num_epochs):
for iteration in range(0, total_iterations):
eta_history_simul.append(0.5 * (
1 + np.cos(np.pi*iteration / total_iterations)))
return np.allclose(eta_history, eta_history_simul, rtol=0, atol=2e-7)
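# Reference note, not part of the original test file: _valid_cosine_annealing
# above reproduces the schedule eta_t = 0.5 * (1 + cos(pi * i / total_iterations)),
# restarted at every epoch. For example, with total_iterations = 4 the simulated
# per-epoch values are approximately [1.0, 0.854, 0.5, 0.146].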
|
[
"numpy.allclose",
"numpy.random.random",
"numpy.max",
"tensorflow.compat.v1.disable_eager_execution",
"numpy.random.randint",
"numpy.random.randn",
"tempfile.gettempdir",
"numpy.array",
"numpy.cos",
"time.time",
"termcolor.cprint"
] |
[((591, 629), 'tensorflow.compat.v1.disable_eager_execution', 'tf.compat.v1.disable_eager_execution', ([], {}), '()\n', (627, 629), True, 'import tensorflow as tf\n'), ((2744, 2796), 'termcolor.cprint', 'cprint', (['"""\n<< ALL MAIN TESTS PASSED >>\n"""', '"""green"""'], {}), '("""\n<< ALL MAIN TESTS PASSED >>\n""", \'green\')\n', (2750, 2796), False, 'from termcolor import cprint\n'), ((4481, 4533), 'termcolor.cprint', 'cprint', (['"""\n<< ALL MISC TESTS PASSED >>\n"""', '"""green"""'], {}), '("""\n<< ALL MISC TESTS PASSED >>\n""", \'green\')\n', (4487, 4533), False, 'from termcolor import cprint\n'), ((5499, 5554), 'termcolor.cprint', 'cprint', (['"""\n<< ALL CONTROL TESTS PASSED >>\n"""', '"""green"""'], {}), '("""\n<< ALL CONTROL TESTS PASSED >>\n""", \'green\')\n', (5505, 5554), False, 'from termcolor import cprint\n'), ((6887, 6893), 'time.time', 'time', ([], {}), '()\n', (6891, 6893), False, 'from time import time\n'), ((7667, 7673), 'time.time', 'time', ([], {}), '()\n', (7671, 7673), False, 'from time import time\n'), ((10028, 10078), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', '(num_batches, batch_size)'], {}), '(0, 2, (num_batches, batch_size))\n', (10045, 10078), True, 'import numpy as np\n'), ((13060, 13123), 'numpy.allclose', 'np.allclose', (['eta_history', 'eta_history_simul'], {'rtol': '(0)', 'atol': '(2e-07)'}), '(eta_history, eta_history_simul, rtol=0, atol=2e-07)\n', (13071, 13123), True, 'import numpy as np\n'), ((8208, 8276), 'numpy.allclose', 'np.allclose', (['self.loss_custom', 'self.loss_control'], {'rtol': '(0)', 'atol': '(0.001)'}), '(self.loss_custom, self.loss_control, rtol=0, atol=0.001)\n', (8219, 8276), True, 'import numpy as np\n'), ((8795, 8813), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (8811, 8813), True, 'import numpy as np\n'), ((8848, 8869), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (8867, 8869), False, 'import tempfile\n'), ((9235, 9305), 'numpy.allclose', 'np.allclose', (['saved_model_preds', 'loaded_model_preds'], {'rtol': '(0)', 'atol': '(1e-08)'}), '(saved_model_preds, loaded_model_preds, rtol=0, atol=1e-08)\n', (9246, 9305), True, 'import numpy as np\n'), ((9810, 9885), 'numpy.random.randint', 'np.random.randint', (['(0)', 'embed_input_dim', '(num_batches, batch_size, timesteps)'], {}), '(0, embed_input_dim, (num_batches, batch_size, timesteps))\n', (9827, 9885), True, 'import numpy as np\n'), ((9950, 10015), 'numpy.random.randn', 'np.random.randn', (['num_batches', 'batch_size', 'timesteps', 'num_channels'], {}), '(num_batches, batch_size, timesteps, num_channels)\n', (9965, 10015), True, 'import numpy as np\n'), ((8022, 8048), 'numpy.array', 'np.array', (['self.loss_custom'], {}), '(self.loss_custom)\n', (8030, 8048), True, 'import numpy as np\n'), ((8078, 8105), 'numpy.array', 'np.array', (['self.loss_control'], {}), '(self.loss_control)\n', (8086, 8105), True, 'import numpy as np\n'), ((9442, 9483), 'numpy.allclose', 'np.allclose', (['smw', 'lmw'], {'rtol': '(0)', 'atol': '(1e-08)'}), '(smw, lmw, rtol=0, atol=1e-08)\n', (9453, 9483), True, 'import numpy as np\n'), ((9584, 9625), 'numpy.allclose', 'np.allclose', (['sow', 'low'], {'rtol': '(0)', 'atol': '(1e-08)'}), '(sow, low, rtol=0, atol=1e-08)\n', (9595, 9625), True, 'import numpy as np\n'), ((8163, 8180), 'numpy.max', 'np.max', (['loss_diff'], {}), '(loss_diff)\n', (8169, 8180), True, 'import numpy as np\n'), ((7196, 7202), 'time.time', 'time', ([], {}), '()\n', (7200, 7202), False, 'from time import time\n'), ((7980, 7986), 
'time.time', 'time', ([], {}), '()\n', (7984, 7986), False, 'from time import time\n'), ((13000, 13044), 'numpy.cos', 'np.cos', (['(np.pi * iteration / total_iterations)'], {}), '(np.pi * iteration / total_iterations)\n', (13006, 13044), True, 'import numpy as np\n')]
|
import os
import sys
import time
import random
import string
import argparse
import torch
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import torch.optim as optim
import torch.utils.data
import numpy as np
from utils import CTCLabelConverter, CTCLabelConverterForBaiduWarpctc, AttnLabelConverter, Averager
from simclr_dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset
from simclr_model import FeaturesModel as Model
from test import validation
from byol_pytorch import BYOL
from imgaug import augmenters as iaa
import imgaug as ia
from tqdm import tqdm
import matplotlib.pyplot as plt
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def train(opt):
""" dataset preparation """
if not opt.data_filtering_off:
print('Filtering the images containing characters which are not in opt.character')
print('Filtering the images whose label is longer than opt.batch_max_length')
# see https://github.com/clovaai/deep-text-recognition-benchmark/blob/6593928855fb7abb999a99f428b3e4477d4ae356/dataset.py#L130
opt.select_data = opt.select_data.split('-')
opt.batch_ratio = opt.batch_ratio.split('-')
train_dataset = Batch_Balanced_Dataset(opt)
log = open(f'./saved_models/{opt.exp_name}/log_dataset.txt', 'a')
ia.seed(1)
image_transforms = iaa.Sequential([iaa.SomeOf((1, 5),
[iaa.LinearContrast((0.5, 1.0)),
iaa.GaussianBlur((0.5, 1.5)),
iaa.Crop(percent=((0, 0.4),(0, 0),(0, 0.4),(0, 0.0)), keep_size=True),
iaa.Crop(percent=((0, 0.0),(0, 0.02),(0, 0),(0, 0.02)), keep_size=True),
iaa.Sharpen(alpha=(0.0, 0.5), lightness=(0.0, 0.5)),
iaa.PiecewiseAffine(scale=(0.02, 0.03), mode='edge'),
iaa.PerspectiveTransform(scale=(0.01, 0.02))],
random_order=True)])
AlignCollate_valid = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD, image_transforms=image_transforms)
valid_dataset, valid_dataset_log = hierarchical_dataset(root=opt.valid_data, opt=opt)
valid_loader = torch.utils.data.DataLoader(
valid_dataset, batch_size=opt.batch_size,
shuffle=True, # 'True' to check training progress with validation function.
num_workers=int(opt.workers),
collate_fn=AlignCollate_valid, pin_memory=True)
log.write(valid_dataset_log)
print('-' * 80)
log.write('-' * 80 + '\n')
log.close()
if opt.rgb:
opt.input_channel = 3
model = Model(opt)
print('model input parameters', opt.imgH, opt.imgW, opt.num_fiducial, opt.input_channel, opt.output_channel,
opt.hidden_size, opt.batch_max_length, opt.Transformation, opt.FeatureExtraction,
opt.SequenceModeling)
# weight initialization
for name, param in model.named_parameters():
if 'localization_fc2' in name:
print(f'Skip {name} as it is already initialized')
continue
try:
if 'bias' in name:
init.constant_(param, 0.0)
elif 'weight' in name:
init.kaiming_normal_(param)
except Exception as e: # for batchnorm.
if 'weight' in name:
param.data.fill_(1)
continue
# data parallel for multi-GPU
model = torch.nn.DataParallel(model).to(device)
model.train()
if opt.saved_model != '':
print(f'loading pretrained model from {opt.saved_model}')
if opt.FT:
model.load_state_dict(torch.load(opt.saved_model), strict=False)
else:
model.load_state_dict(torch.load(opt.saved_model))
print("Model:")
print(model)
image_transforms = iaa.Sequential([iaa.SomeOf((1, 5),
[iaa.LinearContrast((0.5, 1.0)),
iaa.GaussianBlur((0.5, 1.5)),
iaa.Crop(percent=((0, 0.4),(0, 0),(0, 0.4),(0, 0.0)), keep_size=True),
iaa.Crop(percent=((0, 0.0),(0, 0.02),(0, 0),(0, 0.02)), keep_size=True),
iaa.Sharpen(alpha=(0.0, 0.5), lightness=(0.0, 0.5)),
iaa.PiecewiseAffine(scale=(0.02, 0.03), mode='edge'),
iaa.PerspectiveTransform(scale=(0.01, 0.02))],
random_order=True)])
byol_learner = BYOL(
model,
image_size=(32,100),
hidden_layer=-1,
channels=1,
augment_fn=image_transforms,
augmented=True)
print(byol_learner)
# filter that only require gradient decent
filtered_parameters = []
params_num = []
for p in filter(lambda p: p.requires_grad, byol_learner.parameters()):
filtered_parameters.append(p)
params_num.append(np.prod(p.size()))
print('Trainable params num : ', sum(params_num))
# setup optimizer
if opt.optimizer == 'adam':
optimizer = optim.Adam(filtered_parameters, lr=opt.lr, betas=(opt.beta1, 0.999))
elif opt.optimizer == 'adadelta':
optimizer = optim.Adadelta(filtered_parameters, lr=opt.lr, rho=opt.rho, eps=opt.eps, weight_decay=opt.weight_decay)
elif opt.optimizer == 'sgd':
optimizer = optim.SGD(filtered_parameters, lr=opt.lr, momentum=opt.momentum, weight_decay=opt.weight_decay, nesterov=opt.nesterov)
else:
raise Exception('Unknown optimizer')
print("Optimizer:")
print(optimizer)
""" final options """
# print(opt)
with open(f'./saved_models/{opt.exp_name}/opt.txt', 'a') as opt_file:
opt_log = '------------ Options -------------\n'
args = vars(opt)
for k, v in args.items():
opt_log += f'{str(k)}: {str(v)}\n'
opt_log += '---------------------------------------\n'
print(opt_log)
opt_file.write(opt_log)
""" start training """
start_iter = 0
if opt.saved_model != '':
try:
start_iter = int(opt.saved_model.split('_')[-1].split('.')[0])
print(f'continue to train, start_iter: {start_iter}')
except:
pass
#LR Scheduler:
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[int(0.6*opt.num_iter), int(0.8*opt.num_iter)], last_epoch=start_iter-1, gamma=0.1)
best_loss = None
iteration = start_iter
print(device)
loss_avg = Averager()
valid_loss_avg = Averager()
# kl_loss_avg = Averager()
# kl_loss = torch.nn.KLDivLoss()
epoch = 0
while(True):
# train part
for i in tqdm(range(opt.valInterval)):
image_tensors, _ = train_dataset.get_batch()
image = image_tensors.to(device)
optimizer.zero_grad()
loss = byol_learner(image)
loss.backward()
if opt.grad_clip:
torch.nn.utils.clip_grad_norm_(byol_learner.parameters(), opt.grad_clip)
optimizer.step()
scheduler.step()
byol_learner.update_moving_average()
loss_avg.add(loss)
if iteration==0:
print("Epoch {:06d} Loss: {:.04f}".format(iteration, loss_avg.val()))
iteration += 1
byol_learner.eval()
model.eval()
with torch.no_grad():
for image_tensors, _ in valid_loader:
image = image_tensors.to(device)
val_loss = byol_learner(image)
valid_loss_avg.add(val_loss)
# features = model(image)
# features = features.view(-1, 26, features.shape[1])
# kl_div = kl_loss(features[:int(features.shape[0]/2)], features[int(features.shape[0]/2):])
# kl_loss_avg.add(kl_div)
model.train()
byol_learner.train()
with open(f'./saved_models/{opt.exp_name}/log_train.txt', 'a') as log:
log.write("Iteration {:06d} Loss: {:.06f} Val loss: {:06f}".format(iteration, loss_avg.val(), valid_loss_avg.val()) + '\n')
print("Iteration {:06d} Loss: {:.06f} Val loss: {:06f}".format(iteration, loss_avg.val(), valid_loss_avg.val()))
if best_loss is None:
best_loss = valid_loss_avg.val()
torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth')
elif best_loss > valid_loss_avg.val():
best_loss = valid_loss_avg.val()
torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth')
scheduler.step()
loss_avg.reset()
valid_loss_avg.reset()
if epoch % 5 == 0:
torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth')
if (iteration + 1) >= opt.num_iter:
print('end the training')
torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth')
sys.exit()
epoch +=1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', help='Where to store logs and models')
parser.add_argument('--train_data', required=True, help='path to training dataset')
parser.add_argument('--valid_data', required=True, help='path to validation dataset')
parser.add_argument('--manualSeed', type=int, default=1111, help='for random seed setting')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--batch_size', type=int, default=192, help='input batch size')
parser.add_argument('--num_iter', type=int, default=300000, help='number of iterations to train for')
parser.add_argument('--valInterval', type=int, default=2000, help='Interval between each validation')
parser.add_argument('--saved_model', default='', help="path to model to continue training")
parser.add_argument('--FT', action='store_true', help='whether to do fine-tuning')
parser.add_argument('--optimizer', type=str, choices=['adam', 'adadelta', 'sgd'], help="Optimizer")
parser.add_argument('--lr', type=float, default=1, help='learning rate, default=1.0 for Adadelta')
parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for adam. default=0.9')
parser.add_argument('--rho', type=float, default=0.95, help='decay rate rho for Adadelta. default=0.95')
parser.add_argument('--eps', type=float, default=1e-8, help='eps for Adadelta. default=1e-8')
parser.add_argument('--nesterov', action='store_true', help='Use Nesterov momentum for SGD')
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum for SGD')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping value. default=5')
    parser.add_argument('--baiduCTC', action='store_true', help='use Baidu warpctc for the CTC loss')
""" Data processing """
parser.add_argument('--select_data', type=str, default='MJ-ST',
help='select training data (default is MJ-ST, which means MJ and ST used as training data)')
parser.add_argument('--batch_ratio', type=str, default='0.5-0.5',
help='assign ratio for each selected data in the batch')
parser.add_argument('--total_data_usage_ratio', type=str, default='1.0',
help='total data usage ratio, this ratio is multiplied to total number of data.')
parser.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length')
parser.add_argument('--imgH', type=int, default=32, help='the height of the input image')
parser.add_argument('--imgW', type=int, default=100, help='the width of the input image')
parser.add_argument('--rgb', action='store_true', help='use rgb input')
parser.add_argument('--character', type=str,
default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label')
parser.add_argument('--sensitive', action='store_true', help='for sensitive character mode')
parser.add_argument('--PAD', action='store_true', help='whether to keep ratio then pad for image resize')
parser.add_argument('--data_filtering_off', action='store_true', help='for data_filtering_off mode')
""" Model Architecture """
parser.add_argument('--Transformation', type=str, required=True, help='Transformation stage. None|TPS')
parser.add_argument('--FeatureExtraction', type=str, required=True,
help='FeatureExtraction stage. VGG|RCNN|ResNet')
parser.add_argument('--SequenceModeling', type=str, required=True, help='SequenceModeling stage. None|BiLSTM')
parser.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN')
parser.add_argument('--input_channel', type=int, default=1,
help='the number of input channel of Feature extractor')
parser.add_argument('--output_channel', type=int, default=512,
help='the number of output channel of Feature extractor')
parser.add_argument('--hidden_size', type=int, default=256, help='the size of the LSTM hidden state')
parser.add_argument('--weight_decay', type=float, default=10e-4, help='Weight decay')
parser.add_argument('--FinalLayer', action='store_true', help='Use a nonlinear projection head during training')
parser.add_argument('--final_feature', type=int, default=256, help='the size of the output of the final layer')
opt = parser.parse_args()
if not opt.exp_name:
opt.exp_name = f'{opt.Transformation}-{opt.FeatureExtraction}-{opt.SequenceModeling}-BYOL'
opt.exp_name += f'-Seed{opt.manualSeed}'
# print(opt.exp_name)
os.makedirs(f'./saved_models/{opt.exp_name}', exist_ok=True)
""" vocab / character number configuration """
if opt.sensitive:
# opt.character += 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
opt.character = string.printable[:-6] # same with ASTER setting (use 94 char).
""" Seed and GPU setting """
# print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
np.random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
torch.cuda.manual_seed(opt.manualSeed)
cudnn.benchmark = True
cudnn.deterministic = True
opt.num_gpu = torch.cuda.device_count()
# print('device count', opt.num_gpu)
if opt.num_gpu > 1:
print('------ Use multi-GPU setting ------')
print('if you stuck too long time with multi-GPU setting, try to set --workers 0')
# check multi-GPU issue https://github.com/clovaai/deep-text-recognition-benchmark/issues/1
opt.workers = opt.workers * opt.num_gpu
opt.batch_size = opt.batch_size * opt.num_gpu
""" previous version
    print('To equalize batch stats to 1-GPU setting, the batch_size is multiplied with num_gpu and multiplied batch_size is ', opt.batch_size)
opt.batch_size = opt.batch_size * opt.num_gpu
print('To equalize the number of epochs to 1-GPU setting, num_iter is divided with num_gpu by default.')
    If you don't care about it, just comment out these lines.
opt.num_iter = int(opt.num_iter / opt.num_gpu)
"""
train(opt)
|
[
"imgaug.augmenters.PiecewiseAffine",
"torch.nn.init.constant_",
"imgaug.augmenters.GaussianBlur",
"simclr_dataset.AlignCollate",
"torch.cuda.device_count",
"simclr_dataset.Batch_Balanced_Dataset",
"torch.cuda.is_available",
"sys.exit",
"argparse.ArgumentParser",
"imgaug.augmenters.Crop",
"torch.nn.init.kaiming_normal_",
"numpy.random.seed",
"imgaug.augmenters.LinearContrast",
"imgaug.augmenters.Sharpen",
"simclr_dataset.hierarchical_dataset",
"torch.optim.Adadelta",
"torch.optim.SGD",
"simclr_model.FeaturesModel",
"imgaug.augmenters.PerspectiveTransform",
"utils.Averager",
"torch.manual_seed",
"torch.optim.Adam",
"os.makedirs",
"torch.load",
"torch.nn.DataParallel",
"random.seed",
"imgaug.seed",
"torch.no_grad",
"torch.cuda.manual_seed",
"byol_pytorch.BYOL"
] |
[((1220, 1247), 'simclr_dataset.Batch_Balanced_Dataset', 'Batch_Balanced_Dataset', (['opt'], {}), '(opt)\n', (1242, 1247), False, 'from simclr_dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset\n'), ((1328, 1338), 'imgaug.seed', 'ia.seed', (['(1)'], {}), '(1)\n', (1335, 1338), True, 'import imgaug as ia\n'), ((2014, 2124), 'simclr_dataset.AlignCollate', 'AlignCollate', ([], {'imgH': 'opt.imgH', 'imgW': 'opt.imgW', 'keep_ratio_with_pad': 'opt.PAD', 'image_transforms': 'image_transforms'}), '(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD,\n image_transforms=image_transforms)\n', (2026, 2124), False, 'from simclr_dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset\n'), ((2160, 2210), 'simclr_dataset.hierarchical_dataset', 'hierarchical_dataset', ([], {'root': 'opt.valid_data', 'opt': 'opt'}), '(root=opt.valid_data, opt=opt)\n', (2180, 2210), False, 'from simclr_dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset\n'), ((2651, 2661), 'simclr_model.FeaturesModel', 'Model', (['opt'], {}), '(opt)\n', (2656, 2661), True, 'from simclr_model import FeaturesModel as Model\n'), ((4486, 4598), 'byol_pytorch.BYOL', 'BYOL', (['model'], {'image_size': '(32, 100)', 'hidden_layer': '(-1)', 'channels': '(1)', 'augment_fn': 'image_transforms', 'augmented': '(True)'}), '(model, image_size=(32, 100), hidden_layer=-1, channels=1, augment_fn=\n image_transforms, augmented=True)\n', (4490, 4598), False, 'from byol_pytorch import BYOL\n'), ((6485, 6495), 'utils.Averager', 'Averager', ([], {}), '()\n', (6493, 6495), False, 'from utils import CTCLabelConverter, CTCLabelConverterForBaiduWarpctc, AttnLabelConverter, Averager\n'), ((6517, 6527), 'utils.Averager', 'Averager', ([], {}), '()\n', (6525, 6527), False, 'from utils import CTCLabelConverter, CTCLabelConverterForBaiduWarpctc, AttnLabelConverter, Averager\n'), ((9091, 9116), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9114, 9116), False, 'import argparse\n'), ((13778, 13838), 'os.makedirs', 'os.makedirs', (['f"""./saved_models/{opt.exp_name}"""'], {'exist_ok': '(True)'}), "(f'./saved_models/{opt.exp_name}', exist_ok=True)\n", (13789, 13838), False, 'import os\n'), ((14140, 14167), 'random.seed', 'random.seed', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (14151, 14167), False, 'import random\n'), ((14172, 14202), 'numpy.random.seed', 'np.random.seed', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (14186, 14202), True, 'import numpy as np\n'), ((14207, 14240), 'torch.manual_seed', 'torch.manual_seed', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (14224, 14240), False, 'import torch\n'), ((14245, 14283), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (14267, 14283), False, 'import torch\n'), ((14361, 14386), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (14384, 14386), False, 'import torch\n'), ((666, 691), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (689, 691), False, 'import torch\n'), ((5052, 5120), 'torch.optim.Adam', 'optim.Adam', (['filtered_parameters'], {'lr': 'opt.lr', 'betas': '(opt.beta1, 0.999)'}), '(filtered_parameters, lr=opt.lr, betas=(opt.beta1, 0.999))\n', (5062, 5120), True, 'import torch.optim as optim\n'), ((3452, 3480), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (3473, 3480), False, 'import torch\n'), ((5179, 5286), 'torch.optim.Adadelta', 'optim.Adadelta', (['filtered_parameters'], {'lr': 
'opt.lr', 'rho': 'opt.rho', 'eps': 'opt.eps', 'weight_decay': 'opt.weight_decay'}), '(filtered_parameters, lr=opt.lr, rho=opt.rho, eps=opt.eps,\n weight_decay=opt.weight_decay)\n', (5193, 5286), True, 'import torch.optim as optim\n'), ((7377, 7392), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7390, 7392), False, 'import torch\n'), ((9011, 9021), 'sys.exit', 'sys.exit', ([], {}), '()\n', (9019, 9021), False, 'import sys\n'), ((3160, 3186), 'torch.nn.init.constant_', 'init.constant_', (['param', '(0.0)'], {}), '(param, 0.0)\n', (3174, 3186), True, 'import torch.nn.init as init\n'), ((3659, 3686), 'torch.load', 'torch.load', (['opt.saved_model'], {}), '(opt.saved_model)\n', (3669, 3686), False, 'import torch\n'), ((3750, 3777), 'torch.load', 'torch.load', (['opt.saved_model'], {}), '(opt.saved_model)\n', (3760, 3777), False, 'import torch\n'), ((5336, 5458), 'torch.optim.SGD', 'optim.SGD', (['filtered_parameters'], {'lr': 'opt.lr', 'momentum': 'opt.momentum', 'weight_decay': 'opt.weight_decay', 'nesterov': 'opt.nesterov'}), '(filtered_parameters, lr=opt.lr, momentum=opt.momentum,\n weight_decay=opt.weight_decay, nesterov=opt.nesterov)\n', (5345, 5458), True, 'import torch.optim as optim\n'), ((1425, 1455), 'imgaug.augmenters.LinearContrast', 'iaa.LinearContrast', (['(0.5, 1.0)'], {}), '((0.5, 1.0))\n', (1443, 1455), True, 'from imgaug import augmenters as iaa\n'), ((1483, 1511), 'imgaug.augmenters.GaussianBlur', 'iaa.GaussianBlur', (['(0.5, 1.5)'], {}), '((0.5, 1.5))\n', (1499, 1511), True, 'from imgaug import augmenters as iaa\n'), ((1539, 1611), 'imgaug.augmenters.Crop', 'iaa.Crop', ([], {'percent': '((0, 0.4), (0, 0), (0, 0.4), (0, 0.0))', 'keep_size': '(True)'}), '(percent=((0, 0.4), (0, 0), (0, 0.4), (0, 0.0)), keep_size=True)\n', (1547, 1611), True, 'from imgaug import augmenters as iaa\n'), ((1636, 1710), 'imgaug.augmenters.Crop', 'iaa.Crop', ([], {'percent': '((0, 0.0), (0, 0.02), (0, 0), (0, 0.02))', 'keep_size': '(True)'}), '(percent=((0, 0.0), (0, 0.02), (0, 0), (0, 0.02)), keep_size=True)\n', (1644, 1710), True, 'from imgaug import augmenters as iaa\n'), ((1735, 1786), 'imgaug.augmenters.Sharpen', 'iaa.Sharpen', ([], {'alpha': '(0.0, 0.5)', 'lightness': '(0.0, 0.5)'}), '(alpha=(0.0, 0.5), lightness=(0.0, 0.5))\n', (1746, 1786), True, 'from imgaug import augmenters as iaa\n'), ((1814, 1866), 'imgaug.augmenters.PiecewiseAffine', 'iaa.PiecewiseAffine', ([], {'scale': '(0.02, 0.03)', 'mode': '"""edge"""'}), "(scale=(0.02, 0.03), mode='edge')\n", (1833, 1866), True, 'from imgaug import augmenters as iaa\n'), ((1894, 1938), 'imgaug.augmenters.PerspectiveTransform', 'iaa.PerspectiveTransform', ([], {'scale': '(0.01, 0.02)'}), '(scale=(0.01, 0.02))\n', (1918, 1938), True, 'from imgaug import augmenters as iaa\n'), ((3238, 3265), 'torch.nn.init.kaiming_normal_', 'init.kaiming_normal_', (['param'], {}), '(param)\n', (3258, 3265), True, 'import torch.nn.init as init\n'), ((3903, 3933), 'imgaug.augmenters.LinearContrast', 'iaa.LinearContrast', (['(0.5, 1.0)'], {}), '((0.5, 1.0))\n', (3921, 3933), True, 'from imgaug import augmenters as iaa\n'), ((3961, 3989), 'imgaug.augmenters.GaussianBlur', 'iaa.GaussianBlur', (['(0.5, 1.5)'], {}), '((0.5, 1.5))\n', (3977, 3989), True, 'from imgaug import augmenters as iaa\n'), ((4017, 4089), 'imgaug.augmenters.Crop', 'iaa.Crop', ([], {'percent': '((0, 0.4), (0, 0), (0, 0.4), (0, 0.0))', 'keep_size': '(True)'}), '(percent=((0, 0.4), (0, 0), (0, 0.4), (0, 0.0)), keep_size=True)\n', (4025, 4089), True, 'from imgaug import augmenters as iaa\n'), 
((4114, 4188), 'imgaug.augmenters.Crop', 'iaa.Crop', ([], {'percent': '((0, 0.0), (0, 0.02), (0, 0), (0, 0.02))', 'keep_size': '(True)'}), '(percent=((0, 0.0), (0, 0.02), (0, 0), (0, 0.02)), keep_size=True)\n', (4122, 4188), True, 'from imgaug import augmenters as iaa\n'), ((4213, 4264), 'imgaug.augmenters.Sharpen', 'iaa.Sharpen', ([], {'alpha': '(0.0, 0.5)', 'lightness': '(0.0, 0.5)'}), '(alpha=(0.0, 0.5), lightness=(0.0, 0.5))\n', (4224, 4264), True, 'from imgaug import augmenters as iaa\n'), ((4292, 4344), 'imgaug.augmenters.PiecewiseAffine', 'iaa.PiecewiseAffine', ([], {'scale': '(0.02, 0.03)', 'mode': '"""edge"""'}), "(scale=(0.02, 0.03), mode='edge')\n", (4311, 4344), True, 'from imgaug import augmenters as iaa\n'), ((4372, 4416), 'imgaug.augmenters.PerspectiveTransform', 'iaa.PerspectiveTransform', ([], {'scale': '(0.01, 0.02)'}), '(scale=(0.01, 0.02))\n', (4396, 4416), True, 'from imgaug import augmenters as iaa\n')]
|
import sys
sys.path.append('../../')
import constants as cnst
import os
os.environ['PYTHONHASHSEED'] = '2'
import tqdm
from model.stg2_generator import StyledGenerator
import numpy as np
from my_utils.visualize_flame_overlay import OverLayViz
from my_utils.flm_dynamic_fit_overlay import camera_ringnetpp
from my_utils.generate_gif import generate_from_flame_sequence
from my_utils.generic_utils import save_set_of_images
from my_utils import compute_fid
import constants
from dataset_loaders import fast_image_reshape
import torch
from my_utils import generic_utils
from my_utils.eye_centering import position_to_given_location
def ge_gen_in(flm_params, textured_rndr, norm_map, normal_map_cond, texture_cond):
if normal_map_cond and texture_cond:
return torch.cat((textured_rndr, norm_map), dim=1)
elif normal_map_cond:
return norm_map
elif texture_cond:
return textured_rndr
else:
return flm_params
# General settings
save_images = True
code_size = 236
use_inst_norm = True
core_tensor_res = 4
resolution = 256
alpha = 1
step_max = int(np.log2(resolution) - 2)
root_out_dir = f'{cnst.output_root}sample/'
num_smpl_to_eval_on = 1000
use_styled_conv_stylegan2 = True
flength = 5000
cam_t = np.array([0., 0., 0])
camera_params = camera_ringnetpp((512, 512), trans=cam_t, focal=flength)
run_ids_1 = [29, ] # with sqrt(2)
# run_ids_1 = [7, 24, 8, 3]
# run_ids_1 = [7, 8, 3]
settings_for_runs = \
{24: {'name': 'vector_cond', 'model_idx': '216000_1', 'normal_maps_as_cond': False,
'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False},
29: {'name': 'full_model', 'model_idx': '294000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': True},
7: {'name': 'flm_rndr_tex_interp', 'model_idx': '051000_1', 'normal_maps_as_cond': False,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},
3: {'name': 'norm_mp_tex_interp', 'model_idx': '203000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False},
8: {'name': 'norm_map_rend_flm_no_tex_interp', 'model_idx': '009000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},}
overlay_visualizer = OverLayViz()
# overlay_visualizer.setup_renderer(mesh_file=None)
flm_params = np.zeros((num_smpl_to_eval_on, code_size)).astype('float32')
fl_param_dict = np.load(cnst.all_flame_params_file, allow_pickle=True).item()
for i, key in enumerate(fl_param_dict):
flame_param = fl_param_dict[key]
flame_param = np.hstack((flame_param['shape'], flame_param['exp'], flame_param['pose'], flame_param['cam'],
flame_param['tex'], flame_param['lit'].flatten()))
# tz = camera_params['f'][0] / (camera_params['c'][0] * flame_param[:, 156:157])
# flame_param[:, 156:159] = np.concatenate((flame_param[:, 157:], tz), axis=1)
# import ipdb; ipdb.set_trace()
flm_params[i, :] = flame_param.astype('float32')
if i == num_smpl_to_eval_on - 1:
break
batch_size = 64
flame_decoder = overlay_visualizer.deca.flame.eval()
for run_idx in run_ids_1:
# import ipdb; ipdb.set_trace()
generator_1 = torch.nn.DataParallel(
StyledGenerator(embedding_vocab_size=69158,
rendered_flame_ascondition=settings_for_runs[run_idx]['rendered_flame_as_condition'],
normal_maps_as_cond=settings_for_runs[run_idx]['normal_maps_as_cond'],
core_tensor_res=core_tensor_res,
w_truncation_factor=1.0,
apply_sqrt2_fac_in_eq_lin=settings_for_runs[run_idx]['apply_sqrt2_fac_in_eq_lin'],
n_mlp=8)).cuda()
model_idx = settings_for_runs[run_idx]['model_idx']
ckpt1 = torch.load(f'{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model')
generator_1.load_state_dict(ckpt1['generator_running'])
generator_1 = generator_1.eval()
# images = np.zeros((num_smpl_to_eval_on, 3, resolution, resolution)).astype('float32')
pbar = tqdm.tqdm(range(0, num_smpl_to_eval_on, batch_size))
pbar.set_description('Generating_images')
flame_mesh_imgs = None
mdl_id = 'mdl2_'
if settings_for_runs[run_idx]['name'] == 'full_model':
mdl_id = 'mdl1_'
for batch_idx in pbar:
flm_batch = flm_params[batch_idx:batch_idx+batch_size, :]
flm_batch = torch.from_numpy(flm_batch).cuda()
flm_batch = position_to_given_location(flame_decoder, flm_batch)
batch_size_true = flm_batch.shape[0]
if settings_for_runs[run_idx]['normal_maps_as_cond'] or \
settings_for_runs[run_idx]['rendered_flame_as_condition']:
cam = flm_batch[:, constants.DECA_IDX['cam'][0]:constants.DECA_IDX['cam'][1]:]
shape = flm_batch[:, constants.INDICES['SHAPE'][0]:constants.INDICES['SHAPE'][1]]
exp = flm_batch[:, constants.INDICES['EXP'][0]:constants.INDICES['EXP'][1]]
pose = flm_batch[:, constants.INDICES['POSE'][0]:constants.INDICES['POSE'][1]]
# import ipdb; ipdb.set_trace()
light_code = \
flm_batch[:, constants.DECA_IDX['lit'][0]:constants.DECA_IDX['lit'][1]:].view((batch_size_true, 9, 3))
texture_code = flm_batch[:, constants.DECA_IDX['tex'][0]:constants.DECA_IDX['tex'][1]:]
norma_map_img, _, _, _, rend_flm = \
overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code),
camera_params=cam)
rend_flm = torch.clamp(rend_flm, 0, 1) * 2 - 1
norma_map_img = torch.clamp(norma_map_img, 0, 1) * 2 - 1
rend_flm = fast_image_reshape(rend_flm, height_out=256, width_out=256, mode='bilinear')
norma_map_img = fast_image_reshape(norma_map_img, height_out=256, width_out=256, mode='bilinear')
else:
rend_flm = None
norma_map_img = None
gen_1_in = ge_gen_in(flm_batch, rend_flm, norma_map_img, settings_for_runs[run_idx]['normal_maps_as_cond'],
settings_for_runs[run_idx]['rendered_flame_as_condition'])
# torch.manual_seed(2)
identity_embeddings = torch.randint(low=0, high=69158, size=(gen_1_in.shape[0], ), dtype=torch.long,
device='cuda')
mdl_1_gen_images = generic_utils.get_images_from_flame_params(
flame_params=gen_1_in.cpu().numpy(), pose=None,
model=generator_1,
step=step_max, alpha=alpha,
input_indices=identity_embeddings.cpu().numpy())
# import ipdb; ipdb.set_trace()
images = torch.clamp(mdl_1_gen_images, -1, 1).cpu().numpy()
flame_mesh_imgs = torch.clamp(rend_flm, -1, 1).cpu().numpy()
save_path_current_id = os.path.join(root_out_dir, 'inter_model_comparison', settings_for_runs[run_idx]['name'])
save_set_of_images(path=save_path_current_id, prefix=f'{mdl_id}_{batch_idx}',
images=(images + 1) / 2, show_prog_bar=True)
#save flam rndr
save_path_current_id_flm_rndr = os.path.join(root_out_dir, 'inter_model_comparison',
settings_for_runs[run_idx]['name'])
save_set_of_images(path=save_path_current_id_flm_rndr, prefix=f'mesh_{batch_idx}',
images=(flame_mesh_imgs + 1) / 2, show_prog_bar=True)
# save_set_of_images(path=save_path_this_expt, prefix='mesh_', images=((norma_map_img + 1) / 2).cpu().numpy())
# save_set_of_images(path=save_path_this_expt, prefix='mdl1_', images=((mdl_1_gen_images + 1) / 2).cpu().numpy())
# save_set_of_images(path=save_path_this_expt, prefix='mdl2_', images=((mdl_2_gen_images + 1) / 2).cpu().numpy())
|
[
"my_utils.visualize_flame_overlay.OverLayViz",
"torch.load",
"os.path.join",
"my_utils.generic_utils.save_set_of_images",
"my_utils.flm_dynamic_fit_overlay.camera_ringnetpp",
"dataset_loaders.fast_image_reshape",
"numpy.array",
"numpy.zeros",
"torch.randint",
"my_utils.eye_centering.position_to_given_location",
"model.stg2_generator.StyledGenerator",
"torch.from_numpy",
"numpy.log2",
"numpy.load",
"sys.path.append",
"torch.clamp",
"torch.cat"
] |
[((11, 36), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (26, 36), False, 'import sys\n'), ((1244, 1267), 'numpy.array', 'np.array', (['[0.0, 0.0, 0]'], {}), '([0.0, 0.0, 0])\n', (1252, 1267), True, 'import numpy as np\n'), ((1282, 1338), 'my_utils.flm_dynamic_fit_overlay.camera_ringnetpp', 'camera_ringnetpp', (['(512, 512)'], {'trans': 'cam_t', 'focal': 'flength'}), '((512, 512), trans=cam_t, focal=flength)\n', (1298, 1338), False, 'from my_utils.flm_dynamic_fit_overlay import camera_ringnetpp\n'), ((2360, 2372), 'my_utils.visualize_flame_overlay.OverLayViz', 'OverLayViz', ([], {}), '()\n', (2370, 2372), False, 'from my_utils.visualize_flame_overlay import OverLayViz\n'), ((3909, 3980), 'torch.load', 'torch.load', (['f"""{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model"""'], {}), "(f'{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model')\n", (3919, 3980), False, 'import torch\n'), ((770, 813), 'torch.cat', 'torch.cat', (['(textured_rndr, norm_map)'], {'dim': '(1)'}), '((textured_rndr, norm_map), dim=1)\n', (779, 813), False, 'import torch\n'), ((1091, 1110), 'numpy.log2', 'np.log2', (['resolution'], {}), '(resolution)\n', (1098, 1110), True, 'import numpy as np\n'), ((2439, 2481), 'numpy.zeros', 'np.zeros', (['(num_smpl_to_eval_on, code_size)'], {}), '((num_smpl_to_eval_on, code_size))\n', (2447, 2481), True, 'import numpy as np\n'), ((2516, 2570), 'numpy.load', 'np.load', (['cnst.all_flame_params_file'], {'allow_pickle': '(True)'}), '(cnst.all_flame_params_file, allow_pickle=True)\n', (2523, 2570), True, 'import numpy as np\n'), ((4582, 4634), 'my_utils.eye_centering.position_to_given_location', 'position_to_given_location', (['flame_decoder', 'flm_batch'], {}), '(flame_decoder, flm_batch)\n', (4608, 4634), False, 'from my_utils.eye_centering import position_to_given_location\n'), ((6390, 6487), 'torch.randint', 'torch.randint', ([], {'low': '(0)', 'high': '(69158)', 'size': '(gen_1_in.shape[0],)', 'dtype': 'torch.long', 'device': '"""cuda"""'}), "(low=0, high=69158, size=(gen_1_in.shape[0],), dtype=torch.\n long, device='cuda')\n", (6403, 6487), False, 'import torch\n'), ((7002, 7095), 'os.path.join', 'os.path.join', (['root_out_dir', '"""inter_model_comparison"""', "settings_for_runs[run_idx]['name']"], {}), "(root_out_dir, 'inter_model_comparison', settings_for_runs[\n run_idx]['name'])\n", (7014, 7095), False, 'import os\n'), ((7099, 7226), 'my_utils.generic_utils.save_set_of_images', 'save_set_of_images', ([], {'path': 'save_path_current_id', 'prefix': 'f"""{mdl_id}_{batch_idx}"""', 'images': '((images + 1) / 2)', 'show_prog_bar': '(True)'}), "(path=save_path_current_id, prefix=\n f'{mdl_id}_{batch_idx}', images=(images + 1) / 2, show_prog_bar=True)\n", (7117, 7226), False, 'from my_utils.generic_utils import save_set_of_images\n'), ((7314, 7407), 'os.path.join', 'os.path.join', (['root_out_dir', '"""inter_model_comparison"""', "settings_for_runs[run_idx]['name']"], {}), "(root_out_dir, 'inter_model_comparison', settings_for_runs[\n run_idx]['name'])\n", (7326, 7407), False, 'import os\n'), ((7464, 7605), 'my_utils.generic_utils.save_set_of_images', 'save_set_of_images', ([], {'path': 'save_path_current_id_flm_rndr', 'prefix': 'f"""mesh_{batch_idx}"""', 'images': '((flame_mesh_imgs + 1) / 2)', 'show_prog_bar': '(True)'}), "(path=save_path_current_id_flm_rndr, prefix=\n f'mesh_{batch_idx}', images=(flame_mesh_imgs + 1) / 2, show_prog_bar=True)\n", (7482, 7605), False, 'from my_utils.generic_utils import save_set_of_images\n'), ((5860, 
5936), 'dataset_loaders.fast_image_reshape', 'fast_image_reshape', (['rend_flm'], {'height_out': '(256)', 'width_out': '(256)', 'mode': '"""bilinear"""'}), "(rend_flm, height_out=256, width_out=256, mode='bilinear')\n", (5878, 5936), False, 'from dataset_loaders import fast_image_reshape\n'), ((5965, 6051), 'dataset_loaders.fast_image_reshape', 'fast_image_reshape', (['norma_map_img'], {'height_out': '(256)', 'width_out': '(256)', 'mode': '"""bilinear"""'}), "(norma_map_img, height_out=256, width_out=256, mode=\n 'bilinear')\n", (5983, 6051), False, 'from dataset_loaders import fast_image_reshape\n'), ((3338, 3710), 'model.stg2_generator.StyledGenerator', 'StyledGenerator', ([], {'embedding_vocab_size': '(69158)', 'rendered_flame_ascondition': "settings_for_runs[run_idx]['rendered_flame_as_condition']", 'normal_maps_as_cond': "settings_for_runs[run_idx]['normal_maps_as_cond']", 'core_tensor_res': 'core_tensor_res', 'w_truncation_factor': '(1.0)', 'apply_sqrt2_fac_in_eq_lin': "settings_for_runs[run_idx]['apply_sqrt2_fac_in_eq_lin']", 'n_mlp': '(8)'}), "(embedding_vocab_size=69158, rendered_flame_ascondition=\n settings_for_runs[run_idx]['rendered_flame_as_condition'],\n normal_maps_as_cond=settings_for_runs[run_idx]['normal_maps_as_cond'],\n core_tensor_res=core_tensor_res, w_truncation_factor=1.0,\n apply_sqrt2_fac_in_eq_lin=settings_for_runs[run_idx][\n 'apply_sqrt2_fac_in_eq_lin'], n_mlp=8)\n", (3353, 3710), False, 'from model.stg2_generator import StyledGenerator\n'), ((4527, 4554), 'torch.from_numpy', 'torch.from_numpy', (['flm_batch'], {}), '(flm_batch)\n', (4543, 4554), False, 'import torch\n'), ((5732, 5759), 'torch.clamp', 'torch.clamp', (['rend_flm', '(0)', '(1)'], {}), '(rend_flm, 0, 1)\n', (5743, 5759), False, 'import torch\n'), ((5796, 5828), 'torch.clamp', 'torch.clamp', (['norma_map_img', '(0)', '(1)'], {}), '(norma_map_img, 0, 1)\n', (5807, 5828), False, 'import torch\n'), ((6849, 6885), 'torch.clamp', 'torch.clamp', (['mdl_1_gen_images', '(-1)', '(1)'], {}), '(mdl_1_gen_images, -1, 1)\n', (6860, 6885), False, 'import torch\n'), ((6926, 6954), 'torch.clamp', 'torch.clamp', (['rend_flm', '(-1)', '(1)'], {}), '(rend_flm, -1, 1)\n', (6937, 6954), False, 'import torch\n')]
|
import unittest
import numpy as np
from numpy.testing import assert_almost_equal
from dymos.utils.hermite import hermite_matrices
class TestHermiteMatrices(unittest.TestCase):
def test_quadratic(self):
# Interpolate with values and rates provided at [-1, 1] in tau space
tau_given = [-1.0, 1.0]
tau_eval = np.linspace(-1, 1, 100)
# In time space use the boundaries [-2, 2]
dt_dtau = 4.0 / 2.0
# Provide values for y = t**2 and its time-derivative
y_given = [4.0, 4.0]
ydot_given = [-4.0, 4.0]
# Get the hermite matrices.
Ai, Bi, Ad, Bd = hermite_matrices(tau_given, tau_eval)
# Interpolate y and ydot at tau_eval points in tau space.
y_i = np.dot(Ai, y_given) + dt_dtau * np.dot(Bi, ydot_given)
ydot_i = (1.0 / dt_dtau) * np.dot(Ad, y_given) + np.dot(Bd, ydot_given)
# Compute our function as a point of comparison.
y_computed = (tau_eval * dt_dtau)**2
ydot_computed = 2.0 * (tau_eval * dt_dtau)
# Check results
assert_almost_equal(y_i, y_computed)
assert_almost_equal(ydot_i, ydot_computed)
def test_cubic(self):
        # Interpolate with values and rates provided at [-1, 0, 1] in tau space
tau_given = [-1.0, 0.0, 1.0]
tau_eval = np.linspace(-1, 1, 101)
# In time space use the boundaries [-2, 2]
dt_dtau = 4.0 / 2.0
        # Provide values for y = t**3 and its time-derivative
y_given = [-8.0, 0.0, 8.0]
ydot_given = [12.0, 0.0, 12.0]
# Get the hermite matrices.
Ai, Bi, Ad, Bd = hermite_matrices(tau_given, tau_eval)
# Interpolate y and ydot at tau_eval points in tau space.
y_i = np.dot(Ai, y_given) + dt_dtau * np.dot(Bi, ydot_given)
ydot_i = (1.0 / dt_dtau) * np.dot(Ad, y_given) + np.dot(Bd, ydot_given)
# Compute our function as a point of comparison.
y_computed = (tau_eval * dt_dtau)**3
ydot_computed = 3.0 * (tau_eval * dt_dtau)**2
# Check results
assert_almost_equal(y_i, y_computed)
assert_almost_equal(ydot_i, ydot_computed)
if __name__ == '__main__': # pragma: no cover
unittest.main()
|
[
"numpy.testing.assert_almost_equal",
"numpy.linspace",
"dymos.utils.hermite.hermite_matrices",
"numpy.dot",
"unittest.main"
] |
[((2209, 2224), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2222, 2224), False, 'import unittest\n'), ((340, 363), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(100)'], {}), '(-1, 1, 100)\n', (351, 363), True, 'import numpy as np\n'), ((631, 668), 'dymos.utils.hermite.hermite_matrices', 'hermite_matrices', (['tau_given', 'tau_eval'], {}), '(tau_given, tau_eval)\n', (647, 668), False, 'from dymos.utils.hermite import hermite_matrices\n'), ((1072, 1108), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['y_i', 'y_computed'], {}), '(y_i, y_computed)\n', (1091, 1108), False, 'from numpy.testing import assert_almost_equal\n'), ((1117, 1159), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['ydot_i', 'ydot_computed'], {}), '(ydot_i, ydot_computed)\n', (1136, 1159), False, 'from numpy.testing import assert_almost_equal\n'), ((1321, 1344), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(101)'], {}), '(-1, 1, 101)\n', (1332, 1344), True, 'import numpy as np\n'), ((1624, 1661), 'dymos.utils.hermite.hermite_matrices', 'hermite_matrices', (['tau_given', 'tau_eval'], {}), '(tau_given, tau_eval)\n', (1640, 1661), False, 'from dymos.utils.hermite import hermite_matrices\n'), ((2068, 2104), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['y_i', 'y_computed'], {}), '(y_i, y_computed)\n', (2087, 2104), False, 'from numpy.testing import assert_almost_equal\n'), ((2113, 2155), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['ydot_i', 'ydot_computed'], {}), '(ydot_i, ydot_computed)\n', (2132, 2155), False, 'from numpy.testing import assert_almost_equal\n'), ((750, 769), 'numpy.dot', 'np.dot', (['Ai', 'y_given'], {}), '(Ai, y_given)\n', (756, 769), True, 'import numpy as np\n'), ((862, 884), 'numpy.dot', 'np.dot', (['Bd', 'ydot_given'], {}), '(Bd, ydot_given)\n', (868, 884), True, 'import numpy as np\n'), ((1743, 1762), 'numpy.dot', 'np.dot', (['Ai', 'y_given'], {}), '(Ai, y_given)\n', (1749, 1762), True, 'import numpy as np\n'), ((1855, 1877), 'numpy.dot', 'np.dot', (['Bd', 'ydot_given'], {}), '(Bd, ydot_given)\n', (1861, 1877), True, 'import numpy as np\n'), ((782, 804), 'numpy.dot', 'np.dot', (['Bi', 'ydot_given'], {}), '(Bi, ydot_given)\n', (788, 804), True, 'import numpy as np\n'), ((840, 859), 'numpy.dot', 'np.dot', (['Ad', 'y_given'], {}), '(Ad, y_given)\n', (846, 859), True, 'import numpy as np\n'), ((1775, 1797), 'numpy.dot', 'np.dot', (['Bi', 'ydot_given'], {}), '(Bi, ydot_given)\n', (1781, 1797), True, 'import numpy as np\n'), ((1833, 1852), 'numpy.dot', 'np.dot', (['Ad', 'y_given'], {}), '(Ad, y_given)\n', (1839, 1852), True, 'import numpy as np\n')]
|
# ******************************************************************************
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import numbers
from typing import Union
class NumericLimits(object):
"""Class providing interface to extract numerical limits for given data type."""
@staticmethod
def _get_number_limits_class(dtype):
# type: (np.dtype) -> Union[IntegralLimits, FloatingPointLimits]
"""Return specialized class instance with limits set for given data type.
:param dtype: The data type we want to check limits for.
:return: The specialized class instance providing numeric limits.
"""
data_type = dtype.type
value = data_type(1)
if isinstance(value, numbers.Integral):
return IntegralLimits(data_type)
elif isinstance(value, numbers.Real):
return FloatingPointLimits(data_type)
else:
raise ValueError('NumericLimits: unsupported data type: <{}>.'.format(dtype.type))
@staticmethod
def _get_dtype(dtype): # type: (Union[np.dtype, int, float]) -> np.dtype
"""Return numpy dtype object wrapping provided data type.
:param dtype: The data type to be wrapped.
:return: The numpy dtype object.
"""
return dtype if isinstance(dtype, np.dtype) else np.dtype(dtype)
@classmethod
def max(cls, dtype): # type: (np.dtype) -> Union[int, float]
"""Return maximum value that can be represented in given data type.
:param dtype: The data type we want to check maximum value for.
:return: The maximum value.
"""
return cls._get_number_limits_class(cls._get_dtype(dtype)).max
@classmethod
def min(cls, dtype): # type: (np.dtype) -> Union[int, float]
"""Return minimum value that can be represented in given data type.
:param dtype: The data type we want to check minimum value for.
:return: The minimum value.
"""
return cls._get_number_limits_class(cls._get_dtype(dtype)).min
class FloatingPointLimits(object):
"""Class providing access to numeric limits for floating point data types."""
def __init__(self, data_type): # type: (type) -> None
self.data_type = data_type
@property
def max(self): # type: () -> float
"""Provide maximum representable value by stored data type.
:return: The maximum value.
"""
return np.finfo(self.data_type).max
@property
def min(self): # type: () -> float
"""Provide minimum representable value by stored data type.
:return: The minimum value.
"""
return np.finfo(self.data_type).min
class IntegralLimits(object):
"""Class providing access to numeric limits for integral data types."""
def __init__(self, data_type): # type: (type) -> None
self.data_type = data_type
@property
def max(self): # type: () -> int
"""Provide maximum representable value by stored data type.
:return: The maximum value.
"""
return np.iinfo(self.data_type).max
@property
def min(self): # type: () -> int
"""Provide minimum representable value by stored data type.
:return: The minimum value.
"""
return np.iinfo(self.data_type).min
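# Hedged usage sketch (illustrative addition, not part of the original module):
# NumericLimits dispatches on the dtype kind, so integral and floating point
# types are served by the same classmethods.
if __name__ == '__main__':
    assert NumericLimits.max(np.int8) == np.iinfo(np.int8).max        # 127
    assert NumericLimits.min(np.uint8) == np.iinfo(np.uint8).min       # 0
    assert NumericLimits.max(np.float32) == np.finfo(np.float32).max
    assert NumericLimits.min(np.float64) == np.finfo(np.float64).min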
|
[
"numpy.finfo",
"numpy.dtype",
"numpy.iinfo"
] |
[((2119, 2134), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (2127, 2134), True, 'import numpy as np\n'), ((3240, 3264), 'numpy.finfo', 'np.finfo', (['self.data_type'], {}), '(self.data_type)\n', (3248, 3264), True, 'import numpy as np\n'), ((3456, 3480), 'numpy.finfo', 'np.finfo', (['self.data_type'], {}), '(self.data_type)\n', (3464, 3480), True, 'import numpy as np\n'), ((3873, 3897), 'numpy.iinfo', 'np.iinfo', (['self.data_type'], {}), '(self.data_type)\n', (3881, 3897), True, 'import numpy as np\n'), ((4087, 4111), 'numpy.iinfo', 'np.iinfo', (['self.data_type'], {}), '(self.data_type)\n', (4095, 4111), True, 'import numpy as np\n')]
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from npu_bridge.npu_init import *
import unittest
import numpy as np
from avod.datasets.kitti import kitti_aug
class KittiAugTest(unittest.TestCase):
def test_flip_boxes_3d(self):
boxes_3d = np.array([
[1, 2, 3, 4, 5, 6, np.pi / 4],
[1, 2, 3, 4, 5, 6, -np.pi / 4]
])
exp_flipped_boxes_3d = np.array([
[-1, 2, 3, 4, 5, 6, 3 * np.pi / 4],
[-1, 2, 3, 4, 5, 6, -3 * np.pi / 4]
])
flipped_boxes_3d = kitti_aug.flip_boxes_3d(boxes_3d)
np.testing.assert_almost_equal(flipped_boxes_3d, exp_flipped_boxes_3d)
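# Hedged reference sketch (illustrative only, not the avod implementation):
# the expected values above are consistent with mirroring boxes across the
# x = 0 plane, i.e. x -> -x and heading angle ry -> pi - ry wrapped to (-pi, pi].
def _flip_boxes_3d_reference(boxes_3d):
    flipped = boxes_3d.copy()
    flipped[:, 0] = -flipped[:, 0]          # mirror the x coordinate
    ry = np.pi - flipped[:, 6]              # mirror the heading angle
    flipped[:, 6] = np.where(ry > np.pi, ry - 2 * np.pi, ry)  # wrap to (-pi, pi]
    return flipped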
|
[
"numpy.array",
"avod.datasets.kitti.kitti_aug.flip_boxes_3d",
"numpy.testing.assert_almost_equal"
] |
[((1490, 1563), 'numpy.array', 'np.array', (['[[1, 2, 3, 4, 5, 6, np.pi / 4], [1, 2, 3, 4, 5, 6, -np.pi / 4]]'], {}), '([[1, 2, 3, 4, 5, 6, np.pi / 4], [1, 2, 3, 4, 5, 6, -np.pi / 4]])\n', (1498, 1563), True, 'import numpy as np\n'), ((1630, 1718), 'numpy.array', 'np.array', (['[[-1, 2, 3, 4, 5, 6, 3 * np.pi / 4], [-1, 2, 3, 4, 5, 6, -3 * np.pi / 4]]'], {}), '([[-1, 2, 3, 4, 5, 6, 3 * np.pi / 4], [-1, 2, 3, 4, 5, 6, -3 * np.\n pi / 4]])\n', (1638, 1718), True, 'import numpy as np\n'), ((1776, 1809), 'avod.datasets.kitti.kitti_aug.flip_boxes_3d', 'kitti_aug.flip_boxes_3d', (['boxes_3d'], {}), '(boxes_3d)\n', (1799, 1809), False, 'from avod.datasets.kitti import kitti_aug\n'), ((1819, 1889), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['flipped_boxes_3d', 'exp_flipped_boxes_3d'], {}), '(flipped_boxes_3d, exp_flipped_boxes_3d)\n', (1849, 1889), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
from pycocotools.coco import COCO
import os
from ..dataloading import get_yolox_datadir
from .datasets_wrapper import Dataset
class MOTDataset(Dataset):
"""
COCO dataset class.
"""
def __init__( # This function is called in the exps yolox_x_mot17_half.py in this way: dataset = MOTDataset(
# data_dir=os.path.join(get_yolox_datadir(), "mot"),
# json_file=self.train_ann,
# name='train',
# img_size=self.input_size,
# preproc=TrainTransform(rgb_means=(0.485, 0.456, 0.406),
# std=(0.229, 0.224, 0.225),
# max_labels=500,),)
self,
data_dir=None,
json_file="train_half.json",
name="train",
img_size=(608, 1088),
preproc=None,
):
"""
COCO dataset initialization. Annotation data are read into memory by COCO API.
Args:
data_dir (str): dataset root directory
json_file (str): COCO json file name
name (str): COCO data name (e.g. 'train2017' or 'val2017')
img_size (int): target image size after pre-processing
preproc: data augmentation strategy
"""
super().__init__(img_size)
if data_dir is None:
data_dir = os.path.join(get_yolox_datadir(), "mot")
self.data_dir = data_dir
self.json_file = json_file
self.coco = COCO(os.path.join(self.data_dir, "annotations", self.json_file))
self.ids = self.coco.getImgIds()
self.class_ids = sorted(self.coco.getCatIds())
cats = self.coco.loadCats(self.coco.getCatIds())
self._classes = tuple([c["name"] for c in cats])
self.annotations = self._load_coco_annotations()
self.name = name
self.img_size = img_size
self.preproc = preproc
def __len__(self):
return len(self.ids)
def _load_coco_annotations(self):
return [self.load_anno_from_ids(_ids) for _ids in self.ids]
def load_anno_from_ids(self, id_):
im_ann = self.coco.loadImgs(id_)[0]
width = im_ann["width"]
height = im_ann["height"]
#frame_id = im_ann["frame_id"] : the default value '1' avoid to break augmentation & evaluation processes
frame_id = 1
#video_id = im_ann["video_id"] : the default value '1' avoid to break augmentation & evaluation processes
video_id = 1
anno_ids = self.coco.getAnnIds(imgIds=[int(id_)], iscrowd=False)
annotations = self.coco.loadAnns(anno_ids)
objs = []
for obj in annotations:
x1 = obj["bbox"][0]
y1 = obj["bbox"][1]
x2 = x1 + obj["bbox"][2]
y2 = y1 + obj["bbox"][3]
if obj["area"] > 0 and x2 >= x1 and y2 >= y1:
obj["clean_bbox"] = [x1, y1, x2, y2]
objs.append(obj)
num_objs = len(objs)
res = np.zeros((num_objs, 6))
for ix, obj in enumerate(objs):
cls = self.class_ids.index(obj["category_id"])
res[ix, 0:4] = obj["clean_bbox"]
res[ix, 4] = cls
#res[ix, 5] = obj["track_id"] # See comment line 66; same comment for the default value 1
res[ix, 5] = 1
file_name = im_ann["file_name"] if "file_name" in im_ann else "{:012}".format(id_) + ".jpg"
img_info = (height, width, frame_id, video_id, file_name)
del im_ann, annotations
return (res, img_info, file_name)
def load_anno(self, index):
return self.annotations[index][0]
def pull_item(self, index):
id_ = self.ids[index]
res, img_info, file_name = self.annotations[index]
# load image and preprocess
img_file = os.path.join(
self.data_dir, self.name, file_name
)
img = cv2.imread(img_file)
assert img is not None
return img, res.copy(), img_info, np.array([id_])
@Dataset.resize_getitem
def __getitem__(self, index):
"""
One image / label pair for the given index is picked up and pre-processed.
Args:
index (int): data index
Returns:
img (numpy.ndarray): pre-processed image
padded_labels (torch.Tensor): pre-processed label data.
The shape is :math:`[max_labels, 5]`.
each label consists of [class, xc, yc, w, h]:
class (float): class index.
xc, yc (float) : center of bbox whose values range from 0 to 1.
w, h (float) : size of bbox whose values range from 0 to 1.
info_img : tuple of h, w, nh, nw, dx, dy.
h, w (int): original shape of the image
nh, nw (int): shape of the resized image without padding
dx, dy (int): pad size
img_id (int): same as the input index. Used for evaluation.
"""
img, target, img_info, img_id = self.pull_item(index)
if self.preproc is not None:
img, target = self.preproc(img, target, self.input_dim)
return img, target, img_info, img_id
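# Hedged usage sketch (illustrative; the annotation file and image root must
# exist, mirroring the constructor call quoted in the comment at the top of this class).
if __name__ == "__main__":
    dataset = MOTDataset(
        data_dir=None,                  # falls back to <yolox_datadir>/mot
        json_file="train_half.json",
        name="train",
        img_size=(608, 1088),
        preproc=None,                   # e.g. a TrainTransform instance
    )
    print("number of images:", len(dataset))
    img, target, img_info, img_id = dataset[0]
    print("image shape:", img.shape, "label shape:", target.shape)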
|
[
"numpy.array",
"numpy.zeros",
"os.path.join",
"cv2.imread"
] |
[((3616, 3639), 'numpy.zeros', 'np.zeros', (['(num_objs, 6)'], {}), '((num_objs, 6))\n', (3624, 3639), True, 'import numpy as np\n'), ((4439, 4488), 'os.path.join', 'os.path.join', (['self.data_dir', 'self.name', 'file_name'], {}), '(self.data_dir, self.name, file_name)\n', (4451, 4488), False, 'import os\n'), ((4525, 4545), 'cv2.imread', 'cv2.imread', (['img_file'], {}), '(img_file)\n', (4535, 4545), False, 'import cv2\n'), ((2119, 2177), 'os.path.join', 'os.path.join', (['self.data_dir', '"""annotations"""', 'self.json_file'], {}), "(self.data_dir, 'annotations', self.json_file)\n", (2131, 2177), False, 'import os\n'), ((4620, 4635), 'numpy.array', 'np.array', (['[id_]'], {}), '([id_])\n', (4628, 4635), True, 'import numpy as np\n')]
|
import unittest
import dace
import numpy as np
from dace.transformation.dataflow import MapTiling, OutLocalStorage
N = dace.symbol('N')
@dace.program
def arange():
out = np.ndarray([N], np.int32)
for i in dace.map[0:N]:
with dace.tasklet:
o >> out[i]
o = i
return out
class LocalStorageTests(unittest.TestCase):
def test_even(self):
sdfg = arange.to_sdfg()
sdfg.apply_transformations([MapTiling, OutLocalStorage],
options=[{
'tile_sizes': [8]
}, {}])
self.assertTrue(
np.array_equal(sdfg(N=16), np.arange(16, dtype=np.int32)))
def test_uneven(self):
# For testing uneven decomposition, use longer buffer and ensure
# it's not filled over
output = np.ones(20, np.int32)
sdfg = arange.to_sdfg()
sdfg.apply_transformations([MapTiling, OutLocalStorage],
options=[{
'tile_sizes': [5]
}, {}])
dace.propagate_memlets_sdfg(sdfg)
sdfg(N=16, __return=output)
self.assertTrue(
np.array_equal(output[:16], np.arange(16, dtype=np.int32)))
self.assertTrue(np.array_equal(output[16:], np.ones(4, np.int32)))
if __name__ == '__main__':
unittest.main()
|
[
"numpy.ones",
"dace.propagate_memlets_sdfg",
"dace.symbol",
"numpy.ndarray",
"unittest.main",
"numpy.arange"
] |
[((120, 136), 'dace.symbol', 'dace.symbol', (['"""N"""'], {}), "('N')\n", (131, 136), False, 'import dace\n'), ((177, 202), 'numpy.ndarray', 'np.ndarray', (['[N]', 'np.int32'], {}), '([N], np.int32)\n', (187, 202), True, 'import numpy as np\n'), ((1422, 1437), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1435, 1437), False, 'import unittest\n'), ((874, 895), 'numpy.ones', 'np.ones', (['(20)', 'np.int32'], {}), '(20, np.int32)\n', (881, 895), True, 'import numpy as np\n'), ((1147, 1180), 'dace.propagate_memlets_sdfg', 'dace.propagate_memlets_sdfg', (['sdfg'], {}), '(sdfg)\n', (1174, 1180), False, 'import dace\n'), ((693, 722), 'numpy.arange', 'np.arange', (['(16)'], {'dtype': 'np.int32'}), '(16, dtype=np.int32)\n', (702, 722), True, 'import numpy as np\n'), ((1282, 1311), 'numpy.arange', 'np.arange', (['(16)'], {'dtype': 'np.int32'}), '(16, dtype=np.int32)\n', (1291, 1311), True, 'import numpy as np\n'), ((1366, 1386), 'numpy.ones', 'np.ones', (['(4)', 'np.int32'], {}), '(4, np.int32)\n', (1373, 1386), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""
Basis Pursuit DeNoising
=======================
This example demonstrates the use of class :class:`.admm.bpdn.BPDN` to solve the Basis Pursuit DeNoising (BPDN) problem :cite:`chen-1998-atomic`
$$\mathrm{argmin}_\mathbf{x} \; (1/2) \| D \mathbf{x} - \mathbf{s} \|_2^2 + \lambda \| \mathbf{x} \|_1 \;,$$
where $D$ is the dictionary, $\mathbf{x}$ is the sparse representation, and $\mathbf{s}$ is the signal to be represented. In this example the BPDN problem is used to estimate the reference sparse representation that generated a signal from a noisy version of the signal.
"""
from __future__ import print_function
from builtins import input
import numpy as np
from sporco.admm import bpdn
from sporco import util
from sporco import plot
"""
Configure problem size, sparsity, and noise level.
"""
N = 512 # Signal size
M = 4*N # Dictionary size
L = 32 # Number of non-zero coefficients in generator
sigma = 0.5 # Noise level
"""
Construct random dictionary, reference random sparse representation, and test signal consisting of the synthesis of the reference sparse representation with additive Gaussian noise.
"""
# Construct random dictionary and random sparse coefficients
np.random.seed(12345)
D = np.random.randn(N, M)
x0 = np.zeros((M, 1))
si = np.random.permutation(list(range(0, M-1)))
x0[si[0:L]] = np.random.randn(L, 1)
# Construct reference and noisy signal
s0 = D.dot(x0)
s = s0 + sigma*np.random.randn(N,1)
"""
Set BPDN solver class options.
"""
opt = bpdn.BPDN.Options({'Verbose': False, 'MaxMainIter': 500,
'RelStopTol': 1e-3, 'AutoRho': {'RsdlTarget': 1.0}})
"""
Select regularization parameter $\lambda$ by evaluating the error in recovering the sparse representation over a logarithmically spaced grid. (The reference representation is assumed to be known, which is not realistic in a real application.) A function is defined that evaluates the BPDN recovery error for a specified $\lambda$, and this function is evaluated in parallel by :func:`sporco.util.grid_search`.
"""
# Function computing reconstruction error at lmbda
def evalerr(prm):
lmbda = prm[0]
b = bpdn.BPDN(D, s, lmbda, opt)
x = b.solve()
return np.sum(np.abs(x-x0))
# Parallel evalution of error function on lmbda grid
lrng = np.logspace(1, 2, 20)
sprm, sfvl, fvmx, sidx = util.grid_search(evalerr, (lrng,))
lmbda = sprm[0]
print('Minimum ℓ1 error: %5.2f at 𝜆 = %.2e' % (sfvl, lmbda))
"""
Once the best $\lambda$ has been determined, run BPDN with verbose display of ADMM iteration statistics.
"""
# Initialise and run BPDN object for best lmbda
opt['Verbose'] = True
b = bpdn.BPDN(D, s, lmbda, opt)
x = b.solve()
print("BPDN solve time: %.2fs" % b.timer.elapsed('solve'))
"""
Plot comparison of reference and recovered representations.
"""
plot.plot(np.hstack((x0, x)), title='Sparse representation',
lgnd=['Reference', 'Reconstructed'])
"""
Plot lmbda error curve, functional value, residuals, and rho
"""
its = b.getitstat()
fig = plot.figure(figsize=(15, 10))
plot.subplot(2, 2, 1)
plot.plot(fvmx, x=lrng, ptyp='semilogx', xlbl='$\lambda$',
ylbl='Error', fig=fig)
plot.subplot(2, 2, 2)
plot.plot(its.ObjFun, xlbl='Iterations', ylbl='Functional', fig=fig)
plot.subplot(2, 2, 3)
plot.plot(np.vstack((its.PrimalRsdl, its.DualRsdl)).T,
ptyp='semilogy', xlbl='Iterations', ylbl='Residual',
lgnd=['Primal', 'Dual'], fig=fig)
plot.subplot(2, 2, 4)
plot.plot(its.Rho, xlbl='Iterations', ylbl='Penalty Parameter', fig=fig)
fig.show()
# Wait for enter on keyboard
input()
|
[
"numpy.abs",
"builtins.input",
"sporco.util.grid_search",
"numpy.hstack",
"sporco.admm.bpdn.BPDN",
"numpy.zeros",
"numpy.random.seed",
"numpy.vstack",
"sporco.admm.bpdn.BPDN.Options",
"sporco.plot.figure",
"sporco.plot.subplot",
"numpy.logspace",
"numpy.random.randn",
"sporco.plot.plot"
] |
[((1417, 1438), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (1431, 1438), True, 'import numpy as np\n'), ((1443, 1464), 'numpy.random.randn', 'np.random.randn', (['N', 'M'], {}), '(N, M)\n', (1458, 1464), True, 'import numpy as np\n'), ((1470, 1486), 'numpy.zeros', 'np.zeros', (['(M, 1)'], {}), '((M, 1))\n', (1478, 1486), True, 'import numpy as np\n'), ((1549, 1570), 'numpy.random.randn', 'np.random.randn', (['L', '(1)'], {}), '(L, 1)\n', (1564, 1570), True, 'import numpy as np\n'), ((1710, 1825), 'sporco.admm.bpdn.BPDN.Options', 'bpdn.BPDN.Options', (["{'Verbose': False, 'MaxMainIter': 500, 'RelStopTol': 0.001, 'AutoRho': {\n 'RsdlTarget': 1.0}}"], {}), "({'Verbose': False, 'MaxMainIter': 500, 'RelStopTol': \n 0.001, 'AutoRho': {'RsdlTarget': 1.0}})\n", (1727, 1825), False, 'from sporco.admm import bpdn\n'), ((2494, 2515), 'numpy.logspace', 'np.logspace', (['(1)', '(2)', '(20)'], {}), '(1, 2, 20)\n', (2505, 2515), True, 'import numpy as np\n'), ((2541, 2575), 'sporco.util.grid_search', 'util.grid_search', (['evalerr', '(lrng,)'], {}), '(evalerr, (lrng,))\n', (2557, 2575), False, 'from sporco import util\n'), ((2844, 2871), 'sporco.admm.bpdn.BPDN', 'bpdn.BPDN', (['D', 's', 'lmbda', 'opt'], {}), '(D, s, lmbda, opt)\n', (2853, 2871), False, 'from sporco.admm import bpdn\n'), ((3223, 3252), 'sporco.plot.figure', 'plot.figure', ([], {'figsize': '(15, 10)'}), '(figsize=(15, 10))\n', (3234, 3252), False, 'from sporco import plot\n'), ((3253, 3274), 'sporco.plot.subplot', 'plot.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (3265, 3274), False, 'from sporco import plot\n'), ((3275, 3361), 'sporco.plot.plot', 'plot.plot', (['fvmx'], {'x': 'lrng', 'ptyp': '"""semilogx"""', 'xlbl': '"""$\\\\lambda$"""', 'ylbl': '"""Error"""', 'fig': 'fig'}), "(fvmx, x=lrng, ptyp='semilogx', xlbl='$\\\\lambda$', ylbl='Error',\n fig=fig)\n", (3284, 3361), False, 'from sporco import plot\n'), ((3367, 3388), 'sporco.plot.subplot', 'plot.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (3379, 3388), False, 'from sporco import plot\n'), ((3389, 3457), 'sporco.plot.plot', 'plot.plot', (['its.ObjFun'], {'xlbl': '"""Iterations"""', 'ylbl': '"""Functional"""', 'fig': 'fig'}), "(its.ObjFun, xlbl='Iterations', ylbl='Functional', fig=fig)\n", (3398, 3457), False, 'from sporco import plot\n'), ((3458, 3479), 'sporco.plot.subplot', 'plot.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (3470, 3479), False, 'from sporco import plot\n'), ((3642, 3663), 'sporco.plot.subplot', 'plot.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (3654, 3663), False, 'from sporco import plot\n'), ((3664, 3736), 'sporco.plot.plot', 'plot.plot', (['its.Rho'], {'xlbl': '"""Iterations"""', 'ylbl': '"""Penalty Parameter"""', 'fig': 'fig'}), "(its.Rho, xlbl='Iterations', ylbl='Penalty Parameter', fig=fig)\n", (3673, 3736), False, 'from sporco import plot\n'), ((3780, 3787), 'builtins.input', 'input', ([], {}), '()\n', (3785, 3787), False, 'from builtins import input\n'), ((2354, 2381), 'sporco.admm.bpdn.BPDN', 'bpdn.BPDN', (['D', 's', 'lmbda', 'opt'], {}), '(D, s, lmbda, opt)\n', (2363, 2381), False, 'from sporco.admm import bpdn\n'), ((3027, 3045), 'numpy.hstack', 'np.hstack', (['(x0, x)'], {}), '((x0, x))\n', (3036, 3045), True, 'import numpy as np\n'), ((1641, 1662), 'numpy.random.randn', 'np.random.randn', (['N', '(1)'], {}), '(N, 1)\n', (1656, 1662), True, 'import numpy as np\n'), ((2418, 2432), 'numpy.abs', 'np.abs', (['(x - x0)'], {}), '(x - x0)\n', (2424, 2432), True, 'import numpy as np\n'), ((3490, 
3531), 'numpy.vstack', 'np.vstack', (['(its.PrimalRsdl, its.DualRsdl)'], {}), '((its.PrimalRsdl, its.DualRsdl))\n', (3499, 3531), True, 'import numpy as np\n')]
|
""" A bar graph.
(c) September 2017 by <NAME>
"""
import argparse
from collections import defaultdict
from keras.models import Sequential
from keras.layers import Dense, Activation
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import sys
np.set_printoptions(suppress=True, linewidth=200)
# Some matplotlib settings.
plt.style.use('seaborn-darkgrid')
titlesize = 21
labelsize = 17
legendsize = 15
ticksize = 15
bar_width = 0.80
opacity = 1.0
error_config = {'ecolor': '0.0', 'linewidth':3.0}
def deprecated():
"""
This is a deprecated method, only to show how to possibly combine these into
one plot. However, I find this unwieldy.
"""
fig, ax = plt.subplots()
bar_width = 0.80
opacity = 0.5
error_config = {'ecolor': '0.3'}
rects1 = plt.bar(np.array([0,1]), means_lin, bar_width,
alpha=opacity,
color='b',
yerr=std_lin,
error_kw=error_config,
label='Lin')
rects2 = plt.bar(np.array([3,4,5,6,7]), means_rfs, bar_width,
alpha=opacity,
color='r',
yerr=std_rfs,
error_kw=error_config,
label='RF')
rects3 = plt.bar(np.array([9,10]), means_dnn, bar_width,
alpha=opacity,
color='y',
yerr=std_dnn,
error_kw=error_config,
label='DNN')
plt.xticks(np.arange(11) + bar_width / 2,
('A','B','','D','E','F','G','','','J','K'))
plt.xlabel('Group')
plt.ylabel('Scores')
plt.title('Scores by group and gender')
plt.tight_layout()
plt.legend()
plt.savefig('figures/validation_set_results.png')
def plot(results, vv):
lin_mean = []
lin_std = []
lin_keys = []
rfs_mean = []
rfs_std = []
rfs_keys = []
dnn_mean = []
dnn_std = []
dnn_keys = []
sorted_keys = sorted(results.keys())
for key in sorted_keys:
info = [ss['loss'] for ss in results[key]]
if 'Lin' in key:
lin_mean.append(np.mean(info))
lin_std.append(np.std(info))
lin_keys.append(key)
elif 'RFs' in key:
rfs_mean.append(np.mean(info))
rfs_std.append(np.std(info))
rfs_keys.append(key)
elif 'DNN' in key:
dnn_mean.append(np.mean(info))
dnn_std.append(np.std(info))
dnn_keys.append(key)
print("\nlin_mean: {}".format(lin_mean))
print("lin_std: {}".format(lin_std))
print("lin_keys: {}".format(lin_keys))
print("\nrfs_mean: {}".format(rfs_mean))
print("rfs_std: {}".format(rfs_std))
print("rfs_keys: {}".format(rfs_keys))
print("\nDNN results:")
for (mean,std,key) in zip(dnn_mean,dnn_std,dnn_keys):
print("{:.2f}\t{:.2f}\t{}".format(mean,std,key))
# sys.exit()
# Use this to determine which DNN models should be here.
dnn_threshold = 3.0
real_index = 0
for ii,(mean,std,key) in enumerate(zip(dnn_mean,dnn_std,dnn_keys)):
if mean > dnn_threshold:
continue
real_index += 1
# Gah! Now I can finally make the bar chart. I think it's easiest to have it
# split across three different subplots, one per algorithm category.
width_ratio = [len(lin_keys),len(rfs_keys),real_index]
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(16,5),
gridspec_kw={'width_ratios':width_ratio})
for ii,(mean,std,key) in enumerate(zip(lin_mean,lin_std,lin_keys)):
ax[0].bar(np.array([ii]), mean, bar_width,
alpha=opacity,
yerr=std,
error_kw=error_config,
label=key[4:])
for ii,(mean,std,key) in enumerate(zip(rfs_mean,rfs_std,rfs_keys)):
ax[1].bar(np.array([ii]), mean, bar_width,
alpha=opacity,
yerr=std,
error_kw=error_config,
label=key[4:])
real_index = 0
for ii,(mean,std,key) in enumerate(zip(dnn_mean,dnn_std,dnn_keys)):
if mean > dnn_threshold:
continue
ax[2].bar(np.array([real_index]), mean, bar_width,
alpha=opacity,
yerr=std,
error_kw=error_config,
label=key[4:])
real_index += 1
# Some rather tedious but necessary stuff to make it publication-quality.
ax[0].set_title('Linear', fontsize=titlesize)
ax[1].set_title('Random Forests', fontsize=titlesize)
ax[2].set_title('Deep Neural Networks', fontsize=titlesize)
ax[0].set_ylabel('Average Squared $L_2$, 10-Fold CV', fontsize=labelsize)
for i in range(3):
ax[i].set_xlabel('Algorithm', fontsize=labelsize)
ax[i].set_ylim([0.0,9.0])
ax[i].tick_params(axis='y', labelsize=ticksize)
ax[i].set_xticklabels([])
ax[0].legend(loc="best", ncol=1, prop={'size':legendsize})
ax[1].legend(loc="best", ncol=2, prop={'size':legendsize})
ax[2].legend(loc="best", ncol=3, prop={'size':legendsize})
plt.tight_layout()
plt.savefig('figures/validation_set_results_v'+vv+'.png')
if __name__ == "__main__":
pp = argparse.ArgumentParser()
pp.add_argument('--version', type=int)
pp.add_argument('--kfolds', type=int, default=10)
args = pp.parse_args()
assert args.version is not None
VERSION = str(args.version).zfill(2)
file_name = 'results/results_kfolds10_v'+VERSION+'.npy'
results = np.load(file_name)[()]
print("results has keys: {}".format(results.keys()))
plot(results, VERSION)
|
[
"numpy.mean",
"matplotlib.pyplot.savefig",
"argparse.ArgumentParser",
"matplotlib.pyplot.ylabel",
"matplotlib.use",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.style.use",
"numpy.array",
"matplotlib.pyplot.tight_layout",
"numpy.std",
"matplotlib.pyplot.title",
"numpy.load",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend",
"numpy.set_printoptions"
] |
[((201, 222), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (215, 222), False, 'import matplotlib\n'), ((285, 334), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)', 'linewidth': '(200)'}), '(suppress=True, linewidth=200)\n', (304, 334), True, 'import numpy as np\n'), ((364, 397), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-darkgrid"""'], {}), "('seaborn-darkgrid')\n", (377, 397), True, 'import matplotlib.pyplot as plt\n'), ((717, 731), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (729, 731), True, 'import matplotlib.pyplot as plt\n'), ((1649, 1668), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Group"""'], {}), "('Group')\n", (1659, 1668), True, 'import matplotlib.pyplot as plt\n'), ((1673, 1693), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Scores"""'], {}), "('Scores')\n", (1683, 1693), True, 'import matplotlib.pyplot as plt\n'), ((1698, 1737), 'matplotlib.pyplot.title', 'plt.title', (['"""Scores by group and gender"""'], {}), "('Scores by group and gender')\n", (1707, 1737), True, 'import matplotlib.pyplot as plt\n'), ((1742, 1760), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1758, 1760), True, 'import matplotlib.pyplot as plt\n'), ((1765, 1777), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1775, 1777), True, 'import matplotlib.pyplot as plt\n'), ((1782, 1831), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figures/validation_set_results.png"""'], {}), "('figures/validation_set_results.png')\n", (1793, 1831), True, 'import matplotlib.pyplot as plt\n'), ((3473, 3567), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(3)', 'figsize': '(16, 5)', 'gridspec_kw': "{'width_ratios': width_ratio}"}), "(nrows=1, ncols=3, figsize=(16, 5), gridspec_kw={'width_ratios':\n width_ratio})\n", (3485, 3567), True, 'import matplotlib.pyplot as plt\n'), ((5199, 5217), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5215, 5217), True, 'import matplotlib.pyplot as plt\n'), ((5222, 5283), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('figures/validation_set_results_v' + vv + '.png')"], {}), "('figures/validation_set_results_v' + vv + '.png')\n", (5233, 5283), True, 'import matplotlib.pyplot as plt\n'), ((5318, 5343), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5341, 5343), False, 'import argparse\n'), ((830, 846), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (838, 846), True, 'import numpy as np\n'), ((1071, 1096), 'numpy.array', 'np.array', (['[3, 4, 5, 6, 7]'], {}), '([3, 4, 5, 6, 7])\n', (1079, 1096), True, 'import numpy as np\n'), ((1317, 1334), 'numpy.array', 'np.array', (['[9, 10]'], {}), '([9, 10])\n', (1325, 1334), True, 'import numpy as np\n'), ((5620, 5638), 'numpy.load', 'np.load', (['file_name'], {}), '(file_name)\n', (5627, 5638), True, 'import numpy as np\n'), ((1554, 1567), 'numpy.arange', 'np.arange', (['(11)'], {}), '(11)\n', (1563, 1567), True, 'import numpy as np\n'), ((3680, 3694), 'numpy.array', 'np.array', (['[ii]'], {}), '([ii])\n', (3688, 3694), True, 'import numpy as np\n'), ((3938, 3952), 'numpy.array', 'np.array', (['[ii]'], {}), '([ii])\n', (3946, 3952), True, 'import numpy as np\n'), ((4269, 4291), 'numpy.array', 'np.array', (['[real_index]'], {}), '([real_index])\n', (4277, 4291), True, 'import numpy as np\n'), ((2193, 2206), 'numpy.mean', 'np.mean', (['info'], {}), '(info)\n', (2200, 2206), True, 'import numpy as np\n'), ((2235, 2247), 
'numpy.std', 'np.std', (['info'], {}), '(info)\n', (2241, 2247), True, 'import numpy as np\n'), ((2337, 2350), 'numpy.mean', 'np.mean', (['info'], {}), '(info)\n', (2344, 2350), True, 'import numpy as np\n'), ((2379, 2391), 'numpy.std', 'np.std', (['info'], {}), '(info)\n', (2385, 2391), True, 'import numpy as np\n'), ((2481, 2494), 'numpy.mean', 'np.mean', (['info'], {}), '(info)\n', (2488, 2494), True, 'import numpy as np\n'), ((2523, 2535), 'numpy.std', 'np.std', (['info'], {}), '(info)\n', (2529, 2535), True, 'import numpy as np\n')]
|
# SPDX-License-Identifier: Apache-2.0
"""Unit Tests for custom rnns."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import init_ops
from backend_test_base import Tf2OnnxBackendTestBase
from common import * # pylint: disable=wildcard-import, unused-wildcard-import
from tf2onnx.tf_loader import is_tf2
# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test
# pylint: disable=abstract-method,arguments-differ
if is_tf2():
BasicLSTMCell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell
LSTMCell = tf.compat.v1.nn.rnn_cell.LSTMCell
GRUCell = tf.compat.v1.nn.rnn_cell.GRUCell
RNNCell = tf.compat.v1.nn.rnn_cell.RNNCell
MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell
dynamic_rnn = tf.compat.v1.nn.dynamic_rnn
bidirectional_dynamic_rnn = tf.compat.v1.nn.bidirectional_dynamic_rnn
else:
LSTMBlockCell = tf.contrib.rnn.LSTMBlockCell
LSTMCell = tf.nn.rnn_cell.LSTMCell
    GRUCell = tf.nn.rnn_cell.GRUCell
RNNCell = tf.nn.rnn_cell.RNNCell
MultiRNNCell = tf.contrib.rnn.MultiRNNCell
dynamic_rnn = tf.nn.dynamic_rnn
bidirectional_dynamic_rnn = tf.nn.bidirectional_dynamic_rnn
class CustomRnnCellTests(Tf2OnnxBackendTestBase):
@check_opset_min_version(8, "Scan")
@skip_tf2()
def test_single_dynamic_custom_rnn(self):
size = 5 # size of each model layer.
batch_size = 1
cell = GatedGRUCell(size)
x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
def func(x):
xs, s = dynamic_rnn(cell=cell, dtype=tf.float32, inputs=x, time_major=False)
return tf.identity(xs, name="output"), tf.identity(s, name="final_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
@check_opset_min_version(8, "Scan")
@skip_tf2()
def test_single_dynamic_custom_rnn_time_major(self):
size = 5 # size of each model layer.
batch_size = 1
x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
def func(x):
cell = GatedGRUCell(size)
xs, s = dynamic_rnn(cell=cell, dtype=tf.float32, inputs=x, time_major=True)
return tf.identity(xs, name="output"), tf.identity(s, name="final_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
@check_opset_min_version(8, "Scan")
@skip_tf2()
def test_single_dynamic_custom_rnn_with_seq_length(self):
units = 5
batch_size = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
def func(x):
# no scope
cell = GatedGRUCell(units)
outputs, cell_state = dynamic_rnn(
cell,
x,
dtype=tf.float32,
sequence_length=[4, 3, 4, 5, 2, 1])
return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0", "cell_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
@check_opset_min_version(8, "Scan")
@skip_tf2()
def test_single_dynamic_custom_rnn_with_non_const_seq_length(self):
units = 5
batch_size = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
y_val = np.array([4, 3, 4, 5, 2, 1], dtype=np.int32)
def func(x, seq_length):
# no scope
cell = GatedGRUCell(units)
outputs, cell_state = dynamic_rnn(
cell,
x,
dtype=tf.float32,
sequence_length=tf.identity(seq_length))
return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")
feed_dict = {"input_1:0": x_val, "input_2:0": y_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output:0", "cell_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
@check_opset_min_version(8, "Scan")
@check_tf_min_version("1.8")
@skip_tf2()
def test_attention_wrapper_const_encoder(self):
size = 5
time_step = 3
input_size = 4
attn_size = size
batch_size = 9
# shape [batch size, time step, size]
# attention_state: usually the output of an RNN encoder.
# This tensor should be shaped `[batch_size, max_time, ...]`.
decoder_time_step = 6
x_val = np.random.randn(decoder_time_step, input_size).astype('f')
x_val = np.stack([x_val] * batch_size)
attention_states = np.random.randn(batch_size, time_step, attn_size).astype('f')
def func(x):
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size, attention_states)
match_input_fn = lambda curr_input, state: tf.concat([curr_input, state], axis=-1)
cell = LSTMCell(size)
match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell,
attention_mechanism,
attention_layer_size=attn_size,
cell_input_fn=match_input_fn,
output_attention=False)
output, attr_state = dynamic_rnn(match_cell_fw, x, dtype=tf.float32)
return tf.identity(output, name="output"), tf.identity(attr_state.cell_state, name="final_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0"]
output_names_with_port = ["output:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
@check_opset_min_version(8, "Scan")
@check_tf_min_version("1.8")
@skip_tf2()
def test_attention_wrapper_lstm_encoder(self):
size = 5
time_step = 3
input_size = 4
attn_size = size
batch_size = 9
# shape [batch size, time step, size]
# attention_state: usually the output of an RNN encoder.
# This tensor should be shaped `[batch_size, max_time, ...]`
encoder_time_step = time_step
encoder_x_val = np.random.randn(encoder_time_step, input_size).astype('f')
encoder_x_val = np.stack([encoder_x_val] * batch_size)
decoder_time_step = 6
decoder_x_val = np.random.randn(decoder_time_step, input_size).astype('f')
decoder_x_val = np.stack([decoder_x_val] * batch_size)
def func(encoder_x, decoder_x):
encoder_cell = LSTMCell(size)
output, attr_state = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32)
output_0 = tf.identity(output, name="output_0")
attention_states = output
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size,
attention_states)
match_input_fn = lambda curr_input, state: tf.concat([curr_input, state], axis=-1)
cell = LSTMCell(size)
match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell,
attention_mechanism,
attention_layer_size=attn_size,
cell_input_fn=match_input_fn,
output_attention=False)
output, attr_state = dynamic_rnn(match_cell_fw, decoder_x, dtype=tf.float32)
return output_0, tf.identity(output, name="output"), tf.identity(attr_state.cell_state, name="final_state")
feed_dict = {"input_1:0": encoder_x_val, "input_2:0": decoder_x_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output_0:0", "output:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
@check_opset_min_version(8, "Scan")
@check_tf_min_version("1.8")
@skip_tf2()
def test_attention_wrapper_gru_encoder(self):
size = 5
time_step = 3
input_size = 4
attn_size = size
batch_size = 9
# shape [batch size, time step, size]
# attention_state: usually the output of an RNN encoder.
# This tensor should be shaped `[batch_size, max_time, ...]`
encoder_time_step = time_step
encoder_x_val = np.random.randn(encoder_time_step, input_size).astype('f')
encoder_x_val = np.stack([encoder_x_val] * batch_size)
decoder_time_step = 6
decoder_x_val = np.random.randn(decoder_time_step, input_size).astype('f')
decoder_x_val = np.stack([decoder_x_val] * batch_size)
def func(encoder_x, decoder_x):
encoder_cell = GRUCell(size)
output, attr_state = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32)
_ = tf.identity(output, name="output_0")
attention_states = output
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size,
attention_states)
match_input_fn = lambda curr_input, state: tf.concat([curr_input, state], axis=-1)
cell = GRUCell(size)
match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell,
attention_mechanism,
attention_layer_size=attn_size,
cell_input_fn=match_input_fn,
output_attention=False)
output, attr_state = dynamic_rnn(match_cell_fw, decoder_x, dtype=tf.float32)
return tf.identity(output, name="output"), tf.identity(attr_state.cell_state, name="final_state")
feed_dict = {"input_1:0": encoder_x_val, "input_2:0": decoder_x_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output_0:0", "output:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
@check_opset_min_version(8, "Scan")
@check_tf_min_version("1.8")
@skip_tf2()
def test_attention_wrapper_lstm_encoder_input_has_none_dim(self):
size = 5
time_step = 3
input_size = 4
attn_size = size
batch_size = 9
# shape [batch size, time step, size]
# attention_state: usually the output of an RNN encoder.
# This tensor should be shaped `[batch_size, max_time, ...]`
encoder_time_step = time_step
encoder_x_val = np.random.randn(encoder_time_step, input_size).astype('f')
encoder_x_val = np.stack([encoder_x_val] * batch_size)
decoder_time_step = 6
decoder_x_val = np.random.randn(decoder_time_step, input_size).astype('f')
decoder_x_val = np.stack([decoder_x_val] * batch_size)
def func(encoder_x, decoder_x):
encoder_cell = LSTMCell(size)
output, attr_state = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32)
_ = tf.identity(output, name="output_0")
attention_states = output
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size,
attention_states)
match_input_fn = lambda curr_input, state: tf.concat([curr_input, state], axis=-1)
cell = LSTMCell(size)
match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell,
attention_mechanism,
attention_layer_size=attn_size,
cell_input_fn=match_input_fn,
output_attention=False)
output, attr_state = dynamic_rnn(match_cell_fw, decoder_x, dtype=tf.float32)
return tf.identity(output, name="output"), tf.identity(attr_state.cell_state, name="final_state")
feed_dict = {"input_1:0": encoder_x_val, "input_2:0": decoder_x_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output_0:0", "output:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
@check_opset_min_version(8, "Scan")
@skip_tf2()
def test_multi_rnn_lstm(self, state_is_tuple=True):
units = 5
batch_size = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
def func(x):
initializer = init_ops.constant_initializer(0.5)
cell_0 = LSTMCell(units,
initializer=initializer,
state_is_tuple=state_is_tuple)
cell_1 = LSTMCell(units,
initializer=initializer,
state_is_tuple=state_is_tuple)
cell_2 = LSTMCell(units,
initializer=initializer,
state_is_tuple=state_is_tuple)
cells = MultiRNNCell([cell_0, cell_1, cell_2], state_is_tuple=state_is_tuple)
outputs, cell_state = dynamic_rnn(cells, x, dtype=tf.float32)
return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")
input_names_with_port = ["input_1:0"]
feed_dict = {"input_1:0": x_val}
output_names_with_port = ["output:0", "cell_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
@check_opset_min_version(8, "Scan")
@check_tf_min_version("1.8")
@skip_opset(9, "ReverseSequence")
@skip_tf2()
@allow_missing_shapes("Missing RNN shape")
def test_bidrectional_attention_wrapper_lstm_encoder(self):
size = 30
time_step = 3
input_size = 4
attn_size = size
batch_size = 9
# shape [batch size, time step, size]
# attention_state: usually the output of an RNN encoder.
# This tensor should be shaped `[batch_size, max_time, ...]`
encoder_time_step = time_step
encoder_x_val = np.random.randn(encoder_time_step, input_size).astype('f')
encoder_x_val = np.stack([encoder_x_val] * batch_size)
decoder_time_step = 6
decoder_x_val = np.random.randn(decoder_time_step, batch_size, input_size).astype('f')
def func(encoder_x, decoder_x, seq_length):
encoder_cell = LSTMCell(size)
attention_states, _ = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32)
# [9, 3, 30], [9, 30]
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size,
attention_states)
match_input_fn = lambda curr_input, state: tf.concat([curr_input, state], axis=-1)
cell = LSTMCell(size)
match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell,
attention_mechanism,
attention_layer_size=attn_size,
cell_input_fn=match_input_fn,
output_attention=False)
match_cell_bk = tf.contrib.seq2seq.AttentionWrapper(cell,
attention_mechanism,
attention_layer_size=attn_size,
cell_input_fn=match_input_fn,
output_attention=False)
(match_output_fw, match_output_bk), (match_state_fw, match_state_bk) = \
bidirectional_dynamic_rnn(cell_fw=match_cell_fw,
cell_bw=match_cell_bk,
inputs=decoder_x,
sequence_length=tf.identity(seq_length),
dtype=tf.float32,
time_major=True)
matched_output = tf.concat([match_output_fw, match_output_bk], axis=-1)
matched_state = tf.concat([match_state_fw.cell_state, match_state_bk.cell_state], -1)
return tf.identity(matched_output, name="output_0"), tf.identity(matched_state, name="final_state")
feed_dict = {"input_1:0": encoder_x_val, "input_2:0": decoder_x_val,
"input_3:0": np.array([6, 5, 4, 3, 2, 1, 2, 3, 6], dtype=np.int32)}
input_names_with_port = ["input_1:0", "input_2:0", "input_3:0"]
output_names_with_port = ["output_0:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
class GatedGRUCell(RNNCell):
def __init__(self, hidden_dim, reuse=None):
        super().__init__(_reuse=reuse)
self._num_units = hidden_dim
self._activation = tf.tanh
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def call(self, inputs, state):
# inputs shape: [batch size, time step, input size] = [1, 3, 2]
# num_units: 5
# W shape: [2, 3 * 5] = [2, 15]
# U shape: [5, 3 * 5] = [5, 15]
# b shape: [1, 3 * 5] = [1, 15]
# state shape: [batch size, state size] = [1, 5]
input_dim = inputs.get_shape()[-1]
assert input_dim is not None, "input dimension must be defined"
# W = tf.get_variable(name="W", shape=[input_dim, 3 * self._num_units], dtype=tf.float32)
W = np.arange(30.0, dtype=np.float32).reshape((2, 15))
# U = tf.get_variable(name='U', shape=[self._num_units, 3 * self._num_units], dtype=tf.float32)
U = np.arange(75.0, dtype=np.float32).reshape((5, 15))
# b = tf.get_variable(name='b', shape=[1, 3 * self._num_units], dtype=tf.float32)
b = np.arange(15.0, dtype=np.float32).reshape((1, 15))
xw = tf.split(tf.matmul(inputs, W) + b, 3, 1)
hu = tf.split(tf.matmul(state, U), 3, 1)
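        # For reference: the three splits above implement standard GRU gating,
        # matching the computation below:
        #   r = sigmoid(x.W_r + b_r + h.U_r)        (reset gate)
        #   z = sigmoid(x.W_z + b_z + h.U_z)        (update gate)
        #   h~ = tanh(x.W_c + b_c + r * (h.U_c))    (candidate state)
        #   next_h = (1 - z) * h~ + z * h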
r = tf.sigmoid(xw[0] + hu[0])
z = tf.sigmoid(xw[1] + hu[1])
h1 = self._activation(xw[2] + r * hu[2])
next_h = h1 * (1 - z) + state * z
return next_h, next_h
if __name__ == '__main__':
unittest_main()
|
[
"tensorflow.contrib.seq2seq.BahdanauAttention",
"tensorflow.contrib.seq2seq.AttentionWrapper",
"tf2onnx.tf_loader.is_tf2",
"numpy.array",
"numpy.stack",
"tensorflow.sigmoid",
"tensorflow.concat",
"tensorflow.matmul",
"tensorflow.identity",
"tensorflow.python.ops.init_ops.constant_initializer",
"numpy.random.randn",
"numpy.arange"
] |
[((584, 592), 'tf2onnx.tf_loader.is_tf2', 'is_tf2', ([], {}), '()\n', (590, 592), False, 'from tf2onnx.tf_loader import is_tf2\n'), ((1563, 1627), 'numpy.array', 'np.array', (['[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]'], {'dtype': 'np.float32'}), '([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]], dtype=np.float32)\n', (1571, 1627), True, 'import numpy as np\n'), ((1638, 1668), 'numpy.stack', 'np.stack', (['([x_val] * batch_size)'], {}), '([x_val] * batch_size)\n', (1646, 1668), True, 'import numpy as np\n'), ((2311, 2375), 'numpy.array', 'np.array', (['[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]'], {'dtype': 'np.float32'}), '([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]], dtype=np.float32)\n', (2319, 2375), True, 'import numpy as np\n'), ((2386, 2416), 'numpy.stack', 'np.stack', (['([x_val] * batch_size)'], {}), '([x_val] * batch_size)\n', (2394, 2416), True, 'import numpy as np\n'), ((3073, 3165), 'numpy.array', 'np.array', (['[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0], [5.0, 5.0]]'], {'dtype': 'np.float32'}), '([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0], [5.0, 5.0]],\n dtype=np.float32)\n', (3081, 3165), True, 'import numpy as np\n'), ((3168, 3198), 'numpy.stack', 'np.stack', (['([x_val] * batch_size)'], {}), '([x_val] * batch_size)\n', (3176, 3198), True, 'import numpy as np\n'), ((3994, 4086), 'numpy.array', 'np.array', (['[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0], [5.0, 5.0]]'], {'dtype': 'np.float32'}), '([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0], [5.0, 5.0]],\n dtype=np.float32)\n', (4002, 4086), True, 'import numpy as np\n'), ((4089, 4119), 'numpy.stack', 'np.stack', (['([x_val] * batch_size)'], {}), '([x_val] * batch_size)\n', (4097, 4119), True, 'import numpy as np\n'), ((4136, 4180), 'numpy.array', 'np.array', (['[4, 3, 4, 5, 2, 1]'], {'dtype': 'np.int32'}), '([4, 3, 4, 5, 2, 1], dtype=np.int32)\n', (4144, 4180), True, 'import numpy as np\n'), ((5395, 5425), 'numpy.stack', 'np.stack', (['([x_val] * batch_size)'], {}), '([x_val] * batch_size)\n', (5403, 5425), True, 'import numpy as np\n'), ((7263, 7301), 'numpy.stack', 'np.stack', (['([encoder_x_val] * batch_size)'], {}), '([encoder_x_val] * batch_size)\n', (7271, 7301), True, 'import numpy as np\n'), ((7439, 7477), 'numpy.stack', 'np.stack', (['([decoder_x_val] * batch_size)'], {}), '([decoder_x_val] * batch_size)\n', (7447, 7477), True, 'import numpy as np\n'), ((9579, 9617), 'numpy.stack', 'np.stack', (['([encoder_x_val] * batch_size)'], {}), '([encoder_x_val] * batch_size)\n', (9587, 9617), True, 'import numpy as np\n'), ((9755, 9793), 'numpy.stack', 'np.stack', (['([decoder_x_val] * batch_size)'], {}), '([decoder_x_val] * batch_size)\n', (9763, 9793), True, 'import numpy as np\n'), ((11894, 11932), 'numpy.stack', 'np.stack', (['([encoder_x_val] * batch_size)'], {}), '([encoder_x_val] * batch_size)\n', (11902, 11932), True, 'import numpy as np\n'), ((12070, 12108), 'numpy.stack', 'np.stack', (['([decoder_x_val] * batch_size)'], {}), '([decoder_x_val] * batch_size)\n', (12078, 12108), True, 'import numpy as np\n'), ((13787, 13863), 'numpy.array', 'np.array', (['[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]]'], {'dtype': 'np.float32'}), '([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]], dtype=np.float32)\n', (13795, 13863), True, 'import numpy as np\n'), ((13872, 13902), 'numpy.stack', 'np.stack', (['([x_val] * batch_size)'], {}), '([x_val] * batch_size)\n', (13880, 13902), True, 'import numpy as np\n'), ((15642, 15680), 'numpy.stack', 'np.stack', (['([encoder_x_val] * batch_size)'], {}), '([encoder_x_val] * batch_size)\n', (15650, 
15680), True, 'import numpy as np\n'), ((19725, 19750), 'tensorflow.sigmoid', 'tf.sigmoid', (['(xw[0] + hu[0])'], {}), '(xw[0] + hu[0])\n', (19735, 19750), True, 'import tensorflow as tf\n'), ((19763, 19788), 'tensorflow.sigmoid', 'tf.sigmoid', (['(xw[1] + hu[1])'], {}), '(xw[1] + hu[1])\n', (19773, 19788), True, 'import tensorflow as tf\n'), ((5571, 5636), 'tensorflow.contrib.seq2seq.BahdanauAttention', 'tf.contrib.seq2seq.BahdanauAttention', (['attn_size', 'attention_states'], {}), '(attn_size, attention_states)\n', (5607, 5636), True, 'import tensorflow as tf\n'), ((5795, 5951), 'tensorflow.contrib.seq2seq.AttentionWrapper', 'tf.contrib.seq2seq.AttentionWrapper', (['cell', 'attention_mechanism'], {'attention_layer_size': 'attn_size', 'cell_input_fn': 'match_input_fn', 'output_attention': '(False)'}), '(cell, attention_mechanism,\n attention_layer_size=attn_size, cell_input_fn=match_input_fn,\n output_attention=False)\n', (5830, 5951), True, 'import tensorflow as tf\n'), ((7672, 7708), 'tensorflow.identity', 'tf.identity', (['output'], {'name': '"""output_0"""'}), "(output, name='output_0')\n", (7683, 7708), True, 'import tensorflow as tf\n'), ((7781, 7846), 'tensorflow.contrib.seq2seq.BahdanauAttention', 'tf.contrib.seq2seq.BahdanauAttention', (['attn_size', 'attention_states'], {}), '(attn_size, attention_states)\n', (7817, 7846), True, 'import tensorflow as tf\n'), ((8076, 8232), 'tensorflow.contrib.seq2seq.AttentionWrapper', 'tf.contrib.seq2seq.AttentionWrapper', (['cell', 'attention_mechanism'], {'attention_layer_size': 'attn_size', 'cell_input_fn': 'match_input_fn', 'output_attention': '(False)'}), '(cell, attention_mechanism,\n attention_layer_size=attn_size, cell_input_fn=match_input_fn,\n output_attention=False)\n', (8111, 8232), True, 'import tensorflow as tf\n'), ((9980, 10016), 'tensorflow.identity', 'tf.identity', (['output'], {'name': '"""output_0"""'}), "(output, name='output_0')\n", (9991, 10016), True, 'import tensorflow as tf\n'), ((10089, 10154), 'tensorflow.contrib.seq2seq.BahdanauAttention', 'tf.contrib.seq2seq.BahdanauAttention', (['attn_size', 'attention_states'], {}), '(attn_size, attention_states)\n', (10125, 10154), True, 'import tensorflow as tf\n'), ((10383, 10539), 'tensorflow.contrib.seq2seq.AttentionWrapper', 'tf.contrib.seq2seq.AttentionWrapper', (['cell', 'attention_mechanism'], {'attention_layer_size': 'attn_size', 'cell_input_fn': 'match_input_fn', 'output_attention': '(False)'}), '(cell, attention_mechanism,\n attention_layer_size=attn_size, cell_input_fn=match_input_fn,\n output_attention=False)\n', (10418, 10539), True, 'import tensorflow as tf\n'), ((12296, 12332), 'tensorflow.identity', 'tf.identity', (['output'], {'name': '"""output_0"""'}), "(output, name='output_0')\n", (12307, 12332), True, 'import tensorflow as tf\n'), ((12405, 12470), 'tensorflow.contrib.seq2seq.BahdanauAttention', 'tf.contrib.seq2seq.BahdanauAttention', (['attn_size', 'attention_states'], {}), '(attn_size, attention_states)\n', (12441, 12470), True, 'import tensorflow as tf\n'), ((12700, 12856), 'tensorflow.contrib.seq2seq.AttentionWrapper', 'tf.contrib.seq2seq.AttentionWrapper', (['cell', 'attention_mechanism'], {'attention_layer_size': 'attn_size', 'cell_input_fn': 'match_input_fn', 'output_attention': '(False)'}), '(cell, attention_mechanism,\n attention_layer_size=attn_size, cell_input_fn=match_input_fn,\n output_attention=False)\n', (12735, 12856), True, 'import tensorflow as tf\n'), ((13950, 13984), 'tensorflow.python.ops.init_ops.constant_initializer', 
'init_ops.constant_initializer', (['(0.5)'], {}), '(0.5)\n', (13979, 13984), False, 'from tensorflow.python.ops import init_ops\n'), ((16058, 16123), 'tensorflow.contrib.seq2seq.BahdanauAttention', 'tf.contrib.seq2seq.BahdanauAttention', (['attn_size', 'attention_states'], {}), '(attn_size, attention_states)\n', (16094, 16123), True, 'import tensorflow as tf\n'), ((16353, 16509), 'tensorflow.contrib.seq2seq.AttentionWrapper', 'tf.contrib.seq2seq.AttentionWrapper', (['cell', 'attention_mechanism'], {'attention_layer_size': 'attn_size', 'cell_input_fn': 'match_input_fn', 'output_attention': '(False)'}), '(cell, attention_mechanism,\n attention_layer_size=attn_size, cell_input_fn=match_input_fn,\n output_attention=False)\n', (16388, 16509), True, 'import tensorflow as tf\n'), ((16786, 16942), 'tensorflow.contrib.seq2seq.AttentionWrapper', 'tf.contrib.seq2seq.AttentionWrapper', (['cell', 'attention_mechanism'], {'attention_layer_size': 'attn_size', 'cell_input_fn': 'match_input_fn', 'output_attention': '(False)'}), '(cell, attention_mechanism,\n attention_layer_size=attn_size, cell_input_fn=match_input_fn,\n output_attention=False)\n', (16821, 16942), True, 'import tensorflow as tf\n'), ((17698, 17752), 'tensorflow.concat', 'tf.concat', (['[match_output_fw, match_output_bk]'], {'axis': '(-1)'}), '([match_output_fw, match_output_bk], axis=-1)\n', (17707, 17752), True, 'import tensorflow as tf\n'), ((17781, 17850), 'tensorflow.concat', 'tf.concat', (['[match_state_fw.cell_state, match_state_bk.cell_state]', '(-1)'], {}), '([match_state_fw.cell_state, match_state_bk.cell_state], -1)\n', (17790, 17850), True, 'import tensorflow as tf\n'), ((18075, 18128), 'numpy.array', 'np.array', (['[6, 5, 4, 3, 2, 1, 2, 3, 6]'], {'dtype': 'np.int32'}), '([6, 5, 4, 3, 2, 1, 2, 3, 6], dtype=np.int32)\n', (18083, 18128), True, 'import numpy as np\n'), ((19686, 19705), 'tensorflow.matmul', 'tf.matmul', (['state', 'U'], {}), '(state, U)\n', (19695, 19705), True, 'import tensorflow as tf\n'), ((1798, 1828), 'tensorflow.identity', 'tf.identity', (['xs'], {'name': '"""output"""'}), "(xs, name='output')\n", (1809, 1828), True, 'import tensorflow as tf\n'), ((1830, 1864), 'tensorflow.identity', 'tf.identity', (['s'], {'name': '"""final_state"""'}), "(s, name='final_state')\n", (1841, 1864), True, 'import tensorflow as tf\n'), ((2583, 2613), 'tensorflow.identity', 'tf.identity', (['xs'], {'name': '"""output"""'}), "(xs, name='output')\n", (2594, 2613), True, 'import tensorflow as tf\n'), ((2615, 2649), 'tensorflow.identity', 'tf.identity', (['s'], {'name': '"""final_state"""'}), "(s, name='final_state')\n", (2626, 2649), True, 'import tensorflow as tf\n'), ((3475, 3510), 'tensorflow.identity', 'tf.identity', (['outputs'], {'name': '"""output"""'}), "(outputs, name='output')\n", (3486, 3510), True, 'import tensorflow as tf\n'), ((3512, 3554), 'tensorflow.identity', 'tf.identity', (['cell_state'], {'name': '"""cell_state"""'}), "(cell_state, name='cell_state')\n", (3523, 3554), True, 'import tensorflow as tf\n'), ((4474, 4509), 'tensorflow.identity', 'tf.identity', (['outputs'], {'name': '"""output"""'}), "(outputs, name='output')\n", (4485, 4509), True, 'import tensorflow as tf\n'), ((4511, 4553), 'tensorflow.identity', 'tf.identity', (['cell_state'], {'name': '"""cell_state"""'}), "(cell_state, name='cell_state')\n", (4522, 4553), True, 'import tensorflow as tf\n'), ((5320, 5366), 'numpy.random.randn', 'np.random.randn', (['decoder_time_step', 'input_size'], {}), '(decoder_time_step, input_size)\n', (5335, 5366), True, 
'import numpy as np\n'), ((5454, 5503), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'time_step', 'attn_size'], {}), '(batch_size, time_step, attn_size)\n', (5469, 5503), True, 'import numpy as np\n'), ((5693, 5732), 'tensorflow.concat', 'tf.concat', (['[curr_input, state]'], {'axis': '(-1)'}), '([curr_input, state], axis=-1)\n', (5702, 5732), True, 'import tensorflow as tf\n'), ((6301, 6335), 'tensorflow.identity', 'tf.identity', (['output'], {'name': '"""output"""'}), "(output, name='output')\n", (6312, 6335), True, 'import tensorflow as tf\n'), ((6337, 6391), 'tensorflow.identity', 'tf.identity', (['attr_state.cell_state'], {'name': '"""final_state"""'}), "(attr_state.cell_state, name='final_state')\n", (6348, 6391), True, 'import tensorflow as tf\n'), ((7180, 7226), 'numpy.random.randn', 'np.random.randn', (['encoder_time_step', 'input_size'], {}), '(encoder_time_step, input_size)\n', (7195, 7226), True, 'import numpy as np\n'), ((7356, 7402), 'numpy.random.randn', 'np.random.randn', (['decoder_time_step', 'input_size'], {}), '(decoder_time_step, input_size)\n', (7371, 7402), True, 'import numpy as np\n'), ((7974, 8013), 'tensorflow.concat', 'tf.concat', (['[curr_input, state]'], {'axis': '(-1)'}), '([curr_input, state], axis=-1)\n', (7983, 8013), True, 'import tensorflow as tf\n'), ((8601, 8635), 'tensorflow.identity', 'tf.identity', (['output'], {'name': '"""output"""'}), "(output, name='output')\n", (8612, 8635), True, 'import tensorflow as tf\n'), ((8637, 8691), 'tensorflow.identity', 'tf.identity', (['attr_state.cell_state'], {'name': '"""final_state"""'}), "(attr_state.cell_state, name='final_state')\n", (8648, 8691), True, 'import tensorflow as tf\n'), ((9496, 9542), 'numpy.random.randn', 'np.random.randn', (['encoder_time_step', 'input_size'], {}), '(encoder_time_step, input_size)\n', (9511, 9542), True, 'import numpy as np\n'), ((9672, 9718), 'numpy.random.randn', 'np.random.randn', (['decoder_time_step', 'input_size'], {}), '(decoder_time_step, input_size)\n', (9687, 9718), True, 'import numpy as np\n'), ((10282, 10321), 'tensorflow.concat', 'tf.concat', (['[curr_input, state]'], {'axis': '(-1)'}), '([curr_input, state], axis=-1)\n', (10291, 10321), True, 'import tensorflow as tf\n'), ((10896, 10930), 'tensorflow.identity', 'tf.identity', (['output'], {'name': '"""output"""'}), "(output, name='output')\n", (10907, 10930), True, 'import tensorflow as tf\n'), ((10932, 10986), 'tensorflow.identity', 'tf.identity', (['attr_state.cell_state'], {'name': '"""final_state"""'}), "(attr_state.cell_state, name='final_state')\n", (10943, 10986), True, 'import tensorflow as tf\n'), ((11811, 11857), 'numpy.random.randn', 'np.random.randn', (['encoder_time_step', 'input_size'], {}), '(encoder_time_step, input_size)\n', (11826, 11857), True, 'import numpy as np\n'), ((11987, 12033), 'numpy.random.randn', 'np.random.randn', (['decoder_time_step', 'input_size'], {}), '(decoder_time_step, input_size)\n', (12002, 12033), True, 'import numpy as np\n'), ((12598, 12637), 'tensorflow.concat', 'tf.concat', (['[curr_input, state]'], {'axis': '(-1)'}), '([curr_input, state], axis=-1)\n', (12607, 12637), True, 'import tensorflow as tf\n'), ((13215, 13249), 'tensorflow.identity', 'tf.identity', (['output'], {'name': '"""output"""'}), "(output, name='output')\n", (13226, 13249), True, 'import tensorflow as tf\n'), ((13251, 13305), 'tensorflow.identity', 'tf.identity', (['attr_state.cell_state'], {'name': '"""final_state"""'}), "(attr_state.cell_state, name='final_state')\n", (13262, 13305), 
True, 'import tensorflow as tf\n'), ((14631, 14666), 'tensorflow.identity', 'tf.identity', (['outputs'], {'name': '"""output"""'}), "(outputs, name='output')\n", (14642, 14666), True, 'import tensorflow as tf\n'), ((14668, 14710), 'tensorflow.identity', 'tf.identity', (['cell_state'], {'name': '"""cell_state"""'}), "(cell_state, name='cell_state')\n", (14679, 14710), True, 'import tensorflow as tf\n'), ((15559, 15605), 'numpy.random.randn', 'np.random.randn', (['encoder_time_step', 'input_size'], {}), '(encoder_time_step, input_size)\n', (15574, 15605), True, 'import numpy as np\n'), ((15735, 15793), 'numpy.random.randn', 'np.random.randn', (['decoder_time_step', 'batch_size', 'input_size'], {}), '(decoder_time_step, batch_size, input_size)\n', (15750, 15793), True, 'import numpy as np\n'), ((16251, 16290), 'tensorflow.concat', 'tf.concat', (['[curr_input, state]'], {'axis': '(-1)'}), '([curr_input, state], axis=-1)\n', (16260, 16290), True, 'import tensorflow as tf\n'), ((17870, 17914), 'tensorflow.identity', 'tf.identity', (['matched_output'], {'name': '"""output_0"""'}), "(matched_output, name='output_0')\n", (17881, 17914), True, 'import tensorflow as tf\n'), ((17916, 17962), 'tensorflow.identity', 'tf.identity', (['matched_state'], {'name': '"""final_state"""'}), "(matched_state, name='final_state')\n", (17927, 17962), True, 'import tensorflow as tf\n'), ((19238, 19271), 'numpy.arange', 'np.arange', (['(30.0)'], {'dtype': 'np.float32'}), '(30.0, dtype=np.float32)\n', (19247, 19271), True, 'import numpy as np\n'), ((19405, 19438), 'numpy.arange', 'np.arange', (['(75.0)'], {'dtype': 'np.float32'}), '(75.0, dtype=np.float32)\n', (19414, 19438), True, 'import numpy as np\n'), ((19558, 19591), 'numpy.arange', 'np.arange', (['(15.0)'], {'dtype': 'np.float32'}), '(15.0, dtype=np.float32)\n', (19567, 19591), True, 'import numpy as np\n'), ((19632, 19652), 'tensorflow.matmul', 'tf.matmul', (['inputs', 'W'], {}), '(inputs, W)\n', (19641, 19652), True, 'import tensorflow as tf\n'), ((4430, 4453), 'tensorflow.identity', 'tf.identity', (['seq_length'], {}), '(seq_length)\n', (4441, 4453), True, 'import tensorflow as tf\n'), ((17524, 17547), 'tensorflow.identity', 'tf.identity', (['seq_length'], {}), '(seq_length)\n', (17535, 17547), True, 'import tensorflow as tf\n')]
|
import unittest
from numpy.testing import assert_array_equal
import numpy as np
from libact.base.dataset import Dataset
from libact.models import LogisticRegression
from libact.query_strategies import VarianceReduction
from .utils import run_qs
class VarianceReductionTestCase(unittest.TestCase):
"""Variance reduction test case using artifitial dataset"""
def setUp(self):
self.X = [[-2, -1], [1, 1], [-1, -2], [-1, -1], [1, 2], [2, 1]]
self.y = [0, 1, 0, 1, 0, 1]
self.quota = 4
def test_variance_reduction(self):
trn_ds = Dataset(self.X,
np.concatenate([self.y[:2],
[None] * (len(self.y) - 2)]))
qs = VarianceReduction(trn_ds, model=LogisticRegression(), sigma=0.1)
qseq = run_qs(trn_ds, qs, self.y, self.quota)
assert_array_equal(qseq, np.array([4, 5, 2, 3]))
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"numpy.array",
"libact.models.LogisticRegression"
] |
[((936, 951), 'unittest.main', 'unittest.main', ([], {}), '()\n', (949, 951), False, 'import unittest\n'), ((879, 901), 'numpy.array', 'np.array', (['[4, 5, 2, 3]'], {}), '([4, 5, 2, 3])\n', (887, 901), True, 'import numpy as np\n'), ((759, 779), 'libact.models.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (777, 779), False, 'from libact.models import LogisticRegression\n')]
|
from manimlib.imports import *
from manimlib.utils import bezier
import numpy as np
class VectorInterpolator:
def __init__(self,points):
self.points = points
self.n = len(self.points)
self.dists = [0]
for i in range(len(self.points)):
self.dists += [np.linalg.norm(
self.points[i] -
self.points[(i+1) % self.n]
)+self.dists[i]]
def interpolate(self,alpha):
dist = alpha*self.dists[-1]
idx = self.interpolate_index(dist)
mult = (dist - self.dists[idx])/np.linalg.norm(self.points[(idx+1)%self.n]-self.points[idx])
return self.points[idx] + \
mult*(self.points[(idx+1)%self.n]-self.points[idx])
def interpolate_index(self,dist):
def is_solution(idx):
if idx == self.n-1:
return self.dists[idx] <= dist
else:
                return ((self.dists[idx] <= dist) and
                        (self.dists[(idx+1)%self.n] >= dist))
# binary search
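        # coarse bisection over the cumulative distances: look for idx with
        # dists[idx] <= dist <= dists[idx + 1]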
step_size=int(self.n / 4)
cur=int(self.n / 2)
while not is_solution(cur):
if self.dists[cur] > dist:
cur -= step_size
else:
cur += step_size
step_size = max(int(step_size/2), 1)
return cur
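# Minimal usage sketch (illustrative values, assuming 3-D numpy points):
#   square = VectorInterpolator([np.array([0., 0., 0.]), np.array([1., 0., 0.]),
#                                np.array([1., 1., 0.]), np.array([0., 1., 0.])])
#   square.interpolate(0.5)  # -> point halfway along the closed outline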
|
[
"numpy.linalg.norm"
] |
[((575, 641), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.points[(idx + 1) % self.n] - self.points[idx])'], {}), '(self.points[(idx + 1) % self.n] - self.points[idx])\n', (589, 641), True, 'import numpy as np\n'), ((300, 362), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.points[i] - self.points[(i + 1) % self.n])'], {}), '(self.points[i] - self.points[(i + 1) % self.n])\n', (314, 362), True, 'import numpy as np\n')]
|
from __future__ import division
from unittest import skipIf, TestCase
import os
from pandas import DataFrame
import numpy as np
from numpy.testing import assert_array_equal
BACKEND_AVAILABLE = os.environ.get("ETS_TOOLKIT", "qt4") != "null"
if BACKEND_AVAILABLE:
from app_common.apptools.testing_utils import assert_obj_gui_works
from pybleau.app.plotting.plot_config import HeatmapPlotConfigurator, \
HEATMAP_PLOT_TYPE, HistogramPlotConfigurator, HIST_PLOT_TYPE, \
LinePlotConfigurator, BarPlotConfigurator, ScatterPlotConfigurator, \
SCATTER_PLOT_TYPE, CMAP_SCATTER_PLOT_TYPE, LINE_PLOT_TYPE, \
BAR_PLOT_TYPE
LEN = 16
TEST_DF = DataFrame({"a": [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4],
"b": [1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4],
"c": [1, 2, 3, 4, 2, 3, 1, 1, 4, 4, 5, 6, 4, 4, 5, 6],
"d": list("ababcabcdabcdeab"),
"e": np.random.randn(LEN),
"f": range(LEN),
# Highly repetitive column to split the entire data into 2
"g": np.array(["0", "1"] * (LEN // 2)),
"h": np.array([0, 1] * (LEN // 2), dtype=bool),
})
class BasePlotConfig(object):
def test_creation_fails_if_no_df(self):
with self.assertRaises(ValueError):
config = self.configurator()
config.to_dict()
def test_bring_up(self):
obj = self.configurator(data_source=TEST_DF)
assert_obj_gui_works(obj)
# Assertion utilities -----------------------------------------------------
def assert_editor_options(self, editor):
editor_options = editor.values
if self.numerical_cols_only:
for col in editor_options:
if col != "index":
self.assertIn(TEST_DF[col].dtype, (np.int64, np.float64))
else:
self.assertEqual(set(editor_options),
set(TEST_DF.columns) | {"index"})
class BaseXYPlotConfig(BasePlotConfig):
def test_plot_basic(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b")
self.assertEqual(config.plot_type, self.basic_type)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
assert_array_equal(config_dict["x_arr"], TEST_DF["a"].values)
self.assertIn("y_arr", config_dict)
assert_array_equal(config_dict["y_arr"], TEST_DF["b"].values)
def test_data_choices(self):
""" Make sure different configurators provide the right data choices.
"""
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b")
view_items = config._data_selection_items()
x_editor = view_items[0].content[0].editor
self.assert_editor_options(x_editor)
y_editor = view_items[1].content[0].editor
self.assert_editor_options(y_editor)
def test_plot_colored_by_str_col(self):
# Color by a column filled with boolean values
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
self.assertIn(config.plot_type, self.basic_type)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertIsInstance(config_dict["x_arr"], dict)
d_values = TEST_DF["d"].unique()
self.assertEqual(set(config_dict["x_arr"].keys()), set(d_values))
for arr in config_dict["x_arr"].values():
self.assertIsInstance(arr, np.ndarray)
# For example:
assert_array_equal(config_dict["x_arr"]["c"], np.array([1, 4, 4]))
self.assertIn("y_arr", config_dict)
self.assertIsInstance(config_dict["y_arr"], dict)
self.assertEqual(set(config_dict["y_arr"].keys()), set(d_values))
for arr in config_dict["y_arr"].values():
self.assertIsInstance(arr, np.ndarray)
# For example:
assert_array_equal(config_dict["y_arr"]["c"], np.array([2, 2, 3]))
def test_plot_colored_by_bool_col(self):
# Color by a column filled with boolean values
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="h")
self.assertIn(config.plot_type, self.basic_type)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertIsInstance(config_dict["x_arr"], dict)
hue_values = set(TEST_DF["h"])
self.assertEqual(set(config_dict["x_arr"].keys()), hue_values)
assert_array_equal(config_dict["x_arr"][False], TEST_DF["a"][::2])
assert_array_equal(config_dict["x_arr"][True], TEST_DF["a"][1::2])
self.assertIn("y_arr", config_dict)
self.assertIsInstance(config_dict["y_arr"], dict)
self.assertEqual(set(config_dict["y_arr"].keys()), hue_values)
assert_array_equal(config_dict["y_arr"][False], TEST_DF["b"][::2])
assert_array_equal(config_dict["y_arr"][True], TEST_DF["b"][1::2])
def test_plot_colored_by_NON_EXISTENT_col(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="NON-EXISTENT")
with self.assertRaises(KeyError):
config.to_dict()
@skipIf(not BACKEND_AVAILABLE, "No UI backend available")
class TestScatterPlotConfig(TestCase, BaseXYPlotConfig):
def setUp(self):
self.configurator = ScatterPlotConfigurator
self.basic_type = SCATTER_PLOT_TYPE
self.numerical_cols_only = True
def test_plot_scatter_colored_by_int_col(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="c")
self.assertEqual(config.plot_type, CMAP_SCATTER_PLOT_TYPE)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertIsInstance(config_dict["x_arr"], np.ndarray)
self.assertIn("y_arr", config_dict)
self.assertIsInstance(config_dict["y_arr"], np.ndarray)
self.assertIn("z_arr", config_dict)
self.assertIsInstance(config_dict["z_arr"], np.ndarray)
def test_plot_scatter_colored_by_float_col(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="e")
self.assertEqual(config.plot_type, CMAP_SCATTER_PLOT_TYPE)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertIsInstance(config_dict["x_arr"], np.ndarray)
self.assertIn("y_arr", config_dict)
self.assertIsInstance(config_dict["y_arr"], np.ndarray)
self.assertIn("z_arr", config_dict)
self.assertIsInstance(config_dict["z_arr"], np.ndarray)
def test_style_colorize_by_float_changes_on_color_column_change(self):
""" The dtype of the column to colorize controls colorize_by_float.
"""
# Color by a string:
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
self.assertFalse(config.plot_style.colorize_by_float)
# Color by a float:
config.z_col_name = "e"
self.assertTrue(config.plot_style.colorize_by_float)
def test_scatter_data_selection_columns(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
columns = config._data_selection_columns()
expected = config._numerical_columns
self.assertCountEqual(columns, expected)
def test_scatter_color_selection_columns(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
columns = config._color_selection_columns()
expected = [""] + config._available_columns
self.assertCountEqual(columns, expected)
@skipIf(not BACKEND_AVAILABLE, "No UI backend available")
class TestLinePlotConfig(TestCase, BaseXYPlotConfig):
def setUp(self):
self.configurator = LinePlotConfigurator
self.basic_type = LINE_PLOT_TYPE
self.numerical_cols_only = True
def test_line_data_selection_columns(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
columns = config._data_selection_columns()
expected = config._numerical_columns
self.assertCountEqual(columns, expected)
def test_line_color_selection_columns(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
columns = config._color_selection_columns()
expected = [""] + config._available_columns
self.assertCountEqual(columns, expected)
@skipIf(not BACKEND_AVAILABLE, "No UI backend available")
class TestBarPlotConfig(TestCase, BaseXYPlotConfig):
def setUp(self):
self.configurator = BarPlotConfigurator
self.basic_type = BAR_PLOT_TYPE
self.numerical_cols_only = False
def test_data_choices(self):
""" Make sure different configurators provide the right data choices.
"""
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b")
view_items = config._data_selection_items()
x_editor = view_items[0].content[3].content[0].content[0].editor
self.assert_editor_options(x_editor)
def test_melt_mode_no_effect(self):
config = self.configurator(data_source=TEST_DF, melt_source_data=True)
self.assertEqual(config.plot_type, self.basic_type)
# No columns to melt, so no transformation:
self.assertIs(config.data_source, TEST_DF)
self.assertIs(config.transformed_data, TEST_DF)
def test_melt_mode_with_melted_columns(self):
config = self.configurator(data_source=TEST_DF, melt_source_data=True,
columns_to_melt=["e", "f"])
self.assertIsNot(config.transformed_data, TEST_DF)
self.assertIs(config.data_source, TEST_DF)
# Pulling the x_arr forces a reset of the x_col_name
x_values = np.array(["e"]*LEN+["f"]*LEN)
assert_array_equal(config.x_arr, x_values)
self.assertEqual(config.x_col_name, "variable")
self.assertEqual(len(config.y_arr), 2 * LEN)
self.assertEqual(config.y_col_name, "value")
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
assert_array_equal(config_dict["x_arr"], x_values)
self.assertIn("y_arr", config_dict)
self.assertEqual(len(config_dict["y_arr"]), 2 * LEN)
def test_melt_mode_with_melted_columns_and_str_color(self):
config = self.configurator(data_source=TEST_DF, melt_source_data=True,
columns_to_melt=["e", "f"], z_col_name="g")
self.assertIsNot(config.transformed_data, TEST_DF)
self.assertIs(config.data_source, TEST_DF)
hue_values = TEST_DF["g"].unique()
# Pulling the x_arr forces a reset of the x_col_name
x_values = np.array(["e"] * (LEN // 2) + ["f"] * (LEN // 2))
self.assertEqual(set(config.x_arr.keys()), set(hue_values))
for key in hue_values:
assert_array_equal(config.x_arr[key], x_values)
self.assertEqual(config.x_col_name, "variable")
for key in hue_values:
self.assertEqual(len(config.y_arr[key]), LEN)
self.assertEqual(config.y_col_name, "value")
self.assertIn("g", config.transformed_data.columns)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertEqual(set(config_dict["x_arr"].keys()), set(hue_values))
for key in hue_values:
assert_array_equal(config_dict["x_arr"][key], x_values)
self.assertIn("y_arr", config_dict)
for key in hue_values:
self.assertEqual(len(config_dict["y_arr"][key]), LEN)
def test_melt_mode_with_melted_columns_and_bool_color(self):
config = self.configurator(data_source=TEST_DF, melt_source_data=True,
columns_to_melt=["e", "f"], z_col_name="h")
self.assertIsNot(config.transformed_data, TEST_DF)
self.assertIs(config.data_source, TEST_DF)
hue_values = TEST_DF["h"].unique()
# Pulling the x_arr forces a reset of the x_col_name
x_values = np.array(["e"] * (LEN // 2) + ["f"] * (LEN // 2))
self.assertEqual(set(config.x_arr.keys()), set(hue_values))
for key in hue_values:
assert_array_equal(config.x_arr[key], x_values)
self.assertEqual(config.x_col_name, "variable")
for key in hue_values:
self.assertEqual(len(config.y_arr[key]), LEN)
self.assertEqual(config.y_col_name, "value")
self.assertIn("h", config.transformed_data.columns)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertEqual(set(config_dict["x_arr"].keys()), set(hue_values))
for key in hue_values:
assert_array_equal(config_dict["x_arr"][key], x_values)
self.assertIn("y_arr", config_dict)
for key in hue_values:
self.assertEqual(len(config_dict["y_arr"][key]), LEN)
@skipIf(not BACKEND_AVAILABLE, "No UI backend available")
class TestHistogramPlotConfig(BasePlotConfig, TestCase):
def setUp(self):
self.configurator = HistogramPlotConfigurator
self.basic_type = HIST_PLOT_TYPE
self.numerical_cols_only = True
# Tests -------------------------------------------------------------------
def test_plot_basic(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a")
self.assertEqual(config.plot_type, self.basic_type)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
assert_array_equal(config_dict["x_arr"], TEST_DF["a"].values)
def test_plot_NON_EXISTENT_col(self):
config = self.configurator(data_source=TEST_DF,
x_col_name="NON-EXISTENT")
with self.assertRaises(KeyError):
config.to_dict()
def test_data_choices(self):
""" Make sure different configurators provide the right data choices.
"""
config = self.configurator(data_source=TEST_DF, x_col_name="a")
view_items = config._data_selection_items()
x_editor = view_items[0].content[0].editor
self.assert_editor_options(x_editor)
@skipIf(not BACKEND_AVAILABLE, "No UI backend available")
class TestHeatmapPlotConfig(BasePlotConfig, TestCase):
def setUp(self):
self.configurator = HeatmapPlotConfigurator
self.basic_type = HEATMAP_PLOT_TYPE
self.numerical_cols_only = True
# Tests -------------------------------------------------------------------
def test_plot_basic(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="e")
self.assertEqual(config.plot_type, self.basic_type)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
def test_plot_colored_by_NON_EXISTENT_col(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="NON-EXISTENT")
with self.assertRaises(KeyError):
config.to_dict()
def test_data_choices(self):
""" Make sure different configurators provide the right data choices.
        Non-numerical columns should not appear among the offered choices.
"""
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="e")
view_items = config._data_selection_items()
x_editor = view_items[0].content[0].editor
self.assert_editor_options(x_editor)
y_editor = view_items[1].content[0].editor
self.assert_editor_options(y_editor)
|
[
"app_common.apptools.testing_utils.assert_obj_gui_works",
"unittest.skipIf",
"os.environ.get",
"numpy.array",
"numpy.random.randn",
"numpy.testing.assert_array_equal"
] |
[((5646, 5702), 'unittest.skipIf', 'skipIf', (['(not BACKEND_AVAILABLE)', '"""No UI backend available"""'], {}), "(not BACKEND_AVAILABLE, 'No UI backend available')\n", (5652, 5702), False, 'from unittest import skipIf, TestCase\n'), ((8462, 8518), 'unittest.skipIf', 'skipIf', (['(not BACKEND_AVAILABLE)', '"""No UI backend available"""'], {}), "(not BACKEND_AVAILABLE, 'No UI backend available')\n", (8468, 8518), False, 'from unittest import skipIf, TestCase\n'), ((9402, 9458), 'unittest.skipIf', 'skipIf', (['(not BACKEND_AVAILABLE)', '"""No UI backend available"""'], {}), "(not BACKEND_AVAILABLE, 'No UI backend available')\n", (9408, 9458), False, 'from unittest import skipIf, TestCase\n'), ((14104, 14160), 'unittest.skipIf', 'skipIf', (['(not BACKEND_AVAILABLE)', '"""No UI backend available"""'], {}), "(not BACKEND_AVAILABLE, 'No UI backend available')\n", (14110, 14160), False, 'from unittest import skipIf, TestCase\n'), ((15401, 15457), 'unittest.skipIf', 'skipIf', (['(not BACKEND_AVAILABLE)', '"""No UI backend available"""'], {}), "(not BACKEND_AVAILABLE, 'No UI backend available')\n", (15407, 15457), False, 'from unittest import skipIf, TestCase\n'), ((195, 231), 'os.environ.get', 'os.environ.get', (['"""ETS_TOOLKIT"""', '"""qt4"""'], {}), "('ETS_TOOLKIT', 'qt4')\n", (209, 231), False, 'import os\n'), ((970, 990), 'numpy.random.randn', 'np.random.randn', (['LEN'], {}), '(LEN)\n', (985, 990), True, 'import numpy as np\n'), ((1136, 1169), 'numpy.array', 'np.array', (["(['0', '1'] * (LEN // 2))"], {}), "(['0', '1'] * (LEN // 2))\n", (1144, 1169), True, 'import numpy as np\n'), ((1197, 1238), 'numpy.array', 'np.array', (['([0, 1] * (LEN // 2))'], {'dtype': 'bool'}), '([0, 1] * (LEN // 2), dtype=bool)\n', (1205, 1238), True, 'import numpy as np\n'), ((1545, 1570), 'app_common.apptools.testing_utils.assert_obj_gui_works', 'assert_obj_gui_works', (['obj'], {}), '(obj)\n', (1565, 1570), False, 'from app_common.apptools.testing_utils import assert_obj_gui_works\n'), ((2451, 2512), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["config_dict['x_arr']", "TEST_DF['a'].values"], {}), "(config_dict['x_arr'], TEST_DF['a'].values)\n", (2469, 2512), False, 'from numpy.testing import assert_array_equal\n'), ((2565, 2626), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["config_dict['y_arr']", "TEST_DF['b'].values"], {}), "(config_dict['y_arr'], TEST_DF['b'].values)\n", (2583, 2626), False, 'from numpy.testing import assert_array_equal\n'), ((4902, 4968), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["config_dict['x_arr'][False]", "TEST_DF['a'][::2]"], {}), "(config_dict['x_arr'][False], TEST_DF['a'][::2])\n", (4920, 4968), False, 'from numpy.testing import assert_array_equal\n'), ((4977, 5043), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["config_dict['x_arr'][True]", "TEST_DF['a'][1::2]"], {}), "(config_dict['x_arr'][True], TEST_DF['a'][1::2])\n", (4995, 5043), False, 'from numpy.testing import assert_array_equal\n'), ((5226, 5292), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["config_dict['y_arr'][False]", "TEST_DF['b'][::2]"], {}), "(config_dict['y_arr'][False], TEST_DF['b'][::2])\n", (5244, 5292), False, 'from numpy.testing import assert_array_equal\n'), ((5301, 5367), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["config_dict['y_arr'][True]", "TEST_DF['b'][1::2]"], {}), "(config_dict['y_arr'][True], TEST_DF['b'][1::2])\n", (5319, 5367), False, 'from numpy.testing import assert_array_equal\n'), ((10803, 10838), 
'numpy.array', 'np.array', (["(['e'] * LEN + ['f'] * LEN)"], {}), "(['e'] * LEN + ['f'] * LEN)\n", (10811, 10838), True, 'import numpy as np\n'), ((10841, 10883), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['config.x_arr', 'x_values'], {}), '(config.x_arr, x_values)\n', (10859, 10883), False, 'from numpy.testing import assert_array_equal\n'), ((11188, 11238), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["config_dict['x_arr']", 'x_values'], {}), "(config_dict['x_arr'], x_values)\n", (11206, 11238), False, 'from numpy.testing import assert_array_equal\n'), ((11803, 11852), 'numpy.array', 'np.array', (["(['e'] * (LEN // 2) + ['f'] * (LEN // 2))"], {}), "(['e'] * (LEN // 2) + ['f'] * (LEN // 2))\n", (11811, 11852), True, 'import numpy as np\n'), ((13182, 13231), 'numpy.array', 'np.array', (["(['e'] * (LEN // 2) + ['f'] * (LEN // 2))"], {}), "(['e'] * (LEN // 2) + ['f'] * (LEN // 2))\n", (13190, 13231), True, 'import numpy as np\n'), ((14760, 14821), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["config_dict['x_arr']", "TEST_DF['a'].values"], {}), "(config_dict['x_arr'], TEST_DF['a'].values)\n", (14778, 14821), False, 'from numpy.testing import assert_array_equal\n'), ((3898, 3917), 'numpy.array', 'np.array', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (3906, 3917), True, 'import numpy as np\n'), ((4274, 4293), 'numpy.array', 'np.array', (['[2, 2, 3]'], {}), '([2, 2, 3])\n', (4282, 4293), True, 'import numpy as np\n'), ((11964, 12011), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['config.x_arr[key]', 'x_values'], {}), '(config.x_arr[key], x_values)\n', (11982, 12011), False, 'from numpy.testing import assert_array_equal\n'), ((12524, 12579), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["config_dict['x_arr'][key]", 'x_values'], {}), "(config_dict['x_arr'][key], x_values)\n", (12542, 12579), False, 'from numpy.testing import assert_array_equal\n'), ((13343, 13390), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['config.x_arr[key]', 'x_values'], {}), '(config.x_arr[key], x_values)\n', (13361, 13390), False, 'from numpy.testing import assert_array_equal\n'), ((13903, 13958), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["config_dict['x_arr'][key]", 'x_values'], {}), "(config_dict['x_arr'][key], x_values)\n", (13921, 13958), False, 'from numpy.testing import assert_array_equal\n')]
|
import numpy as np
import logging
import numbers
import torch
import math
import json
import sys
from torch.optim.lr_scheduler import LambdaLR
from torchvision.transforms.functional import pad
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class ConstantLRSchedule(LambdaLR):
""" Constant learning rate schedule.
"""
def __init__(self, optimizer, last_epoch=-1):
super(ConstantLRSchedule, self).__init__(optimizer, lambda _: 1.0, last_epoch=last_epoch)
class WarmupConstantSchedule(LambdaLR):
""" Linear warmup and then constant.
Linearly increases learning rate schedule from 0 to 1 over `warmup_steps` training steps.
Keeps learning rate schedule equal to 1. after warmup_steps.
"""
def __init__(self, optimizer, warmup_steps, last_epoch=-1):
self.warmup_steps = warmup_steps
super(WarmupConstantSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup_steps:
return float(step) / float(max(1.0, self.warmup_steps))
return 1.
class WarmupLinearSchedule(LambdaLR):
""" Linear warmup and then linear decay.
Linearly increases learning rate from 0 to 1 over `warmup_steps` training steps.
Linearly decreases learning rate from 1. to 0. over remaining `t_total - warmup_steps` steps.
"""
def __init__(self, optimizer, warmup_steps, t_total, last_epoch=-1):
self.warmup_steps = warmup_steps
self.t_total = t_total
super(WarmupLinearSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup_steps:
return float(step) / float(max(1, self.warmup_steps))
return max(0.0, float(self.t_total - step) / float(max(1.0, self.t_total - self.warmup_steps)))
class WarmupCosineSchedule(LambdaLR):
""" Linear warmup and then cosine decay.
Linearly increases learning rate from 0 to 1 over `warmup_steps` training steps.
Decreases learning rate from 1. to 0. over remaining `t_total - warmup_steps` steps following a cosine curve.
        If `cycles` (default=0.5) differs from the default, the learning rate follows that many full cosine cycles after warmup.
"""
def __init__(self, optimizer, warmup_steps, t_total, cycles=.5, last_epoch=-1):
self.warmup_steps = warmup_steps
self.t_total = t_total
self.cycles = cycles
super(WarmupCosineSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup_steps:
return float(step) / float(max(1.0, self.warmup_steps))
# progress after warmup
progress = float(step - self.warmup_steps) / float(max(1, self.t_total - self.warmup_steps))
return max(0.0, 0.5 * (1. + math.cos(math.pi * float(self.cycles) * 2.0 * progress)))
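# Illustrative sketch (editorial addition, not part of the original module):
# the schedules above subclass LambdaLR, so they are stepped once per
# optimizer step. `model` and `train_loader` are placeholders supplied by the
# caller; the learning rate and step counts are arbitrary example values.
def _example_warmup_schedule_usage(model, train_loader, t_total, warmup_steps=100):
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    scheduler = WarmupLinearSchedule(optimizer, warmup_steps=warmup_steps, t_total=t_total)
    for batch in train_loader:
        loss = model(batch).mean()  # stand-in for a real loss computation
        loss.backward()
        optimizer.step()
        scheduler.step()  # advance the warmup/decay schedule every step
        optimizer.zero_grad()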
def get_padding(image):
w, h = image.size
max_wh = np.max([w, h])
h_padding = (max_wh - w) / 2
v_padding = (max_wh - h) / 2
l_pad = h_padding if h_padding % 1 == 0 else h_padding + 0.5
t_pad = v_padding if v_padding % 1 == 0 else v_padding + 0.5
r_pad = h_padding if h_padding % 1 == 0 else h_padding - 0.5
b_pad = v_padding if v_padding % 1 == 0 else v_padding - 0.5
padding = (int(l_pad), int(t_pad), int(r_pad), int(b_pad))
return padding
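# Worked example (editorial addition): for a 100x60 (w x h) image, max_wh is
# 100, h_padding = 0 and v_padding = 20, so get_padding returns (0, 20, 0, 20)
# and the image is padded top and bottom into a 100x100 square. For a 75x50
# image, v_padding = 12.5, so the top pad rounds up to 13 and the bottom pad
# rounds down to 12, giving (0, 13, 0, 12).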
class NewPad(object):
def __init__(self, fill=0, padding_mode='constant'):
assert isinstance(fill, (numbers.Number, str, tuple))
assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric']
self.fill = fill
self.padding_mode = padding_mode
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be padded.
Returns:
PIL Image: Padded image.
"""
return pad(img, get_padding(img), self.fill, self.padding_mode)
def __repr__(self):
        return self.__class__.__name__ + '(fill={0}, padding_mode={1})'. \
            format(self.fill, self.padding_mode)
def find_device():
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
return device
def read_json(data):
with open(data) as f:
return json.load(f)
def save_json(data, path):
with open(path, 'w', encoding='utf-8') as f:
json.dump(data, f)
def setup_logger():
logger = logging.getLogger('train')
logger.setLevel(logging.INFO)
if len(logger.handlers) == 0:
formatter = logging.Formatter('%(asctime)s | %(message)s')
ch = logging.StreamHandler(stream=sys.stdout)
ch.setFormatter(formatter)
logger.addHandler(ch)
return logger
def adjust_learning_rate(optimizer, epoch, lr):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def save_checkpoint(model, path):
torch.save(model.state_dict(), path)
def reverse_norm_image(image):
MEAN = torch.tensor([0.485, 0.456, 0.406])
STD = torch.tensor([0.229, 0.224, 0.225])
reverse_image = image * STD[:, None, None] + MEAN[:, None, None]
return reverse_image.permute(1, 2, 0).cpu().numpy()
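# Illustrative sketch (editorial addition): AverageMeter is typically updated
# once per batch with a scalar value and the batch size, e.g.
#   loss_meter = AverageMeter()
#   loss_meter.update(loss.item(), n=batch_size)   # names are placeholders
#   print(loss_meter.avg)  # running average over all samples seen so far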
|
[
"logging.getLogger",
"logging.StreamHandler",
"logging.Formatter",
"numpy.max",
"torch.tensor",
"torch.cuda.is_available",
"json.load",
"json.dump"
] |
[((3352, 3366), 'numpy.max', 'np.max', (['[w, h]'], {}), '([w, h])\n', (3358, 3366), True, 'import numpy as np\n'), ((4799, 4825), 'logging.getLogger', 'logging.getLogger', (['"""train"""'], {}), "('train')\n", (4816, 4825), False, 'import logging\n'), ((5465, 5500), 'torch.tensor', 'torch.tensor', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (5477, 5500), False, 'import torch\n'), ((5511, 5546), 'torch.tensor', 'torch.tensor', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (5523, 5546), False, 'import torch\n'), ((4646, 4658), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4655, 4658), False, 'import json\n'), ((4745, 4763), 'json.dump', 'json.dump', (['data', 'f'], {}), '(data, f)\n', (4754, 4763), False, 'import json\n'), ((4914, 4960), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s | %(message)s"""'], {}), "('%(asctime)s | %(message)s')\n", (4931, 4960), False, 'import logging\n'), ((4974, 5014), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (4995, 5014), False, 'import logging\n'), ((4526, 4551), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4549, 4551), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 03 11:06:37 2018
@author: vmg
"""
import sdf
import numpy as np
# Load 2006 LUT for interpolation
# 2006 Groeneveld Look-Up Table as presented in
# "2006 CHF Look-Up Table", Nuclear Engineering and Design 237, pp. 190-1922.
# This file requires the file 2006LUTdata.txt
# Pressure range [MPa] from 2006 LUT, convert to [Pa]
P = np.array((0.10,0.30,0.50,1.0,2.0,3.0,5.0,7.0,10.0,12.0,14.0,16.0,18.0,20.0,21.0))*1e6
# Mass Flux range [kg/m^2-s] from 2006 LUT.
G = np.array((0.,50.,100.,300.,500.,750.,1000.,1500.,2000.,2500.,3000.,3500.,4000.,4500.,5000.,5500.,6000.,6500.,7000.,7500.,8000.))
# Quality range from 2006 LUT
x = np.array((-0.50,-0.40,-0.30,-0.20,-0.15,-0.10,-0.05,0.00,0.05,0.10,0.15,0.20,0.25,0.30,0.35,0.40,0.45,0.50,0.60,0.70,0.80,0.90,1.00))
# Critical heat flux [kW/m^2] from 2006 LUT, convert to [W/m^2]
q_raw=np.loadtxt('../Data/2006LUTdata.txt')*1e3
# Convert the imported array into a (MxNxQ) where:
# M is number of mass flux divisions
# N is number of quality divisions
# Q is number of pressure divisions
lenG = len(G)
lenx = len(x)
lenP = len(P)
q = np.zeros((lenG,lenx,lenP))
for i in range(lenG):
    for j in range(lenx):
        for k in range(lenP):
q[i,j,k] = q_raw[i + k*lenG,j]
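# Editorial note: because row i + k*lenG of q_raw holds the quality profile for
# mass-flux index i at pressure index k, the triple loop above is equivalent to
# the vectorized form:
#   q = q_raw.reshape(lenP, lenG, lenx).transpose(1, 2, 0)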
# Create the datasets:
ds_G = sdf.Dataset('G', data=G, unit='kg/(m2.s)', is_scale=True, display_name='Mass Flux')
ds_x = sdf.Dataset('x', data=x, unit='1', is_scale=True, display_name='Quality')
ds_P = sdf.Dataset('P', data=P, unit='Pa', is_scale=True, display_name='Pressure')
ds_q = sdf.Dataset('q', data=q, unit='W/m2', scales=[ds_G,ds_x,ds_P])
# Create the root group and write the file:
g = sdf.Group('/', comment='2006 CHF LUT', datasets=[ds_G,ds_x,ds_P,ds_q])
sdf.save('../Data/2006LUT.sdf', g)
|
[
"sdf.Group",
"numpy.array",
"numpy.zeros",
"numpy.loadtxt",
"sdf.save",
"sdf.Dataset"
] |
[((517, 695), 'numpy.array', 'np.array', (['(0.0, 50.0, 100.0, 300.0, 500.0, 750.0, 1000.0, 1500.0, 2000.0, 2500.0, \n 3000.0, 3500.0, 4000.0, 4500.0, 5000.0, 5500.0, 6000.0, 6500.0, 7000.0,\n 7500.0, 8000.0)'], {}), '((0.0, 50.0, 100.0, 300.0, 500.0, 750.0, 1000.0, 1500.0, 2000.0, \n 2500.0, 3000.0, 3500.0, 4000.0, 4500.0, 5000.0, 5500.0, 6000.0, 6500.0,\n 7000.0, 7500.0, 8000.0))\n', (525, 695), True, 'import numpy as np\n'), ((681, 824), 'numpy.array', 'np.array', (['(-0.5, -0.4, -0.3, -0.2, -0.15, -0.1, -0.05, 0.0, 0.05, 0.1, 0.15, 0.2, \n 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)'], {}), '((-0.5, -0.4, -0.3, -0.2, -0.15, -0.1, -0.05, 0.0, 0.05, 0.1, 0.15,\n 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0))\n', (689, 824), True, 'import numpy as np\n'), ((1140, 1168), 'numpy.zeros', 'np.zeros', (['(lenG, lenx, lenP)'], {}), '((lenG, lenx, lenP))\n', (1148, 1168), True, 'import numpy as np\n'), ((1338, 1426), 'sdf.Dataset', 'sdf.Dataset', (['"""G"""'], {'data': 'G', 'unit': '"""kg/(m2.s)"""', 'is_scale': '(True)', 'display_name': '"""Mass Flux"""'}), "('G', data=G, unit='kg/(m2.s)', is_scale=True, display_name=\n 'Mass Flux')\n", (1349, 1426), False, 'import sdf\n'), ((1429, 1502), 'sdf.Dataset', 'sdf.Dataset', (['"""x"""'], {'data': 'x', 'unit': '"""1"""', 'is_scale': '(True)', 'display_name': '"""Quality"""'}), "('x', data=x, unit='1', is_scale=True, display_name='Quality')\n", (1440, 1502), False, 'import sdf\n'), ((1510, 1585), 'sdf.Dataset', 'sdf.Dataset', (['"""P"""'], {'data': 'P', 'unit': '"""Pa"""', 'is_scale': '(True)', 'display_name': '"""Pressure"""'}), "('P', data=P, unit='Pa', is_scale=True, display_name='Pressure')\n", (1521, 1585), False, 'import sdf\n'), ((1593, 1657), 'sdf.Dataset', 'sdf.Dataset', (['"""q"""'], {'data': 'q', 'unit': '"""W/m2"""', 'scales': '[ds_G, ds_x, ds_P]'}), "('q', data=q, unit='W/m2', scales=[ds_G, ds_x, ds_P])\n", (1604, 1657), False, 'import sdf\n'), ((1705, 1778), 'sdf.Group', 'sdf.Group', (['"""/"""'], {'comment': '"""2006 CHF LUT"""', 'datasets': '[ds_G, ds_x, ds_P, ds_q]'}), "('/', comment='2006 CHF LUT', datasets=[ds_G, ds_x, ds_P, ds_q])\n", (1714, 1778), False, 'import sdf\n'), ((1776, 1810), 'sdf.save', 'sdf.save', (['"""../Data/2006LUT.sdf"""', 'g'], {}), "('../Data/2006LUT.sdf', g)\n", (1784, 1810), False, 'import sdf\n'), ((381, 478), 'numpy.array', 'np.array', (['(0.1, 0.3, 0.5, 1.0, 2.0, 3.0, 5.0, 7.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0,\n 21.0)'], {}), '((0.1, 0.3, 0.5, 1.0, 2.0, 3.0, 5.0, 7.0, 10.0, 12.0, 14.0, 16.0, \n 18.0, 20.0, 21.0))\n', (389, 478), True, 'import numpy as np\n'), ((886, 923), 'numpy.loadtxt', 'np.loadtxt', (['"""../Data/2006LUTdata.txt"""'], {}), "('../Data/2006LUTdata.txt')\n", (896, 923), True, 'import numpy as np\n')]
|
import copy
import json
import logging
import os
import sys
import time
from collections import defaultdict
import numpy as np
import tensorflow as tf
from sklearn import decomposition
from .. import dp_logging
from . import labeler_utils
from .base_model import AutoSubRegistrationMeta, BaseModel, BaseTrainableModel
_file_dir = os.path.dirname(os.path.abspath(__file__))
logger = dp_logging.get_child_logger(__name__)
class NoV1ResourceMessageFilter(logging.Filter):
"""Removes TF2 warning for using TF1 model which has resources."""
def filter(self, record):
msg = 'is a problem, consider rebuilding the SavedModel after ' + \
'running tf.compat.v1.enable_resource_variables()'
return msg not in record.getMessage()
tf_logger = logging.getLogger('tensorflow')
tf_logger.addFilter(NoV1ResourceMessageFilter())
@tf.keras.utils.register_keras_serializable()
class FBetaScore(tf.keras.metrics.Metric):
r"""Computes F-Beta score.
Adapted and slightly modified from https://github.com/tensorflow/addons/blob/v0.12.0/tensorflow_addons/metrics/f_scores.py#L211-L283
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# https://github.com/tensorflow/addons/blob/v0.12.0/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
It is the weighted harmonic mean of precision
and recall. Output range is `[0, 1]`. Works for
both multi-class and multi-label classification.
$$
    F_{\beta} = (1 + \beta^2) \cdot \frac{\textrm{precision} \cdot \textrm{recall}}{(\beta^2 \cdot \textrm{precision}) + \textrm{recall}}
$$
Args:
num_classes: Number of unique classes in the dataset.
average: Type of averaging to be performed on data.
Acceptable values are `None`, `micro`, `macro` and
`weighted`. Default value is None.
beta: Determines the weight of precision and recall
in harmonic mean. Determines the weight given to the
precision and recall. Default value is 1.
threshold: Elements of `y_pred` greater than threshold are
converted to be 1, and the rest 0. If threshold is
None, the argmax is converted to 1, and the rest 0.
name: (Optional) String name of the metric instance.
dtype: (Optional) Data type of the metric result.
Returns:
F-Beta Score: float.
"""
# Modification: remove the run-time type checking for functions
def __init__(self, num_classes, average=None, beta=1.0, threshold=None,
name="fbeta_score", dtype=None, **kwargs):
super().__init__(name=name, dtype=dtype)
if average not in (None, "micro", "macro", "weighted"):
raise ValueError(
"Unknown average type. Acceptable values "
"are: [None, 'micro', 'macro', 'weighted']"
)
if not isinstance(beta, float):
raise TypeError("The value of beta should be a python float")
if beta <= 0.0:
raise ValueError("beta value should be greater than zero")
if threshold is not None:
if not isinstance(threshold, float):
raise TypeError("The value of threshold should be a python float")
if threshold > 1.0 or threshold <= 0.0:
raise ValueError("threshold should be between 0 and 1")
self.num_classes = num_classes
self.average = average
self.beta = beta
self.threshold = threshold
self.axis = None
self.init_shape = []
if self.average != "micro":
self.axis = 0
self.init_shape = [self.num_classes]
def _zero_wt_init(name):
return self.add_weight(
name, shape=self.init_shape, initializer="zeros", dtype=self.dtype
)
self.true_positives = _zero_wt_init("true_positives")
self.false_positives = _zero_wt_init("false_positives")
self.false_negatives = _zero_wt_init("false_negatives")
self.weights_intermediate = _zero_wt_init("weights_intermediate")
def update_state(self, y_true, y_pred, sample_weight=None):
if self.threshold is None:
threshold = tf.reduce_max(y_pred, axis=-1, keepdims=True)
# make sure [0, 0, 0] doesn't become [1, 1, 1]
# Use abs(x) > eps, instead of x != 0 to check for zero
y_pred = tf.logical_and(y_pred >= threshold, tf.abs(y_pred) > 1e-12)
else:
y_pred = y_pred > self.threshold
y_true = tf.cast(y_true, self.dtype)
y_pred = tf.cast(y_pred, self.dtype)
def _weighted_sum(val, sample_weight):
if sample_weight is not None:
val = tf.math.multiply(val, tf.expand_dims(sample_weight, 1))
return tf.reduce_sum(val, axis=self.axis)
self.true_positives.assign_add(_weighted_sum(y_pred * y_true, sample_weight))
self.false_positives.assign_add(
_weighted_sum(y_pred * (1 - y_true), sample_weight)
)
self.false_negatives.assign_add(
_weighted_sum((1 - y_pred) * y_true, sample_weight)
)
self.weights_intermediate.assign_add(_weighted_sum(y_true, sample_weight))
def result(self):
precision = tf.math.divide_no_nan(
self.true_positives, self.true_positives + self.false_positives
)
recall = tf.math.divide_no_nan(
self.true_positives, self.true_positives + self.false_negatives
)
mul_value = precision * recall
add_value = (tf.math.square(self.beta) * precision) + recall
mean = tf.math.divide_no_nan(mul_value, add_value)
f1_score = mean * (1 + tf.math.square(self.beta))
if self.average == "weighted":
weights = tf.math.divide_no_nan(
self.weights_intermediate, tf.reduce_sum(self.weights_intermediate)
)
f1_score = tf.reduce_sum(f1_score * weights)
elif self.average is not None: # [micro, macro]
f1_score = tf.reduce_mean(f1_score)
return f1_score
def get_config(self):
"""Returns the serializable config of the metric."""
config = {
"num_classes": self.num_classes,
"average": self.average,
"beta": self.beta,
"threshold": self.threshold,
}
base_config = super().get_config()
return {**base_config, **config}
def reset_states(self):
reset_value = tf.zeros(self.init_shape, dtype=self.dtype)
tf.keras.backend.batch_set_value([(v, reset_value) for v in self.variables])
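# Illustrative sketch (editorial addition, not part of the original file):
# FBetaScore follows the tf.keras.metrics.Metric protocol, so standalone use
# looks like the following (variable names and shapes are placeholders):
#   metric = FBetaScore(num_classes=3, average="micro", beta=2.0)
#   metric.update_state(y_true_one_hot, y_pred_probabilities)
#   score = metric.result().numpy()
# It can also be passed in a model's `metrics=[...]` list at compile time, as
# the data labeler below does with F1Score.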
@tf.keras.utils.register_keras_serializable()
class F1Score(FBetaScore):
r"""Computes F-1 Score.
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# https://github.com/tensorflow/addons/blob/v0.12.0/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
It is the harmonic mean of precision and recall.
Output range is `[0, 1]`. Works for both multi-class
and multi-label classification.
$$
F_1 = 2 \cdot \frac{\textrm{precision} \cdot \textrm{recall}}{\textrm{precision} + \textrm{recall}}
$$
Args:
num_classes: Number of unique classes in the dataset.
average: Type of averaging to be performed on data.
Acceptable values are `None`, `micro`, `macro`
and `weighted`. Default value is None.
threshold: Elements of `y_pred` above threshold are
considered to be 1, and the rest 0. If threshold is
None, the argmax is converted to 1, and the rest 0.
name: (Optional) String name of the metric instance.
dtype: (Optional) Data type of the metric result.
Returns:
F-1 Score: float.
"""
# Modification: remove the run-time type checking for functions
def __init__(self, num_classes, average=None, threshold=None,
name="f1_score", dtype=None):
super().__init__(num_classes, average, 1.0, threshold, name=name, dtype=dtype)
def get_config(self):
base_config = super().get_config()
del base_config["beta"]
return base_config
def build_embd_dictionary(filename):
"""
Returns a numpy embedding dictionary from embed file with GloVe-like format
:param filename: Path to the embed file for loading
:type filename: str
"""
embd_table = dict()
with open(filename, 'r') as embds:
for line in embds:
line = line.strip().split()
embd_table[line[0]] = np.asarray(line[1:])
return embd_table
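# Editorial note: build_embd_dictionary assumes a GloVe-style text file with
# one token per line followed by its vector components, e.g.
#   a 0.013 -0.204 0.117 ...
#   b -0.082 0.331 0.056 ...
# (the numbers shown are placeholders, not real embedding values).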
def create_glove_char(n_dims, source_file=None):
"""
Embeds GloVe chars embeddings from source file to n_dims principal
components in a new file
:param n_dims: Final number of principal component dims of the embeddings
:type n_dims: int
:param source_file: Location of original embeddings to factor down
:type source_file: str
"""
if source_file is None:
source_file = os.path.join(_file_dir,
"embeddings/glove.840B.300d-char.txt")
# get embedding table first and vectors as array
embd_table = build_embd_dictionary(source_file)
embd_words, embd_matrix = [
np.asarray(ls) if i > 0 else list(ls)
for i, ls in enumerate(zip(*embd_table.items()))]
# get PCA embedder
pca = decomposition.PCA(n_components=n_dims)
reduced_embds = pca.fit_transform(embd_matrix)
# write to file
dir_name = os.path.dirname(source_file)
embd_file_name = os.path.join(dir_name,
'glove-reduced-{}D.txt'.format(n_dims))
with open(embd_file_name, 'w') as file:
for word, embd in zip(embd_words, reduced_embds):
file.write(word + " " + ' '.join(str(num) for num in embd) + "\n")
class CharacterLevelCnnModel(BaseTrainableModel,
metaclass=AutoSubRegistrationMeta):
    # boolean indicating whether the label mapping must reserve index 0 (the PAD label)
requires_zero_mapping = True
def __init__(self, label_mapping=None, parameters=None):
"""
CNN Model Initializer. initialize epoch_id
:param label_mapping: maps labels to their encoded integers
:type label_mapping: dict
:param parameters: Contains all the appropriate parameters for the
model. Must contain num_labels. Other possible parameters are:
max_length, max_char_encoding_id, dim_embed, size_fc
dropout, size_conv, num_fil, optimizer, default_label
:type parameters: dict
:return: None
"""
# parameter initialization
if not parameters:
parameters = {}
parameters.setdefault('max_length', 3400)
parameters.setdefault('max_char_encoding_id', 127)
parameters.setdefault('dim_embed', 64)
parameters.setdefault('size_fc', [96, 96])
parameters.setdefault('dropout', 0.073)
parameters.setdefault('size_conv', 13)
parameters.setdefault('default_label', "UNKNOWN")
parameters.setdefault('num_fil', [48 for _ in range(4)])
parameters['pad_label'] = 'PAD'
self._epoch_id = 0
# reconstruct flags for model
self._model_num_labels = 0
self._model_default_ind = -1
BaseModel.__init__(self, label_mapping, parameters)
def __eq__(self, other):
"""
Checks if two models are equal with one another, may only check
important variables, i.e. may not check model itself.
:param self: a model
:param other: a model
:type self: BaseModel
:type other: BaseModel
:return: Whether or not self and other are equal
:rtype: bool
"""
if self._parameters != other._parameters \
or self._label_mapping != other._label_mapping:
return False
return True
def _validate_parameters(self, parameters):
"""
Validate the parameters sent in. Raise error if invalid parameters are
present.
:param parameters: parameter dict containing the following parameters:
max_length: Maximum char length in a sample
max_char_encoding_id: Maximum integer value for encoding the input
dim_embed: Number of embedded dimensions
size_fc: Size of each fully connected layers
dropout: Ratio of dropout in the model
size_conv: Convolution kernel size
default_label: Key for label_mapping that is the default label
pad_label: Key for entities_dict that is the pad label
num_fil: Number of filters in each convolution layer
:type parameters: dict
:return: None
"""
errors = []
list_of_necessary_params = ['max_length', 'max_char_encoding_id',
'dim_embed', 'size_fc', 'dropout',
'size_conv', 'default_label', 'pad_label',
'num_fil']
# Make sure the necessary parameters are present and valid.
for param in parameters:
if param in ['max_length', 'max_char_encoding_id', 'dim_embed',
'size_conv']:
if not isinstance(parameters[param], (int, float)) \
or parameters[param] < 0:
errors.append(param + " must be a valid integer or float "
"greater than 0.")
elif param == 'dropout':
if not isinstance(parameters[param], (int, float)) \
or parameters[param] < 0 or parameters[param] > 1:
errors.append(param + " must be a valid integer or float "
"from 0 to 1.")
elif param == 'size_fc' or param == 'num_fil':
if not isinstance(parameters[param], list) \
or len(parameters[param]) == 0:
errors.append(param + " must be a non-empty list of "
"integers.")
else:
for item in parameters[param]:
if not isinstance(item, int):
errors.append(param + " must be a non-empty "
"list of integers.")
break
elif param == 'default_label':
if not isinstance(parameters[param], str):
error = str(param) + " must be a string."
errors.append(error)
# Error if there are extra parameters thrown in
for param in parameters:
if param not in list_of_necessary_params:
errors.append(param + " is not an accepted parameter.")
if errors:
raise ValueError('\n'.join(errors))
def set_label_mapping(self, label_mapping):
"""
Sets the labels for the model
:param label_mapping: label mapping of the model
:type label_mapping: dict
:return: None
"""
if not isinstance(label_mapping, (list, dict)):
raise TypeError("Labels must either be a non-empty encoding dict "
"which maps labels to index encodings or a list.")
label_mapping = copy.deepcopy(label_mapping)
if 'PAD' not in label_mapping:
if isinstance(label_mapping, list): # if list missing PAD
label_mapping = ['PAD'] + label_mapping
elif 0 not in label_mapping.values(): # if dict missing PAD and 0
label_mapping.update({'PAD': 0})
if (isinstance(label_mapping, dict)
and label_mapping.get('PAD', None) != 0): # dict with bad PAD
raise ValueError("`PAD` must map to index zero.")
if self._parameters['default_label'] not in label_mapping:
raise ValueError("The `default_label` of {} must exist in the "
"label mapping.".format(
self._parameters['default_label']))
super().set_label_mapping(label_mapping)
def _need_to_reconstruct_model(self):
"""
Determines whether or not the model needs to be reconstructed.
:return: bool of whether or not the model needs to reconstruct.
"""
if not self._model:
return False
default_ind = self.label_mapping[self._parameters['default_label']]
return self.num_labels != self._model_num_labels or \
default_ind != self._model_default_ind
def save_to_disk(self, dirpath):
"""
Saves whole model to disk with weights
:param dirpath: directory path where you want to save the model to
:type dirpath: str
:return: None
"""
if not self._model:
self._construct_model()
elif self._need_to_reconstruct_model():
self._reconstruct_model()
model_param_dirpath = os.path.join(dirpath, "model_parameters.json")
with open(model_param_dirpath, 'w') as fp:
json.dump(self._parameters, fp)
labels_dirpath = os.path.join(dirpath, "label_mapping.json")
with open(labels_dirpath, 'w') as fp:
json.dump(self.label_mapping, fp)
self._model.save(os.path.join(dirpath))
@classmethod
def load_from_disk(cls, dirpath):
"""
Loads whole model from disk with weights
:param dirpath: directory path where you want to load the model from
:type dirpath: str
:return: None
"""
# load parameters
model_param_dirpath = os.path.join(dirpath, "model_parameters.json")
with open(model_param_dirpath, 'r') as fp:
parameters = json.load(fp)
# load label_mapping
labels_dirpath = os.path.join(dirpath, "label_mapping.json")
with open(labels_dirpath, 'r') as fp:
label_mapping = json.load(fp)
# use f1 score metric
custom_objects = {
"F1Score": F1Score(
num_classes=max(label_mapping.values()) + 1,
average='micro'),
"CharacterLevelCnnModel": cls,
}
with tf.keras.utils.custom_object_scope(custom_objects):
tf_model = tf.keras.models.load_model(dirpath)
loaded_model = cls(label_mapping, parameters)
loaded_model._model = tf_model
# Tensorflow v1 Model weights need to be transferred.
if not callable(tf_model):
loaded_model._construct_model()
tf1_weights = []
for var in tf_model.variables:
if 'training' not in var.name:
tf1_weights.append(var.value())
loaded_model._construct_model()
tf1_weights.append(loaded_model._model.weights[-1].value())
loaded_model._model.set_weights(tf1_weights)
# load self
loaded_model._model_num_labels = loaded_model.num_labels
loaded_model._model_default_ind = loaded_model.label_mapping[
loaded_model._parameters['default_label']
]
return loaded_model
@staticmethod
def _char_encoding_layer(input_str_tensor, max_char_encoding_id, max_len):
"""
Character encoding for the list of sentences
:param input_str_tensor: input list of sentences converted to tensor
:type input_str_tensor: tf.tensor
:param max_char_encoding_id: Maximum integer value for encoding the
input
:type max_char_encoding_id: int
:param max_len: Maximum char length in a sample
:type max_len: int
:return : tensor containing encoded list of input sentences
:rtype: tf.Tensor
"""
# convert characters to indices
input_str_flatten = tf.reshape(input_str_tensor, [-1])
sentences_encode = tf.strings.unicode_decode(input_str_flatten,
input_encoding='UTF-8')
sentences_encode = tf.add(tf.cast(1, tf.int32), sentences_encode)
sentences_encode = tf.math.minimum(sentences_encode,
max_char_encoding_id + 1)
# padding
sentences_encode_pad = sentences_encode.to_tensor(shape=[None, max_len])
return sentences_encode_pad
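    # Worked example (editorial addition) for _char_encoding_layer above: with
    # max_char_encoding_id=127 and max_len=5, the string "Ab" decodes to code
    # points [65, 98], is shifted by one to [66, 99] (index 0 is reserved for
    # padding), clipped at 128, and padded to [66, 99, 0, 0, 0].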
@staticmethod
def _argmax_threshold_layer(num_labels, threshold=0.0, default_ind=1):
"""
Adds an argmax threshold layer to the model. This layer's output will be
the argmax value if the confidence for that argmax meets the threshold
for its label, otherwise it will be the default label index.
:param num_labels: number of entities
:type num_labels: int
:param threshold: default set to 0 so all confidences pass.
:type threshold: float
:param default_ind: default index
:type default_ind: int
:return: final argmax threshold layer for the model
"""
# Initialize the thresholds vector variable and create the threshold
# matrix.
class ThreshArgMaxLayer(tf.keras.layers.Layer):
def __init__(self, threshold_, num_labels_):
super(ThreshArgMaxLayer, self).__init__()
thresh_init = tf.constant_initializer(threshold_)
self.thresh_vec = tf.Variable(
name='ThreshVec',
initial_value=thresh_init(shape=[num_labels_]),
trainable=False)
def call(self, argmax_layer, confidence_layer):
threshold_at_argmax = tf.gather(self.thresh_vec, argmax_layer)
confidence_max_layer = tf.keras.backend.max(confidence_layer,
axis=2)
# Check if the confidences meet the threshold minimum.
argmax_mask = tf.keras.backend.cast(
tf.keras.backend.greater_equal(confidence_max_layer,
threshold_at_argmax),
dtype=argmax_layer.dtype)
# Create a vector the same size as the batch_size which
# represents the background label
bg_label_tf = tf.keras.backend.constant(
default_ind, dtype=argmax_layer.dtype)
# Generate the final predicted output using the function:
final_predicted_layer = tf.add(
bg_label_tf,
tf.multiply(
tf.subtract(argmax_layer, bg_label_tf),
argmax_mask
), name='ThreshArgMax'
)
return final_predicted_layer
return ThreshArgMaxLayer(threshold, num_labels)
def _construct_model(self):
"""
Model constructor for the data labeler. This also serves as a weight
reset.
:return: None
"""
num_labels = self.num_labels
default_ind = self.label_mapping[self._parameters['default_label']]
# Reset model
tf.keras.backend.clear_session()
# generate glove embedding
create_glove_char(self._parameters['dim_embed'])
# generate model
self._model = tf.keras.models.Sequential()
# default parameters
max_length = self._parameters['max_length']
max_char_encoding_id = self._parameters['max_char_encoding_id']
# Encoding layer
def encoding_function(input_str):
char_in_vector = CharacterLevelCnnModel._char_encoding_layer(
input_str, max_char_encoding_id, max_length)
return char_in_vector
self._model.add(tf.keras.layers.Input(shape=(None,), dtype=tf.string))
self._model.add(
tf.keras.layers.Lambda(encoding_function,
output_shape=tuple([max_length])))
# Create a pre-trained weight matrix
# character encoding indices range from 0 to max_char_encoding_id,
# we add one extra index for out-of-vocabulary character
embed_file = os.path.join(
_file_dir, "embeddings/glove-reduced-{}D.txt".format(
self._parameters['dim_embed']))
embedding_matrix = np.zeros((max_char_encoding_id + 2,
self._parameters['dim_embed']))
embedding_dict = build_embd_dictionary(embed_file)
input_shape = tuple([max_length])
# Fill in the weight matrix: let pad and space be 0s
for ascii_num in range(max_char_encoding_id):
if chr(ascii_num) in embedding_dict:
embedding_matrix[ascii_num + 1] = embedding_dict[chr(ascii_num)]
self._model.add(tf.keras.layers.Embedding(
max_char_encoding_id + 2,
self._parameters['dim_embed'],
weights=[embedding_matrix],
input_length=input_shape[0],
trainable=True))
# Add the convolutional layers
for fil in self._parameters['num_fil']:
self._model.add(tf.keras.layers.Conv1D(
filters=fil, kernel_size=self._parameters['size_conv'],
activation='relu', padding='same'))
if self._parameters['dropout']:
self._model.add(
tf.keras.layers.Dropout(self._parameters['dropout']))
            # Add batch normalization
self._model.add(
tf.keras.layers.BatchNormalization(fused=False, scale=True))
# Add the fully connected layers
for size in self._parameters['size_fc']:
self._model.add(
tf.keras.layers.Dense(units=size, activation='relu'))
if self._parameters['dropout']:
self._model.add(
tf.keras.layers.Dropout(self._parameters['dropout']))
# Add the final Softmax layer
self._model.add(
tf.keras.layers.Dense(num_labels, activation='softmax'))
# Output the model into a .pb file for TensorFlow
argmax_layer = tf.keras.backend.argmax(self._model.output)
# Create confidence layers
final_predicted_layer = CharacterLevelCnnModel._argmax_threshold_layer(
num_labels, threshold=0.0, default_ind=default_ind)
argmax_outputs = self._model.outputs + \
[argmax_layer,
final_predicted_layer(argmax_layer, self._model.output)]
self._model = tf.keras.Model(self._model.inputs, argmax_outputs)
# Compile the model
softmax_output_layer_name = self._model.outputs[0].name.split('/')[0]
losses = {softmax_output_layer_name: "categorical_crossentropy"}
# use f1 score metric
f1_score_training = F1Score(num_classes=num_labels, average='micro')
metrics = {softmax_output_layer_name: ['acc', f1_score_training]}
self._model.compile(loss=losses,
optimizer="adam",
metrics=metrics)
self._epoch_id = 0
self._model_num_labels = num_labels
self._model_default_ind = default_ind
def reset_weights(self):
"""
Reset the weights of the model.
:return: None
"""
self._construct_model()
def _reconstruct_model(self):
"""
Reconstruct the appropriate layers if the number of number of labels is
altered
:return: None
"""
# Reset model
tf.keras.backend.clear_session()
num_labels = self.num_labels
default_ind = self.label_mapping[self._parameters['default_label']]
# Remove the 3 output layers (dense_2', 'tf_op_layer_ArgMax',
# 'thresh_arg_max_layer')
for _ in range(3):
self._model.layers.pop()
# Add the final Softmax layer to the previous spot
final_softmax_layer = tf.keras.layers.Dense(
num_labels, activation='softmax', name="dense_2")(
self._model.layers[-4].output)
# Output the model into a .pb file for TensorFlow
argmax_layer = tf.keras.backend.argmax(final_softmax_layer)
# Create confidence layers
final_predicted_layer = CharacterLevelCnnModel._argmax_threshold_layer(
num_labels, threshold=0.0, default_ind=default_ind)
argmax_outputs = [final_softmax_layer] + \
[argmax_layer,
final_predicted_layer(argmax_layer,
final_softmax_layer)]
self._model = tf.keras.Model(self._model.inputs, argmax_outputs)
# Compile the model
softmax_output_layer_name = self._model.outputs[0].name.split('/')[0]
losses = {softmax_output_layer_name: "categorical_crossentropy"}
# use f1 score metric
f1_score_training = F1Score(num_classes=num_labels, average='micro')
metrics = {softmax_output_layer_name: ['acc', f1_score_training]}
self._model.compile(loss=losses,
optimizer="adam",
metrics=metrics)
self._epoch_id = 0
self._model_num_labels = num_labels
self._model_default_ind = default_ind
def fit(self, train_data, val_data=None, batch_size=32, label_mapping=None,
reset_weights=False, verbose=True):
"""
Train the current model with the training data and validation data
:param train_data: Training data used to train model
:type train_data: Union[list, np.ndarray]
:param val_data: Validation data used to validate the training
:type val_data: Union[list, np.ndarray]
:param batch_size: Used to determine number of samples in each batch
:type batch_size: int
:param label_mapping: maps labels to their encoded integers
:type label_mapping: Union[dict, None]
:param reset_weights: Flag to determine whether to reset the weights or
not
:type reset_weights: bool
:param verbose: Flag to determine whether to print status or not
:type verbose: bool
:return: None
"""
if label_mapping is not None:
self.set_label_mapping(label_mapping)
if not self._model:
self._construct_model()
else:
if self._need_to_reconstruct_model():
self._reconstruct_model()
if reset_weights:
self.reset_weights()
history = defaultdict()
f1 = None
f1_report = []
self._model.reset_metrics()
softmax_output_layer_name = self._model.outputs[0].name.split('/')[0]
start_time = time.time()
batch_id = 0
for x_train, y_train in train_data:
model_results = self._model.train_on_batch(
x_train, {softmax_output_layer_name: y_train})
sys.stdout.flush()
if verbose:
sys.stdout.write(
"\rEPOCH %d, batch_id %d: loss: %f - acc: %f - "
"f1_score %f" %
(self._epoch_id, batch_id, *model_results[1:]))
batch_id += 1
for i, metric_label in enumerate(self._model.metrics_names):
history[metric_label] = model_results[i]
if val_data:
f1, f1_report = self._validate_training(val_data)
history['f1_report'] = f1_report
val_f1 = f1_report['weighted avg']['f1-score'] \
if f1_report else np.NAN
val_precision = f1_report['weighted avg']['precision'] \
if f1_report else np.NAN
val_recall = f1_report['weighted avg']['recall'] \
if f1_report else np.NAN
epoch_time = time.time() - start_time
logger.info("\rEPOCH %d (%ds), loss: %f - acc: %f - f1_score %f -- "
"val_f1: %f - val_precision: %f - val_recall %f" %
(self._epoch_id, epoch_time, *model_results[1:],
val_f1, val_precision, val_recall))
self._epoch_id += 1
return history, f1, f1_report
def _validate_training(self, val_data, batch_size_test=32,
verbose_log=True, verbose_keras=False):
"""
Validate the model on the test set and return the evaluation metrics.
:param val_data: data generator for the validation
:type val_data: iterator
:param batch_size_test: Number of samples to process in testing
:type batch_size_test: int
:param verbose_log: whether or not to print out scores for training,
etc.
:type verbose_log: bool
:param verbose_keras: whether or not to print out scores for training,
from keras.
:type verbose_keras: bool
        :return: (f1-score, f1 report).
"""
f1 = None
f1_report = None
if val_data is None:
return f1, f1_report
# Predict on the test set
batch_id = 0
y_val_pred = []
y_val_test = []
for x_val, y_val in val_data:
y_val_pred.append(self._model.predict(
x_val, batch_size=batch_size_test, verbose=verbose_keras)[1])
y_val_test.append(np.argmax(y_val, axis=-1))
batch_id += 1
sys.stdout.flush()
if verbose_log:
sys.stdout.write("\rEPOCH %g, validation_batch_id %d" %
(self._epoch_id, batch_id))
tf.keras.backend.set_floatx('float32')
# Clean the predicted entities and the actual entities
f1, f1_report = labeler_utils.evaluate_accuracy(
np.concatenate(y_val_pred, axis=0),
np.concatenate(y_val_test, axis=0),
self.num_labels,
self.reverse_label_mapping,
verbose=verbose_keras)
return f1, f1_report
def predict(self, data, batch_size=32, show_confidences=False,
verbose=True):
"""
Run model and get predictions
:param data: text input
:type data: Union[list, numpy.ndarray]
:param batch_size: number of samples in the batch of data
:type batch_size: int
:param show_confidences: whether user wants prediction confidences
        :type show_confidences: bool
:param verbose: Flag to determine whether to print status or not
:type verbose: bool
:return: char level predictions and confidences
:rtype: dict
"""
if not self._model:
raise ValueError("You are trying to predict without a model. "
"Construct/Load a model before predicting.")
elif self._need_to_reconstruct_model():
raise RuntimeError("The model label mapping definitions have been "
"altered without additional training. Please "
"train the model or reset the label mapping to "
"predict.")
# Pre-allocate space for predictions
confidences = []
sentence_lengths = np.zeros((batch_size,), dtype=int)
predictions = np.zeros((batch_size, self._parameters['max_length']))
if show_confidences:
confidences = np.zeros((batch_size,
self._parameters['max_length'],
self.num_labels))
# Run model with batching
allocation_index = 0
for batch_id, batch_data in enumerate(data):
model_output = self._model(
tf.convert_to_tensor(batch_data)
)
# Count number of samples in batch to prevent array mismatch
num_samples_in_batch = len(batch_data)
allocation_index = batch_id * batch_size
# Double array size
if len(predictions) <= allocation_index:
predictions = np.pad(predictions, ((0, len(predictions)),
(0, 0)), mode='constant')
sentence_lengths = np.pad(
sentence_lengths, pad_width=((0, len(sentence_lengths)),),
mode='constant')
if show_confidences:
confidences = np.pad(confidences,
((0, len(predictions)),
(0, 0), (0, 0)), mode='constant')
if show_confidences:
confidences[allocation_index:allocation_index + num_samples_in_batch] = model_output[0].numpy()
predictions[allocation_index:allocation_index + num_samples_in_batch] = model_output[1].numpy()
sentence_lengths[allocation_index:allocation_index + num_samples_in_batch] = list(map(lambda x: len(x[0]), batch_data))
allocation_index += num_samples_in_batch
# Convert predictions, confidences to lists from numpy
predictions_list = [i for i in range(0, allocation_index)]
confidences_list = None
if show_confidences:
confidences_list = [i for i in range(0, allocation_index)]
# Append slices of predictions to return prediction & confidence matrices
for index, sentence_length \
in enumerate(sentence_lengths[:allocation_index]):
predictions_list[index] = list(predictions[index][:sentence_length])
if show_confidences:
confidences_list[index] = list(confidences[index][:sentence_length])
if show_confidences:
return {'pred': predictions_list, 'conf': confidences_list}
return {'pred': predictions_list}
def details(self):
"""
Prints the relevant details of the model (summary, parameters, label
mapping)
"""
print("\n###### Model Details ######\n")
self._model.summary()
print("\nModel Parameters:")
for key, value in self._parameters.items():
print("{}: {}".format(key, value))
print("\nModel Label Mapping:")
for key, value in self.label_mapping.items():
print("{}: {}".format(key, value))
|
[
"logging.getLogger",
"tensorflow.reduce_sum",
"tensorflow.math.divide_no_nan",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.models.load_model",
"copy.deepcopy",
"tensorflow.reduce_mean",
"tensorflow.cast",
"tensorflow.math.minimum",
"tensorflow.keras.layers.Input",
"sklearn.decomposition.PCA",
"numpy.asarray",
"tensorflow.keras.backend.max",
"numpy.concatenate",
"tensorflow.convert_to_tensor",
"sys.stdout.flush",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Conv1D",
"tensorflow.strings.unicode_decode",
"tensorflow.zeros",
"tensorflow.keras.backend.greater_equal",
"tensorflow.keras.layers.Dropout",
"numpy.argmax",
"tensorflow.keras.backend.set_floatx",
"tensorflow.reduce_max",
"tensorflow.keras.utils.register_keras_serializable",
"os.path.dirname",
"tensorflow.keras.layers.Embedding",
"tensorflow.keras.utils.custom_object_scope",
"tensorflow.gather",
"tensorflow.reshape",
"tensorflow.keras.backend.batch_set_value",
"tensorflow.expand_dims",
"tensorflow.subtract",
"os.path.abspath",
"time.time",
"tensorflow.math.square",
"os.path.join",
"tensorflow.keras.backend.constant",
"json.load",
"numpy.zeros",
"collections.defaultdict",
"tensorflow.constant_initializer",
"tensorflow.abs",
"tensorflow.keras.Model",
"tensorflow.keras.backend.argmax",
"tensorflow.keras.backend.clear_session",
"json.dump",
"sys.stdout.write"
] |
[((775, 806), 'logging.getLogger', 'logging.getLogger', (['"""tensorflow"""'], {}), "('tensorflow')\n", (792, 806), False, 'import logging\n'), ((859, 903), 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {}), '()\n', (901, 903), True, 'import tensorflow as tf\n'), ((7294, 7338), 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {}), '()\n', (7336, 7338), True, 'import tensorflow as tf\n'), ((349, 374), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (364, 374), False, 'import os\n'), ((10680, 10718), 'sklearn.decomposition.PCA', 'decomposition.PCA', ([], {'n_components': 'n_dims'}), '(n_components=n_dims)\n', (10697, 10718), False, 'from sklearn import decomposition\n'), ((10806, 10834), 'os.path.dirname', 'os.path.dirname', (['source_file'], {}), '(source_file)\n', (10821, 10834), False, 'import os\n'), ((5184, 5211), 'tensorflow.cast', 'tf.cast', (['y_true', 'self.dtype'], {}), '(y_true, self.dtype)\n', (5191, 5211), True, 'import tensorflow as tf\n'), ((5229, 5256), 'tensorflow.cast', 'tf.cast', (['y_pred', 'self.dtype'], {}), '(y_pred, self.dtype)\n', (5236, 5256), True, 'import tensorflow as tf\n'), ((5922, 6013), 'tensorflow.math.divide_no_nan', 'tf.math.divide_no_nan', (['self.true_positives', '(self.true_positives + self.false_positives)'], {}), '(self.true_positives, self.true_positives + self.\n false_positives)\n', (5943, 6013), True, 'import tensorflow as tf\n'), ((6048, 6139), 'tensorflow.math.divide_no_nan', 'tf.math.divide_no_nan', (['self.true_positives', '(self.true_positives + self.false_negatives)'], {}), '(self.true_positives, self.true_positives + self.\n false_negatives)\n', (6069, 6139), True, 'import tensorflow as tf\n'), ((6281, 6324), 'tensorflow.math.divide_no_nan', 'tf.math.divide_no_nan', (['mul_value', 'add_value'], {}), '(mul_value, add_value)\n', (6302, 6324), True, 'import tensorflow as tf\n'), ((7162, 7205), 'tensorflow.zeros', 'tf.zeros', (['self.init_shape'], {'dtype': 'self.dtype'}), '(self.init_shape, dtype=self.dtype)\n', (7170, 7205), True, 'import tensorflow as tf\n'), ((7214, 7290), 'tensorflow.keras.backend.batch_set_value', 'tf.keras.backend.batch_set_value', (['[(v, reset_value) for v in self.variables]'], {}), '([(v, reset_value) for v in self.variables])\n', (7246, 7290), True, 'import tensorflow as tf\n'), ((10307, 10369), 'os.path.join', 'os.path.join', (['_file_dir', '"""embeddings/glove.840B.300d-char.txt"""'], {}), "(_file_dir, 'embeddings/glove.840B.300d-char.txt')\n", (10319, 10369), False, 'import os\n'), ((16744, 16772), 'copy.deepcopy', 'copy.deepcopy', (['label_mapping'], {}), '(label_mapping)\n', (16757, 16772), False, 'import copy\n'), ((18434, 18480), 'os.path.join', 'os.path.join', (['dirpath', '"""model_parameters.json"""'], {}), "(dirpath, 'model_parameters.json')\n", (18446, 18480), False, 'import os\n'), ((18601, 18644), 'os.path.join', 'os.path.join', (['dirpath', '"""label_mapping.json"""'], {}), "(dirpath, 'label_mapping.json')\n", (18613, 18644), False, 'import os\n'), ((19098, 19144), 'os.path.join', 'os.path.join', (['dirpath', '"""model_parameters.json"""'], {}), "(dirpath, 'model_parameters.json')\n", (19110, 19144), False, 'import os\n'), ((19290, 19333), 'os.path.join', 'os.path.join', (['dirpath', '"""label_mapping.json"""'], {}), "(dirpath, 'label_mapping.json')\n", (19302, 19333), False, 'import os\n'), ((21288, 21322), 'tensorflow.reshape', 'tf.reshape', 
(['input_str_tensor', '[-1]'], {}), '(input_str_tensor, [-1])\n', (21298, 21322), True, 'import tensorflow as tf\n'), ((21350, 21418), 'tensorflow.strings.unicode_decode', 'tf.strings.unicode_decode', (['input_str_flatten'], {'input_encoding': '"""UTF-8"""'}), "(input_str_flatten, input_encoding='UTF-8')\n", (21375, 21418), True, 'import tensorflow as tf\n'), ((21573, 21632), 'tensorflow.math.minimum', 'tf.math.minimum', (['sentences_encode', '(max_char_encoding_id + 1)'], {}), '(sentences_encode, max_char_encoding_id + 1)\n', (21588, 21632), True, 'import tensorflow as tf\n'), ((24599, 24631), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (24629, 24631), True, 'import tensorflow as tf\n'), ((24773, 24801), 'tensorflow.keras.models.Sequential', 'tf.keras.models.Sequential', ([], {}), '()\n', (24799, 24801), True, 'import tensorflow as tf\n'), ((25785, 25852), 'numpy.zeros', 'np.zeros', (["(max_char_encoding_id + 2, self._parameters['dim_embed'])"], {}), "((max_char_encoding_id + 2, self._parameters['dim_embed']))\n", (25793, 25852), True, 'import numpy as np\n'), ((27629, 27672), 'tensorflow.keras.backend.argmax', 'tf.keras.backend.argmax', (['self._model.output'], {}), '(self._model.output)\n', (27652, 27672), True, 'import tensorflow as tf\n'), ((28048, 28098), 'tensorflow.keras.Model', 'tf.keras.Model', (['self._model.inputs', 'argmax_outputs'], {}), '(self._model.inputs, argmax_outputs)\n', (28062, 28098), True, 'import tensorflow as tf\n'), ((29070, 29102), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (29100, 29102), True, 'import tensorflow as tf\n'), ((29715, 29759), 'tensorflow.keras.backend.argmax', 'tf.keras.backend.argmax', (['final_softmax_layer'], {}), '(final_softmax_layer)\n', (29738, 29759), True, 'import tensorflow as tf\n'), ((30186, 30236), 'tensorflow.keras.Model', 'tf.keras.Model', (['self._model.inputs', 'argmax_outputs'], {}), '(self._model.inputs, argmax_outputs)\n', (30200, 30236), True, 'import tensorflow as tf\n'), ((32129, 32142), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (32140, 32142), False, 'from collections import defaultdict\n'), ((32321, 32332), 'time.time', 'time.time', ([], {}), '()\n', (32330, 32332), False, 'import time\n'), ((35185, 35223), 'tensorflow.keras.backend.set_floatx', 'tf.keras.backend.set_floatx', (['"""float32"""'], {}), "('float32')\n", (35212, 35223), True, 'import tensorflow as tf\n'), ((36799, 36833), 'numpy.zeros', 'np.zeros', (['(batch_size,)'], {'dtype': 'int'}), '((batch_size,), dtype=int)\n', (36807, 36833), True, 'import numpy as np\n'), ((36856, 36910), 'numpy.zeros', 'np.zeros', (["(batch_size, self._parameters['max_length'])"], {}), "((batch_size, self._parameters['max_length']))\n", (36864, 36910), True, 'import numpy as np\n'), ((4853, 4898), 'tensorflow.reduce_max', 'tf.reduce_max', (['y_pred'], {'axis': '(-1)', 'keepdims': '(True)'}), '(y_pred, axis=-1, keepdims=True)\n', (4866, 4898), True, 'import tensorflow as tf\n'), ((5444, 5478), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['val'], {'axis': 'self.axis'}), '(val, axis=self.axis)\n', (5457, 5478), True, 'import tensorflow as tf\n'), ((6589, 6622), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(f1_score * weights)'], {}), '(f1_score * weights)\n', (6602, 6622), True, 'import tensorflow as tf\n'), ((9847, 9867), 'numpy.asarray', 'np.asarray', (['line[1:]'], {}), '(line[1:])\n', (9857, 9867), True, 'import numpy as np\n'), ((10550, 10564), 'numpy.asarray', 
'np.asarray', (['ls'], {}), '(ls)\n', (10560, 10564), True, 'import numpy as np\n'), ((18544, 18575), 'json.dump', 'json.dump', (['self._parameters', 'fp'], {}), '(self._parameters, fp)\n', (18553, 18575), False, 'import json\n'), ((18703, 18736), 'json.dump', 'json.dump', (['self.label_mapping', 'fp'], {}), '(self.label_mapping, fp)\n', (18712, 18736), False, 'import json\n'), ((18762, 18783), 'os.path.join', 'os.path.join', (['dirpath'], {}), '(dirpath)\n', (18774, 18783), False, 'import os\n'), ((19221, 19234), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (19230, 19234), False, 'import json\n'), ((19408, 19421), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (19417, 19421), False, 'import json\n'), ((19673, 19723), 'tensorflow.keras.utils.custom_object_scope', 'tf.keras.utils.custom_object_scope', (['custom_objects'], {}), '(custom_objects)\n', (19707, 19723), True, 'import tensorflow as tf\n'), ((19748, 19783), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['dirpath'], {}), '(dirpath)\n', (19774, 19783), True, 'import tensorflow as tf\n'), ((21506, 21526), 'tensorflow.cast', 'tf.cast', (['(1)', 'tf.int32'], {}), '(1, tf.int32)\n', (21513, 21526), True, 'import tensorflow as tf\n'), ((25218, 25271), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(None,)', 'dtype': 'tf.string'}), '(shape=(None,), dtype=tf.string)\n', (25239, 25271), True, 'import tensorflow as tf\n'), ((26262, 26426), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['(max_char_encoding_id + 2)', "self._parameters['dim_embed']"], {'weights': '[embedding_matrix]', 'input_length': 'input_shape[0]', 'trainable': '(True)'}), "(max_char_encoding_id + 2, self._parameters[\n 'dim_embed'], weights=[embedding_matrix], input_length=input_shape[0],\n trainable=True)\n", (26287, 26426), True, 'import tensorflow as tf\n'), ((27490, 27545), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_labels'], {'activation': '"""softmax"""'}), "(num_labels, activation='softmax')\n", (27511, 27545), True, 'import tensorflow as tf\n'), ((29504, 29575), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_labels'], {'activation': '"""softmax"""', 'name': '"""dense_2"""'}), "(num_labels, activation='softmax', name='dense_2')\n", (29525, 29575), True, 'import tensorflow as tf\n'), ((32529, 32547), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (32545, 32547), False, 'import sys\n'), ((34988, 35006), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (35004, 35006), False, 'import sys\n'), ((35356, 35390), 'numpy.concatenate', 'np.concatenate', (['y_val_pred'], {'axis': '(0)'}), '(y_val_pred, axis=0)\n', (35370, 35390), True, 'import numpy as np\n'), ((35404, 35438), 'numpy.concatenate', 'np.concatenate', (['y_val_test'], {'axis': '(0)'}), '(y_val_test, axis=0)\n', (35418, 35438), True, 'import numpy as np\n'), ((36966, 37037), 'numpy.zeros', 'np.zeros', (["(batch_size, self._parameters['max_length'], self.num_labels)"], {}), "((batch_size, self._parameters['max_length'], self.num_labels))\n", (36974, 37037), True, 'import numpy as np\n'), ((6218, 6243), 'tensorflow.math.square', 'tf.math.square', (['self.beta'], {}), '(self.beta)\n', (6232, 6243), True, 'import tensorflow as tf\n'), ((6356, 6381), 'tensorflow.math.square', 'tf.math.square', (['self.beta'], {}), '(self.beta)\n', (6370, 6381), True, 'import tensorflow as tf\n'), ((6511, 6551), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.weights_intermediate'], {}), 
'(self.weights_intermediate)\n', (6524, 6551), True, 'import tensorflow as tf\n'), ((6704, 6728), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['f1_score'], {}), '(f1_score)\n', (6718, 6728), True, 'import tensorflow as tf\n'), ((22764, 22799), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['threshold_'], {}), '(threshold_)\n', (22787, 22799), True, 'import tensorflow as tf\n'), ((23089, 23129), 'tensorflow.gather', 'tf.gather', (['self.thresh_vec', 'argmax_layer'], {}), '(self.thresh_vec, argmax_layer)\n', (23098, 23129), True, 'import tensorflow as tf\n'), ((23170, 23216), 'tensorflow.keras.backend.max', 'tf.keras.backend.max', (['confidence_layer'], {'axis': '(2)'}), '(confidence_layer, axis=2)\n', (23190, 23216), True, 'import tensorflow as tf\n'), ((23744, 23808), 'tensorflow.keras.backend.constant', 'tf.keras.backend.constant', (['default_ind'], {'dtype': 'argmax_layer.dtype'}), '(default_ind, dtype=argmax_layer.dtype)\n', (23769, 23808), True, 'import tensorflow as tf\n'), ((26596, 26714), 'tensorflow.keras.layers.Conv1D', 'tf.keras.layers.Conv1D', ([], {'filters': 'fil', 'kernel_size': "self._parameters['size_conv']", 'activation': '"""relu"""', 'padding': '"""same"""'}), "(filters=fil, kernel_size=self._parameters[\n 'size_conv'], activation='relu', padding='same')\n", (26618, 26714), True, 'import tensorflow as tf\n'), ((27012, 27071), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'fused': '(False)', 'scale': '(True)'}), '(fused=False, scale=True)\n', (27046, 27071), True, 'import tensorflow as tf\n'), ((27209, 27261), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'size', 'activation': '"""relu"""'}), "(units=size, activation='relu')\n", (27230, 27261), True, 'import tensorflow as tf\n'), ((32588, 32724), 'sys.stdout.write', 'sys.stdout.write', (["('\\rEPOCH %d, batch_id %d: loss: %f - acc: %f - f1_score %f' % (self.\n _epoch_id, batch_id, *model_results[1:]))"], {}), "(\n '\\rEPOCH %d, batch_id %d: loss: %f - acc: %f - f1_score %f' % (self.\n _epoch_id, batch_id, *model_results[1:]))\n", (32604, 32724), False, 'import sys\n'), ((33399, 33410), 'time.time', 'time.time', ([], {}), '()\n', (33408, 33410), False, 'import time\n'), ((34923, 34948), 'numpy.argmax', 'np.argmax', (['y_val'], {'axis': '(-1)'}), '(y_val, axis=-1)\n', (34932, 34948), True, 'import numpy as np\n'), ((35051, 35138), 'sys.stdout.write', 'sys.stdout.write', (["('\\rEPOCH %g, validation_batch_id %d' % (self._epoch_id, batch_id))"], {}), "('\\rEPOCH %g, validation_batch_id %d' % (self._epoch_id,\n batch_id))\n", (35067, 35138), False, 'import sys\n'), ((37283, 37315), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['batch_data'], {}), '(batch_data)\n', (37303, 37315), True, 'import tensorflow as tf\n'), ((5083, 5097), 'tensorflow.abs', 'tf.abs', (['y_pred'], {}), '(y_pred)\n', (5089, 5097), True, 'import tensorflow as tf\n'), ((5391, 5423), 'tensorflow.expand_dims', 'tf.expand_dims', (['sample_weight', '(1)'], {}), '(sample_weight, 1)\n', (5405, 5423), True, 'import tensorflow as tf\n'), ((23422, 23495), 'tensorflow.keras.backend.greater_equal', 'tf.keras.backend.greater_equal', (['confidence_max_layer', 'threshold_at_argmax'], {}), '(confidence_max_layer, threshold_at_argmax)\n', (23452, 23495), True, 'import tensorflow as tf\n'), ((26841, 26893), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (["self._parameters['dropout']"], {}), "(self._parameters['dropout'])\n", (26864, 26893), True, 'import 
tensorflow as tf\n'), ((27360, 27412), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (["self._parameters['dropout']"], {}), "(self._parameters['dropout'])\n", (27383, 27412), True, 'import tensorflow as tf\n'), ((24043, 24081), 'tensorflow.subtract', 'tf.subtract', (['argmax_layer', 'bg_label_tf'], {}), '(argmax_layer, bg_label_tf)\n', (24054, 24081), True, 'import tensorflow as tf\n')]
|
from PyQt5.QtWidgets import QLabel, QWidget, QGridLayout, QCheckBox, QGroupBox
from InftyDoubleSpinBox import InftyDoubleSpinBox
from PyQt5.QtCore import pyqtSignal, Qt
import helplib as hl
import numpy as np
class dataControlWidget(QGroupBox):
showErrorBars_changed = pyqtSignal(bool)
ignoreFirstPoint_changed = pyqtSignal(bool)
data_changed = pyqtSignal(bool, bool)
data_shift = pyqtSignal(np.float64)
load_fits = pyqtSignal(list)
load_view = pyqtSignal(str)
load_meta = pyqtSignal(str)
fit_on_startup = pyqtSignal()
SHOW_ERROR_BARS = "Show error bars"
SHOW_ERROR_BARS_NOT_LOADED = "Show error bars (could not be calculated)"
def __init__(self):
QWidget.__init__(self)
self.setTitle('Data Settings')
self.__lblEnergyShift = QLabel("Energy Shift:")
self.__dsbEnergyShift = InftyDoubleSpinBox()
self.__dsbEnergyShift.editingFinished.connect(self.__energyShiftChanged)
self.__dsbEnergyShift.setSingleStep(0.01)
self.__chkShowErrorBars = QCheckBox(self.SHOW_ERROR_BARS_NOT_LOADED)
self.__chkShowErrorBars.stateChanged.connect(self.__chkShowErrorBars_changed)
self.__chkIgnoreFirstPoint = QCheckBox('Ignore first data point.')
self.__chkIgnoreFirstPoint.stateChanged.connect(self.__chkIgnoreFirstPoint_changed)
self.__mainLayout = QGridLayout()
self.setLayout(self.__mainLayout)
self.__mainLayout.setAlignment(Qt.AlignTop)
self.__mainLayout.addWidget(self.__lblEnergyShift, 0, 0)
self.__mainLayout.addWidget(self.__dsbEnergyShift, 0, 1)
self.__mainLayout.addWidget(self.__chkShowErrorBars, 1, 0, 1, 2)
self.__mainLayout.addWidget(self.__chkIgnoreFirstPoint, 2, 0, 1, 2)
self.__chkIgnoreFirstPoint.setVisible(False)
self.reset(False)
def reset(self, enable):
self.__data = None
self.__all_data = None
self.__stdErrors = None
self.__chkShowErrorBars.setCheckable(True)
self.__chkShowErrorBars.setChecked(False)
self.__chkShowErrorBars.setEnabled(False)
self.__chkIgnoreFirstPoint.setCheckable(True)
self.__chkIgnoreFirstPoint.setChecked(False)
self.__chkIgnoreFirstPoint.setEnabled(False)
self.setEnergyShift(0.0)
self.__prevShift = 0.0
self.setEnabled(enable)
def __chkShowErrorBars_changed(self, state):
self.__chkShowErrorBars.setCheckState(state)
self.showErrorBars_changed.emit(self.getShowErrorBars())
def __chkIgnoreFirstPoint_changed(self, state):
self.__chkIgnoreFirstPoint.setCheckState(state)
self.ignoreFirstPoint_changed.emit(self.getIgnoreFirstPoint())
def __energyShiftChanged(self):
self.cause_shift()
def cause_shift(self):
energyShift = self.__dsbEnergyShift.value()
increment = energyShift - self.__prevShift
self.__prevShift = energyShift
self.data_shift.emit(increment)
self.data_changed.emit(self.getShowErrorBars(), self.getIgnoreFirstPoint())
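    # Example (added for clarity): if the previous shift was 0.2 and the spin box
    # now reads 0.5, cause_shift() emits data_shift(0.3) followed by data_changed
    # with the current error-bar / ignore-first-point flags.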
# def setData(self, data):
# self.__data = data
def getData(self):
first_point = 0
if self.getIgnoreFirstPoint():
first_point = 1
return self.__data[first_point:,]
def getEnergyShift(self):
return (self.__dsbEnergyShift.value())
def setEnergyShift(self, value):
#increment = self.__dsbEnergyShift.value() - value
increment = value - self.__dsbEnergyShift.value()
self.__dsbEnergyShift.setValue(value)
#self.__shiftData(increment)
#self.data_shift.emit(increment)
def __shiftData(self, increment):
try:
if self.__data is not None:
for set in self.__data:
set[0] += increment
except Exception as e:
print(e)
def getStdErrors(self):
if self.__stdErrors is not None:
first_point = 0
if self.getIgnoreFirstPoint():
first_point = 1
return self.__stdErrors[first_point:]
else:
return None
def getMax_Energy(self):
if self.getData() is not None:
return self.getData()[-1][0]
else:
return None
def getMin_Energy(self):
if self.getData() is not None:
return self.getData()[0][0]
else:
return None
def getShowErrorBars(self):
return self.__chkShowErrorBars.isChecked()
def setShowErrorBars(self, value):
self.__chkShowErrorBars.setChecked(value)
def getIgnoreFirstPoint(self):
return self.__chkIgnoreFirstPoint.isChecked()
def setIgnoreFirstPoint(self, value):
self.__chkIgnoreFirstPoint.setChecked(value)
def hasStdErrors(self):
return self.__stdErrors is not None
def loadFile(self, fileName, id_string):
self.__all_data, self.__stdErrors, (fit_strings, view_string, data_string, meta_string), id_found =\
hl.readFileForFitsDataAndStdErrorAndMetaData(fileName, id_string)
#we need a copy to not save any altered data!
self.__data = (self.__all_data[:, 0:2]).copy()
if len(self.__data) <= 1:
raise Exception("Not enough data in file!")
if self.hasStdErrors():
self.__chkShowErrorBars.setText(self.SHOW_ERROR_BARS)
else:
self.__chkShowErrorBars.setText(self.SHOW_ERROR_BARS_NOT_LOADED)
self.__chkShowErrorBars.setEnabled(self.hasStdErrors())
self.__chkShowErrorBars.setChecked(self.hasStdErrors())
self.__chkIgnoreFirstPoint.setEnabled(True)
self.data_changed.emit(self.hasStdErrors(), self.getIgnoreFirstPoint())
self.load_fits.emit(fit_strings)
self.load_view.emit(view_string)
self.load_meta.emit(meta_string)
self.load_from_data_string(data_string)
self.cause_shift()
self.fit_on_startup.emit()
return id_found
def load_from_data_string(self, data_string):
if data_string is not None:
split_string = data_string.split('\v')
for i in range(0, len(split_string)):
item = split_string[i].split('=')
if len(item) == 2:
if (item[0] == 'egs'):
self.setEnergyShift(np.float64(item[1]))
elif item[0] == 'seb':
if item[1] == '1' or item[1] == 'True':
self.setShowErrorBars(True)
elif item[1] == '0' or item[1] == 'False':
self.setShowErrorBars(False)
elif item[0] == 'ifd':
if item[1] == '1' or item[1] == 'True':
self.setIgnoreFirstPoint(True)
elif item[1] == '0' or item[1] == 'False':
self.setIgnoreFirstPoint(False)
def get_data_string(self):
return 'egs=' + str(self.getEnergyShift()) + '\vseb=' + str(self.getShowErrorBars()) +\
'\vifd=' + str(self.getIgnoreFirstPoint())
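    # Example (added for clarity): with an energy shift of 0.5, error bars shown
    # and the first point kept, this returns 'egs=0.5\vseb=True\vifd=False'
    # (fields separated by vertical tabs, as parsed by load_from_data_string above).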
def saveFile(self, fileName, id_string, fit_strings, view_string, data_string, meta_string):
hl.saveFilewithMetaData(id_string, fileName, self.__all_data, (fit_strings, view_string, data_string, meta_string))
|
[
"PyQt5.QtCore.pyqtSignal",
"InftyDoubleSpinBox.InftyDoubleSpinBox",
"helplib.saveFilewithMetaData",
"numpy.float64",
"PyQt5.QtWidgets.QWidget.__init__",
"PyQt5.QtWidgets.QGridLayout",
"PyQt5.QtWidgets.QLabel",
"helplib.readFileForFitsDataAndStdErrorAndMetaData",
"PyQt5.QtWidgets.QCheckBox"
] |
[((274, 290), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['bool'], {}), '(bool)\n', (284, 290), False, 'from PyQt5.QtCore import pyqtSignal, Qt\n'), ((322, 338), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['bool'], {}), '(bool)\n', (332, 338), False, 'from PyQt5.QtCore import pyqtSignal, Qt\n'), ((358, 380), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['bool', 'bool'], {}), '(bool, bool)\n', (368, 380), False, 'from PyQt5.QtCore import pyqtSignal, Qt\n'), ((398, 420), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['np.float64'], {}), '(np.float64)\n', (408, 420), False, 'from PyQt5.QtCore import pyqtSignal, Qt\n'), ((437, 453), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['list'], {}), '(list)\n', (447, 453), False, 'from PyQt5.QtCore import pyqtSignal, Qt\n'), ((470, 485), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['str'], {}), '(str)\n', (480, 485), False, 'from PyQt5.QtCore import pyqtSignal, Qt\n'), ((502, 517), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['str'], {}), '(str)\n', (512, 517), False, 'from PyQt5.QtCore import pyqtSignal, Qt\n'), ((539, 551), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ([], {}), '()\n', (549, 551), False, 'from PyQt5.QtCore import pyqtSignal, Qt\n'), ((711, 733), 'PyQt5.QtWidgets.QWidget.__init__', 'QWidget.__init__', (['self'], {}), '(self)\n', (727, 733), False, 'from PyQt5.QtWidgets import QLabel, QWidget, QGridLayout, QCheckBox, QGroupBox\n'), ((807, 830), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Energy Shift:"""'], {}), "('Energy Shift:')\n", (813, 830), False, 'from PyQt5.QtWidgets import QLabel, QWidget, QGridLayout, QCheckBox, QGroupBox\n'), ((863, 883), 'InftyDoubleSpinBox.InftyDoubleSpinBox', 'InftyDoubleSpinBox', ([], {}), '()\n', (881, 883), False, 'from InftyDoubleSpinBox import InftyDoubleSpinBox\n'), ((1058, 1100), 'PyQt5.QtWidgets.QCheckBox', 'QCheckBox', (['self.SHOW_ERROR_BARS_NOT_LOADED'], {}), '(self.SHOW_ERROR_BARS_NOT_LOADED)\n', (1067, 1100), False, 'from PyQt5.QtWidgets import QLabel, QWidget, QGridLayout, QCheckBox, QGroupBox\n'), ((1225, 1262), 'PyQt5.QtWidgets.QCheckBox', 'QCheckBox', (['"""Ignore first data point."""'], {}), "('Ignore first data point.')\n", (1234, 1262), False, 'from PyQt5.QtWidgets import QLabel, QWidget, QGridLayout, QCheckBox, QGroupBox\n'), ((1392, 1405), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', ([], {}), '()\n', (1403, 1405), False, 'from PyQt5.QtWidgets import QLabel, QWidget, QGridLayout, QCheckBox, QGroupBox\n'), ((5107, 5172), 'helplib.readFileForFitsDataAndStdErrorAndMetaData', 'hl.readFileForFitsDataAndStdErrorAndMetaData', (['fileName', 'id_string'], {}), '(fileName, id_string)\n', (5151, 5172), True, 'import helplib as hl\n'), ((7390, 7509), 'helplib.saveFilewithMetaData', 'hl.saveFilewithMetaData', (['id_string', 'fileName', 'self.__all_data', '(fit_strings, view_string, data_string, meta_string)'], {}), '(id_string, fileName, self.__all_data, (fit_strings,\n view_string, data_string, meta_string))\n', (7413, 7509), True, 'import helplib as hl\n'), ((6493, 6512), 'numpy.float64', 'np.float64', (['item[1]'], {}), '(item[1])\n', (6503, 6512), True, 'import numpy as np\n')]
|
# This notebook implements a proof-of-principle for
# Multi-Agent Common Knowledge Reinforcement Learning (MACKRL)
# The entire notebook can be executed online, no need to download anything
# http://pytorch.org/
from itertools import chain
import torch
import torch.nn.functional as F
from torch.multiprocessing import Pool, set_start_method, freeze_support
try:
set_start_method('spawn')
except RuntimeError:
pass
from torch.nn import init
from torch.optim import Adam, SGD
import numpy as np
import matplotlib.pyplot as plt
use_cuda = False
payoff_values = []
payoff_values.append(torch.tensor([ # payoff values
[5, 0, 0, 2, 0],
[0, 1, 2, 4, 2],
[0, 0, 0, 2, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0],
], dtype=torch.float32) * 0.2)
payoff_values.append(
torch.tensor([ # payoff values
[0, 0, 1, 0, 5],
[0, 0, 2, 0, 0],
[1, 2, 4, 2, 1],
[0, 0, 2, 0, 0],
[0, 0, 1, 0, 0],
], dtype=torch.float32) * 0.2)
n_agents = 2
n_actions = len(payoff_values[0])
n_states_dec = 5
n_states_joint = 3
n_mix_hidden = 3
p_observation = 0.5
p_ck_noise = [0.0]
# Number of gradient steps
t_max = 202
# We'll be using a high learning rate, since we have exact gradients
lr = 0.05 # DEBUG: 0.05 if exact gradients!
optim = 'adam'
# You can reduce this number if you are short on time. (Eg. n_trials = 20)
#n_trials = 100 # 30
n_trials = 20 #15 #100
std_val = 1.0
# The supported settings are MACKRL, the Joint-Action-Learner (JAL, always uses CK)
# and the Independent Actor-Critic (IAC, always uses decentralised action selection);
# only IAC and JAL are enabled in the labels below.
labels = ["IAC", "JAL"]
p_vec = [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]
final_res = []
# # Pair-Controller with 3 input state (no CK, CK & Matrix ID = 0, CK & Matrix ID = 1), n_actions^2 actions for
# # joint action + 1 action for delegation to the independent agents.
# theta_joint = init.normal_(torch.zeros(n_states_joint, n_actions ** 2 + 1, requires_grad=True), std=0.1)
# Produce marginalised policy: pi_pc[0] * pi^a * pi^b + p(u^ab)
def p_joint_all(pi_pc, pi_dec):
p_joint = pi_pc[1:].view(n_actions, n_actions).clone()
pi_a_pi_b = torch.ger(pi_dec[0], pi_dec[1])
p_joint = pi_pc[0] * pi_a_pi_b + p_joint
return p_joint
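# Sanity check (added, not in the original notebook): the marginalised policy is a
# proper distribution, since pi_pc sums to 1 over the delegation action plus all
# n_actions ** 2 joint actions, and torch.ger(pi_dec[0], pi_dec[1]) also sums to 1:
#   pi_pc = torch.ones(n_actions ** 2 + 1) / (n_actions ** 2 + 1)
#   pi_dec = [torch.ones(n_actions) / n_actions] * 2
#   assert torch.isclose(p_joint_all(pi_pc, pi_dec).sum(), torch.tensor(1.0))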
def p_joint_all_noise_alt(pi_pcs, pi_dec, p_ck_noise, ck_state):
p_none = (1-p_ck_noise) ** 2 # both unnoised
p_both = (p_ck_noise) ** 2 # both noised
p_one = (1-p_ck_noise) * p_ck_noise # exactly one noised
p_marg_ag0_ck1 = pi_pcs[1][1:].view(n_actions, n_actions).clone().sum(dim=0)
p_marg_ag0_ck2 = pi_pcs[2][1:].view(n_actions, n_actions).clone().sum(dim=0)
p_marg_ag1_ck1 = pi_pcs[1][1:].view(n_actions, n_actions).clone().sum(dim=1)
p_marg_ag1_ck2 = pi_pcs[2][1:].view(n_actions, n_actions).clone().sum(dim=1)
p_joint_ck0 = pi_pcs[0][1:].view(n_actions, n_actions).clone()
p_joint_ck1 = pi_pcs[1][1:].view(n_actions, n_actions).clone()
p_joint_ck2 = pi_pcs[2][1:].view(n_actions, n_actions).clone()
p_d_ck0 = pi_pcs[0][0]
p_d_ck1 = pi_pcs[1][0]
p_d_ck2 = pi_pcs[2][0]
def make_joint(p1, p2, mode="interval"):
"""
        Coupled multinomial sampling:
        1. Pick a single uniform random variable u in [0, 1].
        2. Sample from both p1 and p2 via contiguous, ordered interval bucketing of u;
           the result gives, per joint action, the probability that both draws agree.
"""
p1 = p1.clone().view(-1)
p2 = p2.clone().view(-1)
p_final = p1.clone().zero_()
if mode == "interval":
for i in range(p1.shape[0]):
# calculate overlap between the probability distributions
low1 = torch.sum(p1[:i])
high1 = low1 + p1[i]
low2 = torch.sum(p2[:i])
high2 = low2 + p2[i]
if low1 >= low2 and high2 > low1:
p_final[i] = torch.min(high1, high2) - low1
pass
elif low2 >= low1 and high1 > low2:
p_final[i] = torch.min(high1, high2) - low2
else:
p_final[i] = 0
return p_final.clone().view(n_actions, n_actions)
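    # Note (added): make_joint therefore returns, for every joint action, the overlap
    # of the two interval bucketings of p1 and p2, i.e. the probability that a single
    # shared uniform draw selects the same index under both distributions.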
if ck_state == 0:
p_joint = p_joint_ck0 + p_d_ck0 * torch.ger(pi_dec[0], pi_dec[1])
return p_joint # always delegate
elif ck_state == 1:
p_joint = p_none * p_joint_ck1 + \
p_both * p_joint_ck2 + \
p_one * make_joint(p_joint_ck1, p_joint_ck2) + \
p_one * make_joint(p_joint_ck2, p_joint_ck1) + \
(p_one * p_d_ck1 * p_d_ck2
+ p_one * p_d_ck2 * p_d_ck1
+ p_both * p_d_ck2
+ p_none * p_d_ck1) * torch.ger(pi_dec[0], pi_dec[1]) \
+ p_one * p_d_ck1 * (1 - p_d_ck2) * torch.ger(pi_dec[0], p_marg_ag1_ck2) \
+ p_one * (1 - p_d_ck2) * p_d_ck1 * torch.ger(p_marg_ag0_ck2, pi_dec[1]) \
+ p_one * p_d_ck2 * (1 - p_d_ck1) * torch.ger(pi_dec[0], p_marg_ag1_ck1) \
+ p_one * (1 - p_d_ck1) * p_d_ck2 * torch.ger(p_marg_ag0_ck1, pi_dec[1])
return p_joint
elif ck_state == 2:
p_joint = p_none * p_joint_ck2 + \
p_both * p_joint_ck1 + \
p_one * make_joint(p_joint_ck2, p_joint_ck1) + \
p_one * make_joint(p_joint_ck1, p_joint_ck2) + \
(p_one * p_d_ck2 * p_d_ck1
+ p_one * p_d_ck1 * p_d_ck2
+ p_both * p_d_ck1
+ p_none * p_d_ck2) * torch.ger(pi_dec[0], pi_dec[1]) \
+ p_one * p_d_ck2 * (1 - p_d_ck1) * torch.ger(pi_dec[0], p_marg_ag1_ck1) \
+ p_one * (1 - p_d_ck1) * p_d_ck2 * torch.ger(p_marg_ag0_ck1, pi_dec[1]) \
+ p_one * p_d_ck1 * (1 - p_d_ck2) * torch.ger(pi_dec[0], p_marg_ag1_ck2) \
+ p_one * (1 - p_d_ck2) * p_d_ck1 * torch.ger(p_marg_ag0_ck2, pi_dec[1])
return p_joint
pass
def get_policies(common_knowledge, observations, run, test, thetas_dec, theta_joint, p_ck_noise=0):
if test:
beta = 100
else:
beta = 1
actions = []
pi_dec = []
# common_knowledge decides whether ck_state is informative
if common_knowledge == 0:
ck_state = 0
else:
ck_state = int(observations[0] + 1)
if p_ck_noise == 0:
pol_vals = theta_joint[ck_state, :].clone()
# logits get masked out for independent learner and joint-action-learner
# independent learner has a pair controller that always delegates
if run == 'JAL':
pol_vals[0] = -10 ** 10
elif run == 'IAC':
pol_vals[1:] = -10 ** 10
        # apply the temperature beta (high at test time, making the policy near-greedy)
pi_pc = F.softmax(pol_vals * beta, -1)
        # calculate decentralised policies
for i in range(n_agents):
dec_state = int(observations[i])
pi = F.softmax(thetas_dec[i][dec_state] * beta, -1)
pi_dec.append(pi)
return pi_pc, pi_dec
else:
pol_vals = theta_joint.clone()
pi_pcs = []
for i in range(n_states_joint):
if run == 'JAL':
pol_vals[i][0] = -10 ** 10
elif run == 'IAC':
pol_vals[i][1:] = -10 ** 10
            # apply the temperature beta (high at test time, making the policy near-greedy)
pi_pcs.append(F.softmax(pol_vals[i] * beta, -1))
        # calculate decentralised policies
for i in range(n_agents):
dec_state = int(observations[i])
pi = F.softmax(thetas_dec[i][dec_state] * beta, -1)
pi_dec.append(pi)
return pi_pcs, pi_dec, ck_state
def get_state(common_knowledge, obs_0, obs_1, matrix_id):
receives_obs = [obs_0, obs_1]
if common_knowledge == 1:
observations = np.repeat(matrix_id, 2)
else:
        observations = np.ones(n_agents) * 2  # default: uninformative observation
for ag in range(n_agents):
if receives_obs[ag]:
observations[ag] += matrix_id + 1
return common_knowledge, observations, matrix_id
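# Illustrative example (added): with common_knowledge=0, obs_0=1, obs_1=0 and
# matrix_id=1, get_state returns (0, array([4., 2.]), 1): agent 0 privately observes
# matrix 1 (encoded as 2 + matrix_id + 1 = 4) while agent 1 only sees the
# uninformative decentralised state 2.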
# Calculate the expected return: sum_{\tau} P(\tau | pi) R(\tau)
def expected_return(p_common, p_observation, thetas, run, test, p_ck_noise=0):
thetas_dec = thetas["dec"]
theta_joint = thetas["joint"]
# Probability of CK
p_common_val = [1 - p_common, p_common]
# Probability of observation given no CK)
p_obs_val = [1 - p_observation, p_observation]
# Matrices are chosen 50 / 50
p_matrix = [0.5, 0.5]
# p_matrix = [1.0, 0.0] # DEBUG!
# Initialise expected return
ret_val = 0
for ck in [0, 1]:
for matrix_id in [0, 1]:
for obs_0 in [0, 1]:
for obs_1 in [0, 1]:
p_state = p_common_val[ck] * p_obs_val[obs_0] * p_obs_val[obs_1] * p_matrix[matrix_id]
common_knowledge, observations, matrix_id = get_state(ck, obs_0, obs_1, matrix_id)
# Get final probabilities for joint actions
if p_ck_noise==0:
pi_pc, pi_dec = get_policies(common_knowledge, observations, run, test, thetas_dec, theta_joint)
p_joint_val = p_joint_all(pi_pc, pi_dec)
else:
pol_vals, pi_dec, ck_state = get_policies(common_knowledge, observations, run, test, thetas_dec, theta_joint, p_ck_noise)
p_joint_val = p_joint_all_noise_alt(pol_vals, pi_dec, p_ck_noise, ck_state)
# Expected return is just the elementwise product of rewards and action probabilities
expected_ret = (p_joint_val * payoff_values[matrix_id]).sum()
# Add return from given state
ret_val = ret_val + p_state * expected_ret
return ret_val
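# Note (added for clarity): expected_return enumerates all 2 * 2 * 2 * 2 = 16
# combinations of (common knowledge, matrix id, obs_0, obs_1), weights each by its
# probability, and sums the exact expected payoff, which is why the gradients used
# below are exact rather than sampled.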
def _proc(args):
p_common, p_observation, run, p_ck_noise, t_max, n_trials = args
results = []
for nt in range(n_trials):
print("Run: {} P_CK_NOISE: {} P_common: {} #Trial: {}".format(run, p_ck_noise, p_common, nt))
results_log = np.zeros((t_max // (t_max // 100),))
results_log_test = np.zeros((t_max // (t_max // 100),))
thetas = {}
thetas["dec"] = [init.normal_(torch.zeros(n_states_dec, n_actions, requires_grad=True), std=std_val) for i in
range(n_agents)]
thetas["joint"] = init.normal_(torch.zeros(n_states_joint, n_actions ** 2 + 1, requires_grad=True),
std=std_val)
params = chain(*[_v if isinstance(_v, (list, tuple)) else [_v] for _v in thetas.values()])
params = list(params)
if use_cuda:
for param in params:
param = param.to("cuda")
if optim == 'sgd':
optimizer = SGD(params, lr=lr)
else:
optimizer = Adam(params, lr=lr)
for i in range(t_max):
if run in ['MACKRL',
'JAL',
'IAC']:
loss = - expected_return(p_common, p_observation, thetas, run, False, p_ck_noise)
r_s = -loss.data.numpy()
optimizer.zero_grad()
loss.backward()
optimizer.step()
if i % (t_max // 100) == 0:
if run in ['MACKRL',
'JAL',
'IAC']:
r_test = expected_return(p_common, p_observation, thetas, run, True, p_ck_noise)
results_log_test[i // (t_max // 100)] = r_test
results_log[i // (t_max // 100)] = r_s
results.append((results_log_test, results_log))
return results
def main():
use_mp = True
if use_mp:
pool = Pool(processes=2)
        # We'll be appending results to these lists
run_results = []
for run in labels:
noise_results = []
for pnoise in p_ck_noise:
print("Run: {} P_CK_NOISE: {}".format(run, pnoise))
results = pool.map(_proc, [ (pc, p_observation, run, pnoise, t_max, n_trials) for pc in p_vec ])
noise_results.append(results)
run_results.append(noise_results)
for p_common_id, p_common in enumerate(p_vec):
all_res = []
all_res_test = []
for run_id, run in enumerate(labels):
for pnoise_id, pnoise in enumerate(p_ck_noise):
try:
results = run_results[run_id][pnoise_id][p_common_id]
except Exception as e:
pass
all_res_test.append(np.stack([r[0] for r in results], axis=1))
all_res.append(np.stack([r[1] for r in results], axis=1))
final_res.append([all_res_test, all_res])
pool.close()
pool.join()
else:
        # We'll be appending results to these lists
run_results = []
for run in labels:
noise_results = []
for pnoise in p_ck_noise:
print("Run: {} P_CK_NOISE: {}".format(run, pnoise))
results = [_proc((pc, p_observation, run, pnoise, t_max, n_trials)) for pc in p_vec ]
noise_results.append(results)
run_results.append(noise_results)
for p_common_id, p_common in enumerate(p_vec):
all_res = []
all_res_test = []
for run_id, run in enumerate(labels):
for pnoise_id, pnoise in enumerate(p_ck_noise):
try:
results = run_results[run_id][pnoise_id][p_common_id]
except Exception as e:
pass
all_res_test.append(np.stack([r[0] for r in results], axis=1))
all_res.append(np.stack([r[1] for r in results], axis=1))
final_res.append([all_res_test, all_res])
import pickle
import uuid
import os
res_dict = {}
res_dict["final_res"] = final_res
res_dict["labels"] = labels
res_dict["p_ck_noise"] = p_ck_noise
res_dict["p_vec"] = p_vec
if not os.path.exists(os.path.join(os.path.dirname(os.path.abspath(__file__)),
"pickles")):
os.makedirs(os.path.join(os.path.dirname(os.path.abspath(__file__)),
"pickles"))
pickle.dump(res_dict, open(os.path.join(os.path.dirname(os.path.abspath(__file__)),
"pickles",
"final_res_{}.p".format(uuid.uuid4().hex[:4])), "wb"))
plt.figure(figsize=(5, 5))
color = ['b', 'r','g', 'c', 'm', 'y', 'k','b', 'r','g', 'c', 'm', 'y', 'k']
titles = ['Test', 'Train Performance']
for pl in [0,1]:
ax = plt.subplot(1, 1, 1)
for i in range(len(labels)):
for pck, pcknoise in enumerate(p_ck_noise):
mean_vals = []
min_vals = []
max_vals = []
for j, p in enumerate( p_vec ):
vals = final_res[j][pl]
this_mean = np.mean( vals[i*len(p_ck_noise) + pck], 1)[-1]
std = np.std(vals[i], 1)[-1]/0.5
low = this_mean-std / (n_trials)**0.5
high = this_mean + std / (n_trials)**0.5
mean_vals.append( this_mean )
min_vals.append( low )
max_vals.append( high )
plt.plot(p_vec,
mean_vals,
color[(i*len(p_ck_noise) + pck) % len(color)],
label = "{} p_ck_noise: {}".format(labels[i], pcknoise))
plt.fill_between(p_vec,
min_vals,
max_vals,
facecolor=color[i],
alpha=0.3)
plt.xlabel('P(common knowledge)')
plt.ylabel('Expected Return')
plt.ylim([0.0, 1.01])
plt.xlim([-0.01, 1.01])
ax.set_facecolor((1.0, 1.0, 1.0))
ax.grid(color='k', linestyle='-', linewidth=1)
ax.set_title(titles[pl])
plt.legend()
plt.xticks([0, 0.5, 1])
plt.yticks([0.5, 0.75, 1])
plt.savefig("MACKRL {}.pdf".format(titles[pl]))
plt.show(block=False)
if __name__ == "__main__":
freeze_support()
main()
|
[
"torch.ger",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.fill_between",
"torch.min",
"torch.multiprocessing.freeze_support",
"torch.sum",
"torch.nn.functional.softmax",
"numpy.repeat",
"matplotlib.pyplot.xlabel",
"torch.multiprocessing.Pool",
"numpy.stack",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.ylim",
"torch.multiprocessing.set_start_method",
"torch.optim.SGD",
"numpy.ones",
"matplotlib.pyplot.xticks",
"uuid.uuid4",
"numpy.std",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"torch.optim.Adam",
"torch.tensor",
"matplotlib.pyplot.figure",
"numpy.zeros",
"os.path.abspath",
"matplotlib.pyplot.subplot",
"torch.zeros"
] |
[((370, 395), 'torch.multiprocessing.set_start_method', 'set_start_method', (['"""spawn"""'], {}), "('spawn')\n", (386, 395), False, 'from torch.multiprocessing import Pool, set_start_method, freeze_support\n'), ((2125, 2156), 'torch.ger', 'torch.ger', (['pi_dec[0]', 'pi_dec[1]'], {}), '(pi_dec[0], pi_dec[1])\n', (2134, 2156), False, 'import torch\n'), ((14535, 14561), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (14545, 14561), True, 'import matplotlib.pyplot as plt\n'), ((16329, 16345), 'torch.multiprocessing.freeze_support', 'freeze_support', ([], {}), '()\n', (16343, 16345), False, 'from torch.multiprocessing import Pool, set_start_method, freeze_support\n'), ((599, 724), 'torch.tensor', 'torch.tensor', (['[[5, 0, 0, 2, 0], [0, 1, 2, 4, 2], [0, 0, 0, 2, 0], [0, 0, 0, 1, 0], [0, 0,\n 0, 0, 0]]'], {'dtype': 'torch.float32'}), '([[5, 0, 0, 2, 0], [0, 1, 2, 4, 2], [0, 0, 0, 2, 0], [0, 0, 0, \n 1, 0], [0, 0, 0, 0, 0]], dtype=torch.float32)\n', (611, 724), False, 'import torch\n'), ((793, 918), 'torch.tensor', 'torch.tensor', (['[[0, 0, 1, 0, 5], [0, 0, 2, 0, 0], [1, 2, 4, 2, 1], [0, 0, 2, 0, 0], [0, 0,\n 1, 0, 0]]'], {'dtype': 'torch.float32'}), '([[0, 0, 1, 0, 5], [0, 0, 2, 0, 0], [1, 2, 4, 2, 1], [0, 0, 2, \n 0, 0], [0, 0, 1, 0, 0]], dtype=torch.float32)\n', (805, 918), False, 'import torch\n'), ((6659, 6689), 'torch.nn.functional.softmax', 'F.softmax', (['(pol_vals * beta)', '(-1)'], {}), '(pol_vals * beta, -1)\n', (6668, 6689), True, 'import torch.nn.functional as F\n'), ((7710, 7733), 'numpy.repeat', 'np.repeat', (['matrix_id', '(2)'], {}), '(matrix_id, 2)\n', (7719, 7733), True, 'import numpy as np\n'), ((9970, 10006), 'numpy.zeros', 'np.zeros', (['(t_max // (t_max // 100),)'], {}), '((t_max // (t_max // 100),))\n', (9978, 10006), True, 'import numpy as np\n'), ((10034, 10070), 'numpy.zeros', 'np.zeros', (['(t_max // (t_max // 100),)'], {}), '((t_max // (t_max // 100),))\n', (10042, 10070), True, 'import numpy as np\n'), ((11627, 11644), 'torch.multiprocessing.Pool', 'Pool', ([], {'processes': '(2)'}), '(processes=2)\n', (11631, 11644), False, 'from torch.multiprocessing import Pool, set_start_method, freeze_support\n'), ((14720, 14740), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (14731, 14740), True, 'import matplotlib.pyplot as plt\n'), ((15859, 15892), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""P(common knowledge)"""'], {}), "('P(common knowledge)')\n", (15869, 15892), True, 'import matplotlib.pyplot as plt\n'), ((15901, 15930), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Expected Return"""'], {}), "('Expected Return')\n", (15911, 15930), True, 'import matplotlib.pyplot as plt\n'), ((15939, 15960), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.01]'], {}), '([0.0, 1.01])\n', (15947, 15960), True, 'import matplotlib.pyplot as plt\n'), ((15969, 15992), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-0.01, 1.01]'], {}), '([-0.01, 1.01])\n', (15977, 15992), True, 'import matplotlib.pyplot as plt\n'), ((16131, 16143), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (16141, 16143), True, 'import matplotlib.pyplot as plt\n'), ((16152, 16175), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 0.5, 1]'], {}), '([0, 0.5, 1])\n', (16162, 16175), True, 'import matplotlib.pyplot as plt\n'), ((16184, 16210), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0.5, 0.75, 1]'], {}), '([0.5, 0.75, 1])\n', (16194, 16210), True, 'import matplotlib.pyplot as plt\n'), ((16275, 16296), 
'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (16283, 16296), True, 'import matplotlib.pyplot as plt\n'), ((6829, 6875), 'torch.nn.functional.softmax', 'F.softmax', (['(thetas_dec[i][dec_state] * beta)', '(-1)'], {}), '(thetas_dec[i][dec_state] * beta, -1)\n', (6838, 6875), True, 'import torch.nn.functional as F\n'), ((7445, 7491), 'torch.nn.functional.softmax', 'F.softmax', (['(thetas_dec[i][dec_state] * beta)', '(-1)'], {}), '(thetas_dec[i][dec_state] * beta, -1)\n', (7454, 7491), True, 'import torch.nn.functional as F\n'), ((7767, 7784), 'numpy.ones', 'np.ones', (['n_agents'], {}), '(n_agents)\n', (7774, 7784), True, 'import numpy as np\n'), ((10291, 10358), 'torch.zeros', 'torch.zeros', (['n_states_joint', '(n_actions ** 2 + 1)'], {'requires_grad': '(True)'}), '(n_states_joint, n_actions ** 2 + 1, requires_grad=True)\n', (10302, 10358), False, 'import torch\n'), ((10690, 10708), 'torch.optim.SGD', 'SGD', (['params'], {'lr': 'lr'}), '(params, lr=lr)\n', (10693, 10708), False, 'from torch.optim import Adam, SGD\n'), ((10747, 10766), 'torch.optim.Adam', 'Adam', (['params'], {'lr': 'lr'}), '(params, lr=lr)\n', (10751, 10766), False, 'from torch.optim import Adam, SGD\n'), ((3537, 3554), 'torch.sum', 'torch.sum', (['p1[:i]'], {}), '(p1[:i])\n', (3546, 3554), False, 'import torch\n'), ((3615, 3632), 'torch.sum', 'torch.sum', (['p2[:i]'], {}), '(p2[:i])\n', (3624, 3632), False, 'import torch\n'), ((4110, 4141), 'torch.ger', 'torch.ger', (['pi_dec[0]', 'pi_dec[1]'], {}), '(pi_dec[0], pi_dec[1])\n', (4119, 4141), False, 'import torch\n'), ((7267, 7300), 'torch.nn.functional.softmax', 'F.softmax', (['(pol_vals[i] * beta)', '(-1)'], {}), '(pol_vals[i] * beta, -1)\n', (7276, 7300), True, 'import torch.nn.functional as F\n'), ((10130, 10186), 'torch.zeros', 'torch.zeros', (['n_states_dec', 'n_actions'], {'requires_grad': '(True)'}), '(n_states_dec, n_actions, requires_grad=True)\n', (10141, 10186), False, 'import torch\n'), ((15643, 15717), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['p_vec', 'min_vals', 'max_vals'], {'facecolor': 'color[i]', 'alpha': '(0.3)'}), '(p_vec, min_vals, max_vals, facecolor=color[i], alpha=0.3)\n', (15659, 15717), True, 'import matplotlib.pyplot as plt\n'), ((4965, 5001), 'torch.ger', 'torch.ger', (['p_marg_ag0_ck1', 'pi_dec[1]'], {}), '(p_marg_ag0_ck1, pi_dec[1])\n', (4974, 5001), False, 'import torch\n'), ((14070, 14095), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (14085, 14095), False, 'import os\n'), ((14204, 14229), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (14219, 14229), False, 'import os\n'), ((14348, 14373), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (14363, 14373), False, 'import os\n'), ((3753, 3776), 'torch.min', 'torch.min', (['high1', 'high2'], {}), '(high1, high2)\n', (3762, 3776), False, 'import torch\n'), ((4872, 4908), 'torch.ger', 'torch.ger', (['pi_dec[0]', 'p_marg_ag1_ck1'], {}), '(pi_dec[0], p_marg_ag1_ck1)\n', (4881, 4908), False, 'import torch\n'), ((5807, 5843), 'torch.ger', 'torch.ger', (['p_marg_ag0_ck2', 'pi_dec[1]'], {}), '(p_marg_ag0_ck2, pi_dec[1])\n', (5816, 5843), False, 'import torch\n'), ((12531, 12572), 'numpy.stack', 'np.stack', (['[r[0] for r in results]'], {'axis': '(1)'}), '([r[0] for r in results], axis=1)\n', (12539, 12572), True, 'import numpy as np\n'), ((12609, 12650), 'numpy.stack', 'np.stack', (['[r[1] for r in results]'], {'axis': '(1)'}), '([r[1] for r in results], axis=1)\n', 
(12617, 12650), True, 'import numpy as np\n'), ((13633, 13674), 'numpy.stack', 'np.stack', (['[r[0] for r in results]'], {'axis': '(1)'}), '([r[0] for r in results], axis=1)\n', (13641, 13674), True, 'import numpy as np\n'), ((13711, 13752), 'numpy.stack', 'np.stack', (['[r[1] for r in results]'], {'axis': '(1)'}), '([r[1] for r in results], axis=1)\n', (13719, 13752), True, 'import numpy as np\n'), ((3894, 3917), 'torch.min', 'torch.min', (['high1', 'high2'], {}), '(high1, high2)\n', (3903, 3917), False, 'import torch\n'), ((4779, 4815), 'torch.ger', 'torch.ger', (['p_marg_ag0_ck2', 'pi_dec[1]'], {}), '(p_marg_ag0_ck2, pi_dec[1])\n', (4788, 4815), False, 'import torch\n'), ((5714, 5750), 'torch.ger', 'torch.ger', (['pi_dec[0]', 'p_marg_ag1_ck2'], {}), '(pi_dec[0], p_marg_ag1_ck2)\n', (5723, 5750), False, 'import torch\n'), ((14500, 14512), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (14510, 14512), False, 'import uuid\n'), ((15122, 15140), 'numpy.std', 'np.std', (['vals[i]', '(1)'], {}), '(vals[i], 1)\n', (15128, 15140), True, 'import numpy as np\n'), ((4686, 4722), 'torch.ger', 'torch.ger', (['pi_dec[0]', 'p_marg_ag1_ck2'], {}), '(pi_dec[0], p_marg_ag1_ck2)\n', (4695, 4722), False, 'import torch\n'), ((5621, 5657), 'torch.ger', 'torch.ger', (['p_marg_ag0_ck1', 'pi_dec[1]'], {}), '(p_marg_ag0_ck1, pi_dec[1])\n', (5630, 5657), False, 'import torch\n'), ((4598, 4629), 'torch.ger', 'torch.ger', (['pi_dec[0]', 'pi_dec[1]'], {}), '(pi_dec[0], pi_dec[1])\n', (4607, 4629), False, 'import torch\n'), ((5528, 5564), 'torch.ger', 'torch.ger', (['pi_dec[0]', 'p_marg_ag1_ck1'], {}), '(pi_dec[0], p_marg_ag1_ck1)\n', (5537, 5564), False, 'import torch\n'), ((5440, 5471), 'torch.ger', 'torch.ger', (['pi_dec[0]', 'pi_dec[1]'], {}), '(pi_dec[0], pi_dec[1])\n', (5449, 5471), False, 'import torch\n')]
|
import time
import torch
import warnings
import numpy as np
from tianshou.env import BaseVectorEnv
from tianshou.data import Batch, ReplayBuffer,\
ListReplayBuffer
from tianshou.utils import MovAvg
class Collector(object):
"""docstring for Collector"""
def __init__(self, policy, env, buffer=None, stat_size=100):
super().__init__()
self.env = env
self.env_num = 1
self.collect_step = 0
self.collect_episode = 0
self.collect_time = 0
if buffer is None:
self.buffer = ReplayBuffer(100)
else:
self.buffer = buffer
self.policy = policy
self.process_fn = policy.process_fn
self._multi_env = isinstance(env, BaseVectorEnv)
self._multi_buf = False # True if buf is a list
# need multiple cache buffers only if storing in one buffer
self._cached_buf = []
if self._multi_env:
self.env_num = len(env)
if isinstance(self.buffer, list):
assert len(self.buffer) == self.env_num, \
'The number of data buffer does not match the number of ' \
'input env.'
self._multi_buf = True
elif isinstance(self.buffer, ReplayBuffer):
self._cached_buf = [
ListReplayBuffer() for _ in range(self.env_num)]
else:
raise TypeError('The buffer in data collector is invalid!')
self.reset_env()
self.reset_buffer()
# state over batch is either a list, an np.ndarray, or a torch.Tensor
self.state = None
self.step_speed = MovAvg(stat_size)
self.episode_speed = MovAvg(stat_size)
def reset_buffer(self):
if self._multi_buf:
for b in self.buffer:
b.reset()
else:
self.buffer.reset()
def get_env_num(self):
return self.env_num
def reset_env(self):
self._obs = self.env.reset()
self._act = self._rew = self._done = self._info = None
if self._multi_env:
self.reward = np.zeros(self.env_num)
self.length = np.zeros(self.env_num)
else:
self.reward, self.length = 0, 0
for b in self._cached_buf:
b.reset()
def seed(self, seed=None):
if hasattr(self.env, 'seed'):
return self.env.seed(seed)
def render(self, **kwargs):
if hasattr(self.env, 'render'):
return self.env.render(**kwargs)
def close(self):
if hasattr(self.env, 'close'):
self.env.close()
def _make_batch(self, data):
if isinstance(data, np.ndarray):
return data[None]
else:
return np.array([data])
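    # Example (added): _make_batch(3) -> array([3]); _make_batch(np.zeros(4)) has
    # shape (1, 4), i.e. a leading batch dimension is always added.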
def collect(self, n_step=0, n_episode=0, render=0):
warning_count = 0
if not self._multi_env:
n_episode = np.sum(n_episode)
start_time = time.time()
assert sum([(n_step != 0), (n_episode != 0)]) == 1, \
"One and only one collection number specification permitted!"
cur_step = 0
cur_episode = np.zeros(self.env_num) if self._multi_env else 0
reward_sum = 0
length_sum = 0
while True:
if warning_count >= 100000:
warnings.warn(
'There are already many steps in an episode. '
'You should add a time limitation to your environment!',
Warning)
if self._multi_env:
batch_data = Batch(
obs=self._obs, act=self._act, rew=self._rew,
done=self._done, obs_next=None, info=self._info)
else:
batch_data = Batch(
obs=self._make_batch(self._obs),
act=self._make_batch(self._act),
rew=self._make_batch(self._rew),
done=self._make_batch(self._done),
obs_next=None,
info=self._make_batch(self._info))
result = self.policy(batch_data, self.state)
self.state = result.state if hasattr(result, 'state') else None
if isinstance(result.act, torch.Tensor):
self._act = result.act.detach().cpu().numpy()
elif not isinstance(self._act, np.ndarray):
self._act = np.array(result.act)
else:
self._act = result.act
obs_next, self._rew, self._done, self._info = self.env.step(
self._act if self._multi_env else self._act[0])
if render > 0:
self.env.render()
time.sleep(render)
self.length += 1
self.reward += self._rew
if self._multi_env:
for i in range(self.env_num):
data = {
'obs': self._obs[i], 'act': self._act[i],
'rew': self._rew[i], 'done': self._done[i],
'obs_next': obs_next[i], 'info': self._info[i]}
if self._cached_buf:
warning_count += 1
self._cached_buf[i].add(**data)
elif self._multi_buf:
warning_count += 1
self.buffer[i].add(**data)
cur_step += 1
else:
warning_count += 1
self.buffer.add(**data)
cur_step += 1
if self._done[i]:
if n_step != 0 or np.isscalar(n_episode) or \
cur_episode[i] < n_episode[i]:
cur_episode[i] += 1
reward_sum += self.reward[i]
length_sum += self.length[i]
if self._cached_buf:
cur_step += len(self._cached_buf[i])
self.buffer.update(self._cached_buf[i])
self.reward[i], self.length[i] = 0, 0
if self._cached_buf:
self._cached_buf[i].reset()
if isinstance(self.state, list):
self.state[i] = None
elif self.state is not None:
if isinstance(self.state[i], dict):
self.state[i] = {}
else:
self.state[i] = self.state[i] * 0
if isinstance(self.state, torch.Tensor):
# remove ref count in pytorch (?)
self.state = self.state.detach()
if sum(self._done):
obs_next = self.env.reset(np.where(self._done)[0])
if n_episode != 0:
if isinstance(n_episode, list) and \
(cur_episode >= np.array(n_episode)).all() or \
np.isscalar(n_episode) and \
cur_episode.sum() >= n_episode:
break
else:
self.buffer.add(
self._obs, self._act[0], self._rew,
self._done, obs_next, self._info)
cur_step += 1
if self._done:
cur_episode += 1
reward_sum += self.reward
length_sum += self.length
self.reward, self.length = 0, 0
self.state = None
obs_next = self.env.reset()
if n_episode != 0 and cur_episode >= n_episode:
break
if n_step != 0 and cur_step >= n_step:
break
self._obs = obs_next
self._obs = obs_next
if self._multi_env:
cur_episode = sum(cur_episode)
duration = time.time() - start_time
self.step_speed.add(cur_step / duration)
self.episode_speed.add(cur_episode / duration)
self.collect_step += cur_step
self.collect_episode += cur_episode
self.collect_time += duration
if isinstance(n_episode, list):
n_episode = np.sum(n_episode)
else:
n_episode = max(cur_episode, 1)
return {
'n/ep': cur_episode,
'n/st': cur_step,
'v/st': self.step_speed.get(),
'v/ep': self.episode_speed.get(),
'rew': reward_sum / n_episode,
'len': length_sum / n_episode,
}
def sample(self, batch_size):
if self._multi_buf:
if batch_size > 0:
lens = [len(b) for b in self.buffer]
total = sum(lens)
batch_index = np.random.choice(
total, batch_size, p=np.array(lens) / total)
else:
batch_index = np.array([])
batch_data = Batch()
for i, b in enumerate(self.buffer):
cur_batch = (batch_index == i).sum()
if batch_size and cur_batch or batch_size <= 0:
batch, indice = b.sample(cur_batch)
batch = self.process_fn(batch, b, indice)
batch_data.append(batch)
else:
batch_data, indice = self.buffer.sample(batch_size)
batch_data = self.process_fn(batch_data, self.buffer, indice)
return batch_data
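# Minimal usage sketch (added; not part of the original module):
#   collector = Collector(policy, env, buffer=ReplayBuffer(20000))
#   collector.collect(n_step=1000)            # gather transitions into the buffer
#   batch = collector.sample(batch_size=64)   # processed Batch ready for training
#   collector.close()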
|
[
"tianshou.utils.MovAvg",
"numpy.isscalar",
"numpy.where",
"tianshou.data.ReplayBuffer",
"time.sleep",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"tianshou.data.Batch",
"tianshou.data.ListReplayBuffer",
"warnings.warn",
"time.time"
] |
[((1660, 1677), 'tianshou.utils.MovAvg', 'MovAvg', (['stat_size'], {}), '(stat_size)\n', (1666, 1677), False, 'from tianshou.utils import MovAvg\n'), ((1707, 1724), 'tianshou.utils.MovAvg', 'MovAvg', (['stat_size'], {}), '(stat_size)\n', (1713, 1724), False, 'from tianshou.utils import MovAvg\n'), ((2961, 2972), 'time.time', 'time.time', ([], {}), '()\n', (2970, 2972), False, 'import time\n'), ((550, 567), 'tianshou.data.ReplayBuffer', 'ReplayBuffer', (['(100)'], {}), '(100)\n', (562, 567), False, 'from tianshou.data import Batch, ReplayBuffer, ListReplayBuffer\n'), ((2124, 2146), 'numpy.zeros', 'np.zeros', (['self.env_num'], {}), '(self.env_num)\n', (2132, 2146), True, 'import numpy as np\n'), ((2173, 2195), 'numpy.zeros', 'np.zeros', (['self.env_num'], {}), '(self.env_num)\n', (2181, 2195), True, 'import numpy as np\n'), ((2766, 2782), 'numpy.array', 'np.array', (['[data]'], {}), '([data])\n', (2774, 2782), True, 'import numpy as np\n'), ((2922, 2939), 'numpy.sum', 'np.sum', (['n_episode'], {}), '(n_episode)\n', (2928, 2939), True, 'import numpy as np\n'), ((3152, 3174), 'numpy.zeros', 'np.zeros', (['self.env_num'], {}), '(self.env_num)\n', (3160, 3174), True, 'import numpy as np\n'), ((8048, 8059), 'time.time', 'time.time', ([], {}), '()\n', (8057, 8059), False, 'import time\n'), ((8361, 8378), 'numpy.sum', 'np.sum', (['n_episode'], {}), '(n_episode)\n', (8367, 8378), True, 'import numpy as np\n'), ((9082, 9089), 'tianshou.data.Batch', 'Batch', ([], {}), '()\n', (9087, 9089), False, 'from tianshou.data import Batch, ReplayBuffer, ListReplayBuffer\n'), ((3323, 3456), 'warnings.warn', 'warnings.warn', (['"""There are already many steps in an episode. You should add a time limitation to your environment!"""', 'Warning'], {}), "(\n 'There are already many steps in an episode. You should add a time limitation to your environment!'\n , Warning)\n", (3336, 3456), False, 'import warnings\n'), ((3572, 3675), 'tianshou.data.Batch', 'Batch', ([], {'obs': 'self._obs', 'act': 'self._act', 'rew': 'self._rew', 'done': 'self._done', 'obs_next': 'None', 'info': 'self._info'}), '(obs=self._obs, act=self._act, rew=self._rew, done=self._done,\n obs_next=None, info=self._info)\n', (3577, 3675), False, 'from tianshou.data import Batch, ReplayBuffer, ListReplayBuffer\n'), ((4695, 4713), 'time.sleep', 'time.sleep', (['render'], {}), '(render)\n', (4705, 4713), False, 'import time\n'), ((9044, 9056), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (9052, 9056), True, 'import numpy as np\n'), ((4403, 4423), 'numpy.array', 'np.array', (['result.act'], {}), '(result.act)\n', (4411, 4423), True, 'import numpy as np\n'), ((1334, 1352), 'tianshou.data.ListReplayBuffer', 'ListReplayBuffer', ([], {}), '()\n', (1350, 1352), False, 'from tianshou.data import Batch, ReplayBuffer, ListReplayBuffer\n'), ((5642, 5664), 'numpy.isscalar', 'np.isscalar', (['n_episode'], {}), '(n_episode)\n', (5653, 5664), True, 'import numpy as np\n'), ((6904, 6924), 'numpy.where', 'np.where', (['self._done'], {}), '(self._done)\n', (6912, 6924), True, 'import numpy as np\n'), ((7125, 7147), 'numpy.isscalar', 'np.isscalar', (['n_episode'], {}), '(n_episode)\n', (7136, 7147), True, 'import numpy as np\n'), ((8972, 8986), 'numpy.array', 'np.array', (['lens'], {}), '(lens)\n', (8980, 8986), True, 'import numpy as np\n'), ((7065, 7084), 'numpy.array', 'np.array', (['n_episode'], {}), '(n_episode)\n', (7073, 7084), True, 'import numpy as np\n')]
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: CC-BY-4.0
import os
import cv2
from collections import namedtuple
import imageio
from PIL import Image
from random import randrange
import numpy as np
from sklearn.decomposition import PCA
from scipy.spatial.distance import pdist, squareform
import torch
import matplotlib
matplotlib.use('Agg') # Required for gif animations
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.image as image
import matplotlib.patches as patches
from multimodal_affinities.visualization.vis_handler import VisHandler
from multimodal_affinities.visualization.image_utils import resize_image
from multimodal_affinities.visualization.colors_util import rgb_hex_to_tuple
class PlotsProducer:
def __init__(self, document, output_path):
# Load background image
self.image_path = document.image_path
self.img = plt.imread(self.image_path)
self.img_opencv = cv2.imread(self.image_path)
dpi = 120
mpl.rcParams['figure.dpi'] = dpi
height = self.img.shape[0]
width = self.img.shape[1]
self.figsize = width / float(dpi), height / float(dpi) # Fig size in inches
self.document = document
self.output_path = output_path
if not os.path.exists(output_path):
os.makedirs(output_path)
def plot_word_boxes_on_image(self):
set_of_words = [[word] for word in self.document.get_words()] # list of singleton word lists
fig, ax = plt.subplots(1, figsize=self.figsize)
monochrome_colors_list = ['#5a5d8f' for _ in self.document.get_words()]
self._draw_entity_bounding_boxes(fig=fig, ax=ax, bg_img=self.img,
title='',
entity_sets=set_of_words,
colors_list=monochrome_colors_list)
fig.savefig(os.path.join(self.output_path, self.document.basename + '_word_boxes.png'))
plt.close(fig)
def save_phrase_detection_results(self):
set_of_phrases = [[phrase] for phrase in self.document.get_phrases()] # list of singleton phrase lists
fig, ax = plt.subplots(1, figsize=self.figsize)
self._draw_entity_bounding_boxes(fig=fig, ax=ax, bg_img=self.img,
title='Phrase Detection', entity_sets=set_of_phrases)
fig.savefig(os.path.join(self.output_path, self.document.basename + '_phrase_detection.png'))
plt.close(fig)
def save_clustering_results(self, with_title=True, colors_list=None):
set_of_clusters = [cluster.words for cluster in self.document.get_clusters()] # list of list of words (clusters)
self._save_set_of_clusters(set_of_clusters, with_title, colors_list)
def save_clustering_labels(self, clustering_labels, colors_list=None):
cluster_ids = np.unique(np.array(clustering_labels))
cluster_id_to_cluster_idx = {cluster_id: idx for idx, cluster_id in enumerate(cluster_ids)}
# Converts from list of labels to list of list of words (clusters)
set_of_clusters = [[] for _ in range(len(cluster_ids))]
for word_idx, word in enumerate(self.document.get_words()):
cluster_id = clustering_labels[word_idx]
if cluster_id == -1: # Ignore non-clustered words
continue
cluster_idx = cluster_id_to_cluster_idx[cluster_id]
set_of_clusters[cluster_idx].append(word)
        self._save_set_of_clusters(set_of_clusters, colors_list=colors_list)
def _save_set_of_clusters(self, set_of_clusters, with_title=True, colors_list=None):
"""
:param document:
:param set_of_clusters: list of list of words (clusters)
:return:
"""
output_img = self._draw_entity_bounding_boxes_opencv(bg_img=self.img_opencv,
entity_sets=set_of_clusters,
colors_list=colors_list)
cv2.imwrite(os.path.join(self.output_path, self.document.basename + '_clustering.png'), output_img)
@staticmethod
def _draw_entity_bounding_boxes_opencv(bg_img, entity_sets, colors_list=None):
img_height = bg_img.shape[0]
img_width = bg_img.shape[1]
if colors_list is None:
colors_list = VisHandler.generate_colors_list(amount=len(entity_sets))
face_colors = colors_list
edge_colors = VisHandler.generate_darker_palette(colors_list)
output_img = bg_img.copy()
alpha = 0.8
for set_idx, entities_set in enumerate(entity_sets):
face_color = face_colors[set_idx]
edge_color = edge_colors[set_idx]
for entity in entities_set:
x = entity.geometry.left * img_width
y = entity.geometry.top * img_height
width = entity.geometry.width * img_width
height = entity.geometry.height * img_height
                # draw a filled rectangle for this entity onto the overlay image
rgb_color = rgb_hex_to_tuple(face_color)
cv2.rectangle(output_img, (int(x), int(y)), (int(x + width), int(y + height)),
(rgb_color[2], rgb_color[1], rgb_color[0]), cv2.FILLED)
output_img = cv2.addWeighted(output_img, alpha, bg_img, 1 - alpha, 0)
return output_img
@staticmethod
def _draw_entity_bounding_boxes(fig, ax, bg_img, title, entity_sets, colors_list=None):
ax.set_title(title)
plt.tick_params(axis='both', which='both',
bottom='off', top='off', labelbottom='off', right='off', left='off',
labelleft='off')
plt.imshow(bg_img)
img_height = bg_img.shape[0]
img_width = bg_img.shape[1]
if colors_list is None:
colors_list = VisHandler.generate_colors_list(amount=len(entity_sets))
face_colors = colors_list
edge_colors = VisHandler.generate_darker_palette(colors_list)
for set_idx, entities_set in enumerate(entity_sets):
face_color = face_colors[set_idx]
edge_color = edge_colors[set_idx]
for entity in entities_set:
x = entity.geometry.left * img_width
y = entity.geometry.top * img_height
width = entity.geometry.width * img_width
height = entity.geometry.height * img_height
rect = patches.Rectangle((x, y), width, height,
linewidth=2,
edgecolor=edge_color,
facecolor=face_color,
alpha=0.4)
ax.add_patch(rect)
@staticmethod
def plot_pca_embedding_space_for_clusters(document, output_path,
embedding_property='embedding',
title=''):
"""
Plot 2d PCA visualization of the embedding space according to cluster colors.
:param document: Document with clustering results
:param embedding_property: Embedding property of words - normally 'embedding' or 'unprojected_embedding'
:return:
"""
if not os.path.exists(output_path):
os.makedirs(output_path)
words = document.get_words()
clusters = document.get_clusters()
if len(words) == 0 or getattr(words[0], embedding_property) is None:
return
if embedding_property == 'unprojected_embedding':
embeddings = []
for word in words:
unprojected_embedding = torch.cat(word.unprojected_embedding['embeddings'], dim=1)
unprojected_embedding = unprojected_embedding.detach().cpu().numpy()
embeddings.append(unprojected_embedding)
else:
embeddings = [getattr(word, embedding_property).detach().cpu().numpy() for word in words]
colors_palette = VisHandler.generate_colors_list(amount=len(clusters))
word_to_color = {word: colors_palette[cluster_idx]
for cluster_idx, cluster in enumerate(clusters)
for word in cluster.words}
colors = [word_to_color[word] for word in words]
embeddings_array = np.array(embeddings).squeeze()
num_pca_comp = 2
embeddings_2d = PCA(n_components=num_pca_comp).fit_transform(embeddings_array)
x_list = [embeddings_2d[i, 0] for i in range(embeddings_2d.shape[0])]
y_list = [embeddings_2d[i, 1] for i in range(embeddings_2d.shape[0])]
fig, ax = plt.subplots(1)
plot_title = embedding_property
if plot_title != '':
plot_title += ': ' + title
plt.title(plot_title)
plt.scatter(x_list, y_list, c=colors, s=1, alpha=0.8)
fig.tight_layout()
fig.savefig(os.path.join(output_path, document.basename + '_' + embedding_property + '_pca.png'))
plt.close(fig)
@staticmethod
def _find_k_furthest_words_per_cluster(document, embeddings_2d, k=3):
""" Greedy approximation algorithm for finding k furthest neighbour words per cluster.
k is expected to be relatively small (< 100)
"""
words = document.get_words()
word_to_embedding_2d_idx = {word: idx for idx, word in enumerate(words)}
clusters = document.get_clusters()
solution_per_cluster = {}
ClusterSolution = namedtuple('ClusterSolution', ['word_indices', 'words'])
for cluster in clusters:
# Generate cluster pairwise distances matrix
all_cluster_embeddings_indices = [word_to_embedding_2d_idx[word] for word in cluster.words]
all_cluster_embeddings = np.take(embeddings_2d, all_cluster_embeddings_indices, axis=0)
pairwise_distances = pdist(all_cluster_embeddings, metric='euclidean')
distances_matrix = squareform(pairwise_distances)
# Total distance from selected set so far
distances_accumulator = np.zeros(len(cluster.words))
# Sample first point
random_index = randrange(len(cluster.words))
# Indices of selected points
selected_points = [random_index]
# How many points we need to add
points_to_calc_count = min(k - 1, len(words) - 1)
for _ in range(points_to_calc_count):
last_point_selected = selected_points[-1]
# Update accumulator with distance collected from last point
distances_accumulator += distances_matrix[last_point_selected]
                # Eliminate the last selected point from the distance matrix so it stops contributing
                distances_matrix[:, last_point_selected] = 0
                distances_matrix[last_point_selected, :] = 0
                furthest_point_from_set = np.argmax(distances_accumulator, axis=0)
                selected_points.append(furthest_point_from_set)
selected_words = [cluster.words[point] for point in selected_points]
selected_word_indices = [word_to_embedding_2d_idx[word] for word in selected_words]
solution_per_cluster[cluster] = ClusterSolution(word_indices=selected_word_indices, words=selected_words)
return solution_per_cluster
@staticmethod
def _extract_crops_per_cluster_solution(document, solution_per_cluster):
"""
Extracts crops for each selected word in k-furthest neighbours solution
:param document:
:param solution_per_cluster: Solution of k-furthest neighbours
:return:
"""
word_indices_to_crops = {}
for cluster, cluster_solution in solution_per_cluster.items():
for word_index, word in zip(cluster_solution.word_indices, cluster_solution.words):
bbox = word.get_bbox() # left, top, width, height
y_min = int(round(bbox[1] * document.height))
y_max = int(round((bbox[1] + bbox[3]) * document.height))
x_min = int(round(bbox[0] * document.width))
x_max = int(round((bbox[0] + bbox[2]) * document.width))
image_of_crop = document.image[max(0, y_min):min(y_max, document.height),
max(0, x_min):min(x_max, document.width), :]
pil_image = Image.fromarray(image_of_crop[...,::-1]) # BGR to RGB
pil_image = pil_image.convert('RGB')
word_indices_to_crops[word_index] = pil_image
return word_indices_to_crops
@staticmethod
def _space_out_crops(indices_to_crops, words, x_list, y_list, dist_from_pt=0.01, height=0.02):
"""
Calculates the positions and dimensions of crop images on the embedding space plot.
Makes sure crops don't overlay each other.
This method assumes a small number of crops (< 1000) and performs a naive linear comparison for each crop.
:param indices_to_crops: dict of word index (by order in doc) to PIL crop
:param words: List of words
:param x_list: List of corresponding pt x positions
:param y_list: List of corresponding pt y positions
:param dist_from_pt: How far in (x-y) coords the crop should be placed from the plot
:param height: Height of the crop, in figure axes dimensions (note: for normalized pca space: -1 to 1)
        :return: indices_to_extents: dict of word index to extents describing the position and dimensions of each crop.
                 Crops are shifted so they don't cover each other.
"""
indices_to_extents = {}
MatplotExtent = namedtuple('matplot_extent', ['left', 'right', 'bottom', 'top'])
is_extent_x_intersect = lambda e1, e2: not (e1.right < e2.left or e1.left > e2.right)
is_extent_y_intersect = lambda e1, e2: not (e1.top > e2.bottom or e1.bottom < e2.top)
is_extent_intersect = lambda e1, e2: is_extent_x_intersect(e1, e2) and is_extent_y_intersect(e1, e2)
min_x, max_x = min(x_list), max(x_list)
min_y, max_y = min(y_list), max(y_list)
height = (max_y - min_y) * height
dist_from_pt = min(max_y - min_y, max_x - min_x) * dist_from_pt
for point_index, crop in indices_to_crops.items():
word_aspect_ratio = words[point_index].geometry.width / words[point_index].geometry.height
axis_ratio = (max_x-min_x) / (max_y-min_y) / 2
width = height * word_aspect_ratio * axis_ratio
left, right = x_list[point_index] + dist_from_pt, x_list[point_index] + dist_from_pt + width
bottom, top = y_list[point_index] + dist_from_pt + height, y_list[point_index] + dist_from_pt
overlap = True
while overlap:
overlap = False
extent = MatplotExtent(left, right, bottom, top)
for other_crop_extent in indices_to_extents.values():
other_left, other_right, other_bottom, other_top = other_crop_extent
spaceout_margin = dist_from_pt / 2
if is_extent_intersect(extent, other_crop_extent):
overlap = True
# shift below
if other_bottom <= top <= other_top:
top = other_bottom + spaceout_margin
bottom = top + height
else: # shift above
bottom = other_top - spaceout_margin
top = bottom - height
continue
indices_to_extents[point_index] = extent
return indices_to_extents
def plot_clusters_and_embedding_space_with_crops(self, document, output_path, crops_per_cluster=3,
embedding_properties=['embedding', 'unprojected_embedding'],
unprojected_caption=None):
"""
Plot 2d PCA visualization of the embedding space according to cluster colors.
:param document: Document with clustering results
:param embedding_property: Embedding property of words - normally 'embedding' or 'unprojected_embedding'
:return:
"""
if not os.path.exists(output_path):
os.makedirs(output_path)
words = document.get_words()
clusters = document.get_clusters()
if len(words) == 0 or \
all([getattr(words[0], embedding_property) is None for embedding_property in embedding_properties]):
return
colors_palette = VisHandler.generate_colors_list(amount=len(clusters))
word_to_color = {word: colors_palette[cluster_idx]
for cluster_idx, cluster in enumerate(clusters)
for word in cluster.words}
colors = [word_to_color[word] for word in words]
# Initially empty, the first embedding property we process will set those for all figures
selected_word_crops_per_cluster = None
indices_to_crops = None
for embedding_property in embedding_properties:
if embedding_property == 'unprojected_embedding': # Can't handle tuples, concat them
embeddings = []
for word in words:
unprojected_embedding = torch.cat(word.unprojected_embedding['embeddings'], dim=1)
unprojected_embedding = unprojected_embedding.detach().cpu().numpy()
embeddings.append(unprojected_embedding)
else:
embeddings = [getattr(word, embedding_property).detach().cpu().numpy() for word in words]
embeddings_array = np.array(embeddings).squeeze()
num_pca_comp = 2
embeddings_2d = PCA(n_components=num_pca_comp).fit_transform(embeddings_array)
x_list = [embeddings_2d[i, 0] for i in range(embeddings_2d.shape[0])]
y_list = [embeddings_2d[i, 1] for i in range(embeddings_2d.shape[0])]
fig, ax = plt.subplots(1)
if crops_per_cluster > 0:
if selected_word_crops_per_cluster is None and indices_to_crops is None: # Calculate per first attribute
selected_word_crops_per_cluster = PlotsProducer._find_k_furthest_words_per_cluster(document, embeddings_2d, k=crops_per_cluster)
indices_to_crops = PlotsProducer._extract_crops_per_cluster_solution(document, selected_word_crops_per_cluster)
indices_to_extents = PlotsProducer._space_out_crops(indices_to_crops, words,
x_list, y_list, dist_from_pt=0.02, height=0.04)
# Plot crop images
for point_index, crop in indices_to_crops.items():
extent = indices_to_extents[point_index]
rect = patches.Rectangle((extent.left, extent.top), extent.right-extent.left, extent.bottom-extent.top,
linewidth=0.5,
edgecolor="black",
facecolor="none",
zorder=5)
ax.imshow(crop, aspect='auto', alpha=0.65, extent=extent, zorder=4)
ax.add_patch(rect)
# Plot points
if embedding_property == 'unprojected_embedding':
plot_title = 'Initial unprojected embeddings, pre training (PCA)'
else:
if unprojected_caption is None:
plot_title = 'Projected embeddings, post training (PCA)'
else:
plot_title = unprojected_caption
plt.title(plot_title)
plt.scatter(x_list, y_list, c=colors, s=18, alpha=1.0, edgecolors='black', linewidth=1.0, zorder=3)
plt.tick_params(axis='both', which='both',
bottom='off', top='off', labelbottom='off', right='off', left='off',
labelleft='off')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.tight_layout()
fig.savefig(os.path.join(output_path, document.basename + '_' + embedding_property + '_pca.png'))
plt.close(fig)
# Finally plot clusters on original image
self.save_clustering_results(with_title=False, colors_list=colors_palette)
return colors_palette
@staticmethod
def animate_pca_embedding_space_for_clusters(document, output_path, embeddings_history, colors_palette=None):
"""
Plot 2d PCA visualization of the embedding space according to cluster colors.
:param document: Document with clustering results
:param embedding_property: Embedding property of words - normally 'embedding' or 'unprojected_embedding'
:return:
"""
if not os.path.exists(output_path):
os.makedirs(output_path)
words = document.get_words()
clusters = document.get_clusters()
if len(words) == 0 or embeddings_history is None or len(embeddings_history) == 0:
return
if colors_palette is None:
colors_palette = VisHandler.generate_colors_list(amount=len(clusters))
word_to_color = {word: colors_palette[cluster_idx]
for cluster_idx, cluster in enumerate(clusters)
for word in cluster.words}
colors = [word_to_color[word] for word in words]
scatter_data = []
for state_idx, embeddings_state in enumerate(embeddings_history):
epoch = state_idx + 1
normalized_embeddings_dict = embeddings_state['normalized']
unnormalized_embeddings_dict = embeddings_state['unnormalized']
if len(normalized_embeddings_dict) > 0:
normalized_embeddings = [normalized_embeddings_dict[word].detach().cpu().numpy() for word in words]
chosen_embedding = normalized_embeddings
elif len(unnormalized_embeddings_dict) > 0:
unnormalized_embeddings = [unnormalized_embeddings_dict[word].detach().cpu().numpy() for word in words]
chosen_embedding = unnormalized_embeddings
else:
return
embeddings_array = np.array(chosen_embedding).squeeze()
num_pca_comp = 2
embeddings_2d = PCA(n_components=num_pca_comp).fit_transform(embeddings_array)
x_list = [embeddings_2d[i, 0] for i in range(embeddings_2d.shape[0])]
y_list = [embeddings_2d[i, 1] for i in range(embeddings_2d.shape[0])]
push_pull_ratio = embeddings_state['push_pull_ratio']
scatter_data.append((epoch, x_list, y_list, push_pull_ratio))
min_x = min(min(scatter_data, key=lambda entry: min(entry[1]))[1])
max_x = max(max(scatter_data, key=lambda entry: max(entry[1]))[1])
min_y = min(min(scatter_data, key=lambda entry: min(entry[2]))[2])
max_y = max(max(scatter_data, key=lambda entry: max(entry[2]))[2])
padding_factor = 0.1
min_x -= (max_x - min_x) * padding_factor
max_x += (max_x - min_x) * padding_factor
min_y -= (max_y - min_y) * padding_factor
max_y += (max_y - min_y) * padding_factor
frames = []
for epoch, x_list, y_list, push_pull_ratio in scatter_data:
fig, ax = plt.subplots(1)
ax.set_xlim(min_x, max_x)
ax.set_ylim(min_y, max_y)
plot_title = 'Projected embeddings at epoch #' + str(epoch) + ' (PCA)'
plt.title(plot_title)
plt.scatter(x_list, y_list, c=colors, s=18, alpha=1.0, edgecolors='black', linewidth=1.0, zorder=3)
plt.tick_params(axis='both', which='both',
bottom='off', top='off', labelbottom='off', right='off', left='off',
labelleft='off')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
            # Used to return the plot as an image array
fig.tight_layout()
fig.canvas.draw() # draw the canvas, cache the renderer
output_frame = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')
output_frame = output_frame.reshape(fig.canvas.get_width_height()[::-1] + (3,))
frames.append(output_frame)
imageio.mimsave(os.path.join(output_path, document.basename + '_embeddings_history.gif'), frames, fps=2)
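

# A minimal, self-contained sketch of the greedy farthest-point heuristic used by
# PlotsProducer._find_k_furthest_words_per_cluster above, run on a toy point set
# (the point data and the function name here are illustrative assumptions, not part
# of the original pipeline). It uses the same pdist/squareform/np.argmax calls.
def _greedy_farthest_points_demo(k=3, seed=0):
    rng = np.random.RandomState(seed)
    points = rng.rand(20, 2)                               # 20 random 2D "embeddings"
    distances_matrix = squareform(pdist(points, metric='euclidean'))
    distances_accumulator = np.zeros(len(points))
    selected = [0]                                         # start from an arbitrary point
    for _ in range(k - 1):
        last = selected[-1]
        distances_accumulator += distances_matrix[last]    # distance collected from the last pick
        distances_matrix[:, last] = 0                      # eliminate the last pick
        distances_matrix[last, :] = 0
        selected.append(int(np.argmax(distances_accumulator)))
    return selected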
|
[
"numpy.array",
"matplotlib.pyplot.imshow",
"os.path.exists",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.close",
"cv2.addWeighted",
"numpy.take",
"matplotlib.pyplot.scatter",
"collections.namedtuple",
"scipy.spatial.distance.squareform",
"matplotlib.use",
"scipy.spatial.distance.pdist",
"matplotlib.pyplot.tick_params",
"numpy.argmax",
"multimodal_affinities.visualization.colors_util.rgb_hex_to_tuple",
"matplotlib.pyplot.title",
"cv2.imread",
"torch.cat",
"PIL.Image.fromarray",
"matplotlib.patches.Rectangle",
"os.makedirs",
"matplotlib.pyplot.imread",
"multimodal_affinities.visualization.vis_handler.VisHandler.generate_darker_palette",
"os.path.join",
"matplotlib.pyplot.subplots"
] |
[((370, 391), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (384, 391), False, 'import matplotlib\n'), ((939, 966), 'matplotlib.pyplot.imread', 'plt.imread', (['self.image_path'], {}), '(self.image_path)\n', (949, 966), True, 'import matplotlib.pyplot as plt\n'), ((993, 1020), 'cv2.imread', 'cv2.imread', (['self.image_path'], {}), '(self.image_path)\n', (1003, 1020), False, 'import cv2\n'), ((1551, 1588), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': 'self.figsize'}), '(1, figsize=self.figsize)\n', (1563, 1588), True, 'import matplotlib.pyplot as plt\n'), ((2042, 2056), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (2051, 2056), True, 'import matplotlib.pyplot as plt\n'), ((2233, 2270), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': 'self.figsize'}), '(1, figsize=self.figsize)\n', (2245, 2270), True, 'import matplotlib.pyplot as plt\n'), ((2550, 2564), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (2559, 2564), True, 'import matplotlib.pyplot as plt\n'), ((4550, 4597), 'multimodal_affinities.visualization.vis_handler.VisHandler.generate_darker_palette', 'VisHandler.generate_darker_palette', (['colors_list'], {}), '(colors_list)\n', (4584, 4597), False, 'from multimodal_affinities.visualization.vis_handler import VisHandler\n'), ((5398, 5454), 'cv2.addWeighted', 'cv2.addWeighted', (['output_img', 'alpha', 'bg_img', '(1 - alpha)', '(0)'], {}), '(output_img, alpha, bg_img, 1 - alpha, 0)\n', (5413, 5454), False, 'import cv2\n'), ((5630, 5762), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""both"""', 'bottom': '"""off"""', 'top': '"""off"""', 'labelbottom': '"""off"""', 'right': '"""off"""', 'left': '"""off"""', 'labelleft': '"""off"""'}), "(axis='both', which='both', bottom='off', top='off',\n labelbottom='off', right='off', left='off', labelleft='off')\n", (5645, 5762), True, 'import matplotlib.pyplot as plt\n'), ((5816, 5834), 'matplotlib.pyplot.imshow', 'plt.imshow', (['bg_img'], {}), '(bg_img)\n', (5826, 5834), True, 'import matplotlib.pyplot as plt\n'), ((6081, 6128), 'multimodal_affinities.visualization.vis_handler.VisHandler.generate_darker_palette', 'VisHandler.generate_darker_palette', (['colors_list'], {}), '(colors_list)\n', (6115, 6128), False, 'from multimodal_affinities.visualization.vis_handler import VisHandler\n'), ((8798, 8813), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {}), '(1)\n', (8810, 8813), True, 'import matplotlib.pyplot as plt\n'), ((8930, 8951), 'matplotlib.pyplot.title', 'plt.title', (['plot_title'], {}), '(plot_title)\n', (8939, 8951), True, 'import matplotlib.pyplot as plt\n'), ((8960, 9013), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_list', 'y_list'], {'c': 'colors', 's': '(1)', 'alpha': '(0.8)'}), '(x_list, y_list, c=colors, s=1, alpha=0.8)\n', (8971, 9013), True, 'import matplotlib.pyplot as plt\n'), ((9156, 9170), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (9165, 9170), True, 'import matplotlib.pyplot as plt\n'), ((9649, 9705), 'collections.namedtuple', 'namedtuple', (['"""ClusterSolution"""', "['word_indices', 'words']"], {}), "('ClusterSolution', ['word_indices', 'words'])\n", (9659, 9705), False, 'from collections import namedtuple\n'), ((13820, 13884), 'collections.namedtuple', 'namedtuple', (['"""matplot_extent"""', "['left', 'right', 'bottom', 'top']"], {}), "('matplot_extent', ['left', 'right', 'bottom', 'top'])\n", (13830, 13884), False, 'from collections 
import namedtuple\n'), ((1324, 1351), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (1338, 1351), False, 'import os\n'), ((1365, 1389), 'os.makedirs', 'os.makedirs', (['output_path'], {}), '(output_path)\n', (1376, 1389), False, 'import os\n'), ((1958, 2032), 'os.path.join', 'os.path.join', (['self.output_path', "(self.document.basename + '_word_boxes.png')"], {}), "(self.output_path, self.document.basename + '_word_boxes.png')\n", (1970, 2032), False, 'import os\n'), ((2460, 2545), 'os.path.join', 'os.path.join', (['self.output_path', "(self.document.basename + '_phrase_detection.png')"], {}), "(self.output_path, self.document.basename + '_phrase_detection.png'\n )\n", (2472, 2545), False, 'import os\n'), ((2947, 2974), 'numpy.array', 'np.array', (['clustering_labels'], {}), '(clustering_labels)\n', (2955, 2974), True, 'import numpy as np\n'), ((4113, 4187), 'os.path.join', 'os.path.join', (['self.output_path', "(self.document.basename + '_clustering.png')"], {}), "(self.output_path, self.document.basename + '_clustering.png')\n", (4125, 4187), False, 'import os\n'), ((7415, 7442), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (7429, 7442), False, 'import os\n'), ((7456, 7480), 'os.makedirs', 'os.makedirs', (['output_path'], {}), '(output_path)\n', (7467, 7480), False, 'import os\n'), ((9062, 9150), 'os.path.join', 'os.path.join', (['output_path', "(document.basename + '_' + embedding_property + '_pca.png')"], {}), "(output_path, document.basename + '_' + embedding_property +\n '_pca.png')\n", (9074, 9150), False, 'import os\n'), ((9937, 9999), 'numpy.take', 'np.take', (['embeddings_2d', 'all_cluster_embeddings_indices'], {'axis': '(0)'}), '(embeddings_2d, all_cluster_embeddings_indices, axis=0)\n', (9944, 9999), True, 'import numpy as np\n'), ((10033, 10082), 'scipy.spatial.distance.pdist', 'pdist', (['all_cluster_embeddings'], {'metric': '"""euclidean"""'}), "(all_cluster_embeddings, metric='euclidean')\n", (10038, 10082), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((10114, 10144), 'scipy.spatial.distance.squareform', 'squareform', (['pairwise_distances'], {}), '(pairwise_distances)\n', (10124, 10144), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((16469, 16496), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (16483, 16496), False, 'import os\n'), ((16510, 16534), 'os.makedirs', 'os.makedirs', (['output_path'], {}), '(output_path)\n', (16521, 16534), False, 'import os\n'), ((18261, 18276), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {}), '(1)\n', (18273, 18276), True, 'import matplotlib.pyplot as plt\n'), ((19986, 20007), 'matplotlib.pyplot.title', 'plt.title', (['plot_title'], {}), '(plot_title)\n', (19995, 20007), True, 'import matplotlib.pyplot as plt\n'), ((20020, 20123), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_list', 'y_list'], {'c': 'colors', 's': '(18)', 'alpha': '(1.0)', 'edgecolors': '"""black"""', 'linewidth': '(1.0)', 'zorder': '(3)'}), "(x_list, y_list, c=colors, s=18, alpha=1.0, edgecolors='black',\n linewidth=1.0, zorder=3)\n", (20031, 20123), True, 'import matplotlib.pyplot as plt\n'), ((20132, 20264), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""both"""', 'bottom': '"""off"""', 'top': '"""off"""', 'labelbottom': '"""off"""', 'right': '"""off"""', 'left': '"""off"""', 'labelleft': '"""off"""'}), "(axis='both', which='both', bottom='off', top='off',\n 
labelbottom='off', right='off', left='off', labelleft='off')\n", (20147, 20264), True, 'import matplotlib.pyplot as plt\n'), ((20563, 20577), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (20572, 20577), True, 'import matplotlib.pyplot as plt\n'), ((21189, 21216), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (21203, 21216), False, 'import os\n'), ((21230, 21254), 'os.makedirs', 'os.makedirs', (['output_path'], {}), '(output_path)\n', (21241, 21254), False, 'import os\n'), ((23723, 23738), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {}), '(1)\n', (23735, 23738), True, 'import matplotlib.pyplot as plt\n'), ((23910, 23931), 'matplotlib.pyplot.title', 'plt.title', (['plot_title'], {}), '(plot_title)\n', (23919, 23931), True, 'import matplotlib.pyplot as plt\n'), ((23945, 24048), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_list', 'y_list'], {'c': 'colors', 's': '(18)', 'alpha': '(1.0)', 'edgecolors': '"""black"""', 'linewidth': '(1.0)', 'zorder': '(3)'}), "(x_list, y_list, c=colors, s=18, alpha=1.0, edgecolors='black',\n linewidth=1.0, zorder=3)\n", (23956, 24048), True, 'import matplotlib.pyplot as plt\n'), ((24057, 24189), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""both"""', 'bottom': '"""off"""', 'top': '"""off"""', 'labelbottom': '"""off"""', 'right': '"""off"""', 'left': '"""off"""', 'labelleft': '"""off"""'}), "(axis='both', which='both', bottom='off', top='off',\n labelbottom='off', right='off', left='off', labelleft='off')\n", (24072, 24189), True, 'import matplotlib.pyplot as plt\n'), ((24730, 24802), 'os.path.join', 'os.path.join', (['output_path', "(document.basename + '_embeddings_history.gif')"], {}), "(output_path, document.basename + '_embeddings_history.gif')\n", (24742, 24802), False, 'import os\n'), ((5166, 5194), 'multimodal_affinities.visualization.colors_util.rgb_hex_to_tuple', 'rgb_hex_to_tuple', (['face_color'], {}), '(face_color)\n', (5182, 5194), False, 'from multimodal_affinities.visualization.colors_util import rgb_hex_to_tuple\n'), ((6571, 6683), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(x, y)', 'width', 'height'], {'linewidth': '(2)', 'edgecolor': 'edge_color', 'facecolor': 'face_color', 'alpha': '(0.4)'}), '((x, y), width, height, linewidth=2, edgecolor=edge_color,\n facecolor=face_color, alpha=0.4)\n', (6588, 6683), True, 'import matplotlib.patches as patches\n'), ((7815, 7873), 'torch.cat', 'torch.cat', (["word.unprojected_embedding['embeddings']"], {'dim': '(1)'}), "(word.unprojected_embedding['embeddings'], dim=1)\n", (7824, 7873), False, 'import torch\n'), ((8480, 8500), 'numpy.array', 'np.array', (['embeddings'], {}), '(embeddings)\n', (8488, 8500), True, 'import numpy as np\n'), ((8560, 8590), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'num_pca_comp'}), '(n_components=num_pca_comp)\n', (8563, 8590), False, 'from sklearn.decomposition import PCA\n'), ((11053, 11093), 'numpy.argmax', 'np.argmax', (['distances_accumulator'], {'axis': '(0)'}), '(distances_accumulator, axis=0)\n', (11062, 11093), True, 'import numpy as np\n'), ((12540, 12581), 'PIL.Image.fromarray', 'Image.fromarray', (['image_of_crop[..., ::-1]'], {}), '(image_of_crop[..., ::-1])\n', (12555, 12581), False, 'from PIL import Image\n'), ((20465, 20553), 'os.path.join', 'os.path.join', (['output_path', "(document.basename + '_' + embedding_property + '_pca.png')"], {}), "(output_path, document.basename + '_' + embedding_property +\n '_pca.png')\n", 
(20477, 20553), False, 'import os\n'), ((17558, 17616), 'torch.cat', 'torch.cat', (["word.unprojected_embedding['embeddings']"], {'dim': '(1)'}), "(word.unprojected_embedding['embeddings'], dim=1)\n", (17567, 17616), False, 'import torch\n'), ((17923, 17943), 'numpy.array', 'np.array', (['embeddings'], {}), '(embeddings)\n', (17931, 17943), True, 'import numpy as np\n'), ((18011, 18041), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'num_pca_comp'}), '(n_components=num_pca_comp)\n', (18014, 18041), False, 'from sklearn.decomposition import PCA\n'), ((19119, 19291), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(extent.left, extent.top)', '(extent.right - extent.left)', '(extent.bottom - extent.top)'], {'linewidth': '(0.5)', 'edgecolor': '"""black"""', 'facecolor': '"""none"""', 'zorder': '(5)'}), "((extent.left, extent.top), extent.right - extent.left, \n extent.bottom - extent.top, linewidth=0.5, edgecolor='black', facecolor\n ='none', zorder=5)\n", (19136, 19291), True, 'import matplotlib.patches as patches\n'), ((22621, 22647), 'numpy.array', 'np.array', (['chosen_embedding'], {}), '(chosen_embedding)\n', (22629, 22647), True, 'import numpy as np\n'), ((22715, 22745), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'num_pca_comp'}), '(n_components=num_pca_comp)\n', (22718, 22745), False, 'from sklearn.decomposition import PCA\n')]
|
import os
import numpy as np
save_stem='extra_vis_friday_harbor'
data_dir='../../data/sdk_new_100'
resolution=100
cre=False
source_acronyms=['VISal','VISam','VISl','VISp','VISpl','VISpm',
'VISli','VISpor','VISrl','VISa']
lambda_list = np.logspace(3,12,10)
scale_lambda=True
min_vox=0
# save_file_name='visual_output.hdf5'
#source_coverage=0.90
source_coverage=0.95
#source_shell = 1
source_shell=None
save_dir=os.path.join('../../data/connectivities',save_stem)
experiments_fn=None
target_acronyms=source_acronyms
solver=os.path.abspath('../smoothness_c/solve')
cmdfile=os.path.join(save_dir,'model_fitting_cmds')
selected_fit_cmds=os.path.join(save_dir,'model_fitting_after_selection_cmds')
save_mtx=True
cross_val_matrices=True
cross_val=5
fit_gaussian=False
select_one_lambda=False
if select_one_lambda:
lambda_fn='lambda_opt'
else:
lambda_fn='lambda_ipsi_contra_opt'
laplacian='free'
shuffle_seed=666
max_injection_volume=0.7
|
[
"os.path.abspath",
"numpy.logspace",
"os.path.join"
] |
[((253, 275), 'numpy.logspace', 'np.logspace', (['(3)', '(12)', '(10)'], {}), '(3, 12, 10)\n', (264, 275), True, 'import numpy as np\n'), ((428, 480), 'os.path.join', 'os.path.join', (['"""../../data/connectivities"""', 'save_stem'], {}), "('../../data/connectivities', save_stem)\n", (440, 480), False, 'import os\n'), ((539, 579), 'os.path.abspath', 'os.path.abspath', (['"""../smoothness_c/solve"""'], {}), "('../smoothness_c/solve')\n", (554, 579), False, 'import os\n'), ((588, 632), 'os.path.join', 'os.path.join', (['save_dir', '"""model_fitting_cmds"""'], {}), "(save_dir, 'model_fitting_cmds')\n", (600, 632), False, 'import os\n'), ((650, 710), 'os.path.join', 'os.path.join', (['save_dir', '"""model_fitting_after_selection_cmds"""'], {}), "(save_dir, 'model_fitting_after_selection_cmds')\n", (662, 710), False, 'import os\n')]
|
# encoding: utf-8
'''
@author: yangsen
@license:
@contact:
@software:
@file: numpy_mat.py
@time: 18-8-25 9:56 PM
@desc:
'''
import numpy as np
a = np.arange(9).reshape(3,3)
# rows
a[1]
a[[1,2]]
a[np.array([1,2])]
# columns
a[:,1]
a[:,[1,2]]
a[:,np.array([1,2])]
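# A small extension of the examples above (illustrative only, reusing the same 3x3 `a`):
# fancy indexing on both axes picks a sub-matrix; the column index array is reshaped so
# the two index arrays broadcast against each other.
a[np.array([1, 2])[:, None], np.array([1, 2])]  # rows 1-2, columns 1-2 -> [[4, 5], [7, 8]]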
|
[
"numpy.array",
"numpy.arange"
] |
[((194, 210), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (202, 210), True, 'import numpy as np\n'), ((146, 158), 'numpy.arange', 'np.arange', (['(9)'], {}), '(9)\n', (155, 158), True, 'import numpy as np\n'), ((238, 254), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (246, 254), True, 'import numpy as np\n')]
|
from typing import Union, Iterable, List
import numpy as np
import pandas as pd
from ..models._transformer import _ArrayTransformer, _MultiArrayTransformer
class _DataFrameTransformer(_ArrayTransformer):
'''`_ArrayTransformer` wrapper for `pandas.DataFrame`.
'''
def __init__(self):
super().__init__()
def fit(self, X : pd.DataFrame, axis : Union[int, Iterable[int]] = 0):
if not isinstance(X, pd.DataFrame):
raise ValueError('This interface is for `pandas.DataFrame` only')
if isinstance(axis, list):
axis = axis[0]
# Set sample and feature index
if axis == 0:
self.index_samples = X.index
self.index_features = X.columns
elif axis == 1:
self.index_samples = X.columns
self.index_features = X.index
else:
raise ValueError('axis must be either 0 or 1')
# Fit the data
try:
super().fit(X=X.values, axis=axis)
except AttributeError:
            err_msg = 'X must be of type {:}.'.format(repr(pd.DataFrame))
raise TypeError(err_msg)
return self
def transform(self, X : pd.DataFrame) -> np.ndarray:
try:
return super().transform(X.values)
except AttributeError:
            err_msg = 'X must be of type {:}.'.format(repr(pd.DataFrame))
raise TypeError(err_msg)
def fit_transform(self, X : pd.DataFrame, axis : int = 0) -> np.ndarray:
return self.fit(X=X, axis=axis).transform(X)
def transform_weights(self, weights : pd.DataFrame) -> np.ndarray:
try:
return super().transform_weights(weights.values)
except AttributeError:
return super().transform_weights(weights)
def back_transform(self, X : np.ndarray) -> pd.DataFrame:
df = super().back_transform(X)
return pd.DataFrame(
df,
index=self.index_samples,
columns=self.index_features
)
def back_transform_eofs(self, X : np.ndarray) -> pd.DataFrame:
eofs = super().back_transform_eofs(X)
return pd.DataFrame(
eofs,
index=self.index_features,
columns=range(1, eofs.shape[-1] + 1)
)
def back_transform_pcs(self, X : np.ndarray) -> pd.DataFrame:
pcs = super().back_transform_pcs(X)
return pd.DataFrame(
pcs,
index=self.index_samples,
columns=range(1, pcs.shape[-1] + 1)
)
class _MultiDataFrameTransformer(_MultiArrayTransformer):
    'Transform multiple 2D ``pd.DataFrame`` to a single 2D ``np.ndarray``.'
def __init__(self):
super().__init__()
def fit(self, X : Union[pd.DataFrame, List[pd.DataFrame]], axis : Union[int, Iterable[int]] = 0):
X = self._convert2list(X)
self.tfs = [_DataFrameTransformer().fit(x, axis=axis) for x in X]
if len(set([tf.n_valid_samples for tf in self.tfs])) > 1:
err_msg = 'All individual arrays must have same number of samples.'
raise ValueError(err_msg)
self.idx_array_sep = np.cumsum([tf.n_valid_features for tf in self.tfs])
self.axis_samples = self.tfs[0].axis_samples
return self
def transform(self, X : Union[pd.DataFrame, List[pd.DataFrame]]) -> np.ndarray:
return super().transform(X=X)
def transform_weights(self, weights : Union[pd.DataFrame, List[pd.DataFrame]]) -> np.ndarray:
return super().transform_weights(weights=weights)
def fit_transform(
self, X : Union[pd.DataFrame, List[pd.DataFrame]],
axis : Union[int, Iterable[int]] = 0
) -> np.ndarray:
return self.fit(X=X, axis=axis).transform(X)
def back_transform(self, X : np.ndarray) -> pd.DataFrame:
return super().back_transform(X=X)
def back_transform_eofs(self, X : np.ndarray) -> pd.DataFrame:
return super().back_transform_eofs(X=X)
def back_transform_pcs(self, X : np.ndarray) -> pd.DataFrame:
return super().back_transform_pcs(X=X)
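

# A usage-style sketch (illustrative only; the helper name is an assumption and the
# _ArrayTransformer machinery is not involved). The wrappers above essentially remember
# a DataFrame's index/columns before dropping to .values and restore them on the way
# back; the same round trip written out with pandas alone:
def _dataframe_roundtrip_example() -> pd.DataFrame:
    df = pd.DataFrame(
        np.arange(12.).reshape(4, 3),
        index=list('abcd'),       # plays the role of index_samples
        columns=list('xyz')        # plays the role of index_features
    )
    arr = df.values                # work on the plain ndarray
    restored = pd.DataFrame(arr, index=df.index, columns=df.columns)
    assert restored.equals(df)
    return restored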
|
[
"pandas.DataFrame",
"numpy.cumsum"
] |
[((1911, 1982), 'pandas.DataFrame', 'pd.DataFrame', (['df'], {'index': 'self.index_samples', 'columns': 'self.index_features'}), '(df, index=self.index_samples, columns=self.index_features)\n', (1923, 1982), True, 'import pandas as pd\n'), ((3153, 3204), 'numpy.cumsum', 'np.cumsum', (['[tf.n_valid_features for tf in self.tfs]'], {}), '([tf.n_valid_features for tf in self.tfs])\n', (3162, 3204), True, 'import numpy as np\n')]
|
# Neural Networks Demystified
# Part 1: Data + Architecture
#
# Supporting code for short YouTube series on artificial neural networks.
#
# <NAME>
# @stephencwelch
import numpy as np
# X = (hours sleeping, hours studying), y = Score on test
X = np.array(([3,5], [5,1], [10,2]), dtype=float)
y = np.array(([75], [82], [93]), dtype=float)
# Normalize
X = X/np.amax(X, axis=0)
y = y/100 #Max test score is 100
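# Worked values for this tiny dataset: np.amax(X, axis=0) is [10., 5.], so after the
# division each feature lies in [0, 1]:
# X -> [[0.3, 1. ], [0.5, 0.2], [1. , 0.4]]
# y -> [[0.75], [0.82], [0.93]]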
|
[
"numpy.array",
"numpy.amax"
] |
[((247, 295), 'numpy.array', 'np.array', (['([3, 5], [5, 1], [10, 2])'], {'dtype': 'float'}), '(([3, 5], [5, 1], [10, 2]), dtype=float)\n', (255, 295), True, 'import numpy as np\n'), ((297, 338), 'numpy.array', 'np.array', (['([75], [82], [93])'], {'dtype': 'float'}), '(([75], [82], [93]), dtype=float)\n', (305, 338), True, 'import numpy as np\n'), ((358, 376), 'numpy.amax', 'np.amax', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (365, 376), True, 'import numpy as np\n')]
|
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import histogram_module
import dist_module
def rgb2gray(rgb):
r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
return gray
# model_images - list of file names of model images
# query_images - list of file names of query images
#
# dist_type - string which specifies distance type: 'chi2', 'l2', 'intersect'
# hist_type - string which specifies histogram type: 'grayvalue', 'dxdy', 'rgb', 'rg'
#
# note: use functions 'get_dist_by_name', 'get_hist_by_name' and 'is_grayvalue_hist' to obtain
# handles to distance and histogram functions, and to find out whether histogram function
# expects grayvalue or color image
def find_best_match(model_images, query_images, dist_type, hist_type, num_bins):
hist_isgray = histogram_module.is_grayvalue_hist(hist_type)
model_hists = compute_histograms(model_images, hist_type, hist_isgray, num_bins)
query_hists = compute_histograms(query_images, hist_type, hist_isgray, num_bins)
D = np.zeros((len(model_images), len(query_images)))
    # compute the distance for every (model image, query image) pair
for j, query in enumerate(query_hists):
for i, model in enumerate(model_hists):
D[i, j] = dist_module.get_dist_by_name(model, query, dist_type)
best_match = [] # to save best matches
    # for each query, find the best model
for j in range(len(query_images)):
        query_matches = D[:, j]  # the query's column of distances to all models
argmin = np.argmin(query_matches) # get index with minimum distance
best_match.append(argmin) # save index for query
best_match = np.array(best_match) # array of best match for each query
return best_match, D
def compute_histograms(image_list, hist_type, hist_isgray, num_bins):
image_hist = []
    # Compute a histogram for each image and append it to image_hist
# ... (your code here)
for img in image_list:
img_color = np.array(Image.open(img))
# if hist is gray type we use gray image
# othewise rgb image
img_to_process = rgb2gray(img_color) if hist_isgray else img_color.astype('double')
# We compute histogram for image
hist = histogram_module.get_hist_by_name(img=img_to_process,
num_bins_gray=num_bins,
hist_name=hist_type
)
image_hist.append(hist)
return image_hist
# For each image file from 'query_images' find and visualize the 5 nearest images from 'model_image'.
#
# Note: use the previously implemented function 'find_best_match'
# Note: use subplot command to show all the images in the same Python figure, one row per query image
def show_neighbors(model_images, query_images, dist_type, hist_type, num_bins):
plt.figure()
num_nearest = 5 # show the top-5 neighbors
# ... (your code here)
_, D = find_best_match(model_images=model_images,
query_images=query_images,
dist_type=dist_type,
hist_type=hist_type,
num_bins=num_bins
)
Q = len(query_images)
pos = 0
for j in range(Q):
query_matches = D[:, j]
best_args = np.argsort(query_matches)[:num_nearest]
query_img = query_images[j]
pos += 1
plt.subplot(Q, 6, pos);
plt.imshow(np.array(Image.open(query_img)), vmin=0, vmax=255);
plt.title(f'Q{j}')
for ind in range(len(best_args)):
pos += 1
model_ind = best_args[ind]
model_img = model_images[model_ind]
plt.subplot(Q, 6, pos);
plt.imshow(np.array(Image.open(model_img)), vmin=0, vmax=255);
plt.title(f'MO.{model_ind}')
plt.show()
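

# A toy sanity check (illustrative only; no image files or histogram modules required).
# find_best_match reduces to an argmin over each column of the model-by-query distance
# matrix D; the same reduction on a hand-made 2x2 matrix:
def _toy_best_match_demo():
    D_toy = np.array([[0.2, 0.9],
                      [0.7, 0.1]])
    return np.argmin(D_toy, axis=0)  # -> array([0, 1]): query 0 matches model 0, query 1 matches model 1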
|
[
"PIL.Image.open",
"histogram_module.is_grayvalue_hist",
"histogram_module.get_hist_by_name",
"numpy.argsort",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.argmin",
"matplotlib.pyplot.title",
"dist_module.get_dist_by_name",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] |
[((866, 911), 'histogram_module.is_grayvalue_hist', 'histogram_module.is_grayvalue_hist', (['hist_type'], {}), '(hist_type)\n', (900, 911), False, 'import histogram_module\n'), ((1708, 1728), 'numpy.array', 'np.array', (['best_match'], {}), '(best_match)\n', (1716, 1728), True, 'import numpy as np\n'), ((2953, 2965), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2963, 2965), True, 'import matplotlib.pyplot as plt\n'), ((3968, 3978), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3976, 3978), True, 'import matplotlib.pyplot as plt\n'), ((1572, 1596), 'numpy.argmin', 'np.argmin', (['query_matches'], {}), '(query_matches)\n', (1581, 1596), True, 'import numpy as np\n'), ((2292, 2395), 'histogram_module.get_hist_by_name', 'histogram_module.get_hist_by_name', ([], {'img': 'img_to_process', 'num_bins_gray': 'num_bins', 'hist_name': 'hist_type'}), '(img=img_to_process, num_bins_gray=\n num_bins, hist_name=hist_type)\n', (2325, 2395), False, 'import histogram_module\n'), ((3539, 3561), 'matplotlib.pyplot.subplot', 'plt.subplot', (['Q', '(6)', 'pos'], {}), '(Q, 6, pos)\n', (3550, 3561), True, 'import matplotlib.pyplot as plt\n'), ((3642, 3660), 'matplotlib.pyplot.title', 'plt.title', (['f"""Q{j}"""'], {}), "(f'Q{j}')\n", (3651, 3660), True, 'import matplotlib.pyplot as plt\n'), ((1312, 1365), 'dist_module.get_dist_by_name', 'dist_module.get_dist_by_name', (['model', 'query', 'dist_type'], {}), '(model, query, dist_type)\n', (1340, 1365), False, 'import dist_module\n'), ((2047, 2062), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (2057, 2062), False, 'from PIL import Image\n'), ((3436, 3461), 'numpy.argsort', 'np.argsort', (['query_matches'], {}), '(query_matches)\n', (3446, 3461), True, 'import numpy as np\n'), ((3823, 3845), 'matplotlib.pyplot.subplot', 'plt.subplot', (['Q', '(6)', 'pos'], {}), '(Q, 6, pos)\n', (3834, 3845), True, 'import matplotlib.pyplot as plt\n'), ((3934, 3962), 'matplotlib.pyplot.title', 'plt.title', (['f"""MO.{model_ind}"""'], {}), "(f'MO.{model_ind}')\n", (3943, 3962), True, 'import matplotlib.pyplot as plt\n'), ((3591, 3612), 'PIL.Image.open', 'Image.open', (['query_img'], {}), '(query_img)\n', (3601, 3612), False, 'from PIL import Image\n'), ((3879, 3900), 'PIL.Image.open', 'Image.open', (['model_img'], {}), '(model_img)\n', (3889, 3900), False, 'from PIL import Image\n')]
|
################################################################################
# Copyright (c) 2015-2018 Skymind, Inc.
#
# This program and the accompanying materials are made available under the
# terms of the Apache License, Version 2.0 which is available at
# https://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# SPDX-License-Identifier: Apache-2.0
################################################################################
from .java_classes import *
import numpy as np
import ctypes
import warnings
native_ops = NativeOpsHolder.getInstance().getDeviceNativeOps()
# DATA TYPE MANAGEMENT
DOUBLE = DataType.DOUBLE
FLOAT = DataType.FLOAT
HALF = DataType.HALF
LONG = DataType.LONG
INT = DataType.INT
SHORT = DataType.SHORT
UBYTE = DataType.UBYTE
BYTE = DataType.BYTE
BOOL = DataType.BOOL
UTF8 = DataType.UTF8
COMPRESSED = DataType.COMPRESSED
UNKNOWN = DataType.UNKNOWN
SUPPORTED_JAVA_DTYPES = [
DOUBLE,
FLOAT,
HALF,
LONG,
INT,
SHORT,
BOOL
#UTF8
]
SUPPORTED_PYTHON_DTYPES = [
np.float64,
np.float32,
np.float16,
np.int64,
np.int32,
np.int16,
np.bool_
#np.str_
]
_PY2J = {SUPPORTED_PYTHON_DTYPES[i] : SUPPORTED_JAVA_DTYPES[i] for i in range(len(SUPPORTED_JAVA_DTYPES))}
_J2PY = {SUPPORTED_JAVA_DTYPES[i] : SUPPORTED_PYTHON_DTYPES[i] for i in range(len(SUPPORTED_JAVA_DTYPES))}
def _dtype_py2j(dtype):
if isinstance(dtype, str):
dtype = np.dtype(dtype).type
elif isinstance(dtype, np.dtype):
dtype = dtype.type
jtype = _PY2J.get(dtype)
if jtype is None:
raise NotImplementedError("Unsupported type: " + dtype.name)
return jtype
def _dtype_j2py(dtype):
pytype = _J2PY.get(dtype)
if pytype is None:
raise NotImplementedError("Unsupported type: " + (str(dtype)))
return pytype
def set_context_dtype(dtype):
'''
Sets the dtype for nd4j
# Arguments
dtype: 'float' or 'double'
'''
dtype_map = {
'float32': 'float',
'float64': 'double'
}
dtype = dtype_map.get(dtype, dtype)
if dtype not in ['float', 'double']:
raise ValueError("Invalid dtype '{}'. Available dtypes are 'float' and 'double'.".format(dtype))
dtype_ = DataTypeUtil.getDtypeFromContext(dtype)
DataTypeUtil.setDTypeForContext(dtype_)
if get_context_dtype() != dtype:
warnings.warn("Can not set context dtype now. Set it at the beginning of your program.")
def get_context_dtype():
'''
Returns the nd4j dtype
'''
dtype = DataTypeUtil.getDtypeFromContext()
return DataTypeUtil.getDTypeForName(dtype)
_refs = []
def _from_numpy(np_array):
'''
Convert numpy array to nd4j array
'''
pointer_address, _ = np_array.__array_interface__['data']
_refs.append(np_array)
pointer = native_ops.pointerForAddress(pointer_address)
size = np_array.size
pointer.limit(size)
jdtype = _dtype_py2j(np_array.dtype)
'''
mapping = {
DOUBLE: DoublePointer,
FLOAT: FloatPointer,
HALF: HalfPointer,
LONG: LongPointer,
INT: IntPointer,
SHORT: ShortPointer,
BOOL: BoolPointer
}
pc = mapping[jdtype]
#pointer = pc(pointer)
'''
buff = Nd4j.createBuffer(pointer, size, jdtype)
assert buff.address() == pointer_address
_refs.append(buff)
elem_size = buff.getElementSize()
assert elem_size == np_array.dtype.itemsize
strides = np_array.strides
strides = [dim / elem_size for dim in strides]
shape = np_array.shape
nd4j_array = Nd4j.create(buff, shape, strides, 0)
assert buff.address() == nd4j_array.data().address()
return nd4j_array
def _to_numpy(nd4j_array):
'''
Convert nd4j array to numpy array
'''
buff = nd4j_array.data()
address = buff.pointer().address()
dtype = nd4j_array.dataType().toString()
mapping = {
'DOUBLE': ctypes.c_double,
'FLOAT': ctypes.c_float,
'HALF': ctypes.c_short,
'LONG': ctypes.c_long,
'INT': ctypes.c_int,
'SHORT': ctypes.c_short,
'BOOL': ctypes.c_bool
}
Pointer = ctypes.POINTER(mapping[dtype])
pointer = ctypes.cast(address, Pointer)
np_array = np.ctypeslib.as_array(pointer, tuple(nd4j_array.shape()))
return np_array
def _indarray(x):
typ = type(x)
if typ is INDArray:
return x
elif typ is ndarray:
return x.array
elif 'numpy' in str(typ):
return _from_numpy(x)
elif typ in (list, tuple):
return _from_numpy(np.array(x))
elif typ in (int, float):
return Nd4j.scalar(x)
else:
raise Exception('Data type not understood :' + str(typ))
def _nparray(x):
typ = type(x)
if typ is INDArray:
return ndarray(x).numpy()
elif typ is ndarray:
return x.numpy()
elif 'numpy' in str(typ):
return x
elif typ in (list, tuple):
return np.array(x)
elif typ in (int, float):
return np.array(x)
else:
raise Exception('Data type not understood :' + str(typ))
def broadcast_like(y, x):
xs = x.shape()
ys = y.shape()
if xs == ys:
return y
_xs = tuple(xs)
_ys = tuple(ys)
nx = len(xs)
ny = len(ys)
if nx > ny:
diff = nx - ny
ys = ([1] * diff) + ys
y = y.reshape(ys)
ny = nx
elif ny > nx:
raise Exception('Unable to broadcast shapes ' + str(_xs) + ''
' and ' + str(_ys))
yt = []
rep_y = False
for xd, yd in zip(xs, ys):
if xd == yd:
yt.append(1)
elif xd == 1:
raise Exception('Unable to broadcast shapes ' + str(_xs) + ''
' and ' + str(_ys))
elif yd == 1:
yt.append(xd)
rep_y = True
else:
raise Exception('Unable to broadcast shapes ' + str(_xs) + ''
' and ' + str(_ys))
if rep_y:
y = y.repmat(*yt)
return y
def broadcast(x, y):
xs = x.shape()
ys = y.shape()
if xs == ys:
return x, y
_xs = tuple(xs)
_ys = tuple(ys)
nx = len(xs)
ny = len(ys)
if nx > ny:
diff = nx - ny
ys = ([1] * diff) + ys
y = y.reshape(*ys)
ny = nx
elif ny > nx:
diff = ny - nx
xs = ([1] * diff) + xs
x = x.reshape(*xs)
nx = ny
xt = []
yt = []
rep_x = False
rep_y = False
for xd, yd in zip(xs, ys):
if xd == yd:
xt.append(1)
yt.append(1)
elif xd == 1:
xt.append(yd)
yt.append(1)
rep_x = True
elif yd == 1:
xt.append(1)
yt.append(xd)
rep_y = True
else:
raise Exception('Unable to broadcast shapes ' + str(_xs) + ''
' and ' + str(_ys))
if rep_x:
x = Nd4j.tile(x, *xt)
    if rep_y:
        y = Nd4j.tile(y, *yt)
return x, y
class ndarray(object):
def __init__(self, data, dtype=None):
# we ignore dtype for now
typ = type(data)
if 'nd4j' in typ.__name__:
# Note that we don't make a copy here
self.array = data
elif typ is ndarray:
self.array = data.array.dup()
else:
if typ is not np.ndarray:
data = np.array(data)
self.array = _from_numpy(data)
def numpy(self):
try:
return self.np_array
except AttributeError:
self.np_array = _to_numpy(self.array)
return self.np_array
@property
def size(self):
return self.array.length()
@property
def shape(self):
return tuple(self.array.shape())
@shape.setter
def shape(self, value):
arr = self.reshape(value)
self.array = arr.array
@property
def ndim(self):
return len(self.array.shape())
def __getitem__(self, key):
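        # NOTE: the line below delegates indexing to the cached numpy view and
        # returns immediately, so the ND4J index-building code after it is
        # currently unreachable. The same early-return pattern appears in
        # __setitem__ and in the arithmetic operators further down.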
return ndarray(self.numpy()[key])
if type(key) is int:
return ndarray(self.array.get(NDArrayIndex.point(key)))
if type(key) is slice:
start = key.start
stop = key.stop
step = key.step
if start is None:
start = 0
if stop is None:
shape = self.array.shape()
if shape[0] == 1:
stop = shape[1]
else:
stop = shape[0]
if stop - start <= 0:
return None
if step is None or step == 1:
return ndarray(self.array.get(NDArrayIndex.interval(start, stop)))
else:
return ndarray(self.array.get(NDArrayIndex.interval(start, step, stop)))
if type(key) is list:
raise NotImplementedError(
'Sorry, this type of indexing is not supported yet.')
if type(key) is tuple:
key = list(key)
shape = self.array.shape()
ndim = len(shape)
nk = len(key)
key += [slice(None)] * (ndim - nk)
args = []
for i, dim in enumerate(key):
if type(dim) is int:
args.append(NDArrayIndex.point(dim))
elif type(dim) is slice:
if dim == slice(None):
args.append(NDArrayIndex.all())
else:
start = dim.start
stop = dim.stop
step = dim.step
if start is None:
start = 0
if stop is None:
stop = shape[i]
if stop - start <= 0:
return None
if step is None or step == 1:
args.append(NDArrayIndex.interval(start, stop))
else:
args.append(NDArrayIndex.interval(
start, step, stop))
elif type(dim) in (list, tuple):
raise NotImplementedError(
'Sorry, this type of indexing is not supported yet.')
return ndarray(self.array.get(*args))
def __setitem__(self, key, other):
self.numpy()[key] = _nparray(other)
return
other = _indarray(other)
view = self[key]
if view is None:
return
view = view.array
other = broadcast_like(other, view)
view.assign(other)
def __add__(self, other):
return ndarray(self.numpy() + _nparray(other))
other = _indarray(other)
x, y = broadcast(self.array, other)
return ndarray(x.add(y))
def __sub__(self, other):
return ndarray(self.numpy() - _nparray(other))
other = _indarray(other)
x, y = broadcast(self.array, other)
return ndarray(x.sub(y))
def __mul__(self, other):
return ndarray(self.numpy() * _nparray(other))
other = _indarray(other)
x, y = broadcast(self.array, other)
return ndarray(x.mul(y))
def __div__(self, other):
return ndarray(self.numpy() / _nparray(other))
other = _indarray(other)
x, y = broadcast(self.array, other)
return ndarray(x.div(y))
def __pow__(self, other):
return ndarray(self.numpy() ** _nparray(other))
other = _indarray(other)
x, y = broadcast(self.array, other)
return ndarray(Transforms.pow(x, y))
def __iadd__(self, other):
self.numpy().__iadd__(_nparray(other))
return self
other = _indarray(other)
if self.array.shape() == other.shape():
self.array = self.array.addi(other)
else:
x, y = broadcast(self.array, other)
self.array = x.add(y)
return self
def __isub__(self, other):
self.numpy().__isub__(_nparray(other))
return self
other = _indarray(other)
if self.array.shape() == other.shape():
self.array = self.array.subi(other)
else:
x, y = broadcast(self.array, other)
self.array = x.sub(y)
return self
def __imul__(self, other):
self.numpy().__imul__(_nparray(other))
return self
other = _indarray(other)
if self.array.shape() == other.shape():
self.array = self.array.muli(other)
else:
x, y = broadcast(self.array, other)
self.array = x.mul(y)
return self
def __idiv__(self, other):
self.numpy().__idiv__(_nparray(other))
return self
other = _indarray(other)
if self.array.shape() == other.shape():
self.array = self.array.divi(other)
else:
x, y = broadcast(self.array, other)
self.array = x.div(y)
return self
def __ipow__(self, other):
self.numpy().__ipow__(_nparray(other))
return self
other = _indarray(other)
if self.array.shape() == other.shape():
self.array = self.array.divi(other)
else:
x, y = broadcast(self.array, other)
self.array = Transforms.pow(x, y)
return self
def __getattr__(self, attr):
import ops
f = getattr(ops, attr)
setattr(ndarray, attr, f)
return getattr(self, attr)
def __int__(self):
if self.array.length() == 1:
return self.array.getInt(0)
raise Exception('Applicable only for scalars')
def __float__(self):
if self.array.length() == 1:
return self.array.getDouble(0)
raise Exception('Applicable only for scalars')
@property
def T(self):
return self.transpose()
def array(*args, **kwargs):
return ndarray(*args, **kwargs)
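

# A numpy-only sketch of the broadcasting rule that broadcast()/broadcast_like() above
# reimplement for INDArrays (the function name is an illustrative assumption): axes are
# aligned from the right, size-1 axes are repeated, missing leading axes are added, and
# any other mismatch is an error.
def _numpy_broadcast_demo():
    a = np.ones((3, 1, 5))
    b = np.ones((4, 5))
    return (a + b).shape           # numpy broadcasts the pair to (3, 4, 5)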
|
[
"ctypes.POINTER",
"numpy.array",
"ctypes.cast",
"warnings.warn",
"numpy.dtype"
] |
[((4456, 4486), 'ctypes.POINTER', 'ctypes.POINTER', (['mapping[dtype]'], {}), '(mapping[dtype])\n', (4470, 4486), False, 'import ctypes\n'), ((4501, 4530), 'ctypes.cast', 'ctypes.cast', (['address', 'Pointer'], {}), '(address, Pointer)\n', (4512, 4530), False, 'import ctypes\n'), ((2678, 2771), 'warnings.warn', 'warnings.warn', (['"""Can not set context dtype now. Set it at the beginning of your program."""'], {}), "(\n 'Can not set context dtype now. Set it at the beginning of your program.')\n", (2691, 2771), False, 'import warnings\n'), ((1752, 1767), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (1760, 1767), True, 'import numpy as np\n'), ((5255, 5266), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (5263, 5266), True, 'import numpy as np\n'), ((7783, 7797), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (7791, 7797), True, 'import numpy as np\n'), ((4869, 4880), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (4877, 4880), True, 'import numpy as np\n'), ((5312, 5323), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (5320, 5323), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
from gensim.models.keyedvectors import KeyedVectors
from scipy import spatial
from numpy import linalg
import argparse
import sys
vector_file = sys.argv[1]
if len(sys.argv) != 6:
print('arguments wrong!')
print(len(sys.argv))
exit()
else:
words = [sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5]]
print(words)
wvs = KeyedVectors.load_word2vec_format(vector_file, binary=True)
print('WVs loaded.')
for w in words:
if w not in wvs.vocab:
print('out of vocab!')
exit()
#print(wvs.most_similar(positive=[words[1], words[2]], negative=[words[0]], topn=3))
w1 = wvs[words[0]]
w2 = wvs[words[1]]
w3 = wvs[words[2]]
w4 = wvs[words[3]]
m1 = w1 / linalg.norm(w1)
m2 = w2 / linalg.norm(w2)
m3 = w3 / linalg.norm(w3)
m4 = w4 / linalg.norm(w4)
diff1 = w1 - w2
diff2 = w3 - w4
miff1 = m1 - m2
miff2 = m3 - m4
print('-------Word Space---------')
print('to word-4: ', 1-spatial.distance.cosine(m2+m3-m1, m4))
print('to word-3: ', 1-spatial.distance.cosine(m1+m4-m2, m3))
print('to word-2: ', 1-spatial.distance.cosine(m4+m1-m3, m2))
print('to word-1: ', 1-spatial.distance.cosine(m2+m3-m4, m1))
print('------Analogy Space-------')
print(' cosine: ', 1-spatial.distance.cosine(diff1, diff2))
print(' Euclidean: ', 1-linalg.norm(diff1-diff2)/(linalg.norm(diff1)+linalg.norm(diff2)))
print(' M-cosine: ', 1-spatial.distance.cosine(miff1, miff2))
print('M-Euclidean: ', 1-linalg.norm(miff1-miff2)/(linalg.norm(miff1)+linalg.norm(miff2)))
|
[
"gensim.models.keyedvectors.KeyedVectors.load_word2vec_format",
"scipy.spatial.distance.cosine",
"numpy.linalg.norm"
] |
[((366, 425), 'gensim.models.keyedvectors.KeyedVectors.load_word2vec_format', 'KeyedVectors.load_word2vec_format', (['vector_file'], {'binary': '(True)'}), '(vector_file, binary=True)\n', (399, 425), False, 'from gensim.models.keyedvectors import KeyedVectors\n'), ((709, 724), 'numpy.linalg.norm', 'linalg.norm', (['w1'], {}), '(w1)\n', (720, 724), False, 'from numpy import linalg\n'), ((736, 751), 'numpy.linalg.norm', 'linalg.norm', (['w2'], {}), '(w2)\n', (747, 751), False, 'from numpy import linalg\n'), ((763, 778), 'numpy.linalg.norm', 'linalg.norm', (['w3'], {}), '(w3)\n', (774, 778), False, 'from numpy import linalg\n'), ((790, 805), 'numpy.linalg.norm', 'linalg.norm', (['w4'], {}), '(w4)\n', (801, 805), False, 'from numpy import linalg\n'), ((939, 980), 'scipy.spatial.distance.cosine', 'spatial.distance.cosine', (['(m2 + m3 - m1)', 'm4'], {}), '(m2 + m3 - m1, m4)\n', (962, 980), False, 'from scipy import spatial\n'), ((1002, 1043), 'scipy.spatial.distance.cosine', 'spatial.distance.cosine', (['(m1 + m4 - m2)', 'm3'], {}), '(m1 + m4 - m2, m3)\n', (1025, 1043), False, 'from scipy import spatial\n'), ((1065, 1106), 'scipy.spatial.distance.cosine', 'spatial.distance.cosine', (['(m4 + m1 - m3)', 'm2'], {}), '(m4 + m1 - m3, m2)\n', (1088, 1106), False, 'from scipy import spatial\n'), ((1128, 1169), 'scipy.spatial.distance.cosine', 'spatial.distance.cosine', (['(m2 + m3 - m4)', 'm1'], {}), '(m2 + m3 - m4, m1)\n', (1151, 1169), False, 'from scipy import spatial\n'), ((1230, 1267), 'scipy.spatial.distance.cosine', 'spatial.distance.cosine', (['diff1', 'diff2'], {}), '(diff1, diff2)\n', (1253, 1267), False, 'from scipy import spatial\n'), ((1387, 1424), 'scipy.spatial.distance.cosine', 'spatial.distance.cosine', (['miff1', 'miff2'], {}), '(miff1, miff2)\n', (1410, 1424), False, 'from scipy import spatial\n'), ((1295, 1321), 'numpy.linalg.norm', 'linalg.norm', (['(diff1 - diff2)'], {}), '(diff1 - diff2)\n', (1306, 1321), False, 'from numpy import linalg\n'), ((1452, 1478), 'numpy.linalg.norm', 'linalg.norm', (['(miff1 - miff2)'], {}), '(miff1 - miff2)\n', (1463, 1478), False, 'from numpy import linalg\n'), ((1321, 1339), 'numpy.linalg.norm', 'linalg.norm', (['diff1'], {}), '(diff1)\n', (1332, 1339), False, 'from numpy import linalg\n'), ((1340, 1358), 'numpy.linalg.norm', 'linalg.norm', (['diff2'], {}), '(diff2)\n', (1351, 1358), False, 'from numpy import linalg\n'), ((1478, 1496), 'numpy.linalg.norm', 'linalg.norm', (['miff1'], {}), '(miff1)\n', (1489, 1496), False, 'from numpy import linalg\n'), ((1497, 1515), 'numpy.linalg.norm', 'linalg.norm', (['miff2'], {}), '(miff2)\n', (1508, 1515), False, 'from numpy import linalg\n')]
|
import numpy as np
import scipy as sp
import scipy.sparse.linalg as splinalg
def eig2_nL(g, tol_eigs = 1.0e-6, normalize:bool = True, dim:int=1):
"""
DESCRIPTION
-----------
    Computes the eigenvector that corresponds to the second smallest eigenvalue
    of the normalized Laplacian matrix; a sweep cut can then be used to round the solution.
PARAMETERS (mandatory)
----------------------
g: graph object
PARAMETERS (optional)
---------------------
dim: positive, int
default == 1
The number of eigenvectors or dimensions to compute.
tol_eigs: positive float, double
default == 1.0e-6
Tolerance for computation of the eigenvector that corresponds to
the second smallest eigenvalue of the normalized Laplacian matrix.
normalize: bool,
default == True
True if we should return the eigenvectors of the generalized
eigenvalue problem associated with the normalized Laplacian.
This should be on unless you know what you are doing.
RETURNS
------
    p: Eigenvector or eigenvector matrix that
       corresponds to the second smallest eigenvalue of the
       normalized Laplacian matrix, plus additional eigenvectors when dim > 1.
"""
n = g.adjacency_matrix.shape[0]
D_sqrt_neg = sp.sparse.spdiags(g.dn_sqrt.transpose(), 0, n, n)
L = sp.sparse.identity(n) - D_sqrt_neg.dot((g.adjacency_matrix.dot(D_sqrt_neg)))
emb_eig_val, p = splinalg.eigsh(L, which='SM', k=1+dim, tol = tol_eigs)
F = np.real(p[:,1:])
if normalize:
F *= g.dn_sqrt[:,np.newaxis]
return F, emb_eig_val
"""
Random walks and local cuts in graphs, Chung, LAA 2007
We just form the sub-matrix of the Laplacian and use the eigenvector there.
"""
def eig2nL_subgraph(g, ref_nodes, tol_eigs = 1.0e-6, normalize: bool = True):
A_sub = g.adjacency_matrix.tocsr()[ref_nodes, :].tocsc()[:, ref_nodes]
nref = len(ref_nodes)
D_sqrt_neg = sp.sparse.spdiags(g.dn_sqrt[ref_nodes].transpose(), 0, nref, nref)
L_sub = sp.sparse.identity(nref) - D_sqrt_neg.dot((A_sub.dot(D_sqrt_neg)))
emb_eig_val, emb_eig = splinalg.eigsh(L_sub, which='SM', k=1, tol=tol_eigs)
emb_eig *= -1 if max(emb_eig) < 0 else 1
f = emb_eig[:,0]
if normalize:
f *= g.dn_sqrt[ref_nodes]
return ((ref_nodes,f), emb_eig_val)
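# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# eig2_nL only needs an object exposing `adjacency_matrix` (an n x n scipy sparse
# matrix) and `dn_sqrt` (elementwise 1/sqrt(degree)); the small path graph below is
# a made-up stand-in for the real graph class.
if __name__ == "__main__":
    from types import SimpleNamespace
    n_demo = 6
    A = sp.sparse.lil_matrix((n_demo, n_demo))
    for u in range(n_demo - 1):
        A[u, u + 1] = 1.0
        A[u + 1, u] = 1.0
    A = A.tocsr()
    degrees = np.asarray(A.sum(axis=1)).ravel()
    g_demo = SimpleNamespace(adjacency_matrix=A, dn_sqrt=1.0 / np.sqrt(degrees))
    F, eig_vals = eig2_nL(g_demo)
    print(F.shape, eig_vals)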
|
[
"numpy.real",
"scipy.sparse.identity",
"scipy.sparse.linalg.eigsh"
] |
[((1613, 1667), 'scipy.sparse.linalg.eigsh', 'splinalg.eigsh', (['L'], {'which': '"""SM"""', 'k': '(1 + dim)', 'tol': 'tol_eigs'}), "(L, which='SM', k=1 + dim, tol=tol_eigs)\n", (1627, 1667), True, 'import scipy.sparse.linalg as splinalg\n'), ((1677, 1694), 'numpy.real', 'np.real', (['p[:, 1:]'], {}), '(p[:, 1:])\n', (1684, 1694), True, 'import numpy as np\n'), ((2286, 2338), 'scipy.sparse.linalg.eigsh', 'splinalg.eigsh', (['L_sub'], {'which': '"""SM"""', 'k': '(1)', 'tol': 'tol_eigs'}), "(L_sub, which='SM', k=1, tol=tol_eigs)\n", (2300, 2338), True, 'import scipy.sparse.linalg as splinalg\n'), ((1514, 1535), 'scipy.sparse.identity', 'sp.sparse.identity', (['n'], {}), '(n)\n', (1532, 1535), True, 'import scipy as sp\n'), ((2192, 2216), 'scipy.sparse.identity', 'sp.sparse.identity', (['nref'], {}), '(nref)\n', (2210, 2216), True, 'import scipy as sp\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import numpy as np
# Generic data augmentation
class Augmenter:
""" Generic data augmentation class with chained operations
"""
    def __init__(self, ops=None):
        # avoid a shared mutable default argument across instances
        if ops is None:
            ops = []
        if not isinstance(ops, list):
            print("Error: ops must be a list of functions")
            quit()
        self.ops = ops
def add(self, op):
self.ops.append(op)
def augment(self, img):
aug = img.copy()
for op in self.ops:
aug = op(aug)
return aug
def __call__(self, img):
return self.augment(img)
##########
# Images #
##########
def horizontal_flip(p=0.5):
def fc(img):
if random.random() < p:
return img[..., ::-1]
else:
return img
return fc
def vertical_flip(p=0.5):
def fc(img):
if random.random() < p:
return img[..., ::-1, :]
else:
return img
return fc
def gaussian_noise(p=0.5, mean=0, sigma=0.02):
def fc(img):
if random.random() < p:
gauss = np.random.normal(mean, sigma, img.shape).astype(np.float32)
return img + gauss
else:
return img
return fc
def black_vstripe(p=0.5, size=10):
def fc(img):
if random.random() < p:
j = int(random.random() * (img.shape[1]-size))
img[..., j:j+size] = 0
return img
else:
return img
return fc
def black_hstripe(p=0.5, size=10):
def fc(img):
if random.random() < p:
j = int(random.random() * (img.shape[0]-size))
img[..., j:j+size, :] = 0
return img
else:
return img
return fc
def default_augmenter(p=0.5, strip_size=3, mean=0, sigma=0.02):
"""Default data augmentation with horizontal flip, vertical flip, gaussian noise, black hstripe, and black vstripe.
Returns:
Augmenter object. Use as: aug.augment(img)
"""
print("Using default image augmenter")
return Augmenter([ horizontal_flip(p), gaussian_noise(p, mean, sigma), black_hstripe(p, size=strip_size), black_vstripe(p, size=strip_size) ])
##########
# Videos #
##########
def horizontal_flip_vid(p=0.5):
def fc(vid):
if random.random() < p:
return vid[..., ::-1]
else:
return vid
return fc
def black_vstripe_vid(p=0.5, size=10):
def fc(batch):
if random.random() < p:
j = int(random.random() * (batch.shape[-1]-size))
batch[..., j:j+size] = 0
return batch
else:
return batch
return fc
def black_hstripe_vid(p=0.5, size=10):
def fc(batch):
if random.random() < p:
j = int(random.random() * (batch.shape[-2]-size))
batch[..., j:j+size, :] = 0
return batch
else:
return batch
return fc
def default_augmenter_vid(p=0.5, strip_size=3, mean=0, sigma=0.02):
"""Default data augmentation with horizontal flip, gaussian noise, black hstripe, and black vstripe.
Returns:
Augmenter object. Use as: aug.augment(img)
"""
return Augmenter([ horizontal_flip_vid(p), gaussian_noise(p, mean, sigma), black_hstripe_vid(p, size=strip_size), black_vstripe_vid(p, size=strip_size) ])
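# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Applies the default augmenters to a random (H, W) image and a random (T, H, W)
# clip; the shapes are preserved, only the contents are perturbed.
if __name__ == "__main__":
    aug_img = default_augmenter(p=0.5)
    demo_img = np.random.rand(64, 64).astype(np.float32)
    print(aug_img(demo_img).shape)
    aug_vid = default_augmenter_vid(p=0.5)
    demo_vid = np.random.rand(8, 64, 64).astype(np.float32)
    print(aug_vid(demo_vid).shape)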
|
[
"numpy.random.normal",
"random.random"
] |
[((712, 727), 'random.random', 'random.random', ([], {}), '()\n', (725, 727), False, 'import random\n'), ((873, 888), 'random.random', 'random.random', ([], {}), '()\n', (886, 888), False, 'import random\n'), ((1058, 1073), 'random.random', 'random.random', ([], {}), '()\n', (1071, 1073), False, 'import random\n'), ((1305, 1320), 'random.random', 'random.random', ([], {}), '()\n', (1318, 1320), False, 'import random\n'), ((1558, 1573), 'random.random', 'random.random', ([], {}), '()\n', (1571, 1573), False, 'import random\n'), ((2299, 2314), 'random.random', 'random.random', ([], {}), '()\n', (2312, 2314), False, 'import random\n'), ((2475, 2490), 'random.random', 'random.random', ([], {}), '()\n', (2488, 2490), False, 'import random\n'), ((2743, 2758), 'random.random', 'random.random', ([], {}), '()\n', (2756, 2758), False, 'import random\n'), ((1099, 1139), 'numpy.random.normal', 'np.random.normal', (['mean', 'sigma', 'img.shape'], {}), '(mean, sigma, img.shape)\n', (1115, 1139), True, 'import numpy as np\n'), ((1346, 1361), 'random.random', 'random.random', ([], {}), '()\n', (1359, 1361), False, 'import random\n'), ((1599, 1614), 'random.random', 'random.random', ([], {}), '()\n', (1612, 1614), False, 'import random\n'), ((2516, 2531), 'random.random', 'random.random', ([], {}), '()\n', (2529, 2531), False, 'import random\n'), ((2784, 2799), 'random.random', 'random.random', ([], {}), '()\n', (2797, 2799), False, 'import random\n')]
|
import numpy as np
def rot_to_angle(rot):
return np.arccos(0.5*np.trace(rot)-0.5)
def rot_to_heading(rot):
# This function calculates the heading angle of the rot matrix w.r.t. the y-axis
new_rot = rot[0:3:2, 0:3:2] # remove the mid row and column corresponding to the y-axis
new_rot = new_rot/np.linalg.det(new_rot)
return np.arctan2(new_rot[1, 0], new_rot[0, 0])
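# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# For a pure rotation about the y-axis by theta, rot_to_angle recovers theta via
# trace(R) = 1 + 2*cos(theta); rot_to_heading recovers it up to a sign that depends
# on the axis convention.
if __name__ == "__main__":
    theta = 0.3
    c, s = np.cos(theta), np.sin(theta)
    rot_y = np.array([[c, 0.0, s],
                      [0.0, 1.0, 0.0],
                      [-s, 0.0, c]])
    print(rot_to_angle(rot_y))    # ~0.3
    print(rot_to_heading(rot_y))  # ~0.3 in magnitude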
|
[
"numpy.trace",
"numpy.arctan2",
"numpy.linalg.det"
] |
[((349, 389), 'numpy.arctan2', 'np.arctan2', (['new_rot[1, 0]', 'new_rot[0, 0]'], {}), '(new_rot[1, 0], new_rot[0, 0])\n', (359, 389), True, 'import numpy as np\n'), ((315, 337), 'numpy.linalg.det', 'np.linalg.det', (['new_rot'], {}), '(new_rot)\n', (328, 337), True, 'import numpy as np\n'), ((69, 82), 'numpy.trace', 'np.trace', (['rot'], {}), '(rot)\n', (77, 82), True, 'import numpy as np\n')]
|
import logging
logger = logging.getLogger(__name__)
import random
import chainercv
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # NOQA
from pose.hand_dataset.geometry_utils import normalize_joint_zyx
from pose.hand_dataset.image_utils import normalize_depth
# Decimal Code (R,G,B)
BASE_COLOR = {
"RED": (255, 0, 0),
"GREEN": (0, 255, 0),
"BLUE": (0, 0, 255),
"YELLOW": (255, 255, 0),
"CYAN": (0, 255, 255),
"MAGENTA": (255, 0, 255),
}
def vis_image(img, ax=None):
"""
extend chainercv.visualizations.vis_image
"""
C, H, W = img.shape
if C == 1:
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
        # remove channel dimension
ax.imshow(img.squeeze())
else:
ax = chainercv.visualizations.vis_image(img, ax)
return ax
def preprocess(point, ax, img):
input_point = np.asarray(point)
if input_point.ndim == 2:
input_point = np.expand_dims(point, axis=0)
H, W = None, None
if ax is None:
fig = plt.figure()
if input_point.shape[-1] == 3:
ax = fig.add_subplot(1, 1, 1, projection="3d")
else:
ax = fig.add_subplot(1, 1, 1)
if img is not None:
ax = vis_image(img, ax=ax)
_, H, W = img.shape
return input_point, ax, H, W
def vis_point(point, img=None, color=None, ax=None):
"""
Visualize points in an image, customized to our purpose.
Base implementation is taken from chainercv.visualizations.vis_image
"""
point, ax, H, W = preprocess(point, ax, img)
n_inst = len(point)
c = np.asarray(color) / 255. if color is not None else None
for i in range(n_inst):
# note that the shape of `point[i]` is (K,N) and the format of one is (y, x), (z,y,x).
# (K, N) -> (N, K)
pts = point[i].transpose() # (K,N) -> (N,K)
# resort coordinate order : yx -> xy or zyx -> xyz
pts = pts[::-1]
ax.scatter(*pts, c=c)
if W is not None:
ax.set_xlim(left=0, right=W)
if H is not None:
ax.set_ylim(bottom=H - 1, top=0)
return ax
def vis_edge(point, indices, img=None, color=None, ax=None):
"""
Visualize edges in an image
"""
point, ax, H, W = preprocess(point, ax, img)
n_inst = len(point)
if color is not None:
color = np.asarray(color) / 255.
else:
color = [None] * len(indices)
for i in range(n_inst):
# note that the shape of `point[i]` is (K,N) and the format of one is (y, x) or (z,y,x).
pts = point[i]
for ((s, t), c) in zip(indices, color):
            # Select the pair of points (start, target) that make up the edge.
# Note that [::-1] does resort coordinate order: yx -> xy or zyx -> xyz
edge = pts[[s, t]].transpose()
edge = edge[::-1]
ax.plot(*edge, c=c)
if W is not None:
ax.set_xlim(left=0, right=W)
if H is not None:
ax.set_ylim(bottom=H - 1, top=0)
return ax
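# --- Hedged note (added for illustration; not part of the original module) ---
# vis_point/vis_edge expect `point` shaped (n_instances, K, N), where each of the
# K rows of point[i] is one keypoint in (y, x) or (z, y, x) order; e.g. a single
# instance with three 2D keypoints could be passed as
#   np.array([[[10, 40], [20, 50], [30, 60]]])  # shape (1, 3, 2), rows are (y, x)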
def vis_pose(point, indices, img=None, point_color=None, edge_color=None, ax=None):
ax = vis_point(point, img=img, color=point_color, ax=ax)
vis_edge(point, indices, img=img, color=edge_color, ax=ax)
def visualize_both(dataset, keypoint_names, edges, color_map, normalize=False):
import random
idx = random.randint(0, len(dataset) - 1)
logger.info("get example")
example = dataset.get_example(idx)
logger.info("Done get example")
fig = plt.figure(figsize=(8, 8))
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223, projection="3d")
ax4 = fig.add_subplot(224, projection="3d")
color = [color_map[k] for k in keypoint_names]
edge_color = [color_map[s, t] for s, t in edges]
depth = example["depth"].astype(np.float32)
depth_joint = example["depth_joint"]
depth_camera = example["depth_camera"]
depth_vu, depth_z = depth_camera.zyx2vu(depth_joint, return_z=True)
z_size = example["param"]["z_size"]
if normalize:
depth = normalize_depth(depth, z_com=depth_z.mean(), z_size=z_size)
depth_joint = normalize_joint_zyx(depth_joint, depth_camera, z_size)
rgb = example["rgb"]
rgb_joint = example["rgb_joint"]
rgb_camera = example["rgb_camera"]
rgb_vu = rgb_camera.zyx2vu(rgb_joint)
rgb_joint = normalize_joint_zyx(rgb_joint, rgb_camera, z_size)
print(example["param"])
vis_point(rgb_vu, img=rgb, color=color, ax=ax1)
vis_edge(rgb_vu, indices=edges, color=edge_color, ax=ax1)
vis_point(rgb_joint, color=color, ax=ax3)
vis_edge(rgb_joint, indices=edges, color=edge_color, ax=ax3)
vis_point(depth_vu, img=depth, color=color, ax=ax2)
vis_edge(depth_vu, indices=edges, color=edge_color, ax=ax2)
vis_point(depth_joint, color=color, ax=ax4)
vis_edge(depth_joint, indices=edges, color=edge_color, ax=ax4)
for ax in [ax3, ax4]:
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax.view_init(-65, -90)
plt.savefig("output.png")
plt.show()
def visualize_rgb(dataset, keypoint_names, edges, color_map, idx=None):
import random
if idx is None:
idx = random.randint(0, len(dataset) - 1)
logger.info("get example")
example = dataset.get_example(idx)
logger.info("Done get example")
fig = plt.figure(figsize=(5, 10))
ax1 = fig.add_subplot(211)
ax3 = fig.add_subplot(212, projection="3d")
color = [color_map[k] for k in keypoint_names]
edge_color = [color_map[s, t] for s, t in edges]
rgb = example["rgb"]
rgb_joint = example["rgb_joint"]
rgb_camera = example["rgb_camera"]
rgb_vu = rgb_camera.zyx2vu(rgb_joint)
vis_point(rgb_vu, img=rgb, color=color, ax=ax1)
vis_edge(rgb_vu, indices=edges, color=edge_color, ax=ax1)
vis_point(rgb_joint, color=color, ax=ax3)
vis_edge(rgb_joint, indices=edges, color=edge_color, ax=ax3)
for ax in [ax3]:
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax.view_init(-65, -90)
plt.savefig("output.png")
plt.show()
def visualize_depth(dataset, keypoint_names, edges, color_map, normalize=False):
idx = random.randint(0, len(dataset) - 1)
logger.info("get example")
example = dataset.get_example(idx)
logger.info("Done get example")
fig = plt.figure(figsize=(5, 10))
ax2 = fig.add_subplot(211)
ax4 = fig.add_subplot(212, projection="3d")
color = [color_map[k] for k in keypoint_names]
edge_color = [color_map[s, t] for s, t in edges]
depth = example["depth"].astype(np.float32)
depth_joint = example["depth_joint"]
depth_camera = example["depth_camera"]
depth_vu, depth_z = depth_camera.zyx2vu(depth_joint, return_z=True)
z_size = example["param"]["z_size"]
if normalize:
depth = normalize_depth(depth, z_com=depth_z.mean(), z_size=z_size)
depth_joint = normalize_joint_zyx(depth_joint, depth_camera, z_size)
print(example["param"])
vis_point(depth_vu, img=depth, color=color, ax=ax2)
vis_edge(depth_vu, indices=edges, color=edge_color, ax=ax2)
vis_point(depth_joint, color=color, ax=ax4)
vis_edge(depth_joint, indices=edges, color=edge_color, ax=ax4)
for ax in [ax4]:
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax.view_init(-65, -90)
plt.savefig("output.png")
plt.show()
|
[
"logging.getLogger",
"chainercv.visualizations.vis_image",
"matplotlib.pyplot.savefig",
"numpy.asarray",
"matplotlib.pyplot.figure",
"pose.hand_dataset.geometry_utils.normalize_joint_zyx",
"numpy.expand_dims",
"matplotlib.pyplot.show"
] |
[((25, 52), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (42, 52), False, 'import logging\n'), ((946, 963), 'numpy.asarray', 'np.asarray', (['point'], {}), '(point)\n', (956, 963), True, 'import numpy as np\n'), ((3562, 3588), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (3572, 3588), True, 'from matplotlib import pyplot as plt\n'), ((4427, 4477), 'pose.hand_dataset.geometry_utils.normalize_joint_zyx', 'normalize_joint_zyx', (['rgb_joint', 'rgb_camera', 'z_size'], {}), '(rgb_joint, rgb_camera, z_size)\n', (4446, 4477), False, 'from pose.hand_dataset.geometry_utils import normalize_joint_zyx\n'), ((5115, 5140), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""output.png"""'], {}), "('output.png')\n", (5126, 5140), True, 'from matplotlib import pyplot as plt\n'), ((5145, 5155), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5153, 5155), True, 'from matplotlib import pyplot as plt\n'), ((5434, 5461), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 10)'}), '(figsize=(5, 10))\n', (5444, 5461), True, 'from matplotlib import pyplot as plt\n'), ((6155, 6180), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""output.png"""'], {}), "('output.png')\n", (6166, 6180), True, 'from matplotlib import pyplot as plt\n'), ((6185, 6195), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6193, 6195), True, 'from matplotlib import pyplot as plt\n'), ((6441, 6468), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 10)'}), '(figsize=(5, 10))\n', (6451, 6468), True, 'from matplotlib import pyplot as plt\n'), ((7009, 7063), 'pose.hand_dataset.geometry_utils.normalize_joint_zyx', 'normalize_joint_zyx', (['depth_joint', 'depth_camera', 'z_size'], {}), '(depth_joint, depth_camera, z_size)\n', (7028, 7063), False, 'from pose.hand_dataset.geometry_utils import normalize_joint_zyx\n'), ((7468, 7493), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""output.png"""'], {}), "('output.png')\n", (7479, 7493), True, 'from matplotlib import pyplot as plt\n'), ((7498, 7508), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7506, 7508), True, 'from matplotlib import pyplot as plt\n'), ((836, 879), 'chainercv.visualizations.vis_image', 'chainercv.visualizations.vis_image', (['img', 'ax'], {}), '(img, ax)\n', (870, 879), False, 'import chainercv\n'), ((1017, 1046), 'numpy.expand_dims', 'np.expand_dims', (['point'], {'axis': '(0)'}), '(point, axis=0)\n', (1031, 1046), True, 'import numpy as np\n'), ((1102, 1114), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1112, 1114), True, 'from matplotlib import pyplot as plt\n'), ((4212, 4266), 'pose.hand_dataset.geometry_utils.normalize_joint_zyx', 'normalize_joint_zyx', (['depth_joint', 'depth_camera', 'z_size'], {}), '(depth_joint, depth_camera, z_size)\n', (4231, 4266), False, 'from pose.hand_dataset.geometry_utils import normalize_joint_zyx\n'), ((689, 701), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (699, 701), True, 'from matplotlib import pyplot as plt\n'), ((1675, 1692), 'numpy.asarray', 'np.asarray', (['color'], {}), '(color)\n', (1685, 1692), True, 'import numpy as np\n'), ((2409, 2426), 'numpy.asarray', 'np.asarray', (['color'], {}), '(color)\n', (2419, 2426), True, 'import numpy as np\n')]
|
from typing import List, Tuple, Union
import numpy as np
import scipy.special
from PIL import Image, ImageFilter
class RandomBetaMorphology:
def __init__(
self, filter_size_min: int, filter_size_max: int, alpha: float, beta: float
) -> None:
assert filter_size_min % 2 != 0, "Filter size must be odd"
assert filter_size_max % 2 != 0, "Filter size must be odd"
self.filter_size_min = filter_size_min
self.filter_size_max = filter_size_max
self.alpha = alpha
self.beta = beta
self.filter_sizes, self.filter_probs = self._create_filter_distribution(
filter_size_min, filter_size_max, alpha, beta
)
@staticmethod
def _create_filter_distribution(
filter_size_min: int, filter_size_max: int, alpha: float, beta: float
) -> Tuple[List[int], Union[List[float], np.ndarray]]:
n = (filter_size_max - filter_size_min) // 2 + 1
if n < 2:
return [filter_size_min], np.asarray([1.0], dtype=np.float32)
filter_sizes = []
filter_probs = []
for k in range(n):
filter_sizes.append(filter_size_min + 2 * k)
filter_probs.append(
scipy.special.comb(n, k) * scipy.special.beta(alpha + k, n - k + beta)
)
np_filter_probs = np.asarray(filter_probs, dtype=np.float32)
        np_filter_probs = np_filter_probs / np_filter_probs.sum()
return filter_sizes, np_filter_probs
def sample_filter_size(self):
filter_size = np.random.choice(self.filter_sizes, p=self.filter_probs)
return filter_size
def __call__(self, *args, **kwargs):
        raise NotImplementedError
def __repr__(self) -> str:
return (
f"vision.{self.__class__.__name__}("
f"filter_size_min={self.filter_size_min}, "
f"filter_size_max={self.filter_size_max}, "
f"alpha={self.alpha}, beta={self.beta})"
)
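# --- Hedged note (added for illustration; not part of the original class) ---
# _create_filter_distribution weights the odd sizes {min, min+2, ..., max} by
# comb(n, k) * B(alpha + k, beta + n - k), i.e. proportionally to a beta-binomial
# pmf over k, then normalizes; sample_filter_size draws one size from that pmf.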
class Dilate(RandomBetaMorphology):
def __init__(
self,
filter_size_min: int = 3,
filter_size_max: int = 7,
alpha: float = 1,
beta: float = 3,
) -> None:
super().__init__(filter_size_min, filter_size_max, alpha, beta)
def __call__(self, img: Image) -> Image:
filter_size = self.sample_filter_size()
return img.filter(ImageFilter.MaxFilter(filter_size))
class Erode(RandomBetaMorphology):
def __init__(
self,
filter_size_min: int = 3,
filter_size_max: int = 5,
alpha: float = 1,
beta: float = 3,
) -> None:
super().__init__(filter_size_min, filter_size_max, alpha, beta)
def __call__(self, img: Image) -> Image:
filter_size = self.sample_filter_size()
return img.filter(ImageFilter.MinFilter(filter_size))
if __name__ == "__main__":
import argparse
from PIL import ImageOps
parser = argparse.ArgumentParser()
parser.add_argument("--operation", choices=("dilate", "erode"), default="dilate")
parser.add_argument("images", type=argparse.FileType("rb"), nargs="+")
args = parser.parse_args()
transformer = Dilate() if args.operation == "dilate" else Erode()
for f in args.images:
x = Image.open(f, "r").convert("L")
x = ImageOps.invert(x)
y = transformer(x)
w, h = x.size
z = Image.new("L", (w, 2 * h))
z.paste(x, (0, 0))
z.paste(y, (0, h))
z = z.resize(size=(w // 2, h), resample=Image.BICUBIC)
z.show()
input()
|
[
"argparse.FileType",
"PIL.Image.open",
"argparse.ArgumentParser",
"numpy.random.choice",
"PIL.Image.new",
"PIL.ImageFilter.MinFilter",
"numpy.asarray",
"PIL.ImageOps.invert",
"PIL.ImageFilter.MaxFilter"
] |
[((2926, 2951), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2949, 2951), False, 'import argparse\n'), ((1328, 1370), 'numpy.asarray', 'np.asarray', (['filter_probs'], {'dtype': 'np.float32'}), '(filter_probs, dtype=np.float32)\n', (1338, 1370), True, 'import numpy as np\n'), ((1536, 1592), 'numpy.random.choice', 'np.random.choice', (['self.filter_sizes'], {'p': 'self.filter_probs'}), '(self.filter_sizes, p=self.filter_probs)\n', (1552, 1592), True, 'import numpy as np\n'), ((3298, 3316), 'PIL.ImageOps.invert', 'ImageOps.invert', (['x'], {}), '(x)\n', (3313, 3316), False, 'from PIL import ImageOps\n'), ((3379, 3405), 'PIL.Image.new', 'Image.new', (['"""L"""', '(w, 2 * h)'], {}), "('L', (w, 2 * h))\n", (3388, 3405), False, 'from PIL import Image, ImageFilter\n'), ((2366, 2400), 'PIL.ImageFilter.MaxFilter', 'ImageFilter.MaxFilter', (['filter_size'], {}), '(filter_size)\n', (2387, 2400), False, 'from PIL import Image, ImageFilter\n'), ((2797, 2831), 'PIL.ImageFilter.MinFilter', 'ImageFilter.MinFilter', (['filter_size'], {}), '(filter_size)\n', (2818, 2831), False, 'from PIL import Image, ImageFilter\n'), ((3077, 3100), 'argparse.FileType', 'argparse.FileType', (['"""rb"""'], {}), "('rb')\n", (3094, 3100), False, 'import argparse\n'), ((996, 1031), 'numpy.asarray', 'np.asarray', (['[1.0]'], {'dtype': 'np.float32'}), '([1.0], dtype=np.float32)\n', (1006, 1031), True, 'import numpy as np\n'), ((3254, 3272), 'PIL.Image.open', 'Image.open', (['f', '"""r"""'], {}), "(f, 'r')\n", (3264, 3272), False, 'from PIL import Image, ImageFilter\n')]
|
'''Analysis utility functions.
:Author: <NAME> <<EMAIL>>
:Date: 2016-03-26
:Copyright: 2016-2018, Karr Lab
:License: MIT
'''
# TODO(Arthur): IMPORTANT: refactor and replace
from matplotlib import pyplot
from matplotlib import ticker
from wc_lang import Model, Submodel
from scipy.constants import Avogadro
import numpy as np
import re
def plot(model, time = np.zeros(0),
species_counts = None, volume = np.zeros(0), extracellular_volume = np.zeros(0),
selected_species_compartments = [],
yDatas = {},
units = 'mM', title = '', fileName = ''):
#convert time to hours
time = time.copy() / 3600
#create figure
fig = pyplot.figure()
#extract data to plot
if not yDatas:
yDatas = {}
for species_compartment_id in selected_species_compartments:
#extract data
match = re.match('^(?P<speciesId>[a-z0-9\-_]+)\[(?P<compartmentId>[a-z0-9\-_]+)\]$',
species_compartment_id, re.I).groupdict()
speciesId = match['speciesId']
compartmentId = match['compartmentId']
if isinstance(model, Model):
species = model.get_component_by_id(speciesId, 'species')
compartment = model.get_component_by_id(compartmentId, 'compartments')
yData = species_counts[species.index, compartment.index, :]
elif isinstance(model, Submodel):
yData = species_counts[species_compartment_id]
else:
raise Exception('Invalid model type %s' % model.__class__.__name__)
#scale
if compartmentId == 'c':
V = volume
else:
V = extracellular_volume
if units == 'pM':
scale = 1 / Avogadro / V * 1e12
elif units == 'nM':
scale = 1 / Avogadro / V * 1e9
elif units == 'uM':
scale = 1 / Avogadro / V * 1e6
elif units == 'mM':
scale = 1 / Avogadro / V * 1e3
elif units == 'M':
scale = 1 / Avogadro / V * 1e0
elif units == 'molecules':
scale = 1
else:
raise Exception('Invalid units "%s"' % units)
            yData = yData * scale  # avoid mutating species_counts in place
yDatas[species_compartment_id] = yData
#plot results
yMin = 1e12
yMax = -1e12
for label, yData in yDatas.items():
#update range
yMin = min(yMin, np.min(yData))
yMax = max(yMax, np.max(yData))
#add to plot
pyplot.plot(time, yData, label=label)
#set axis limits
pyplot.xlim((0, time[-1]))
pyplot.ylim((yMin, yMax))
#add axis labels and legend
if title:
pyplot.title(title)
pyplot.xlabel('Time (h)')
if units == 'molecules':
pyplot.ylabel('Copy number')
else:
pyplot.ylabel('Concentration (%s)' % units)
y_formatter = ticker.ScalarFormatter(useOffset=False)
pyplot.gca().get_yaxis().set_major_formatter(y_formatter)
if len(selected_species_compartments) > 1:
pyplot.legend()
#save
if fileName:
fig.savefig(fileName)
pyplot.close(fig)
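# --- Hedged note (added for illustration; not part of the original module) ---
# The scaling above is concentration = copies / (Avogadro * V), with the
# 1e3/1e6/1e9/1e12 factors converting M to mM/uM/nM/pM; e.g. a hypothetical single
# copy in a 1e-15 L volume is 1 / (6.022e23 * 1e-15) ~= 1.7e-9 M ~= 1.7 nM.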
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"re.match",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.ticker.ScalarFormatter",
"numpy.min",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.legend"
] |
[((362, 373), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (370, 373), True, 'import numpy as np\n'), ((411, 422), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (419, 422), True, 'import numpy as np\n'), ((447, 458), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (455, 458), True, 'import numpy as np\n'), ((651, 666), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (664, 666), False, 'from matplotlib import pyplot\n'), ((2617, 2643), 'matplotlib.pyplot.xlim', 'pyplot.xlim', (['(0, time[-1])'], {}), '((0, time[-1]))\n', (2628, 2643), False, 'from matplotlib import pyplot\n'), ((2648, 2673), 'matplotlib.pyplot.ylim', 'pyplot.ylim', (['(yMin, yMax)'], {}), '((yMin, yMax))\n', (2659, 2673), False, 'from matplotlib import pyplot\n'), ((2754, 2779), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Time (h)"""'], {}), "('Time (h)')\n", (2767, 2779), False, 'from matplotlib import pyplot\n'), ((2928, 2967), 'matplotlib.ticker.ScalarFormatter', 'ticker.ScalarFormatter', ([], {'useOffset': '(False)'}), '(useOffset=False)\n', (2950, 2967), False, 'from matplotlib import ticker\n'), ((2553, 2590), 'matplotlib.pyplot.plot', 'pyplot.plot', (['time', 'yData'], {'label': 'label'}), '(time, yData, label=label)\n', (2564, 2590), False, 'from matplotlib import pyplot\n'), ((2729, 2748), 'matplotlib.pyplot.title', 'pyplot.title', (['title'], {}), '(title)\n', (2741, 2748), False, 'from matplotlib import pyplot\n'), ((2818, 2846), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""Copy number"""'], {}), "('Copy number')\n", (2831, 2846), False, 'from matplotlib import pyplot\n'), ((2865, 2908), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (["('Concentration (%s)' % units)"], {}), "('Concentration (%s)' % units)\n", (2878, 2908), False, 'from matplotlib import pyplot\n'), ((3086, 3101), 'matplotlib.pyplot.legend', 'pyplot.legend', ([], {}), '()\n', (3099, 3101), False, 'from matplotlib import pyplot\n'), ((3168, 3185), 'matplotlib.pyplot.close', 'pyplot.close', (['fig'], {}), '(fig)\n', (3180, 3185), False, 'from matplotlib import pyplot\n'), ((2468, 2481), 'numpy.min', 'np.min', (['yData'], {}), '(yData)\n', (2474, 2481), True, 'import numpy as np\n'), ((2508, 2521), 'numpy.max', 'np.max', (['yData'], {}), '(yData)\n', (2514, 2521), True, 'import numpy as np\n'), ((848, 963), 're.match', 're.match', (['"""^(?P<speciesId>[a-z0-9\\\\-_]+)\\\\[(?P<compartmentId>[a-z0-9\\\\-_]+)\\\\]$"""', 'species_compartment_id', 're.I'], {}), "('^(?P<speciesId>[a-z0-9\\\\-_]+)\\\\[(?P<compartmentId>[a-z0-9\\\\-_]+)\\\\]$'\n , species_compartment_id, re.I)\n", (856, 963), False, 'import re\n'), ((2972, 2984), 'matplotlib.pyplot.gca', 'pyplot.gca', ([], {}), '()\n', (2982, 2984), False, 'from matplotlib import pyplot\n')]
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseTensorsMap."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
# pylint: disable=protected-access
add_sparse_to_tensors_map = sparse_ops._add_sparse_to_tensors_map
add_many_sparse_to_tensors_map = sparse_ops._add_many_sparse_to_tensors_map
take_many_sparse_from_tensors_map = (
sparse_ops._take_many_sparse_from_tensors_map)
# pylint: enable=protected-access
class SparseTensorsMapTest(test.TestCase):
def _SparseTensorPlaceholder(self, dtype=None):
if dtype is None:
dtype = dtypes.int32
return sparse_tensor_lib.SparseTensor(
array_ops.placeholder(dtypes.int64),
array_ops.placeholder(dtype), array_ops.placeholder(dtypes.int64))
def _SparseTensorValue_5x6(self, permutation):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2],
[3, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)
ind = ind[permutation]
val = val[permutation]
shape = np.array([5, 6]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_3x4(self, permutation):
ind = np.array([[0, 0], [1, 0], [1, 2], [1, 3], [2, 2],
[2, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)
ind = ind[permutation]
val = val[permutation]
shape = np.array([3, 4]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_1x1x1(self):
ind = np.array([[0, 0, 0]]).astype(np.int64)
val = np.array([0]).astype(np.int32)
shape = np.array([3, 4, 5]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def testAddTakeMany(self):
with self.session(graph=ops.Graph(), use_gpu=False) as sess:
sp_input0 = self._SparseTensorValue_5x6(np.arange(6))
sp_input1 = self._SparseTensorValue_3x4(np.arange(6))
handle0 = add_sparse_to_tensors_map(sp_input0, shared_name="a")
handle1 = add_sparse_to_tensors_map(sp_input1, shared_name="a")
self.assertEqual(handle0.get_shape(), ())
handles_concat = array_ops.stack([handle0, handle1])
sp_out = take_many_sparse_from_tensors_map(
sparse_map_op=handle0.op, sparse_handles=handles_concat)
combined_indices, combined_values, combined_shape = self.evaluate(sp_out)
self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0
self.assertAllEqual(combined_indices[:6, 1:], sp_input0[0])
self.assertAllEqual(combined_indices[6:, 0], [1] * 6) # minibatch 1
self.assertAllEqual(combined_indices[6:, 1:], sp_input1[0])
self.assertAllEqual(combined_values[:6], sp_input0[1])
self.assertAllEqual(combined_values[6:], sp_input1[1])
self.assertAllEqual(combined_shape, [2, 5, 6])
def testFeedAddTakeMany(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_3x4(np.arange(6))
handle = add_sparse_to_tensors_map(sp_input)
handle0_value = sess.run(handle, feed_dict={sp_input: input0_val})
handle1_value = sess.run(handle, feed_dict={sp_input: input1_val})
sparse_handles = ops.convert_to_tensor(
[handle0_value, handle1_value], dtype=dtypes.int64)
sp_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handle.op, sparse_handles=sparse_handles)
combined_indices, combined_values, combined_shape = self.evaluate(
sp_roundtrip)
self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0
self.assertAllEqual(combined_indices[:6, 1:], input0_val[0])
self.assertAllEqual(combined_indices[6:, 0], [1] * 6) # minibatch 1
self.assertAllEqual(combined_indices[6:, 1:], input1_val[0])
self.assertAllEqual(combined_values[:6], input0_val[1])
self.assertAllEqual(combined_values[6:], input1_val[1])
self.assertAllEqual(combined_shape, [2, 5, 6])
def testAddManyTakeManyRoundTrip(self):
with self.session(use_gpu=False) as sess:
# N == 4 because shape_value == [4, 5]
indices_value = np.array([[0, 0], [0, 1], [2, 0]], dtype=np.int64)
values_value = np.array([b"a", b"b", b"c"])
shape_value = np.array([4, 5], dtype=np.int64)
sparse_tensor = self._SparseTensorPlaceholder(dtype=dtypes.string)
handles = add_many_sparse_to_tensors_map(sparse_tensor)
roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handles.op, sparse_handles=handles)
handles_value, roundtrip_value = sess.run(
[handles, roundtrip],
feed_dict={
sparse_tensor.indices: indices_value,
sparse_tensor.values: values_value,
sparse_tensor.dense_shape: shape_value
})
self.assertEqual(handles_value.shape, (4,))
self.assertAllEqual(roundtrip_value.indices, indices_value)
self.assertAllEqual(roundtrip_value.values, values_value)
self.assertAllEqual(roundtrip_value.dense_shape, shape_value)
def testDeserializeFailsInconsistentRank(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_1x1x1()
handle = add_sparse_to_tensors_map(sp_input)
handle0_value = sess.run(handle, feed_dict={sp_input: input0_val})
handle1_value = sess.run(handle, feed_dict={sp_input: input1_val})
handle_concat = ops.convert_to_tensor(
[handle0_value, handle1_value], dtype=dtypes.int64)
sp_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handle.op, sparse_handles=handle_concat)
with self.assertRaisesOpError(
r"Inconsistent rank across SparseTensors: rank prior to "
r"SparseTensor\[1\] was: 3 but rank of SparseTensor\[1\] is: 4"):
self.evaluate(sp_roundtrip)
def testTakeManyFailsWrongInputOp(self):
with self.session(use_gpu=False) as sess:
input_val = self._SparseTensorValue_5x6(np.arange(6))
handle = add_sparse_to_tensors_map(input_val)
handle_value = self.evaluate(handle)
bad_handle = handle_value + 10
sp_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handle.op, sparse_handles=[handle_value, bad_handle])
with self.assertRaisesOpError(r"Unable to find SparseTensor: 10"):
self.evaluate(sp_roundtrip)
class BenchmarkSparseTensorsMapVsSerialization(test.Benchmark):
def benchmarkVeryLarge2DFloatSparseTensor(self):
np.random.seed(127)
num_elements = 10000
batch_size = 64
indices_batch = np.random.randint(
batch_size, size=num_elements, dtype=np.int64)
indices_value = np.arange(num_elements, dtype=np.int64)
indices = np.asarray(
sorted(zip(indices_batch, indices_value)), dtype=np.int64)
values = ["feature_value_for_embedding_lookup"] * num_elements
shape = np.asarray([batch_size, num_elements], dtype=np.int64)
with session.Session(config=benchmark.benchmark_config()) as sess:
with ops.device("/cpu:0"):
indices = variables.Variable(indices)
values = variables.Variable(values)
shape = variables.Variable(shape)
st = sparse_tensor_lib.SparseTensor(indices, values, shape)
st_handles = add_many_sparse_to_tensors_map(st)
st_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=st_handles.op, sparse_handles=st_handles)
st_roundtrip_op = st_roundtrip.values.op
st_serialized = sparse_ops.serialize_many_sparse(st)
st_deserialized = sparse_ops.deserialize_many_sparse(
st_serialized, dtype=values.dtype)
st_deserialized_op = st_deserialized.values.op
variables.global_variables_initializer().run()
st_roundtrip_values = self.evaluate(st_roundtrip)
st_deserialized_values = self.evaluate(st_deserialized)
np.testing.assert_equal(st_roundtrip_values.values,
st_deserialized_values.values)
np.testing.assert_equal(st_roundtrip_values.indices,
st_deserialized_values.indices)
np.testing.assert_equal(st_roundtrip_values.dense_shape,
st_deserialized_values.dense_shape)
self.run_op_benchmark(
sess,
st_roundtrip_op,
min_iters=2000,
name="benchmark_very_large_2d_float_st_tensor_maps")
self.run_op_benchmark(
sess,
st_deserialized_op,
min_iters=2000,
name="benchmark_very_large_2d_float_st_serialization")
if __name__ == "__main__":
test.main()
|
[
"tensorflow.python.ops.sparse_ops.serialize_many_sparse",
"numpy.testing.assert_equal",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.framework.sparse_tensor.SparseTensorValue",
"numpy.array",
"tensorflow.python.ops.variables.Variable",
"numpy.arange",
"tensorflow.python.ops.array_ops.placeholder",
"numpy.asarray",
"tensorflow.python.platform.benchmark.benchmark_config",
"numpy.random.seed",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.sparse_ops.deserialize_many_sparse",
"tensorflow.python.framework.sparse_tensor.SparseTensor",
"tensorflow.python.ops.array_ops.stack",
"numpy.random.randint",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.platform.test.main"
] |
[((10045, 10056), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (10054, 10056), False, 'from tensorflow.python.platform import test\n'), ((2231, 2283), 'tensorflow.python.framework.sparse_tensor.SparseTensorValue', 'sparse_tensor_lib.SparseTensorValue', (['ind', 'val', 'shape'], {}), '(ind, val, shape)\n', (2266, 2283), True, 'from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib\n'), ((2614, 2666), 'tensorflow.python.framework.sparse_tensor.SparseTensorValue', 'sparse_tensor_lib.SparseTensorValue', (['ind', 'val', 'shape'], {}), '(ind, val, shape)\n', (2649, 2666), True, 'from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib\n'), ((2856, 2908), 'tensorflow.python.framework.sparse_tensor.SparseTensorValue', 'sparse_tensor_lib.SparseTensorValue', (['ind', 'val', 'shape'], {}), '(ind, val, shape)\n', (2891, 2908), True, 'from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib\n'), ((7898, 7917), 'numpy.random.seed', 'np.random.seed', (['(127)'], {}), '(127)\n', (7912, 7917), True, 'import numpy as np\n'), ((7983, 8047), 'numpy.random.randint', 'np.random.randint', (['batch_size'], {'size': 'num_elements', 'dtype': 'np.int64'}), '(batch_size, size=num_elements, dtype=np.int64)\n', (8000, 8047), True, 'import numpy as np\n'), ((8077, 8116), 'numpy.arange', 'np.arange', (['num_elements'], {'dtype': 'np.int64'}), '(num_elements, dtype=np.int64)\n', (8086, 8116), True, 'import numpy as np\n'), ((8289, 8343), 'numpy.asarray', 'np.asarray', (['[batch_size, num_elements]'], {'dtype': 'np.int64'}), '([batch_size, num_elements], dtype=np.int64)\n', (8299, 8343), True, 'import numpy as np\n'), ((1789, 1824), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.int64'], {}), '(dtypes.int64)\n', (1810, 1824), False, 'from tensorflow.python.ops import array_ops\n'), ((1834, 1862), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtype'], {}), '(dtype)\n', (1855, 1862), False, 'from tensorflow.python.ops import array_ops\n'), ((1864, 1899), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.int64'], {}), '(dtypes.int64)\n', (1885, 1899), False, 'from tensorflow.python.ops import array_ops\n'), ((3335, 3370), 'tensorflow.python.ops.array_ops.stack', 'array_ops.stack', (['[handle0, handle1]'], {}), '([handle0, handle1])\n', (3350, 3370), False, 'from tensorflow.python.ops import array_ops\n'), ((4501, 4574), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['[handle0_value, handle1_value]'], {'dtype': 'dtypes.int64'}), '([handle0_value, handle1_value], dtype=dtypes.int64)\n', (4522, 4574), False, 'from tensorflow.python.framework import ops\n'), ((5425, 5475), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [2, 0]]'], {'dtype': 'np.int64'}), '([[0, 0], [0, 1], [2, 0]], dtype=np.int64)\n', (5433, 5475), True, 'import numpy as np\n'), ((5497, 5525), 'numpy.array', 'np.array', (["[b'a', b'b', b'c']"], {}), "([b'a', b'b', b'c'])\n", (5505, 5525), True, 'import numpy as np\n'), ((5546, 5578), 'numpy.array', 'np.array', (['[4, 5]'], {'dtype': 'np.int64'}), '([4, 5], dtype=np.int64)\n', (5554, 5578), True, 'import numpy as np\n'), ((6825, 6898), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['[handle0_value, handle1_value]'], {'dtype': 'dtypes.int64'}), '([handle0_value, handle1_value], dtype=dtypes.int64)\n', (6846, 6898), False, 'from tensorflow.python.framework import ops\n'), 
((1961, 2019), 'numpy.array', 'np.array', (['[[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]]'], {}), '([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])\n', (1969, 2019), True, 'import numpy as np\n'), ((2067, 2100), 'numpy.array', 'np.array', (['[0, 10, 13, 14, 32, 33]'], {}), '([0, 10, 13, 14, 32, 33])\n', (2075, 2100), True, 'import numpy as np\n'), ((2186, 2202), 'numpy.array', 'np.array', (['[5, 6]'], {}), '([5, 6])\n', (2194, 2202), True, 'import numpy as np\n'), ((2344, 2402), 'numpy.array', 'np.array', (['[[0, 0], [1, 0], [1, 2], [1, 3], [2, 2], [2, 3]]'], {}), '([[0, 0], [1, 0], [1, 2], [1, 3], [2, 2], [2, 3]])\n', (2352, 2402), True, 'import numpy as np\n'), ((2450, 2483), 'numpy.array', 'np.array', (['[0, 10, 13, 14, 32, 33]'], {}), '([0, 10, 13, 14, 32, 33])\n', (2458, 2483), True, 'import numpy as np\n'), ((2569, 2585), 'numpy.array', 'np.array', (['[3, 4]'], {}), '([3, 4])\n', (2577, 2585), True, 'import numpy as np\n'), ((2716, 2737), 'numpy.array', 'np.array', (['[[0, 0, 0]]'], {}), '([[0, 0, 0]])\n', (2724, 2737), True, 'import numpy as np\n'), ((2765, 2778), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (2773, 2778), True, 'import numpy as np\n'), ((2808, 2827), 'numpy.array', 'np.array', (['[3, 4, 5]'], {}), '([3, 4, 5])\n', (2816, 2827), True, 'import numpy as np\n'), ((3050, 3062), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (3059, 3062), True, 'import numpy as np\n'), ((3110, 3122), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (3119, 3122), True, 'import numpy as np\n'), ((4204, 4216), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (4213, 4216), True, 'import numpy as np\n'), ((4265, 4277), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (4274, 4277), True, 'import numpy as np\n'), ((6539, 6551), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (6548, 6551), True, 'import numpy as np\n'), ((7386, 7398), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (7395, 7398), True, 'import numpy as np\n'), ((8426, 8446), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (8436, 8446), False, 'from tensorflow.python.framework import ops\n'), ((8466, 8493), 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['indices'], {}), '(indices)\n', (8484, 8493), False, 'from tensorflow.python.ops import variables\n'), ((8511, 8537), 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['values'], {}), '(values)\n', (8529, 8537), False, 'from tensorflow.python.ops import variables\n'), ((8554, 8579), 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['shape'], {}), '(shape)\n', (8572, 8579), False, 'from tensorflow.python.ops import variables\n'), ((8593, 8647), 'tensorflow.python.framework.sparse_tensor.SparseTensor', 'sparse_tensor_lib.SparseTensor', (['indices', 'values', 'shape'], {}), '(indices, values, shape)\n', (8623, 8647), True, 'from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib\n'), ((8905, 8941), 'tensorflow.python.ops.sparse_ops.serialize_many_sparse', 'sparse_ops.serialize_many_sparse', (['st'], {}), '(st)\n', (8937, 8941), False, 'from tensorflow.python.ops import sparse_ops\n'), ((8968, 9037), 'tensorflow.python.ops.sparse_ops.deserialize_many_sparse', 'sparse_ops.deserialize_many_sparse', (['st_serialized'], {'dtype': 'values.dtype'}), '(st_serialized, dtype=values.dtype)\n', (9002, 9037), False, 'from tensorflow.python.ops import sparse_ops\n'), ((9293, 9380), 'numpy.testing.assert_equal', 
'np.testing.assert_equal', (['st_roundtrip_values.values', 'st_deserialized_values.values'], {}), '(st_roundtrip_values.values, st_deserialized_values.\n values)\n', (9316, 9380), True, 'import numpy as np\n'), ((9416, 9505), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['st_roundtrip_values.indices', 'st_deserialized_values.indices'], {}), '(st_roundtrip_values.indices, st_deserialized_values\n .indices)\n', (9439, 9505), True, 'import numpy as np\n'), ((9541, 9637), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['st_roundtrip_values.dense_shape', 'st_deserialized_values.dense_shape'], {}), '(st_roundtrip_values.dense_shape,\n st_deserialized_values.dense_shape)\n', (9564, 9637), True, 'import numpy as np\n'), ((2967, 2978), 'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), '()\n', (2976, 2978), False, 'from tensorflow.python.framework import ops\n'), ((8376, 8404), 'tensorflow.python.platform.benchmark.benchmark_config', 'benchmark.benchmark_config', ([], {}), '()\n', (8402, 8404), False, 'from tensorflow.python.platform import benchmark\n'), ((9115, 9155), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (9153, 9155), False, 'from tensorflow.python.ops import variables\n')]
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import pysam
import os
import pandas as pd
import numpy as np
import time
import argparse
import sys
from multiprocessing import Pool
# In[ ]:
# ##arguments for testing
# bam_file_path = '/fh/scratch/delete90/ha_g/realigned_bams/cfDNA_MBC_ULP_hg38/realign_bam_paired_snakemake-master/results/MBC_1041_1_ULP/MBC_1041_1_ULP_recalibrated.bam'
# bam_file_name = 'MBC_1041_1_ULP'
# mapable_path = '../../downloads/genome/repeat_masker.mapable.k50.Umap.hg38.bedGraph'
# ref_seq_path = '/fh/fast/ha_g/grp/reference/GRCh38/GRCh38.fa'
# chrom_sizes_path = '/fh/fast/ha_g/grp/reference/GRCh38/hg38.standard.chrom.sizes'
# out_dir = './tmp/'
# map_q = 20
# size_range = [15,500]
# CPU = 4
# In[ ]:
parser = argparse.ArgumentParser()
parser.add_argument('--bam_file', help='sample_bam_file', required=True)
parser.add_argument('--bam_file_name', help='sample name (does not need to match actual file name)', required=True)
parser.add_argument('--mapable_regions', help='highly mapable regions to be used in GC correction, bedGraph or bed format', required=True)
parser.add_argument('--ref_seq',help='reference sequence (fasta format)',required=True)
parser.add_argument('--chrom_sizes',help='path to chromosome sizes for the reference seq',required=True)
parser.add_argument('--out_dir',help='folder for GC bias results',required=True)
parser.add_argument('--map_q',help='minimum mapping quality for reads to be considered',type=int,required=True)
parser.add_argument('--size_range',help='range of read sizes to be included',nargs=2, type=int, required=True)
parser.add_argument('--CPU',help='number of CPU for parallelizing', type=int, required=True)
args = parser.parse_args()
bam_file_path = args.bam_file
bam_file_name = args.bam_file_name
mapable_path=args.mapable_regions
ref_seq_path = args.ref_seq
chrom_sizes_path = args.chrom_sizes
out_dir = args.out_dir
map_q = args.map_q
size_range = args.size_range
CPU = args.CPU
# In[ ]:
print('arguments provided:')
print('\tbam_file_path = "'+bam_file_path+'"')
print('\tbam_file_name = "'+bam_file_name+'"')
print('\tmapable_regions = "'+mapable_path+'"')
print('\tref_seq_path = "'+ref_seq_path+'"')
print('\tchrom_sizes_path = "'+chrom_sizes_path+'"')
print('\tout_dir = "'+out_dir+'"')
print('\tmap_q = '+str(map_q))
print('\tsize_range = '+str(size_range))
print('\tCPU = '+str(CPU))
# In[ ]:
mapable_name = mapable_path.rsplit('/',1)[1].rsplit('.',1)[0]
out_file = out_dir +'/'+mapable_name+'/GC_counts/'+ bam_file_name+'.GC_counts.txt'
print('out_file',out_file)
# In[ ]:
#create a directory for the GC data
if not os.path.exists(out_dir +'/'+mapable_name):
os.mkdir(out_dir +'/'+mapable_name)
if not os.path.exists(out_dir +'/'+mapable_name+'/GC_counts/'):
os.mkdir(out_dir +'/'+mapable_name+'/GC_counts/')
# In[ ]:
#import filter
mapable_intervals = pd.read_csv(mapable_path, sep='\t', header=None)
#remove non standard chromosomes and X and Y
chroms = ['chr'+str(m) for m in range(1,23)]
mapable_intervals = mapable_intervals[mapable_intervals[0].isin(chroms)]
print('chroms:', chroms)
print('number_of_intervals:',len(mapable_intervals))
sys.stdout.flush()
# In[ ]:
def collect_reads(sublist):
#create a dict for holding the frequency of each read length and GC content
GC_dict = {}
for length in range(size_range[0],size_range[1]+1):
GC_dict[length]={}
for num_GC in range(0,length+1):
GC_dict[length][num_GC]=0
#import the bam file
#this needs to be done within the loop otherwise it gives a truncated file warning
bam_file = pysam.AlignmentFile(bam_file_path, "rb")
print('sublist intervals:',len(sublist))
#this might also need to be in the loop
#import the ref_seq
ref_seq=pysam.FastaFile(ref_seq_path)
for i in range(len(sublist)):
chrom = sublist.iloc[i][0]
start = sublist.iloc[i][1]
end = sublist.iloc[i][2]
if i%5000==0:
print('interval',i,':',chrom,start,end,'seconds:',np.round(time.time()-start_time))
sys.stdout.flush()
        #fetch any read that overlaps the interval (no need to extend the interval because the fetch function does this automatically)
fetched = bam_file.fetch(chrom,start,end)
for read in fetched:
#use both fw (positive template length) and rv (negative template length) reads
if (read.is_reverse==False and read.template_length>=size_range[0] and read.template_length<=size_range[1]) or (read.is_reverse==True and -read.template_length>=size_range[0] and -read.template_length<=size_range[1]):
#qc filters, some longer fragments are considered 'improper pairs' but I would like to keep these
if read.is_paired==True and read.mapping_quality>=map_q and read.is_duplicate==False and read.is_qcfail==False:
if read.is_reverse==False:
read_start = read.reference_start
read_end = read.reference_start+read.template_length
elif read.is_reverse==True:
read_end = read.reference_start + read.reference_length
read_start = read_end + read.template_length
fragment_seq = ref_seq.fetch(read.reference_name,read_start,read_end)
#tally up the GC content
fragment_seq=fragment_seq.replace('g','G').replace('c','C').replace('a','A').replace('t','T').replace('n','N')
# #################
# ##logic check####
# #################
# if read.is_reverse==False:
# if fragment_seq[0:read.reference_length]==read.query_sequence and len(fragment_seq)==read.template_length:
# print('fw match',read.reference_length)
# else:
# print(fragment_seq[0:read.reference_length],read.reference_length,'fw')
# print(read.query_sequence,len(read.query_sequence),'fw')
# print(len(fragment_seq),read.template_length)
# print('\n')
# elif read.is_reverse==True:
# if fragment_seq[-read.reference_length:]==read.query_sequence and len(fragment_seq)==-read.template_length:
# print('rv match',read.reference_length)
# else:
# print(fragment_seq[-read.reference_length:],read.reference_length,'rv')
# print(read.query_sequence,len(read.query_sequence),'rv')
# print(len(fragment_seq),read.template_length)
# print('\n')
# #################
#split and convert to numpy array
fragment_seq = np.array(list(fragment_seq))
#replace with values
fragment_seq[(fragment_seq=='G') | (fragment_seq=='C')]=1
fragment_seq[(fragment_seq=='A') | (fragment_seq=='T')]=0
fragment_seq[(fragment_seq=='N')]=np.random.randint(2) #choose a random 0 or 1 for N (so that you always get an integer) #should be very rare if the filter is done right
fragment_seq = fragment_seq.astype(int)
num_GC = int(fragment_seq.sum())
GC_dict[abs(read.template_length)][num_GC]+=1
print('done')
return(GC_dict)
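# --- editor addition: a minimal, hedged sanity check of the GC tally above ---
# This sketch mirrors the replace/astype logic used in collect_reads() on a toy
# string, without pysam or a reference genome. The helper name is hypothetical.
def _gc_count_sketch(fragment_seq):
    arr = np.array(list(fragment_seq.upper()))
    arr[(arr=='G') | (arr=='C')] = 1
    arr[(arr=='A') | (arr=='T')] = 0
    arr[(arr=='N')] = np.random.randint(2)  # same convention as above: random 0/1 for any N
    return int(arr.astype(int).sum())
# e.g. _gc_count_sketch('ACGTGC') == 4, matching 'ACGTGC'.count('G') + 'ACGTGC'.count('C')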
# In[ ]:
start_time = time.time()
p = Pool(processes=CPU) #use the available CPU
sublists = np.array_split(mapable_intervals,CPU) #split the list into sublists, one per CPU
GC_dict_list = p.map(collect_reads, sublists, 1)
# In[ ]:
all_GC_df = pd.DataFrame()
for i,GC_dict in enumerate(GC_dict_list):
GC_df = pd.DataFrame()
for length in GC_dict.keys():
current = pd.Series(GC_dict[length]).reset_index()
current = current.rename(columns={'index':'num_GC',0:'number_of_fragments'})
current['length']=length
current = current[['length','num_GC','number_of_fragments']]
GC_df = GC_df.append(current, ignore_index=True)
GC_df = GC_df.set_index(['length','num_GC'])
all_GC_df[i] = GC_df['number_of_fragments']
del(GC_df,GC_dict)
all_GC_df = all_GC_df.sum(axis=1)
all_GC_df = pd.DataFrame(all_GC_df).rename(columns = {0:'number_of_fragments'})
all_GC_df = all_GC_df.reset_index()
all_GC_df.to_csv(out_file,sep='\t',index=False)
# In[ ]:
print('done')
# In[ ]:
# In[ ]:
# In[ ]:
|
[
"pandas.Series",
"os.path.exists",
"argparse.ArgumentParser",
"pandas.read_csv",
"pysam.AlignmentFile",
"numpy.array_split",
"numpy.random.randint",
"multiprocessing.Pool",
"os.mkdir",
"pandas.DataFrame",
"sys.stdout.flush",
"time.time",
"pysam.FastaFile"
] |
[((762, 787), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (785, 787), False, 'import argparse\n'), ((2901, 2949), 'pandas.read_csv', 'pd.read_csv', (['mapable_path'], {'sep': '"""\t"""', 'header': 'None'}), "(mapable_path, sep='\\t', header=None)\n", (2912, 2949), True, 'import pandas as pd\n'), ((3194, 3212), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3210, 3212), False, 'import sys\n'), ((7702, 7713), 'time.time', 'time.time', ([], {}), '()\n', (7711, 7713), False, 'import time\n'), ((7718, 7737), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'CPU'}), '(processes=CPU)\n', (7722, 7737), False, 'from multiprocessing import Pool\n'), ((7772, 7810), 'numpy.array_split', 'np.array_split', (['mapable_intervals', 'CPU'], {}), '(mapable_intervals, CPU)\n', (7786, 7810), True, 'import numpy as np\n'), ((7928, 7942), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (7940, 7942), True, 'import pandas as pd\n'), ((2652, 2696), 'os.path.exists', 'os.path.exists', (["(out_dir + '/' + mapable_name)"], {}), "(out_dir + '/' + mapable_name)\n", (2666, 2696), False, 'import os\n'), ((2699, 2737), 'os.mkdir', 'os.mkdir', (["(out_dir + '/' + mapable_name)"], {}), "(out_dir + '/' + mapable_name)\n", (2707, 2737), False, 'import os\n'), ((2742, 2802), 'os.path.exists', 'os.path.exists', (["(out_dir + '/' + mapable_name + '/GC_counts/')"], {}), "(out_dir + '/' + mapable_name + '/GC_counts/')\n", (2756, 2802), False, 'import os\n'), ((2803, 2857), 'os.mkdir', 'os.mkdir', (["(out_dir + '/' + mapable_name + '/GC_counts/')"], {}), "(out_dir + '/' + mapable_name + '/GC_counts/')\n", (2811, 2857), False, 'import os\n'), ((3649, 3689), 'pysam.AlignmentFile', 'pysam.AlignmentFile', (['bam_file_path', '"""rb"""'], {}), "(bam_file_path, 'rb')\n", (3668, 3689), False, 'import pysam\n'), ((3820, 3849), 'pysam.FastaFile', 'pysam.FastaFile', (['ref_seq_path'], {}), '(ref_seq_path)\n', (3835, 3849), False, 'import pysam\n'), ((7997, 8011), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (8009, 8011), True, 'import pandas as pd\n'), ((8520, 8543), 'pandas.DataFrame', 'pd.DataFrame', (['all_GC_df'], {}), '(all_GC_df)\n', (8532, 8543), True, 'import pandas as pd\n'), ((4122, 4140), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4138, 4140), False, 'import sys\n'), ((8064, 8090), 'pandas.Series', 'pd.Series', (['GC_dict[length]'], {}), '(GC_dict[length])\n', (8073, 8090), True, 'import pandas as pd\n'), ((7321, 7341), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (7338, 7341), True, 'import numpy as np\n'), ((4085, 4096), 'time.time', 'time.time', ([], {}), '()\n', (4094, 4096), False, 'import time\n')]
|
import numpy as np
import network
def main():
x = np.array([2, 3])
nw = network.NeuralNetwork()
print(nw.feedforward(x))
if __name__ == "__main__":
main()
|
[
"numpy.array",
"network.NeuralNetwork"
] |
[((56, 72), 'numpy.array', 'np.array', (['[2, 3]'], {}), '([2, 3])\n', (64, 72), True, 'import numpy as np\n'), ((82, 105), 'network.NeuralNetwork', 'network.NeuralNetwork', ([], {}), '()\n', (103, 105), False, 'import network\n')]
|
""" Generates Tisserand plots """
from enum import Enum
import numpy as np
from astropy import units as u
from matplotlib import pyplot as plt
from poliastro.plotting._base import BODY_COLORS
from poliastro.twobody.mean_elements import get_mean_elements
from poliastro.util import norm
class TisserandKind(Enum):
"""All possible Tisserand kinds"""
APSIS = "apsis"
ENERGY = "energy"
PERIOD = "period"
class TisserandPlotter:
"""Generates Tisserand figures"""
def __init__(self, kind=TisserandKind.APSIS, axes=None):
"""Object initializer
Parameters
----------
kind : TisserandKind
Nature for the Tisserand
axes : ~matplotlib.pyplot.axes
Axes for the figure
"""
        # Assign Tisserand kind
self.kind = kind
# Check if axis available
if not axes:
_, self.ax = plt.subplots(1, 1)
else:
self.ax = axes
# Force axes scale regarding Tisserand kind
self.ax.set_xscale("log")
if self.kind == TisserandKind.APSIS:
self.ax.set_yscale("log")
def _solve_tisserand(
self, body, vinf_span, num_contours, alpha_lim=(0, np.pi), N=100
):
"""Solves all possible Tisserand lines with a meshgrid workflow
Parameters
----------
body : ~poliastro.bodies.Body
Body to be plotted Tisserand
vinf_array : ~astropy.units.Quantity
Desired Vinf for the flyby
num_contours : int
Number of contour lines for flyby speed
alpha_lim : tuple
Minimum and maximum flyby angles.
N : int
Number of points for flyby angle.
Notes
-----
The algorithm for generating Tisserand plots is the one depicted in
"Preliminary Trajectory Design of a Mission to Enceladus" by David
<NAME>, section 3.6
"""
# Generate mean orbital elements Earth
body_rv = get_mean_elements(body).to_vectors()
R_body, V_body = norm(body_rv.r), norm(body_rv.v)
# Generate non-dimensional velocity and alpha span
vinf_array = np.linspace(vinf_span[0], vinf_span[-1], num_contours)
alpha_array = np.linspace(alpha_lim[0], alpha_lim[-1], N)
vinf_array /= V_body
# Construct the mesh for any configuration
V_INF, ALPHA = np.meshgrid(vinf_array, alpha_array)
# Solving for non-dimensional a_sc and ecc_sc
A_SC = 1 / np.abs(1 - V_INF ** 2 - 2 * V_INF * np.cos(ALPHA))
ECC_SC = np.sqrt(1 - 1 / A_SC * ((3 - 1 / A_SC - V_INF ** 2) / (2)) ** 2)
# Compute main Tisserand variables
RR_P = A_SC * R_body * (1 - ECC_SC)
RR_A = A_SC * R_body * (1 + ECC_SC)
TT = 2 * np.pi * np.sqrt((A_SC * R_body) ** 3 / body.parent.k)
EE = -body.parent.k / (2 * A_SC * R_body)
# Build color lines to internal canvas
return RR_P, RR_A, EE, TT
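    # (editor note) Worked non-dimensional check of the solver above: with
    # v_inf = 0.5 (half the body's orbital speed) and alpha = 0,
    # a_sc = 1 / |1 - 0.25 - 1| = 4 (in units of the body's orbital radius) and
    # ecc_sc = sqrt(1 - 0.25 * ((3 - 0.25 - 0.25) / 2) ** 2) ~= 0.78.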
def _build_lines(self, RR_P, RR_A, EE, TT, color):
"""Collect lines and append them to internal data
Parameters
----------
data : list
Array containing [RR_P, RR_A, EE, TT, color]
Returns
-------
lines: list
Plotting lines for the Tisserand
"""
# Plot desired kind lines
if self.kind == TisserandKind.APSIS:
# Generate apsis lines
lines = self.ax.plot(RR_A.to(u.AU), RR_P.to(u.AU), color=color)
elif self.kind == TisserandKind.ENERGY:
# Generate energy lines
lines = self.ax.plot(
RR_P.to(u.AU), EE.to(u.km ** 2 / u.s ** 2), color=color
)
elif self.kind == TisserandKind.PERIOD:
# Generate period lines
lines = self.ax.plot(RR_P.to(u.AU), TT.to(u.year), color=color)
return lines
def plot_line(self, body, vinf, alpha_lim=(0, np.pi), color=None):
"""Plots body Tisserand line within flyby angle
Parameters
----------
body : ~poliastro.bodies.Body
Body to be plotted Tisserand
vinf : ~astropy.units.Quantity
Vinf velocity line
alpha_lim : tuple
Minimum and maximum flyby angles
color : str
String representing for the color lines
Returns
-------
self.ax: ~matplotlib.axes.Axes
Apsis tisserand is the default plotting option
"""
# HACK: to reuse Tisserand solver, we transform input Vinf into a tuple
vinf_span = (vinf, vinf)
# Solve Tisserand parameters
RR_P, RR_A, EE, TT = self._solve_tisserand(
body, vinf_span, num_contours=2, alpha_lim=alpha_lim
)
# Check if color defined
if not color:
color = BODY_COLORS[body.name]
# Build canvas lines from Tisserand parameters
self._build_lines(RR_P, RR_A, EE, TT, color)
return self.ax
def plot(self, body, vinf_span, num_contours=10, color=None):
"""Plots body Tisserand for given amount of solutions within Vinf span
Parameters
----------
body : ~poliastro.bodies.Body
Body to be plotted Tisserand
vinf_span : tuple
Minimum and maximum Vinf velocities
num_contours : int
Number of points to iterate over previously defined velocities
color : str
String representing for the color lines
Returns
-------
self.ax: ~matplotlib.axes.Axes
Apsis tisserand is the default plotting option
"""
# Solve Tisserand parameters
RR_P, RR_A, EE, TT = self._solve_tisserand(body, vinf_span, num_contours)
# Check if color defined
if not color:
color = BODY_COLORS[body.name]
# Build canvas lines from Tisserand parameters
self._build_lines(RR_P, RR_A, EE, TT, color)
return self.ax
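

# --- editor addition: hedged usage sketch, not part of the original module ---
# One way this plotter could be driven; the choice of Earth and of the Vinf span
# is an assumption for illustration only.
if __name__ == "__main__":
    from poliastro.bodies import Earth

    tp = TisserandPlotter(kind=TisserandKind.APSIS)
    tp.plot(Earth, vinf_span=(1 * u.km / u.s, 10 * u.km / u.s), num_contours=14)
    plt.show()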
|
[
"poliastro.util.norm",
"numpy.sqrt",
"poliastro.twobody.mean_elements.get_mean_elements",
"numpy.linspace",
"numpy.cos",
"numpy.meshgrid",
"matplotlib.pyplot.subplots"
] |
[((2191, 2245), 'numpy.linspace', 'np.linspace', (['vinf_span[0]', 'vinf_span[-1]', 'num_contours'], {}), '(vinf_span[0], vinf_span[-1], num_contours)\n', (2202, 2245), True, 'import numpy as np\n'), ((2268, 2311), 'numpy.linspace', 'np.linspace', (['alpha_lim[0]', 'alpha_lim[-1]', 'N'], {}), '(alpha_lim[0], alpha_lim[-1], N)\n', (2279, 2311), True, 'import numpy as np\n'), ((2416, 2452), 'numpy.meshgrid', 'np.meshgrid', (['vinf_array', 'alpha_array'], {}), '(vinf_array, alpha_array)\n', (2427, 2452), True, 'import numpy as np\n'), ((2595, 2657), 'numpy.sqrt', 'np.sqrt', (['(1 - 1 / A_SC * ((3 - 1 / A_SC - V_INF ** 2) / 2) ** 2)'], {}), '(1 - 1 / A_SC * ((3 - 1 / A_SC - V_INF ** 2) / 2) ** 2)\n', (2602, 2657), True, 'import numpy as np\n'), ((904, 922), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (916, 922), True, 'from matplotlib import pyplot as plt\n'), ((2077, 2092), 'poliastro.util.norm', 'norm', (['body_rv.r'], {}), '(body_rv.r)\n', (2081, 2092), False, 'from poliastro.util import norm\n'), ((2094, 2109), 'poliastro.util.norm', 'norm', (['body_rv.v'], {}), '(body_rv.v)\n', (2098, 2109), False, 'from poliastro.util import norm\n'), ((2817, 2862), 'numpy.sqrt', 'np.sqrt', (['((A_SC * R_body) ** 3 / body.parent.k)'], {}), '((A_SC * R_body) ** 3 / body.parent.k)\n', (2824, 2862), True, 'import numpy as np\n'), ((2015, 2038), 'poliastro.twobody.mean_elements.get_mean_elements', 'get_mean_elements', (['body'], {}), '(body)\n', (2032, 2038), False, 'from poliastro.twobody.mean_elements import get_mean_elements\n'), ((2563, 2576), 'numpy.cos', 'np.cos', (['ALPHA'], {}), '(ALPHA)\n', (2569, 2576), True, 'import numpy as np\n')]
|
from mars import main_loop
import numpy as np
from mars.settings import *
class Problem:
"""
Synopsis
--------
User class for the Kelvin-Helmholtz instability
Args
----
None
Methods
-------
initialise
Set all variables in each cell to initialise the simulation.
internal_bc
Specify the internal boundary for the simulation.
TODO
----
None
"""
def __init__(self):
self.parameter = {
            'Name':'Kelvin-Helmholtz instability.',
'Dimensions':'2D',
'x1 min':-0.5,
'x1 max':0.5,
'x2 min':-0.5,
'x2 max':0.5,
'x3 min':-0.5,
'x3 max':0.5,
'resolution x1':256,
'resolution x2':256,
'resolution x3':0,
'cfl':0.3,
'initial dt':1.0e-5,
'max dt increase':1.5,
'initial t': 0.0,
'max time': 5.0,
'save frequency': 2.5e-2,
'output type': ['numpy'],
'output primitives': True,
'print to file':False,
'profiling': True,
'restart file':None,
'gamma':1.4,
'density unit':1.0,
'length unit':1.0,
'velocity unit':1.0,
'optimisation': 'numba',
'riemann':'hllc',
'reconstruction':'linear',
'limiter':'minmod',
'time stepping':'RK2',
'method':'hydro',
'lower x1 boundary':'reciprocal',
'upper x1 boundary':'reciprocal',
'lower x2 boundary':'reciprocal',
'upper x2 boundary':'reciprocal',
'lower x3 boundary':'reciprocal',
'upper x3 boundary':'reciprocal',
'internal boundary':False
}
def initialise(self, V, g, l):
if self.parameter['Dimensions'] == '2D':
Y, X = np.meshgrid(g.x1, g.x2, indexing='ij')
if self.parameter['Dimensions'] == '3D':
Z, Y, X = np.meshgrid(g.x1, g.x2, g.x3, indexing='ij')
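        # (editor note) With indexing='ij' the first meshgrid output varies along
        # g.x1, so Y holds the x1 coordinate of every cell; |Y| < yp below selects
        # the central band, which receives the denser stream moving in +x1.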
yp = 0.25
dens_1 = 2.0
dens_2 = 1.0
pres = 2.0
vel_1 = 0.5
vel_2 = 0.0
amp = 0.001
vx1_per = (np.random.random(V.shape)*2.0 - 1)*amp
vx2_per = (np.random.random(V.shape)*2.0 - 1)*amp
region_1 = np.absolute(Y) < yp
region_2 = np.absolute(Y) > yp
V[rho, region_1] = dens_1
V[prs, region_1] = pres
V[vx1, region_1] = vel_1 + vx1_per[vx1, region_1]
V[vx2, region_1] = vel_2 + vx2_per[vx2, region_1]
V[rho, region_2] = dens_2
V[prs, region_2] = pres
V[vx1, region_2] = -vel_1 + vx1_per[vx1, region_2]
V[vx2, region_2] = vel_2 + vx2_per[vx2, region_2]
def internal_bc(self):
return None
if __name__ == "__main__":
main_loop(Problem())
|
[
"numpy.random.random",
"numpy.meshgrid",
"numpy.absolute"
] |
[((1932, 1970), 'numpy.meshgrid', 'np.meshgrid', (['g.x1', 'g.x2'], {'indexing': '"""ij"""'}), "(g.x1, g.x2, indexing='ij')\n", (1943, 1970), True, 'import numpy as np\n'), ((2043, 2087), 'numpy.meshgrid', 'np.meshgrid', (['g.x1', 'g.x2', 'g.x3'], {'indexing': '"""ij"""'}), "(g.x1, g.x2, g.x3, indexing='ij')\n", (2054, 2087), True, 'import numpy as np\n'), ((2365, 2379), 'numpy.absolute', 'np.absolute', (['Y'], {}), '(Y)\n', (2376, 2379), True, 'import numpy as np\n'), ((2404, 2418), 'numpy.absolute', 'np.absolute', (['Y'], {}), '(Y)\n', (2415, 2418), True, 'import numpy as np\n'), ((2248, 2273), 'numpy.random.random', 'np.random.random', (['V.shape'], {}), '(V.shape)\n', (2264, 2273), True, 'import numpy as np\n'), ((2306, 2331), 'numpy.random.random', 'np.random.random', (['V.shape'], {}), '(V.shape)\n', (2322, 2331), True, 'import numpy as np\n')]
|
import numpy as np
from scipy.signal import savgol_filter
import matplotlib.pyplot as plt
import MadDog
x = []
y = []
def generate():
# Generate random data
base = np.linspace(0, 5, 11)
# base = np.random.randint(0, 10, 5)
outliers = np.random.randint(10, 20, 2)
data = np.concatenate((base, outliers))
np.random.shuffle(data)
return data
def fill_data():
# Build random data
return np.concatenate((np.array([0]), MadDog.find_outliers(generate()))), np.concatenate(
(np.array([0]), MadDog.find_outliers(generate()))) # np.sin(x) + np.cos(x) + np.random.random(100)
# np.linspace(0, 2*np.pi, 100)
def savitzky(x, y, poly_nom):
return savgol_filter(x, len(x) - 1, 10), savgol_filter(y, len(y) - 1, 10)
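# --- editor addition: hedged sketch of a more conventional Savitzky-Golay call ---
# The helper above smooths with an unusually wide window (len(x) - 1) and polyorder
# 10; the typical pattern is a short odd window and a low order. The demo variables
# below are arbitrary and unused by the rest of this script.
_t_demo = np.linspace(0, 2 * np.pi, 100)
_noisy_demo = np.sin(_t_demo) + 0.3 * np.random.random(100)
_smooth_demo = savgol_filter(_noisy_demo, window_length=11, polyorder=3)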
def map(x_filtered, y_filtered, x, y, title="title"):
# Generate some test data
heatmap, xedges, yedges = np.histogram2d(x, y, bins=50)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
plt.clf()
plt.imshow(heatmap.T, extent=extent, origin='lower')
plt.show()
heatmap, xedges, yedges = np.histogram2d(x_filtered, y_filtered, bins=50)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
plt.clf()
plt.imshow(heatmap.T, extent=extent, origin='lower')
plt.show()
def show(x_filtered, y_filtered, x, y, title="Lorem ipsum"):
# Plotting
fig = plt.figure()
ax = fig.subplots()
plt.plot(x_filtered, y_filtered, 'red', marker="o")
plt.plot(x, y, 'green', marker="o")
plt.subplots_adjust(bottom=0.25)
plt.xlabel('x')
plt.ylabel('y')
plt.title(title)
plt.legend(["Filter", "Raw"])
plt.show()
# Generating the noisy signal
x, y = fill_data()
print(len(y))
# Savitzky-Golay filter
x_filtered, y_filtered = savitzky(x, y, 2)
print("X unfiltered>> ", x)
print("Y unfiltered>> ", y)
print("X filtered>> ", x_filtered)
print("Y filtered>> ", y_filtered)
show(x_filtered, y_filtered, x, y)
|
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.plot",
"numpy.array",
"numpy.linspace",
"numpy.random.randint",
"matplotlib.pyplot.figure",
"numpy.concatenate",
"numpy.histogram2d",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots_adjust",
"numpy.random.shuffle"
] |
[((175, 196), 'numpy.linspace', 'np.linspace', (['(0)', '(5)', '(11)'], {}), '(0, 5, 11)\n', (186, 196), True, 'import numpy as np\n'), ((253, 281), 'numpy.random.randint', 'np.random.randint', (['(10)', '(20)', '(2)'], {}), '(10, 20, 2)\n', (270, 281), True, 'import numpy as np\n'), ((293, 325), 'numpy.concatenate', 'np.concatenate', (['(base, outliers)'], {}), '((base, outliers))\n', (307, 325), True, 'import numpy as np\n'), ((330, 353), 'numpy.random.shuffle', 'np.random.shuffle', (['data'], {}), '(data)\n', (347, 353), True, 'import numpy as np\n'), ((877, 906), 'numpy.histogram2d', 'np.histogram2d', (['x', 'y'], {'bins': '(50)'}), '(x, y, bins=50)\n', (891, 906), True, 'import numpy as np\n'), ((972, 981), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (979, 981), True, 'import matplotlib.pyplot as plt\n'), ((986, 1038), 'matplotlib.pyplot.imshow', 'plt.imshow', (['heatmap.T'], {'extent': 'extent', 'origin': '"""lower"""'}), "(heatmap.T, extent=extent, origin='lower')\n", (996, 1038), True, 'import matplotlib.pyplot as plt\n'), ((1043, 1053), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1051, 1053), True, 'import matplotlib.pyplot as plt\n'), ((1085, 1132), 'numpy.histogram2d', 'np.histogram2d', (['x_filtered', 'y_filtered'], {'bins': '(50)'}), '(x_filtered, y_filtered, bins=50)\n', (1099, 1132), True, 'import numpy as np\n'), ((1198, 1207), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1205, 1207), True, 'import matplotlib.pyplot as plt\n'), ((1212, 1264), 'matplotlib.pyplot.imshow', 'plt.imshow', (['heatmap.T'], {'extent': 'extent', 'origin': '"""lower"""'}), "(heatmap.T, extent=extent, origin='lower')\n", (1222, 1264), True, 'import matplotlib.pyplot as plt\n'), ((1269, 1279), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1277, 1279), True, 'import matplotlib.pyplot as plt\n'), ((1368, 1380), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1378, 1380), True, 'import matplotlib.pyplot as plt\n'), ((1409, 1460), 'matplotlib.pyplot.plot', 'plt.plot', (['x_filtered', 'y_filtered', '"""red"""'], {'marker': '"""o"""'}), "(x_filtered, y_filtered, 'red', marker='o')\n", (1417, 1460), True, 'import matplotlib.pyplot as plt\n'), ((1465, 1500), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""green"""'], {'marker': '"""o"""'}), "(x, y, 'green', marker='o')\n", (1473, 1500), True, 'import matplotlib.pyplot as plt\n'), ((1505, 1537), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.25)'}), '(bottom=0.25)\n', (1524, 1537), True, 'import matplotlib.pyplot as plt\n'), ((1542, 1557), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (1552, 1557), True, 'import matplotlib.pyplot as plt\n'), ((1562, 1577), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (1572, 1577), True, 'import matplotlib.pyplot as plt\n'), ((1582, 1598), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1591, 1598), True, 'import matplotlib.pyplot as plt\n'), ((1603, 1632), 'matplotlib.pyplot.legend', 'plt.legend', (["['Filter', 'Raw']"], {}), "(['Filter', 'Raw'])\n", (1613, 1632), True, 'import matplotlib.pyplot as plt\n'), ((1637, 1647), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1645, 1647), True, 'import matplotlib.pyplot as plt\n'), ((440, 453), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (448, 453), True, 'import numpy as np\n'), ((516, 529), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (524, 529), True, 'import numpy as np\n')]
|
import random as rn
import numpy as np
# Open-system dynamics of a qubit: compare numerical results with the analytical calculations.
# NOTE: these are also TUTORIALS of the library, so see the Tutorials for what they are doing and for the
# analytical calculations.
# Currently includes 2 cases: (i) decay only, and (ii) unitary evolution by calling the Liouville method without
# giving any collapse operators. For now, it only looks at excited-state populations.
# TODO: this is an unfinished test. The two tests below are identical, and they do not actually test open-system dynamics.
decayRateSM = rn.random()
excitedPopulation = lambda t: 0.5*np.exp(-(0.00001*(decayRateSM+1)*2+1j)*50*t)
populations = {'excitedAnalytical':[], 'excitedNumerical':[]}
# this is used as the calculate attribute of the qubit, and the singleQubit fixture evolve method calls this at every
# step of the evolution. It stores both numerical and analytical excited state populations into the dictionary above.
def singleQubitDecayCalculate(qub, state, i):
populations['excitedAnalytical'].append(excitedPopulation(i*qub.stepSize))
populations['excitedNumerical'].append(state[0, 0])
def test_qubitUnitaryEvolutionFromLiouville(singleQubit):
for k in populations:
populations[k] = []
singleQubit.evolutionMethod = singleQubit.openEvolution
singleQubit.calculate = singleQubitDecayCalculate
singleQubit.evolve()
assert singleQubit.stepCount == len(populations['excitedNumerical'])
def test_qubitDecay(singleQubit):
for k in populations:
populations[k] = []
singleQubit.evolutionMethod = singleQubit.openEvolution
singleQubit.calculate = singleQubitDecayCalculate
singleQubit.evolve()
assert singleQubit.stepCount == len(populations['excitedNumerical'])
|
[
"numpy.exp",
"random.random"
] |
[((587, 598), 'random.random', 'rn.random', ([], {}), '()\n', (596, 598), True, 'import random as rn\n'), ((634, 690), 'numpy.exp', 'np.exp', (['(-(1e-05 * (decayRateSM + 1) * 2 + 1.0j) * 50 * t)'], {}), '(-(1e-05 * (decayRateSM + 1) * 2 + 1.0j) * 50 * t)\n', (640, 690), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
#=============================================================================#
# #
# NAME: do_RMsynth_1D.py #
# #
# PURPOSE: API for running RM-synthesis on an ASCII Stokes I, Q & U spectrum. #
# #
# MODIFIED: 16-Nov-2018 by <NAME> #
# MODIFIED: 23-October-2019 by <NAME> #
# #
#=============================================================================#
# #
# The MIT License (MIT) #
# #
# Copyright (c) 2015 - 2018 <NAME> #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the "Software"), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
#=============================================================================#
import sys
import os
import time
import traceback
import json
import math as m
import numpy as np
import matplotlib.pyplot as plt
from RMutils.util_RM import do_rmsynth
from RMutils.util_RM import do_rmsynth_planes
from RMutils.util_RM import get_rmsf_planes
from RMutils.util_RM import measure_FDF_parms
from RMutils.util_RM import measure_qu_complexity
from RMutils.util_RM import measure_fdf_complexity
from RMutils.util_misc import nanmedian
from RMutils.util_misc import toscalar
from RMutils.util_misc import create_frac_spectra
from RMutils.util_misc import poly5
from RMutils.util_misc import MAD
from RMutils.util_plotTk import plot_Ipqu_spectra_fig
from RMutils.util_plotTk import plot_rmsf_fdf_fig
from RMutils.util_plotTk import plot_complexity_fig
from RMutils.util_plotTk import CustomNavbar
from RMutils.util_plotTk import plot_rmsIQU_vs_nu_ax
if sys.version_info.major == 2:
print('RM-tools will no longer run with Python 2! Please use Python 3.')
exit()
C = 2.997924538e8 # Speed of light [m/s]
#-----------------------------------------------------------------------------#
def run_rmsynth(data, polyOrd=3, phiMax_radm2=None, dPhi_radm2=None,
nSamples=10.0, weightType="variance", fitRMSF=False,
noStokesI=False, phiNoise_radm2=1e6, nBits=32, showPlots=False,
debug=False, verbose=False, log=print,units='Jy/beam', prefixOut="prefixOut", args=None):
"""Run RM synthesis on 1D data.
Args:
data (list): Contains frequency and polarization data as either:
[freq_Hz, I, Q, U, dI, dQ, dU]
freq_Hz (array_like): Frequency of each channel in Hz.
I (array_like): Stokes I intensity in each channel.
Q (array_like): Stokes Q intensity in each channel.
U (array_like): Stokes U intensity in each channel.
dI (array_like): Error in Stokes I intensity in each channel.
dQ (array_like): Error in Stokes Q intensity in each channel.
dU (array_like): Error in Stokes U intensity in each channel.
or
[freq_Hz, q, u, dq, du]
freq_Hz (array_like): Frequency of each channel in Hz.
q (array_like): Fractional Stokes Q intensity (Q/I) in each channel.
u (array_like): Fractional Stokes U intensity (U/I) in each channel.
dq (array_like): Error in fractional Stokes Q intensity in each channel.
du (array_like): Error in fractional Stokes U intensity in each channel.
Kwargs:
polyOrd (int): Order of polynomial to fit to Stokes I spectrum.
phiMax_radm2 (float): Maximum absolute Faraday depth (rad/m^2).
dPhi_radm2 (float): Faraday depth channel size (rad/m^2).
nSamples (float): Number of samples across the RMSF.
weightType (str): Can be "variance" or "uniform"
"variance" -- Weight by uncertainty in Q and U.
"uniform" -- Weight uniformly (i.e. with 1s)
fitRMSF (bool): Fit a Gaussian to the RMSF?
        noStokesI (bool): Is Stokes I data provided?
phiNoise_radm2 (float): ????
nBits (int): Precision of floating point numbers.
showPlots (bool): Show plots?
debug (bool): Turn on debugging messages & plots?
verbose (bool): Verbosity.
log (function): Which logging function to use.
units (str): Units of data.
Returns:
mDict (dict): Summary of RM synthesis results.
aDict (dict): Data output by RM synthesis.
"""
# Sanity checks
if not os.path.exists(args.dataFile[0]):
print("File does not exist: '%s'." % args.dataFile[0])
sys.exit()
prefixOut, ext = os.path.splitext(args.dataFile[0])
# Default data types
dtFloat = "float" + str(nBits)
dtComplex = "complex" + str(2*nBits)
# freq_Hz, I, Q, U, dI, dQ, dU
try:
if verbose: log("> Trying [freq_Hz, I, Q, U, dI, dQ, dU]", end=' ')
(freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr) = data
if verbose: log("... success.")
except Exception:
if verbose: log("...failed.")
# freq_Hz, q, u, dq, du
try:
if verbose: log("> Trying [freq_Hz, q, u, dq, du]", end=' ')
(freqArr_Hz, QArr, UArr, dQArr, dUArr) = data
if verbose: log("... success.")
noStokesI = True
except Exception:
if verbose: log("...failed.")
if debug:
log(traceback.format_exc())
sys.exit()
if verbose: log("Successfully read in the Stokes spectra.")
# If no Stokes I present, create a dummy spectrum = unity
if noStokesI:
if verbose: log("Warn: no Stokes I data in use.")
IArr = np.ones_like(QArr)
dIArr = np.zeros_like(QArr)
# Convert to GHz for convenience
freqArr_GHz = freqArr_Hz / 1e9
dQUArr = (dQArr + dUArr)/2.0
# Fit the Stokes I spectrum and create the fractional spectra
IModArr, qArr, uArr, dqArr, duArr, fitDict = \
create_frac_spectra(freqArr = freqArr_GHz,
IArr = IArr,
QArr = QArr,
UArr = UArr,
dIArr = dIArr,
dQArr = dQArr,
dUArr = dUArr,
polyOrd = polyOrd,
verbose = True,
debug = debug)
# Plot the data and the Stokes I model fit
if verbose: log("Plotting the input data and spectral index fit.")
freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000)
IModHirArr = poly5(fitDict["p"])(freqHirArr_Hz/1e9)
specFig = plt.figure(figsize=(12.0, 8))
plot_Ipqu_spectra_fig(freqArr_Hz = freqArr_Hz,
IArr = IArr,
qArr = qArr,
uArr = uArr,
dIArr = dIArr,
dqArr = dqArr,
duArr = duArr,
freqHirArr_Hz = freqHirArr_Hz,
IModArr = IModHirArr,
fig = specFig,
units = units)
# Use the custom navigation toolbar (does not work on Mac OS X)
# try:
# specFig.canvas.toolbar.pack_forget()
# CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window)
# except Exception:
# pass
# Display the figure
# if not plt.isinteractive():
# specFig.show()
# DEBUG (plot the Q, U and average RMS spectrum)
if debug:
rmsFig = plt.figure(figsize=(12.0, 8))
ax = rmsFig.add_subplot(111)
ax.plot(freqArr_Hz/1e9, dQUArr, marker='o', color='k', lw=0.5,
label='rms <QU>')
ax.plot(freqArr_Hz/1e9, dQArr, marker='o', color='b', lw=0.5,
label='rms Q')
ax.plot(freqArr_Hz/1e9, dUArr, marker='o', color='r', lw=0.5,
label='rms U')
xRange = (np.nanmax(freqArr_Hz)-np.nanmin(freqArr_Hz))/1e9
ax.set_xlim( np.min(freqArr_Hz)/1e9 - xRange*0.05,
np.max(freqArr_Hz)/1e9 + xRange*0.05)
ax.set_xlabel('$\\nu$ (GHz)')
ax.set_ylabel('RMS '+units)
ax.set_title("RMS noise in Stokes Q, U and <Q,U> spectra")
# rmsFig.show()
#-------------------------------------------------------------------------#
# Calculate some wavelength parameters
lambdaSqArr_m2 = np.power(C/freqArr_Hz, 2.0)
dFreq_Hz = np.nanmin(np.abs(np.diff(freqArr_Hz)))
lambdaSqRange_m2 = ( np.nanmax(lambdaSqArr_m2) -
np.nanmin(lambdaSqArr_m2) )
dLambdaSqMin_m2 = np.nanmin(np.abs(np.diff(lambdaSqArr_m2)))
dLambdaSqMax_m2 = np.nanmax(np.abs(np.diff(lambdaSqArr_m2)))
# Set the Faraday depth range
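    # (editor note) 2*sqrt(3) / (lambda^2 range) is the standard approximation for
    # the FWHM of the RM spread function (Brentjens & de Bruyn 2005).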
fwhmRMSF_radm2 = 2.0 * m.sqrt(3.0) / lambdaSqRange_m2
if dPhi_radm2 is None:
dPhi_radm2 = fwhmRMSF_radm2 / nSamples
if phiMax_radm2 is None:
phiMax_radm2 = m.sqrt(3.0) / dLambdaSqMax_m2
phiMax_radm2 = max(phiMax_radm2, fwhmRMSF_radm2*10.) # Force the minimum phiMax to 10 FWHM
# Faraday depth sampling. Zero always centred on middle channel
nChanRM = int(round(abs((phiMax_radm2 - 0.0) / dPhi_radm2)) * 2.0 + 1.0)
startPhi_radm2 = - (nChanRM-1.0) * dPhi_radm2 / 2.0
stopPhi_radm2 = + (nChanRM-1.0) * dPhi_radm2 / 2.0
phiArr_radm2 = np.linspace(startPhi_radm2, stopPhi_radm2, nChanRM)
phiArr_radm2 = phiArr_radm2.astype(dtFloat)
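    # Worked example (editor note): with dPhi_radm2 = 10 and phiMax_radm2 = 400,
    # nChanRM = round(400 / 10) * 2 + 1 = 81, and phiArr_radm2 runs from -400 to
    # +400 rad/m^2 with phi = 0 falling exactly on the central channel.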
if verbose: log("PhiArr = %.2f to %.2f by %.2f (%d chans)." % (phiArr_radm2[0],
phiArr_radm2[-1],
float(dPhi_radm2),
nChanRM))
# Calculate the weighting as 1/sigma^2 or all 1s (uniform)
if weightType=="variance":
weightArr = 1.0 / np.power(dQUArr, 2.0)
else:
weightType = "uniform"
weightArr = np.ones(freqArr_Hz.shape, dtype=dtFloat)
if verbose: log("Weight type is '%s'." % weightType)
startTime = time.time()
# Perform RM-synthesis on the spectrum
dirtyFDF, lam0Sq_m2 = do_rmsynth_planes(dataQ = qArr,
dataU = uArr,
lambdaSqArr_m2 = lambdaSqArr_m2,
phiArr_radm2 = phiArr_radm2,
weightArr = weightArr,
nBits = nBits,
verbose = verbose,
log = log)
# Calculate the Rotation Measure Spread Function
RMSFArr, phi2Arr_radm2, fwhmRMSFArr, fitStatArr = \
get_rmsf_planes(lambdaSqArr_m2 = lambdaSqArr_m2,
phiArr_radm2 = phiArr_radm2,
weightArr = weightArr,
mskArr = ~np.isfinite(qArr),
lam0Sq_m2 = lam0Sq_m2,
double = True,
fitRMSF = fitRMSF,
fitRMSFreal = False,
nBits = nBits,
verbose = verbose,
log = log)
fwhmRMSF = float(fwhmRMSFArr)
# ALTERNATE RM-SYNTHESIS CODE --------------------------------------------#
#dirtyFDF, [phi2Arr_radm2, RMSFArr], lam0Sq_m2, fwhmRMSF = \
# do_rmsynth(qArr, uArr, lambdaSqArr_m2, phiArr_radm2, weightArr)
#-------------------------------------------------------------------------#
endTime = time.time()
cputime = (endTime - startTime)
if verbose: log("> RM-synthesis completed in %.2f seconds." % cputime)
# Determine the Stokes I value at lam0Sq_m2 from the Stokes I model
# Multiply the dirty FDF by Ifreq0 to recover the PI
freq0_Hz = C / m.sqrt(lam0Sq_m2)
Ifreq0 = poly5(fitDict["p"])(freq0_Hz/1e9)
dirtyFDF *= (Ifreq0) # FDF is in fracpol units initially, convert back to flux
    # Calculate the theoretical noise in the FDF !!Old formula only works for variance weights!
weightArr = np.where(np.isnan(weightArr), 0.0, weightArr)
dFDFth = np.sqrt( np.sum(weightArr**2 * np.nan_to_num(dQUArr)**2) / (np.sum(weightArr))**2 )
# Measure the parameters of the dirty FDF
# Use the theoretical noise to calculate uncertainties
mDict = measure_FDF_parms(FDF = dirtyFDF,
phiArr = phiArr_radm2,
fwhmRMSF = fwhmRMSF,
dFDF = dFDFth,
lamSqArr_m2 = lambdaSqArr_m2,
lam0Sq = lam0Sq_m2)
mDict["Ifreq0"] = toscalar(Ifreq0)
mDict["polyCoeffs"] = ",".join([str(x) for x in fitDict["p"]])
mDict["IfitStat"] = fitDict["fitStatus"]
mDict["IfitChiSqRed"] = fitDict["chiSqRed"]
mDict["lam0Sq_m2"] = toscalar(lam0Sq_m2)
mDict["freq0_Hz"] = toscalar(freq0_Hz)
mDict["fwhmRMSF"] = toscalar(fwhmRMSF)
mDict["dQU"] = toscalar(nanmedian(dQUArr))
mDict["dFDFth"] = toscalar(dFDFth)
mDict["units"] = units
if fitDict["fitStatus"] >= 128:
log("WARNING: Stokes I model contains negative values!")
elif fitDict["fitStatus"] >= 64:
log("Caution: Stokes I model has low signal-to-noise.")
#Add information on nature of channels:
good_channels=np.where(np.logical_and(weightArr != 0,np.isfinite(qArr)))[0]
mDict["min_freq"]=float(np.min(freqArr_Hz[good_channels]))
mDict["max_freq"]=float(np.max(freqArr_Hz[good_channels]))
mDict["N_channels"]=good_channels.size
mDict["median_channel_width"]=float(np.median(np.diff(freqArr_Hz)))
# Measure the complexity of the q and u spectra
mDict["fracPol"] = mDict["ampPeakPIfit"]/(Ifreq0)
mD, pD = measure_qu_complexity(freqArr_Hz = freqArr_Hz,
qArr = qArr,
uArr = uArr,
dqArr = dqArr,
duArr = duArr,
fracPol = mDict["fracPol"],
psi0_deg = mDict["polAngle0Fit_deg"],
RM_radm2 = mDict["phiPeakPIfit_rm2"])
mDict.update(mD)
# Debugging plots for spectral complexity measure
if debug:
tmpFig = plot_complexity_fig(xArr=pD["xArrQ"],
qArr=pD["yArrQ"],
dqArr=pD["dyArrQ"],
sigmaAddqArr=pD["sigmaAddArrQ"],
chiSqRedqArr=pD["chiSqRedArrQ"],
probqArr=pD["probArrQ"],
uArr=pD["yArrU"],
duArr=pD["dyArrU"],
sigmaAdduArr=pD["sigmaAddArrU"],
chiSqReduArr=pD["chiSqRedArrU"],
probuArr=pD["probArrU"],
mDict=mDict)
if saveOutput:
if verbose: print("Saving debug plots:")
outFilePlot = prefixOut + ".debug-plots.pdf"
if verbose: print("> " + outFilePlot)
tmpFig.savefig(outFilePlot, bbox_inches = 'tight')
else:
tmpFig.show()
#add array dictionary
aDict = dict()
aDict["phiArr_radm2"] = phiArr_radm2
aDict["phi2Arr_radm2"] = phi2Arr_radm2
aDict["RMSFArr"] = RMSFArr
aDict["freqArr_Hz"] = freqArr_Hz
aDict["weightArr"]=weightArr
aDict["dirtyFDF"]=dirtyFDF
if verbose:
# Print the results to the screen
log()
log('-'*80)
log('RESULTS:\n')
log('FWHM RMSF = %.4g rad/m^2' % (mDict["fwhmRMSF"]))
log('Pol Angle = %.4g (+/-%.4g) deg' % (mDict["polAngleFit_deg"],
mDict["dPolAngleFit_deg"]))
log('Pol Angle 0 = %.4g (+/-%.4g) deg' % (mDict["polAngle0Fit_deg"],
mDict["dPolAngle0Fit_deg"]))
log('Peak FD = %.4g (+/-%.4g) rad/m^2' % (mDict["phiPeakPIfit_rm2"],
mDict["dPhiPeakPIfit_rm2"]))
log('freq0_GHz = %.4g ' % (mDict["freq0_Hz"]/1e9))
log('I freq0 = %.4g %s' % (mDict["Ifreq0"],units))
log('Peak PI = %.4g (+/-%.4g) %s' % (mDict["ampPeakPIfit"],
mDict["dAmpPeakPIfit"],units))
log('QU Noise = %.4g %s' % (mDict["dQU"],units))
log('FDF Noise (theory) = %.4g %s' % (mDict["dFDFth"],units))
log('FDF Noise (Corrected MAD) = %.4g %s' % (mDict["dFDFcorMAD"],units))
log('FDF Noise (rms) = %.4g %s' % (mDict["dFDFrms"],units))
log('FDF SNR = %.4g ' % (mDict["snrPIfit"]))
log('sigma_add(q) = %.4g (+%.4g, -%.4g)' % (mDict["sigmaAddQ"],
mDict["dSigmaAddPlusQ"],
mDict["dSigmaAddMinusQ"]))
log('sigma_add(u) = %.4g (+%.4g, -%.4g)' % (mDict["sigmaAddU"],
mDict["dSigmaAddPlusU"],
mDict["dSigmaAddMinusU"]))
log()
log('-'*80)
# Plot the RM Spread Function and dirty FDF
if showPlots or saveOutput:
fdfFig = plt.figure(figsize=(12.0, 8))
plot_rmsf_fdf_fig(phiArr = phiArr_radm2,
FDF = dirtyFDF,
phi2Arr = phi2Arr_radm2,
RMSFArr = RMSFArr,
fwhmRMSF = fwhmRMSF,
vLine = mDict["phiPeakPIfit_rm2"],
fig = fdfFig,
units = units)
# Use the custom navigation toolbar
# try:
# fdfFig.canvas.toolbar.pack_forget()
# CustomNavbar(fdfFig.canvas, fdfFig.canvas.toolbar.window)
# except Exception:
# pass
# Display the figure
# fdfFig.show()
# Pause if plotting enabled
if showPlots:
plt.show()
elif saveOutput or debug:
if verbose: print("Saving RMSF and dirty FDF plot:")
outFilePlot = prefixOut + ".RMSF-dirtyFDF-plots.pdf"
if verbose: print("> " + outFilePlot)
fdfFig.savefig(outFilePlot, bbox_inches = 'tight')
# #if verbose: print "Press <RETURN> to exit ...",
# input()
return mDict, aDict
def readFile(dataFile, nBits, verbose=True, debug=False):
"""
Read the I, Q & U data from the ASCII file.
Inputs:
datafile (str): relative or absolute path to file.
nBits (int): number of bits to store the data as.
verbose (bool): Print verbose messages to terminal?
debug (bool): Print full traceback in case of failure?
Returns:
data (list of arrays): List containing the columns found in the file.
If Stokes I is present, this will be [freq_Hz, I, Q, U, dI, dQ, dU],
else [freq_Hz, q, u, dq, du].
"""
# Default data types
dtFloat = "float" + str(nBits)
dtComplex = "complex" + str(2*nBits)
# Output prefix is derived from the input file name
# Read the data-file. Format=space-delimited, comments="#".
if verbose: print("Reading the data file '%s':" % dataFile)
# freq_Hz, I, Q, U, dI, dQ, dU
try:
if verbose: print("> Trying [freq_Hz, I, Q, U, dI, dQ, dU]", end=' ')
(freqArr_Hz, IArr, QArr, UArr,
dIArr, dQArr, dUArr) = \
np.loadtxt(dataFile, unpack=True, dtype=dtFloat)
if verbose: print("... success.")
data=[freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr]
except Exception:
if verbose: print("...failed.")
# freq_Hz, q, u, dq, du
try:
if verbose: print("> Trying [freq_Hz, q, u, dq, du]", end=' ')
(freqArr_Hz, QArr, UArr, dQArr, dUArr) = \
np.loadtxt(dataFile, unpack=True, dtype=dtFloat)
if verbose: print("... success.")
data=[freqArr_Hz, QArr, UArr, dQArr, dUArr]
noStokesI = True
except Exception:
if verbose: print("...failed.")
if debug:
print(traceback.format_exc())
sys.exit()
if verbose: print("Successfully read in the Stokes spectra.")
return data
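
# (editor note) Hypothetical illustration of the 5-column layout accepted above,
# i.e. [freq_Hz, q, u, dq, du] with fractional Stokes parameters; the numbers are
# invented for illustration only:
#   1.10e9   0.021  -0.013   0.004   0.004
#   1.15e9   0.018  -0.016   0.004   0.004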
def saveOutput(outdict, arrdict, prefixOut, verbose):
# Save the dirty FDF, RMSF and weight array to ASCII files
if verbose: print("Saving the dirty FDF, RMSF weight arrays to ASCII files.")
outFile = prefixOut + "_FDFdirty.dat"
if verbose:
print("> %s" % outFile)
np.savetxt(outFile, list(zip(arrdict["phiArr_radm2"], arrdict["dirtyFDF"].real, arrdict["dirtyFDF"].imag)))
outFile = prefixOut + "_RMSF.dat"
if verbose:
print("> %s" % outFile)
np.savetxt(outFile, list(zip(arrdict["phi2Arr_radm2"], arrdict["RMSFArr"].real, arrdict["RMSFArr"].imag)))
outFile = prefixOut + "_weight.dat"
if verbose:
print("> %s" % outFile)
np.savetxt(outFile, list(zip(arrdict["freqArr_Hz"], arrdict["weightArr"])))
# Save the measurements to a "key=value" text file
outFile = prefixOut + "_RMsynth.dat"
if verbose:
print("Saving the measurements on the FDF in 'key=val' and JSON formats.")
print("> %s" % outFile)
FH = open(outFile, "w")
for k, v in outdict.items():
FH.write("%s=%s\n" % (k, v))
FH.close()
outFile = prefixOut + "_RMsynth.json"
if verbose:
print("> %s" % outFile)
json.dump(dict(outdict), open(outFile, "w"))
#-----------------------------------------------------------------------------#
def main():
import argparse
"""
Start the function to perform RM-synthesis if called from the command line.
"""
# Help string to be shown using the -h option
descStr = """
Run RM-synthesis on Stokes I, Q and U spectra (1D) stored in an ASCII
file. The Stokes I spectrum is first fit with a polynomial and the
resulting model used to create fractional q = Q/I and u = U/I spectra.
The ASCII file should the following columns, in a space separated format:
[freq_Hz, I, Q, U, I_err, Q_err, U_err]
OR
[freq_Hz, Q, U, Q_err, U_err]
To get outputs, one or more of the following flags must be set: -S, -p, -v.
"""
epilog_text="""
Outputs with -S flag:
_FDFdirty.dat: Dirty FDF/RM Spectrum [Phi, Q, U]
_RMSF.dat: Computed RMSF [Phi, Q, U]
_RMsynth.dat: list of derived parameters for RM spectrum
(approximately equivalent to -v flag output)
_RMsynth.json: dictionary of derived parameters for RM spectrum
_weight.dat: Calculated channel weights [freq_Hz, weight]
"""
# Parse the command line options
parser = argparse.ArgumentParser(description=descStr,epilog=epilog_text,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("dataFile", metavar="dataFile.dat", nargs=1,
help="ASCII file containing Stokes spectra & errors.")
parser.add_argument("-t", dest="fitRMSF", action="store_true",
help="fit a Gaussian to the RMSF [False]")
parser.add_argument("-l", dest="phiMax_radm2", type=float, default=None,
help="absolute max Faraday depth sampled [Auto].")
parser.add_argument("-d", dest="dPhi_radm2", type=float, default=None,
help="width of Faraday depth channel [Auto].\n(overrides -s NSAMPLES flag)")
parser.add_argument("-s", dest="nSamples", type=float, default=10,
help="number of samples across the RMSF lobe [10].")
parser.add_argument("-w", dest="weightType", default="variance",
help="weighting [inverse variance] or 'uniform' (all 1s).")
parser.add_argument("-o", dest="polyOrd", type=int, default=2,
help="polynomial order to fit to I spectrum [2].")
parser.add_argument("-i", dest="noStokesI", action="store_true",
help="ignore the Stokes I spectrum [False].")
parser.add_argument("-b", dest="bit64", action="store_true",
help="use 64-bit floating point precision [False (uses 32-bit)]")
parser.add_argument("-p", dest="showPlots", action="store_true",
help="show the plots [False].")
parser.add_argument("-v", dest="verbose", action="store_true",
help="verbose output [False].")
parser.add_argument("-S", dest="saveOutput", action="store_true",
help="save the arrays and plots [False].")
parser.add_argument("-D", dest="debug", action="store_true",
help="turn on debugging messages & plots [False].")
parser.add_argument("-U", dest="units", type=str, default="Jy/beam",
help="Intensity units of the data. [Jy/beam]")
args = parser.parse_args()
# Sanity checks
if not os.path.exists(args.dataFile[0]):
print("File does not exist: '%s'." % args.dataFile[0])
sys.exit()
prefixOut, ext = os.path.splitext(args.dataFile[0])
dataDir, dummy = os.path.split(args.dataFile[0])
# Set the floating point precision
nBits = 32
if args.bit64:
nBits = 64
verbose=args.verbose
data = readFile(args.dataFile[0],nBits, verbose=verbose, debug=args.debug)
# Run RM-synthesis on the spectra
mDict, aDict = run_rmsynth(data = data,
polyOrd = args.polyOrd,
phiMax_radm2 = args.phiMax_radm2,
dPhi_radm2 = args.dPhi_radm2,
nSamples = args.nSamples,
weightType = args.weightType,
fitRMSF = args.fitRMSF,
noStokesI = args.noStokesI,
nBits = nBits,
showPlots = args.showPlots,
debug = args.debug,
verbose = verbose,
units = args.units,
prefixOut = prefixOut,
args = args,
)
if args.saveOutput:
saveOutput(mDict, aDict, prefixOut, verbose)
#-----------------------------------------------------------------------------#
if __name__ == "__main__":
main()
|
[
"math.sqrt",
"RMutils.util_misc.toscalar",
"numpy.isfinite",
"sys.exit",
"numpy.nanmin",
"os.path.exists",
"RMutils.util_misc.create_frac_spectra",
"argparse.ArgumentParser",
"numpy.diff",
"os.path.split",
"numpy.max",
"numpy.linspace",
"numpy.nanmax",
"numpy.min",
"RMutils.util_plotTk.plot_rmsf_fdf_fig",
"RMutils.util_RM.measure_FDF_parms",
"RMutils.util_plotTk.plot_complexity_fig",
"RMutils.util_plotTk.plot_Ipqu_spectra_fig",
"RMutils.util_RM.measure_qu_complexity",
"numpy.ones",
"RMutils.util_RM.do_rmsynth_planes",
"os.path.splitext",
"numpy.isnan",
"time.time",
"matplotlib.pyplot.show",
"numpy.ones_like",
"traceback.format_exc",
"numpy.power",
"RMutils.util_misc.poly5",
"numpy.sum",
"matplotlib.pyplot.figure",
"RMutils.util_misc.nanmedian",
"numpy.loadtxt",
"numpy.zeros_like",
"numpy.nan_to_num"
] |
[((6474, 6508), 'os.path.splitext', 'os.path.splitext', (['args.dataFile[0]'], {}), '(args.dataFile[0])\n', (6490, 6508), False, 'import os\n'), ((7816, 7980), 'RMutils.util_misc.create_frac_spectra', 'create_frac_spectra', ([], {'freqArr': 'freqArr_GHz', 'IArr': 'IArr', 'QArr': 'QArr', 'UArr': 'UArr', 'dIArr': 'dIArr', 'dQArr': 'dQArr', 'dUArr': 'dUArr', 'polyOrd': 'polyOrd', 'verbose': '(True)', 'debug': 'debug'}), '(freqArr=freqArr_GHz, IArr=IArr, QArr=QArr, UArr=UArr,\n dIArr=dIArr, dQArr=dQArr, dUArr=dUArr, polyOrd=polyOrd, verbose=True,\n debug=debug)\n', (7835, 7980), False, 'from RMutils.util_misc import create_frac_spectra\n'), ((8457, 8506), 'numpy.linspace', 'np.linspace', (['freqArr_Hz[0]', 'freqArr_Hz[-1]', '(10000)'], {}), '(freqArr_Hz[0], freqArr_Hz[-1], 10000)\n', (8468, 8506), True, 'import numpy as np\n'), ((8577, 8606), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12.0, 8)'}), '(figsize=(12.0, 8))\n', (8587, 8606), True, 'import matplotlib.pyplot as plt\n'), ((8611, 8812), 'RMutils.util_plotTk.plot_Ipqu_spectra_fig', 'plot_Ipqu_spectra_fig', ([], {'freqArr_Hz': 'freqArr_Hz', 'IArr': 'IArr', 'qArr': 'qArr', 'uArr': 'uArr', 'dIArr': 'dIArr', 'dqArr': 'dqArr', 'duArr': 'duArr', 'freqHirArr_Hz': 'freqHirArr_Hz', 'IModArr': 'IModHirArr', 'fig': 'specFig', 'units': 'units'}), '(freqArr_Hz=freqArr_Hz, IArr=IArr, qArr=qArr, uArr=\n uArr, dIArr=dIArr, dqArr=dqArr, duArr=duArr, freqHirArr_Hz=\n freqHirArr_Hz, IModArr=IModHirArr, fig=specFig, units=units)\n', (8632, 8812), False, 'from RMutils.util_plotTk import plot_Ipqu_spectra_fig\n'), ((10474, 10503), 'numpy.power', 'np.power', (['(C / freqArr_Hz)', '(2.0)'], {}), '(C / freqArr_Hz, 2.0)\n', (10482, 10503), True, 'import numpy as np\n'), ((11419, 11470), 'numpy.linspace', 'np.linspace', (['startPhi_radm2', 'stopPhi_radm2', 'nChanRM'], {}), '(startPhi_radm2, stopPhi_radm2, nChanRM)\n', (11430, 11470), True, 'import numpy as np\n'), ((12140, 12151), 'time.time', 'time.time', ([], {}), '()\n', (12149, 12151), False, 'import time\n'), ((12222, 12390), 'RMutils.util_RM.do_rmsynth_planes', 'do_rmsynth_planes', ([], {'dataQ': 'qArr', 'dataU': 'uArr', 'lambdaSqArr_m2': 'lambdaSqArr_m2', 'phiArr_radm2': 'phiArr_radm2', 'weightArr': 'weightArr', 'nBits': 'nBits', 'verbose': 'verbose', 'log': 'log'}), '(dataQ=qArr, dataU=uArr, lambdaSqArr_m2=lambdaSqArr_m2,\n phiArr_radm2=phiArr_radm2, weightArr=weightArr, nBits=nBits, verbose=\n verbose, log=log)\n', (12239, 12390), False, 'from RMutils.util_RM import do_rmsynth_planes\n'), ((13809, 13820), 'time.time', 'time.time', ([], {}), '()\n', (13818, 13820), False, 'import time\n'), ((14607, 14741), 'RMutils.util_RM.measure_FDF_parms', 'measure_FDF_parms', ([], {'FDF': 'dirtyFDF', 'phiArr': 'phiArr_radm2', 'fwhmRMSF': 'fwhmRMSF', 'dFDF': 'dFDFth', 'lamSqArr_m2': 'lambdaSqArr_m2', 'lam0Sq': 'lam0Sq_m2'}), '(FDF=dirtyFDF, phiArr=phiArr_radm2, fwhmRMSF=fwhmRMSF,\n dFDF=dFDFth, lamSqArr_m2=lambdaSqArr_m2, lam0Sq=lam0Sq_m2)\n', (14624, 14741), False, 'from RMutils.util_RM import measure_FDF_parms\n'), ((14950, 14966), 'RMutils.util_misc.toscalar', 'toscalar', (['Ifreq0'], {}), '(Ifreq0)\n', (14958, 14966), False, 'from RMutils.util_misc import toscalar\n'), ((15153, 15172), 'RMutils.util_misc.toscalar', 'toscalar', (['lam0Sq_m2'], {}), '(lam0Sq_m2)\n', (15161, 15172), False, 'from RMutils.util_misc import toscalar\n'), ((15197, 15215), 'RMutils.util_misc.toscalar', 'toscalar', (['freq0_Hz'], {}), '(freq0_Hz)\n', (15205, 15215), False, 'from RMutils.util_misc import toscalar\n'), 
((15240, 15258), 'RMutils.util_misc.toscalar', 'toscalar', (['fwhmRMSF'], {}), '(fwhmRMSF)\n', (15248, 15258), False, 'from RMutils.util_misc import toscalar\n'), ((15328, 15344), 'RMutils.util_misc.toscalar', 'toscalar', (['dFDFth'], {}), '(dFDFth)\n', (15336, 15344), False, 'from RMutils.util_misc import toscalar\n'), ((16067, 16267), 'RMutils.util_RM.measure_qu_complexity', 'measure_qu_complexity', ([], {'freqArr_Hz': 'freqArr_Hz', 'qArr': 'qArr', 'uArr': 'uArr', 'dqArr': 'dqArr', 'duArr': 'duArr', 'fracPol': "mDict['fracPol']", 'psi0_deg': "mDict['polAngle0Fit_deg']", 'RM_radm2': "mDict['phiPeakPIfit_rm2']"}), "(freqArr_Hz=freqArr_Hz, qArr=qArr, uArr=uArr, dqArr=\n dqArr, duArr=duArr, fracPol=mDict['fracPol'], psi0_deg=mDict[\n 'polAngle0Fit_deg'], RM_radm2=mDict['phiPeakPIfit_rm2'])\n", (16088, 16267), False, 'from RMutils.util_RM import measure_qu_complexity\n'), ((25240, 25355), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'descStr', 'epilog': 'epilog_text', 'formatter_class': 'argparse.RawTextHelpFormatter'}), '(description=descStr, epilog=epilog_text,\n formatter_class=argparse.RawTextHelpFormatter)\n', (25263, 25355), False, 'import argparse\n'), ((27601, 27635), 'os.path.splitext', 'os.path.splitext', (['args.dataFile[0]'], {}), '(args.dataFile[0])\n', (27617, 27635), False, 'import os\n'), ((27657, 27688), 'os.path.split', 'os.path.split', (['args.dataFile[0]'], {}), '(args.dataFile[0])\n', (27670, 27688), False, 'import os\n'), ((6337, 6369), 'os.path.exists', 'os.path.exists', (['args.dataFile[0]'], {}), '(args.dataFile[0])\n', (6351, 6369), False, 'import os\n'), ((6442, 6452), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6450, 6452), False, 'import sys\n'), ((7524, 7542), 'numpy.ones_like', 'np.ones_like', (['QArr'], {}), '(QArr)\n', (7536, 7542), True, 'import numpy as np\n'), ((7559, 7578), 'numpy.zeros_like', 'np.zeros_like', (['QArr'], {}), '(QArr)\n', (7572, 7578), True, 'import numpy as np\n'), ((8524, 8543), 'RMutils.util_misc.poly5', 'poly5', (["fitDict['p']"], {}), "(fitDict['p'])\n", (8529, 8543), False, 'from RMutils.util_misc import poly5\n'), ((9601, 9630), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12.0, 8)'}), '(figsize=(12.0, 8))\n', (9611, 9630), True, 'import matplotlib.pyplot as plt\n'), ((10581, 10606), 'numpy.nanmax', 'np.nanmax', (['lambdaSqArr_m2'], {}), '(lambdaSqArr_m2)\n', (10590, 10606), True, 'import numpy as np\n'), ((10634, 10659), 'numpy.nanmin', 'np.nanmin', (['lambdaSqArr_m2'], {}), '(lambdaSqArr_m2)\n', (10643, 10659), True, 'import numpy as np\n'), ((12025, 12065), 'numpy.ones', 'np.ones', (['freqArr_Hz.shape'], {'dtype': 'dtFloat'}), '(freqArr_Hz.shape, dtype=dtFloat)\n', (12032, 12065), True, 'import numpy as np\n'), ((14081, 14098), 'math.sqrt', 'm.sqrt', (['lam0Sq_m2'], {}), '(lam0Sq_m2)\n', (14087, 14098), True, 'import math as m\n'), ((14112, 14131), 'RMutils.util_misc.poly5', 'poly5', (["fitDict['p']"], {}), "(fitDict['p'])\n", (14117, 14131), False, 'from RMutils.util_misc import poly5\n'), ((14354, 14373), 'numpy.isnan', 'np.isnan', (['weightArr'], {}), '(weightArr)\n', (14362, 14373), True, 'import numpy as np\n'), ((15287, 15304), 'RMutils.util_misc.nanmedian', 'nanmedian', (['dQUArr'], {}), '(dQUArr)\n', (15296, 15304), False, 'from RMutils.util_misc import nanmedian\n'), ((15734, 15767), 'numpy.min', 'np.min', (['freqArr_Hz[good_channels]'], {}), '(freqArr_Hz[good_channels])\n', (15740, 15767), True, 'import numpy as np\n'), ((15797, 15830), 'numpy.max', 'np.max', 
(['freqArr_Hz[good_channels]'], {}), '(freqArr_Hz[good_channels])\n', (15803, 15830), True, 'import numpy as np\n'), ((16655, 16979), 'RMutils.util_plotTk.plot_complexity_fig', 'plot_complexity_fig', ([], {'xArr': "pD['xArrQ']", 'qArr': "pD['yArrQ']", 'dqArr': "pD['dyArrQ']", 'sigmaAddqArr': "pD['sigmaAddArrQ']", 'chiSqRedqArr': "pD['chiSqRedArrQ']", 'probqArr': "pD['probArrQ']", 'uArr': "pD['yArrU']", 'duArr': "pD['dyArrU']", 'sigmaAdduArr': "pD['sigmaAddArrU']", 'chiSqReduArr': "pD['chiSqRedArrU']", 'probuArr': "pD['probArrU']", 'mDict': 'mDict'}), "(xArr=pD['xArrQ'], qArr=pD['yArrQ'], dqArr=pD['dyArrQ'],\n sigmaAddqArr=pD['sigmaAddArrQ'], chiSqRedqArr=pD['chiSqRedArrQ'],\n probqArr=pD['probArrQ'], uArr=pD['yArrU'], duArr=pD['dyArrU'],\n sigmaAdduArr=pD['sigmaAddArrU'], chiSqReduArr=pD['chiSqRedArrU'],\n probuArr=pD['probArrU'], mDict=mDict)\n", (16674, 16979), False, 'from RMutils.util_plotTk import plot_complexity_fig\n'), ((19693, 19722), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12.0, 8)'}), '(figsize=(12.0, 8))\n', (19703, 19722), True, 'import matplotlib.pyplot as plt\n'), ((19731, 19908), 'RMutils.util_plotTk.plot_rmsf_fdf_fig', 'plot_rmsf_fdf_fig', ([], {'phiArr': 'phiArr_radm2', 'FDF': 'dirtyFDF', 'phi2Arr': 'phi2Arr_radm2', 'RMSFArr': 'RMSFArr', 'fwhmRMSF': 'fwhmRMSF', 'vLine': "mDict['phiPeakPIfit_rm2']", 'fig': 'fdfFig', 'units': 'units'}), "(phiArr=phiArr_radm2, FDF=dirtyFDF, phi2Arr=phi2Arr_radm2,\n RMSFArr=RMSFArr, fwhmRMSF=fwhmRMSF, vLine=mDict['phiPeakPIfit_rm2'],\n fig=fdfFig, units=units)\n", (19748, 19908), False, 'from RMutils.util_plotTk import plot_rmsf_fdf_fig\n'), ((20471, 20481), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20479, 20481), True, 'import matplotlib.pyplot as plt\n'), ((21937, 21985), 'numpy.loadtxt', 'np.loadtxt', (['dataFile'], {'unpack': '(True)', 'dtype': 'dtFloat'}), '(dataFile, unpack=True, dtype=dtFloat)\n', (21947, 21985), True, 'import numpy as np\n'), ((27464, 27496), 'os.path.exists', 'os.path.exists', (['args.dataFile[0]'], {}), '(args.dataFile[0])\n', (27478, 27496), False, 'import os\n'), ((27569, 27579), 'sys.exit', 'sys.exit', ([], {}), '()\n', (27577, 27579), False, 'import sys\n'), ((10534, 10553), 'numpy.diff', 'np.diff', (['freqArr_Hz'], {}), '(freqArr_Hz)\n', (10541, 10553), True, 'import numpy as np\n'), ((10701, 10724), 'numpy.diff', 'np.diff', (['lambdaSqArr_m2'], {}), '(lambdaSqArr_m2)\n', (10708, 10724), True, 'import numpy as np\n'), ((10766, 10789), 'numpy.diff', 'np.diff', (['lambdaSqArr_m2'], {}), '(lambdaSqArr_m2)\n', (10773, 10789), True, 'import numpy as np\n'), ((10854, 10865), 'math.sqrt', 'm.sqrt', (['(3.0)'], {}), '(3.0)\n', (10860, 10865), True, 'import math as m\n'), ((11011, 11022), 'math.sqrt', 'm.sqrt', (['(3.0)'], {}), '(3.0)\n', (11017, 11022), True, 'import math as m\n'), ((11942, 11963), 'numpy.power', 'np.power', (['dQUArr', '(2.0)'], {}), '(dQUArr, 2.0)\n', (11950, 11963), True, 'import numpy as np\n'), ((15925, 15944), 'numpy.diff', 'np.diff', (['freqArr_Hz'], {}), '(freqArr_Hz)\n', (15932, 15944), True, 'import numpy as np\n'), ((9993, 10014), 'numpy.nanmax', 'np.nanmax', (['freqArr_Hz'], {}), '(freqArr_Hz)\n', (10002, 10014), True, 'import numpy as np\n'), ((10015, 10036), 'numpy.nanmin', 'np.nanmin', (['freqArr_Hz'], {}), '(freqArr_Hz)\n', (10024, 10036), True, 'import numpy as np\n'), ((13086, 13103), 'numpy.isfinite', 'np.isfinite', (['qArr'], {}), '(qArr)\n', (13097, 13103), True, 'import numpy as np\n'), ((14464, 14481), 'numpy.sum', 'np.sum', (['weightArr'], 
{}), '(weightArr)\n', (14470, 14481), True, 'import numpy as np\n'), ((15683, 15700), 'numpy.isfinite', 'np.isfinite', (['qArr'], {}), '(qArr)\n', (15694, 15700), True, 'import numpy as np\n'), ((22356, 22404), 'numpy.loadtxt', 'np.loadtxt', (['dataFile'], {'unpack': '(True)', 'dtype': 'dtFloat'}), '(dataFile, unpack=True, dtype=dtFloat)\n', (22366, 22404), True, 'import numpy as np\n'), ((7295, 7305), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7303, 7305), False, 'import sys\n'), ((10063, 10081), 'numpy.min', 'np.min', (['freqArr_Hz'], {}), '(freqArr_Hz)\n', (10069, 10081), True, 'import numpy as np\n'), ((10122, 10140), 'numpy.max', 'np.max', (['freqArr_Hz'], {}), '(freqArr_Hz)\n', (10128, 10140), True, 'import numpy as np\n'), ((22687, 22697), 'sys.exit', 'sys.exit', ([], {}), '()\n', (22695, 22697), False, 'import sys\n'), ((14435, 14456), 'numpy.nan_to_num', 'np.nan_to_num', (['dQUArr'], {}), '(dQUArr)\n', (14448, 14456), True, 'import numpy as np\n'), ((7259, 7281), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (7279, 7281), False, 'import traceback\n'), ((22651, 22673), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (22671, 22673), False, 'import traceback\n')]
|
"""
Contains functions to generate and combine a clustering ensemble.
"""
import numpy as np
import pandas as pd
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score as ari
from sklearn.metrics import adjusted_mutual_info_score as ami
from sklearn.metrics import normalized_mutual_info_score as nmi
from tqdm import tqdm
from clustering.utils import reset_estimator, compare_arrays
def generate_ensemble(data, clusterers: dict, attributes: list, affinity_matrix=None):
"""
It generates an ensemble from the data given a set of clusterers (a
clusterer is an instance of a clustering algorithm with a fixed set of
parameters).
Args:
data:
A numpy array, pandas dataframe, or any other structure supported
by the clusterers as data input.
clusterers:
A dictionary with clusterers specified in this format: { 'k-means
#1': KMeans(n_clusters=2), ... }
attributes:
A list of attributes to save in the final dataframe; for example,
including "n_clusters" will extract this attribute from the
estimator and include it in the final dataframe returned.
affinity_matrix:
If the clustering algorithm is AgglomerativeClustering (from
            sklearn) and the linkage method is different from ward (which only
            supports euclidean distance), the affinity_matrix is given as data
input to the estimator instead of data.
Returns:
A pandas DataFrame with all the partitions generated by the clusterers.
Columns include the clusterer name/id, the partition, the estimator
parameters (obtained with the get_params() method) and any other
attribute specified.
"""
ensemble = []
for clus_name, clus_obj in tqdm(clusterers.items(), total=len(clusterers)):
# get partition
#
# for agglomerative clustering both data and affinity_matrix should be
# given; for ward linkage, data is used, and for the other linkage
# methods the affinity_matrix is used
if (type(clus_obj).__name__ == "AgglomerativeClustering") and (
clus_obj.linkage != "ward"
):
partition = clus_obj.fit_predict(affinity_matrix).astype(float)
else:
partition = clus_obj.fit_predict(data).astype(float)
# remove from partition noisy points (for example, if using DBSCAN)
partition[partition < 0] = np.nan
# get number of clusters
partition_no_nan = partition[~np.isnan(partition)]
n_clusters = np.unique(partition_no_nan).shape[0]
# stop if n_clusters <= 1
if n_clusters <= 1:
reset_estimator(clus_obj)
continue
res = pd.Series(
{
"clusterer_id": clus_name,
"clusterer_params": str(clus_obj.get_params()),
"partition": partition,
}
)
for attr in attributes:
if attr == "n_clusters" and not hasattr(clus_obj, attr):
res[attr] = n_clusters
else:
res[attr] = getattr(clus_obj, attr)
ensemble.append(res)
# for some estimators such as DBSCAN this is needed, because otherwise
# the estimator saves references of huge data structures not needed in
# this context
reset_estimator(clus_obj)
return pd.DataFrame(ensemble).set_index("clusterer_id")
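# Minimal usage sketch (hypothetical names; the KMeans settings are only
# illustrative and `data` is assumed to be an existing array or dataframe):
#
#   from sklearn.cluster import KMeans
#   clusterers = {f"k-means #{k}": KMeans(n_clusters=k) for k in (2, 3, 4)}
#   ensemble_df = generate_ensemble(data, clusterers, attributes=["n_clusters"])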
def get_ensemble_distance_matrix(ensemble, n_jobs=1):
"""
Given an ensemble, it computes the coassociation matrix (a distance matrix
    for all objects using the ensemble information). For each object pair, the
    returned matrix contains the fraction of times the pair of objects was *not*
    clustered together in the ensemble (i.e. one minus the coassociation value).
Args:
ensemble:
A numpy array representing a set of clustering solutions on the same
data. Each row is a clustering solution (partition) and columns are
objects.
n_jobs:
            The number of jobs passed to sklearn's pairwise_distances
            function.
Returns:
A numpy array representing a square distance matrix for all objects
(coassociation matrix).
"""
def _compare(x, y):
xy = np.array([x, y]).T
xy = xy[~np.isnan(xy).any(axis=1)]
return (xy[:, 0] != xy[:, 1]).sum() / xy.shape[0]
return pairwise_distances(
ensemble.T, metric=_compare, n_jobs=n_jobs, force_all_finite="allow-nan"
)
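# Usage sketch, assuming `ensemble_df` was built with generate_ensemble above:
# stack the partition column into a partitions-by-objects array and derive the
# coassociation distance matrix from it.
#
#   ensemble_arr = np.vstack(ensemble_df["partition"].values)
#   coassoc = get_ensemble_distance_matrix(ensemble_arr, n_jobs=2)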
def supraconsensus(ensemble, k, methods, selection_criterion, n_jobs=1, use_tqdm=False):
"""
It combines a clustering ensemble using a set of methods that the user can
specify. Each of these methods combines the ensemble and returns a single
partition. This function returns the combined partition that maximizes the
selection criterion.
Args:
ensemble:
a clustering ensemble (rows are partitions, columns are objects).
k:
the final number of clusters for the combined partition.
methods:
a list of methods to apply on the ensemble; each returns a combined
partition.
selection_criterion:
a function that represents the selection criterion; this function
has to accept an ensemble as the first argument, and a partition as
the second one.
n_jobs:
number of jobs.
use_tqdm:
            enables/disables the use of tqdm to show a progress bar.
Returns:
Returns a tuple: (partition, best method name, best criterion value)
"""
from concurrent.futures import ProcessPoolExecutor, as_completed
methods_results = {}
with ProcessPoolExecutor(max_workers=n_jobs) as executor:
tasks = {executor.submit(m, ensemble, k): m.__name__ for m in methods}
for future in tqdm(
as_completed(tasks),
total=len(tasks),
disable=(not use_tqdm),
ncols=100,
):
method_name = tasks[future]
part = future.result()
criterion_value = selection_criterion(ensemble, part)
methods_results[method_name] = {
"partition": part,
"criterion_value": criterion_value,
}
# select the best performing method according to the selection criterion
best_method = max(
methods_results, key=lambda x: methods_results[x]["criterion_value"]
)
best_method_results = methods_results[best_method]
return (
best_method_results["partition"],
best_method,
best_method_results["criterion_value"],
)
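# Usage sketch (the consensus functions `eac_single` and `eac_average` are
# hypothetical and only illustrate the expected signature: each takes the
# ensemble and k and returns a partition; the methods must be picklable, i.e.
# defined at module level, to work with ProcessPoolExecutor):
#
#   def mean_ami(ensemble, partition):
#       return np.mean([compare_arrays(p, partition, ami, use_weighting=True)
#                       for p in ensemble])
#
#   best_part, best_method, best_value = supraconsensus(
#       ensemble_arr, k=3, methods=[eac_single, eac_average],
#       selection_criterion=mean_ami)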
def run_method_and_compute_agreement(method_func, ensemble_data, ensemble, k, **kwargs):
"""
Runs a consensus clustering method on the ensemble data, obtains the
consolidated partition with the desired number of clusters, and computes
a series of performance measures.
Args:
method_func:
A consensus function (first argument is either the ensemble or
the coassociation matrix derived from the ensemble).
ensemble_data:
A numpy array with the ensemble data that will be given to the
specified method. For evidence accumulation methods, this is the
coassociation matrix (a square matrix with the distance between
object pairs derived from the ensemble).
ensemble:
A numpy array representing the ensemble (partitions in rows, objects
in columns).
k:
The number of clusters to obtain from the ensemble data using the
specified method.
kwargs:
Other parameters passed to `method_func`.
Returns:
It returns a tuple with the data partition derived from the ensemble
data using the specified method, and some performance measures of this
partition.
"""
part = method_func(ensemble_data, k, **kwargs)
nmi_values = np.array(
[
compare_arrays(ensemble_member, part, nmi, use_weighting=True)
for ensemble_member in ensemble
]
)
ami_values = np.array(
[
compare_arrays(ensemble_member, part, ami, use_weighting=True)
for ensemble_member in ensemble
]
)
ari_values = np.array(
[
compare_arrays(ensemble_member, part, ari, use_weighting=True)
for ensemble_member in ensemble
]
)
performance_values = {
"ari_mean": np.mean(ari_values),
"ari_median": np.median(ari_values),
"ari_std": np.std(ari_values),
"ami_mean": np.mean(ami_values),
"ami_median": np.median(ami_values),
"ami_std": np.std(ami_values),
"nmi_mean": np.mean(nmi_values),
"nmi_median": np.median(nmi_values),
"nmi_std": np.std(nmi_values),
}
return part, performance_values
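# Usage sketch (eac_average is hypothetical): for an evidence-accumulation
# method the coassociation matrix is passed as `ensemble_data`, while the raw
# ensemble is still needed to score the consolidated partition.
#
#   part, perf = run_method_and_compute_agreement(
#       eac_average, coassoc, ensemble_arr, k=3)
#   print(perf["ari_mean"], perf["nmi_median"])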
|
[
"numpy.mean",
"numpy.median",
"numpy.unique",
"clustering.utils.reset_estimator",
"sklearn.metrics.pairwise_distances",
"concurrent.futures.as_completed",
"numpy.array",
"numpy.isnan",
"concurrent.futures.ProcessPoolExecutor",
"numpy.std",
"pandas.DataFrame",
"clustering.utils.compare_arrays"
] |
[((4500, 4596), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['ensemble.T'], {'metric': '_compare', 'n_jobs': 'n_jobs', 'force_all_finite': '"""allow-nan"""'}), "(ensemble.T, metric=_compare, n_jobs=n_jobs,\n force_all_finite='allow-nan')\n", (4518, 4596), False, 'from sklearn.metrics import pairwise_distances\n'), ((3448, 3473), 'clustering.utils.reset_estimator', 'reset_estimator', (['clus_obj'], {}), '(clus_obj)\n', (3463, 3473), False, 'from clustering.utils import reset_estimator, compare_arrays\n'), ((5826, 5865), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {'max_workers': 'n_jobs'}), '(max_workers=n_jobs)\n', (5845, 5865), False, 'from concurrent.futures import ProcessPoolExecutor, as_completed\n'), ((8669, 8688), 'numpy.mean', 'np.mean', (['ari_values'], {}), '(ari_values)\n', (8676, 8688), True, 'import numpy as np\n'), ((8712, 8733), 'numpy.median', 'np.median', (['ari_values'], {}), '(ari_values)\n', (8721, 8733), True, 'import numpy as np\n'), ((8754, 8772), 'numpy.std', 'np.std', (['ari_values'], {}), '(ari_values)\n', (8760, 8772), True, 'import numpy as np\n'), ((8794, 8813), 'numpy.mean', 'np.mean', (['ami_values'], {}), '(ami_values)\n', (8801, 8813), True, 'import numpy as np\n'), ((8837, 8858), 'numpy.median', 'np.median', (['ami_values'], {}), '(ami_values)\n', (8846, 8858), True, 'import numpy as np\n'), ((8879, 8897), 'numpy.std', 'np.std', (['ami_values'], {}), '(ami_values)\n', (8885, 8897), True, 'import numpy as np\n'), ((8919, 8938), 'numpy.mean', 'np.mean', (['nmi_values'], {}), '(nmi_values)\n', (8926, 8938), True, 'import numpy as np\n'), ((8962, 8983), 'numpy.median', 'np.median', (['nmi_values'], {}), '(nmi_values)\n', (8971, 8983), True, 'import numpy as np\n'), ((9004, 9022), 'numpy.std', 'np.std', (['nmi_values'], {}), '(nmi_values)\n', (9010, 9022), True, 'import numpy as np\n'), ((2759, 2784), 'clustering.utils.reset_estimator', 'reset_estimator', (['clus_obj'], {}), '(clus_obj)\n', (2774, 2784), False, 'from clustering.utils import reset_estimator, compare_arrays\n'), ((3486, 3508), 'pandas.DataFrame', 'pd.DataFrame', (['ensemble'], {}), '(ensemble)\n', (3498, 3508), True, 'import pandas as pd\n'), ((4368, 4384), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (4376, 4384), True, 'import numpy as np\n'), ((5999, 6018), 'concurrent.futures.as_completed', 'as_completed', (['tasks'], {}), '(tasks)\n', (6011, 6018), False, 'from concurrent.futures import ProcessPoolExecutor, as_completed\n'), ((8152, 8214), 'clustering.utils.compare_arrays', 'compare_arrays', (['ensemble_member', 'part', 'nmi'], {'use_weighting': '(True)'}), '(ensemble_member, part, nmi, use_weighting=True)\n', (8166, 8214), False, 'from clustering.utils import reset_estimator, compare_arrays\n'), ((8325, 8387), 'clustering.utils.compare_arrays', 'compare_arrays', (['ensemble_member', 'part', 'ami'], {'use_weighting': '(True)'}), '(ensemble_member, part, ami, use_weighting=True)\n', (8339, 8387), False, 'from clustering.utils import reset_estimator, compare_arrays\n'), ((8498, 8560), 'clustering.utils.compare_arrays', 'compare_arrays', (['ensemble_member', 'part', 'ari'], {'use_weighting': '(True)'}), '(ensemble_member, part, ari, use_weighting=True)\n', (8512, 8560), False, 'from clustering.utils import reset_estimator, compare_arrays\n'), ((2605, 2624), 'numpy.isnan', 'np.isnan', (['partition'], {}), '(partition)\n', (2613, 2624), True, 'import numpy as np\n'), ((2647, 2674), 'numpy.unique', 'np.unique', (['partition_no_nan'], {}), 
'(partition_no_nan)\n', (2656, 2674), True, 'import numpy as np\n'), ((4404, 4416), 'numpy.isnan', 'np.isnan', (['xy'], {}), '(xy)\n', (4412, 4416), True, 'import numpy as np\n')]
|
"""
Provides a class that handles the fits metadata required by PypeIt.
.. include common links, assuming primary doc root is up one directory
.. include:: ../include/links.rst
"""
import os
import io
import string
from copy import deepcopy
import datetime
from IPython import embed
import numpy as np
import yaml
from astropy import table, coordinates, time, units
from pypeit import msgs
from pypeit import utils
from pypeit.core import framematch
from pypeit.core import flux_calib
from pypeit.core import parse
from pypeit.core import meta
from pypeit.io import dict_to_lines
from pypeit.par import PypeItPar
from pypeit.par.util import make_pypeit_file
from pypeit.bitmask import BitMask
# TODO: Turn this into a DataContainer
# Initially tried to subclass this from astropy.table.Table, but that
# proved too difficult.
class PypeItMetaData:
"""
Provides a table and interface to the relevant fits file metadata
used during the reduction.
The content of the fits table is dictated by the header keywords
specified for the provided spectrograph. It is expected that this
table can be used to set the frame type of each file.
The metadata is validated using checks specified by the provided
spectrograph class.
For the data table, one should typically provide either the file
list from which to grab the data from the fits headers or the
data directly. If neither are provided the table is instantiated
without any data.
Args:
spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`):
            The spectrograph used to collect the data saved to each file.
The class is used to provide the header keyword data to
include in the table and specify any validation checks.
par (:obj:`pypeit.par.pypeitpar.PypeItPar`):
PypeIt parameters used to set the code behavior.
files (:obj:`str`, :obj:`list`, optional):
The list of files to include in the table.
data (table-like, optional):
The data to include in the table. The type can be anything
allowed by the instantiation of
:class:`astropy.table.Table`.
usrdata (:obj:`astropy.table.Table`, optional):
A user provided set of data used to supplement or overwrite
metadata read from the file headers. The table must have a
`filename` column that is used to match to the metadata
table generated within PypeIt. **Note**: This is ignored if
`data` is also provided. This functionality is only used
when building the metadata from the fits files.
strict (:obj:`bool`, optional):
            Function will fault if there is a problem with reading
the header for any of the provided files; see
:func:`pypeit.spectrographs.spectrograph.get_headarr`. Set
to False to instead report a warning and continue.
Attributes:
spectrograph
(:class:`pypeit.spectrographs.spectrograph.Spectrograph`):
            The spectrograph used to collect the data saved to each file.
The class is used to provide the header keyword data to
include in the table and specify any validation checks.
par (:class:`pypeit.par.pypeitpar.PypeItPar`):
PypeIt parameters used to set the code behavior. If not
provided, the default parameters specific to the provided
spectrograph are used.
configs (:obj:`dict`):
A dictionary of the unique configurations identified.
type_bitmask (:class:`pypeit.core.framematch.FrameTypeBitMask`):
The bitmask used to set the frame type of each fits file.
calib_bitmask (:class:`BitMask`):
The bitmask used to keep track of the calibration group bits.
table (:class:`astropy.table.Table`):
The table with the relevant metadata for each fits file to
use in the data reduction.
"""
def __init__(self, spectrograph, par, files=None, data=None, usrdata=None,
strict=True):
if data is None and files is None:
# Warn that table will be empty
msgs.warn('Both data and files are None in the instantiation of PypeItMetaData.'
' The table will be empty!')
# Initialize internals
self.spectrograph = spectrograph
self.par = par
if not isinstance(self.par, PypeItPar):
raise TypeError('Input parameter set must be of type PypeItPar.')
self.type_bitmask = framematch.FrameTypeBitMask()
# Build table
self.table = table.Table(data if files is None
else self._build(files, strict=strict,
usrdata=usrdata))
# Merge with user data, if present
if usrdata is not None:
self.merge(usrdata)
# Impose types on specific columns
self._impose_types(['comb_id', 'bkg_id', 'manual'], [int, int, str])
# Initialize internal attributes
self.configs = None
self.calib_bitmask = None
# Initialize columns that the user might add
self.set_user_added_columns()
# Validate instrument name
self.spectrograph.vet_instrument(self.table)
def _impose_types(self, columns, types):
"""
Impose a set of types on certain columns.
.. note::
:attr:`table` is edited in place.
Args:
columns (:obj:`list`):
List of column names
types (:obj:`list`):
List of types
"""
for c,t in zip(columns, types):
if c in self.keys():
self.table[c] = self.table[c].astype(t)
def _build(self, files, strict=True, usrdata=None):
"""
Generate the fitstbl that will be at the heart of PypeItMetaData.
Args:
files (:obj:`str`, :obj:`list`):
One or more files to use to build the table.
strict (:obj:`bool`, optional):
Function will fault if :func:`fits.getheader` fails to
read any of the headers. Set to False to report a
warning and continue.
usrdata (astropy.table.Table, optional):
Parsed for frametype for a few instruments (e.g. VLT)
where meta data may not be required
Returns:
dict: Dictionary with the data to assign to :attr:`table`.
"""
# Allow for single files
_files = files if hasattr(files, '__len__') else [files]
# Build lists to fill
data = {k:[] for k in self.spectrograph.meta.keys()}
data['directory'] = ['None']*len(_files)
data['filename'] = ['None']*len(_files)
# Build the table
for idx, ifile in enumerate(_files):
# User data (for frame type)
if usrdata is None:
usr_row = None
else:
# TODO: This check should be done elsewhere
# Check
if os.path.basename(ifile) != usrdata['filename'][idx]:
msgs.error('File name list does not match user-provided metadata table. See '
'usrdata argument of instantiation of PypeItMetaData.')
usr_row = usrdata[idx]
# Add the directory and file name to the table
data['directory'][idx], data['filename'][idx] = os.path.split(ifile)
if not data['directory'][idx]:
data['directory'][idx] = '.'
# Read the fits headers
headarr = self.spectrograph.get_headarr(ifile, strict=strict)
# Grab Meta
for meta_key in self.spectrograph.meta.keys():
value = self.spectrograph.get_meta_value(headarr, meta_key,
required=strict,
usr_row=usr_row,
ignore_bad_header = self.par['rdx']['ignore_bad_headers'])
if isinstance(value, str) and '#' in value:
value = value.replace('#', '')
msgs.warn('Removing troublesome # character from {0}. Returning {1}.'.format(
meta_key, value))
data[meta_key].append(value)
msgs.info('Added metadata for {0}'.format(os.path.split(ifile)[1]))
# JFH Changed the below to not crash if some files have None in
# their MJD. This is the desired behavior since if there are
# empty or corrupt files we still want this to run.
# Validate, print out a warning if there is problem
try:
time.Time(data['mjd'], format='mjd')
except ValueError:
mjd = np.asarray(data['mjd'])
filenames = np.asarray(data['filename'])
bad_files = filenames[mjd == None]
# Print status message
msg = 'Time invalid for {0} files.\n'.format(len(bad_files))
msg += 'Continuing, but the following frames may be empty or have corrupt headers:\n'
for file in bad_files:
msg += ' {0}\n'.format(file)
msgs.warn(msg)
# Return
return data
# TODO: In this implementation, slicing the PypeItMetaData object
# will return an astropy.table.Table, not a PypeItMetaData object.
def __getitem__(self, item):
return self.table.__getitem__(item)
def __setitem__(self, item, value):
return self.table.__setitem__(item, value)
def __len__(self):
return self.table.__len__()
def __repr__(self):
return self.table._base_repr_(html=False,
descr_vals=['PypeItMetaData:\n',
' spectrograph={0}\n'.format(
self.spectrograph.name),
' length={0}\n'.format(len(self))])
def _repr_html_(self):
return self.table._base_repr_(html=True, max_width=-1,
descr_vals=['PypeItMetaData: spectrograph={0}, length={1}\n'.format(
self.spectrograph.name, len(self))])
@staticmethod
def default_keys():
return [ 'directory', 'filename', 'instrume' ]
def keys(self):
return self.table.keys()
def sort(self, col):
return self.table.sort(col)
def merge(self, usrdata, match_type=True):
"""
Use the provided table to supplement or overwrite the metadata.
If the internal table already contains the column in `usrdata`,
the function will try to match the data type of the `usrdata`
column to the existing data type. If it can't it will just add
the column anyway, with the type in `usrdata`. You can avoid
this step by setting `match_type=False`.
Args:
usrdata (:obj:`astropy.table.Table`):
A user provided set of data used to supplement or
overwrite metadata read from the file headers. The
table must have a `filename` column that is used to
match to the metadata table generated within PypeIt.
match_type (:obj:`bool`, optional):
Attempt to match the data type in `usrdata` to the type
in the internal table. See above.
Raises:
TypeError:
Raised if `usrdata` is not an `astropy.io.table.Table`
KeyError:
Raised if `filename` is not a key in the provided table.
"""
meta_data_model = meta.get_meta_data_model()
# Check the input
if not isinstance(usrdata, table.Table):
raise TypeError('Must provide an astropy.io.table.Table instance.')
if 'filename' not in usrdata.keys():
raise KeyError('The user-provided table must have \'filename\' column!')
# Make sure the data are correctly ordered
srt = [np.where(f == self.table['filename'])[0][0] for f in usrdata['filename']]
# Convert types if possible
existing_keys = list(set(self.table.keys()) & set(usrdata.keys()))
radec_done = False
if len(existing_keys) > 0 and match_type:
for key in existing_keys:
if len(self.table[key].shape) > 1: # NOT ALLOWED!!
# TODO: This should be converted to an assert statement...
raise ValueError('CODING ERROR: Found high-dimensional column.')
#embed(header='372 of metadata')
elif key in meta_data_model.keys(): # Is this meta data??
dtype = meta_data_model[key]['dtype']
else:
dtype = self.table[key].dtype
# Deal with None's properly
nones = usrdata[key] == 'None'
usrdata[key][nones] = None
# Rest
                # Allow for str RA, DEC (backwards compatibility)
if key in ['ra', 'dec'] and not radec_done:
ras, decs = meta.convert_radec(usrdata['ra'][~nones].data,
usrdata['dec'][~nones].data)
usrdata['ra'][~nones] = ras.astype(dtype)
usrdata['dec'][~nones] = decs.astype(dtype)
radec_done = True
else:
usrdata[key][~nones] = usrdata[key][~nones].astype(dtype)
# Include the user data in the table
for key in usrdata.keys():
self.table[key] = usrdata[key][srt]
def finalize_usr_build(self, frametype, setup):
"""
Finalize the build of the table based on user-provided data,
typically pulled from the PypeIt file.
This function:
- sets the frame types based on the provided object
- sets all the configurations to the provided `setup`
- assigns all frames to a single calibration group, if the
'calib' column does not exist
- if the 'comb_id' column does not exist, this sets the
combination groups to be either undefined or to be unique
for each science or standard frame, see
:func:`set_combination_groups`.
.. note::
This should only be run if all files are from a single
instrument configuration. :attr:`table` is modified
in-place.
See also: :func:`pypeit.pypeitsetup.PypeItSetup.run`.
.. todo::
- Why isn't frametype just in the user-provided data? It
may be (see get_frame_types) and I'm just not using it...
Args:
frametype (:obj:`dict`):
A dictionary with the types designated by the user. The
file name and type are expected to be the key and value
of the dictionary, respectively. The number of keys
therefore *must* match the number of files in
:attr:`table`. For frames that have multiple types, the
types should be provided as a string with
comma-separated types.
setup (:obj:`str`):
                If the 'setup' column does not exist, fill the
configuration setup columns with this single identifier.
"""
self.get_frame_types(user=frametype)
# TODO: Add in a call to clean_configurations? I didn't add it
# here, because this method is only called for a preconstructed
# pypeit file, which should nominally follow an execution of
# pypeit_setup. If the user edits back in a frame that has an
# invalid key, at least for now the DEIMOS image reader will
# fault.
self.set_configurations(fill=setup)
self.set_calibration_groups(default=True)
self.set_combination_groups()
def get_configuration(self, indx, cfg_keys=None):
"""
Return the configuration dictionary for a given frame.
This is not the same as the backwards compatible "setup"
dictionary.
Args:
indx (:obj:`int`):
The index of the table row to use to construct the
configuration.
cfg_keys (:obj:`list`, optional):
The list of metadata keys to use to construct the
configuration. If None, the `configuration_keys` of
:attr:`spectrograph` is used.
Returns:
dict: A dictionary with the metadata values from the
selected row.
"""
_cfg_keys = self.spectrograph.configuration_keys() if cfg_keys is None else cfg_keys
return {k:self.table[k][indx] for k in _cfg_keys}
def master_key(self, row, det=1):
"""
Construct the master key for the file in the provided row.
The master key is the combination of the configuration, the
calibration group, and the detector. The configuration ID is
the same as included in the configuration column (A, B, C, etc),
the calibration group is the same as the calibration bit number,
and the detector number is provided as an argument and converted
to a zero-filled string with two digits (the maximum number of
detectors is 99).
Using the calibration bit in the keyword allows MasterFrames to
be used with multiple calibration groups.
Args:
row (:obj:`int`):
The 0-indexed row used to construct the key.
det (:obj:`int`, :obj:`tuple`, optional):
The 1-indexed detector number(s). If a tuple, it must include
detectors designated as a viable mosaic for
:attr:`spectrograph`; see
:func:`~pypeit.spectrographs.spectrograph.Spectrograph.allowed_mosaics`.
Returns:
:obj:`str`: Master key with configuration, calibration group(s), and
detector.
Raises:
PypeItError:
Raised if the 'setup' or 'calibbit' columns
haven't been defined.
"""
if 'setup' not in self.keys() or 'calibbit' not in self.keys():
msgs.error('Cannot provide master key string without setup and calibbit; '
'run set_configurations and set_calibration_groups.')
det_name = self.spectrograph.get_det_name(det)
return f"{self['setup'][row]}_{self['calibbit'][row]}_{det_name}"
def construct_obstime(self, row):
"""
Construct the MJD of when the frame was observed.
.. todo::
- Consolidate with :func:`convert_time` ?
Args:
row (:obj:`int`):
The 0-indexed row of the frame.
Returns:
astropy.time.Time: The MJD of the observation.
"""
return time.Time(self['mjd'][row], format='mjd')
def construct_basename(self, row, obstime=None):
"""
Construct the root name primarily for PypeIt file output.
Args:
row (:obj:`int`):
The 0-indexed row of the frame.
obstime (:class:`astropy.time.Time`, optional):
The MJD of the observation. If None, constructed using
:func:`construct_obstime`.
Returns:
str: The root name for file output.
"""
_obstime = self.construct_obstime(row) if obstime is None else obstime
tiso = time.Time(_obstime, format='isot')
dtime = datetime.datetime.strptime(tiso.value, '%Y-%m-%dT%H:%M:%S.%f')
return '{0}-{1}_{2}_{3}{4}'.format(self['filename'][row].split('.fits')[0],
self['target'][row].replace(" ", ""),
self.spectrograph.camera,
datetime.datetime.strftime(dtime, '%Y%m%dT'),
tiso.value.split("T")[1].replace(':',''))
def get_setup(self, row, det=None, config_only=False):
"""
Construct the setup dictionary.
.. todo::
- This is for backwards compatibility, but we should
consider reformatting it. And it may be something to put
in the relevant spectrograph class.
Args:
row (:obj:`int`):
The 0-indexed row used to construct the setup.
det (:obj:`int`, optional):
The 1-indexed detector to include. If None, all
detectors are included.
config_only (:obj:`bool`, optional):
Just return the dictionary with the configuration, don't
include the top-level designation of the configuration
itself.
Returns:
dict: The pypeit setup dictionary with the default format.
Raises:
PypeItError:
                Raised if the 'setup' column hasn't been defined.
"""
if 'setup' not in self.keys():
msgs.error('Cannot provide instrument setup without \'setup\' column; '
'run set_configurations.')
dispname = 'none' if 'dispname' not in self.keys() else self['dispname'][row]
dispangle = 'none' if 'dispangle' not in self.keys() else self['dispangle'][row]
dichroic = 'none' if 'dichroic' not in self.keys() else self['dichroic'][row]
decker = 'none' if 'decker' not in self.keys() else self['decker'][row]
slitwid = 'none' if 'slitwid' not in self.keys() else self['slitwid'][row]
slitlen = 'none' if 'slitlen' not in self.keys() else self['slitlen'][row]
binning = '1,1' if 'binning' not in self.keys() else self['binning'][row]
skey = 'Setup {}'.format(self['setup'][row])
# Key names *must* match configuration_keys() for spectrographs
setup = {skey:
{'--':
{'disperser': {'dispname': dispname, 'dispangle':dispangle},
'dichroic': dichroic,
'slit': {'decker': decker, 'slitwid':slitwid, 'slitlen':slitlen},
'binning': binning, # PypeIt orientation binning of a science image
}
}
}
#_det = np.arange(self.spectrograph.ndet)+1 if det is None else [det]
#for d in _det:
# setup[skey][str(d).zfill(2)] \
# = {'binning': binning, 'det': d,
# 'namp': self.spectrograph.detector[d-1]['numamplifiers']}
return setup[skey] if config_only else setup
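    # Illustrative shape of the returned dictionary (all values hypothetical):
    #
    #   {'Setup A': {'--': {'disperser': {'dispname': '600/4310', 'dispangle': 'none'},
    #                       'dichroic': 'd55',
    #                       'slit': {'decker': 'long_0.7', 'slitwid': 'none', 'slitlen': 'none'},
    #                       'binning': '1,1'}}}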
def get_configuration_names(self, ignore=None, return_index=False, configs=None):
"""
Get the list of the unique configuration names.
This provides just the list of setup identifiers ('A', 'B',
etc.) and the row index where it first occurs. This is
different from :func:`unique_configurations` because the latter
determines and provides the configurations themselves.
This is mostly a convenience function for the writing routines.
Args:
ignore (:obj:`list`, optional):
Ignore configurations in the provided list.
return_index (:obj:`bool`, optional):
                Return row indices with the first occurrence of these
configurations.
configs (:obj:`str`, :obj:`list`, optional):
One or more strings used to select the configurations
to include in the returned objects. If ``'all'``,
pass back all configurations. Otherwise, only return
the configurations matched to this provided string or
list of strings (e.g., ['A','C']).
Returns:
numpy.array: The list of unique setup names. A second
returned object provides the indices of the first occurrence
of these setups, if requested.
Raises:
PypeItError:
                Raised if the 'setup' column hasn't been defined.
"""
if 'setup' not in self.keys():
msgs.error('Cannot get setup names; run set_configurations.')
# Unique configurations
setups, indx = np.unique(self['setup'], return_index=True)
if ignore is not None:
# Remove the selected configurations to ignore
rm = np.logical_not(np.isin(setups, ignore))
setups = setups[rm]
indx = indx[rm]
# Restrict
_configs = None if configs is None else np.atleast_1d(configs)
# TODO: Why do we need to specify 'all' here? Can't `configs is
# None` mean that you want all the configurations? Or can we
# make the default 'all'?
if configs is not None and 'all' not in _configs:
use = np.isin(setups, _configs)
setups = setups[use]
indx = indx[use]
        return (setups, indx) if return_index else setups
def _get_cfgs(self, copy=False, rm_none=False):
"""
Convenience method to return :attr:`configs` with possible
alterations.
This method *should not* be called by any method outside of
this class; use :func:`unique_configurations` instead.
Args:
copy (:obj:`bool`, optional):
Return a deep copy of :attr:`configs` instead of the
object itself.
rm_none (:obj:`bool`, optional):
Remove any configurations set to 'None'. If copy is
True, this is done *after* :attr:`configs` is copied
to a new dictionary.
Returns:
:obj:`dict`: A nested dictionary, one dictionary per
configuration with the associated metadata for each.
"""
_cfg = deepcopy(self.configs) if copy else self.configs
if rm_none and 'None' in _cfg.keys():
del _cfg['None']
return _cfg
def unique_configurations(self, force=False, copy=False, rm_none=False):
"""
Return the unique instrument configurations.
If run before the ``'setup'`` column is initialized, this function
determines the unique instrument configurations by finding
unique combinations of the items in the metadata table listed by
the spectrograph ``configuration_keys`` method.
If run after the ``'setup'`` column has been set, this simply
constructs the configuration dictionary using the unique
configurations in that column.
This is used to set the internal :attr:`configs`. If this
attribute is not None, this function simply returns
:attr:`config` (cf. ``force``).
.. warning::
Any frame types returned by the
:func:`~pypeit.spectrographs.spectrograph.Spectrograph.config_independent_frames`
method for :attr:`spectrograph` will be ignored in the
construction of the unique configurations. If
:func:`~pypeit.spectrographs.spectrograph.Spectrograph.config_independent_frames`
does not return None and the frame types have not yet
been defined (see :func:`get_frame_types`), this method
will fault!
Args:
force (:obj:`bool`, optional):
Force the configurations to be redetermined. Otherwise
the configurations are only determined if
:attr:`configs` has not yet been defined.
copy (:obj:`bool`, optional):
Return a deep copy of :attr:`configs` instead of the
object itself.
rm_none (:obj:`bool`, optional):
Remove any configurations set to 'None'. If copy is
True, this is done *after* :attr:`configs` is copied
to a new dictionary.
Returns:
:obj:`dict`: A nested dictionary, one dictionary per
configuration with the associated metadata for each.
Raises:
PypeItError:
Raised if there are list of frame types to ignore but
the frame types have not been defined yet.
"""
if self.configs is not None and not force:
return self._get_cfgs(copy=copy, rm_none=rm_none)
if 'setup' in self.keys():
msgs.info('Setup column already set. Finding unique configurations.')
uniq, indx = np.unique(self['setup'], return_index=True)
ignore = uniq == 'None'
if np.sum(ignore) > 0:
msgs.warn('Ignoring {0} frames with configuration set to None.'.format(
np.sum(ignore)))
self.configs = {}
for i in range(len(uniq)):
if ignore[i]:
continue
self.configs[uniq[i]] = self.get_configuration(indx[i])
msgs.info('Found {0} unique configurations.'.format(len(self.configs)))
return self._get_cfgs(copy=copy, rm_none=rm_none)
msgs.info('Using metadata to determine unique configurations.')
# If the frame types have been set, ignore anything listed in
# the ignore_frames
indx = np.arange(len(self))
ignore_frames = self.spectrograph.config_independent_frames()
if ignore_frames is not None:
if 'frametype' not in self.keys():
msgs.error('To ignore frames, types must have been defined; run get_frame_types.')
ignore_frames = list(ignore_frames.keys())
msgs.info('Unique configurations ignore frames with type: {0}'.format(ignore_frames))
use = np.ones(len(self), dtype=bool)
for ftype in ignore_frames:
use &= np.logical_not(self.find_frames(ftype))
indx = indx[use]
if len(indx) == 0:
msgs.error('No frames to use to define configurations!')
# Get the list of keys to use
cfg_keys = self.spectrograph.configuration_keys()
# Configuration identifiers are iterations through the
# upper-case letters: A, B, C, etc.
double_alphabet = [str_i + str_j for str_i in string.ascii_uppercase for str_j in string.ascii_uppercase]
cfg_iter = list(string.ascii_uppercase) + double_alphabet
cfg_indx = 0
# TODO: Placeholder: Allow an empty set of configuration keys
# meaning that the instrument setup has only one configuration.
if len(cfg_keys) == 0:
self.configs = {}
self.configs[cfg_iter[cfg_indx]] = {}
msgs.info('All files assumed to be from a single configuration.')
return self._get_cfgs(copy=copy, rm_none=rm_none)
# Use the first file to set the first unique configuration
self.configs = {}
self.configs[cfg_iter[cfg_indx]] = self.get_configuration(indx[0], cfg_keys=cfg_keys)
cfg_indx += 1
# Check if any of the other files show a different
# configuration.
for i in indx[1:]:
j = 0
for c in self.configs.values():
if row_match_config(self.table[i], c, self.spectrograph):
break
j += 1
unique = j == len(self.configs)
if unique:
if cfg_indx == len(cfg_iter):
msgs.error('Cannot assign more than {0} configurations!'.format(len(cfg_iter)))
self.configs[cfg_iter[cfg_indx]] = self.get_configuration(i, cfg_keys=cfg_keys)
cfg_indx += 1
msgs.info('Found {0} unique configurations.'.format(len(self.configs)))
return self._get_cfgs(copy=copy, rm_none=rm_none)
def set_configurations(self, configs=None, force=False, fill=None):
"""
Assign each frame to a configuration (setup) and include it
in the metadata table.
The internal table is edited *in place*. If the 'setup'
column already exists, the configurations are **not** reset
unless you call the function with ``force=True``.
Args:
configs (:obj:`dict`, optional):
A nested dictionary, one dictionary per configuration
with the associated values of the metadata associated
with each configuration. The metadata keywords in the
dictionary should be the same as in the table, and the
keywords used to set the configuration should be the
same as returned by the spectrograph
`configuration_keys` method. The latter is not checked.
If None, this is set by :func:`unique_configurations`.
force (:obj:`bool`, optional):
Force the configurations to be reset.
fill (:obj:`str`, optional):
If the 'setup' column does not exist, fill the
configuration setup columns with this single identifier.
Ignores other inputs.
Raises:
PypeItError:
Raised if none of the keywords in the provided
configuration match with the metadata keywords. Also
raised when some frames cannot be assigned to a
configuration, the spectrograph defined frames that
have been ignored in the determination of the unique
configurations, but the frame types have not been set
yet.
"""
# Configurations have already been set
if 'setup' in self.keys() and not force:
return
if 'setup' not in self.keys() and fill is not None:
self['setup'] = fill
return
_configs = self.unique_configurations() if configs is None else configs
for k, cfg in _configs.items():
if len(set(cfg.keys()) - set(self.keys())) > 0:
msgs.error('Configuration {0} defined using unavailable keywords!'.format(k))
self.table['setup'] = 'None'
nrows = len(self)
for i in range(nrows):
for d, cfg in _configs.items():
if row_match_config(self.table[i], cfg, self.spectrograph):
self.table['setup'][i] = d
# Check if any of the configurations are not set
not_setup = self.table['setup'] == 'None'
if not np.any(not_setup):
# All are set, so we're done
return
# Some frame types may have been ignored
ignore_frames = self.spectrograph.config_independent_frames()
if ignore_frames is None:
# Nope, we're still done
return
# At this point, we need the frame type to continue
if 'frametype' not in self.keys():
msgs.error('To account for ignored frames, types must have been defined; run '
'get_frame_types.')
# For each configuration, determine if any of the frames with
# the ignored frame types should be assigned to it:
for cfg_key in _configs.keys():
in_cfg = self.table['setup'] == cfg_key
for ftype, metakey in ignore_frames.items():
# TODO: For now, use this assert to check that the
# metakey is either not set or a string
assert metakey is None or isinstance(metakey, str), \
                    'CODING ERROR: metadata keywords set by config_independent_frames are not ' \
'correctly defined for {0}; values must be None or a string.'.format(
self.spectrograph.__class__.__name__)
# Get the list of frames of this type without a
# configuration
indx = (self.table['setup'] == 'None') & self.find_frames(ftype)
if not np.any(indx):
continue
if metakey is None:
# No matching meta data defined, so just set all
# the frames to this (first) configuration
self.table['setup'][indx] = cfg_key
continue
# Find the unique values of meta for this configuration
uniq_meta = np.unique(self.table[metakey][in_cfg].data)
# Warn the user that the matching meta values are not
# unique for this configuration.
if uniq_meta.size != 1:
msgs.warn('When setting the instrument configuration for {0} '.format(ftype)
+ 'frames, configuration {0} does not have unique '.format(cfg_key)
                              + '{0} values.'.format(metakey))
# Find the frames of this type that match any of the
# meta data values
indx &= np.isin(self.table[metakey], uniq_meta)
self.table['setup'][indx] = cfg_key
def clean_configurations(self):
"""
Ensure that configuration-defining keywords all have values
that will yield good PypeIt reductions. Any frames that do
not are removed from :attr:`table`, meaning this method may
modify that attribute directly.
The valid values for configuration keys is set by
:func:`~pypeit.spectrographs.spectrograph.Spectrograph.valid_configuration_values`.
"""
cfg_limits = self.spectrograph.valid_configuration_values()
if cfg_limits is None:
# No values specified, so we're done
return
good = np.ones(len(self), dtype=bool)
for key in cfg_limits.keys():
# NOTE: For now, check that the configuration values were
# correctly assigned in the spectrograph class definition.
# This should probably go somewhere else or just removed.
assert isinstance(cfg_limits[key], list), \
'CODING ERROR: valid_configuration_values is not correctly defined ' \
'for {0}; values must be a list.'.format(self.spectrograph.__class__.__name__)
# Check that the metadata are valid for this column.
indx = np.isin(self[key], cfg_limits[key])
if not np.all(indx):
msgs.warn('Found frames with invalid {0}.'.format(key))
good &= indx
if np.all(good):
# All values good, so we're done
return
# Alert the user that some of the frames are going to be
# removed
msg = 'The following frames have configurations that cannot be reduced by PypeIt' \
' and will be removed from the metadata table (pypeit file):\n'
indx = np.where(np.logical_not(good))[0]
for i in indx:
msg += ' {0}\n'.format(self['filename'][i])
msgs.warn(msg)
# And remove 'em
self.table = self.table[good]
def _set_calib_group_bits(self):
"""
Set the calibration group bit based on the string values of the
'calib' column.
"""
# Find the number groups by searching for the maximum number
# provided, regardless of whether or not a science frame is
# assigned to that group.
ngroups = 0
for i in range(len(self)):
if self['calib'][i] in ['all', 'None']:
# No information, keep going
continue
# Convert to a list of numbers
l = np.amax([ 0 if len(n) == 0 else int(n)
for n in self['calib'][i].replace(':',',').split(',')])
# Check against current maximum
ngroups = max(l+1, ngroups)
# Define the bitmask and initialize the bits
self.calib_bitmask = BitMask(np.arange(ngroups))
self['calibbit'] = 0
# Set the calibration bits
for i in range(len(self)):
# Convert the string to the group list
grp = parse.str2list(self['calib'][i], ngroups)
if grp is None:
# No group selected
continue
# Assign the group; ensure the integers are unique
self['calibbit'][i] = self.calib_bitmask.turn_on(self['calibbit'][i], grp)
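        # Illustrative example: a 'calib' entry of '0,2' turns on bits 0 and 2,
        # giving calibbit = 2**0 + 2**2 = 5 (assuming ngroups >= 3); an entry
        # that parse.str2list maps to None (e.g. 'None') leaves calibbit at 0.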
def _check_calib_groups(self):
"""
Check that the calibration groups are valid.
This currently only checks that the science frames are
associated with one calibration group.
TODO: Is this appropriate for NIR data?
"""
is_science = self.find_frames('science')
for i in range(len(self)):
if not is_science[i]:
continue
if len(self.calib_bitmask.flagged_bits(self['calibbit'][i])) > 1:
msgs.error('Science frames can only be assigned to a single calibration group.')
@property
def n_calib_groups(self):
"""Return the number of calibration groups."""
return None if self.calib_bitmask is None else self.calib_bitmask.nbits
def set_calibration_groups(self, global_frames=None, default=False, force=False):
"""
Group calibration frames into sets.
Requires the 'setup' column to have been defined. For now this
is a simple grouping of frames with the same configuration.
.. todo::
- Maintain a detailed description of the logic.
The 'calib' column has a string type to make sure that it
matches with what can be read from the pypeit file. The
'calibbit' column is actually what is used to determine the
calibration group of each frame; see :attr:`calib_bitmask`.
Args:
global_frames (:obj:`list`, optional):
A list of strings with the frame types to use in all
calibration groups (e.g., ['bias', 'dark']).
default (:obj:`bool`, optional):
If the 'calib' column is not present, set a single
calibration group *for all rows*.
force (:obj:`bool`, optional):
Force the calibration groups to be reconstructed if
the 'calib' column already exists.
Raises:
PypeItError:
Raised if 'setup' column is not defined, or if
`global_frames` is provided but the frame types have not
been defined yet.
"""
# Set the default if requested and 'calib' doesn't exist yet
if 'calib' not in self.keys() and default:
self['calib'] = '0'
# Make sure the calibbit column does not exist
if 'calibbit' in self.keys():
del self['calibbit']
# Groups have already been set
if 'calib' in self.keys() and 'calibbit' in self.keys() and not force:
return
# Groups have been set but the bits have not (likely because the
# data was read from a pypeit file)
if 'calib' in self.keys() and 'calibbit' not in self.keys() and not force:
self._set_calib_group_bits()
self._check_calib_groups()
return
# TODO: The rest of this just nominally sets the calibration
# group based on the configuration. This will change!
# The configuration must be present to determine the calibration
# group
if 'setup' not in self.keys():
msgs.error('Must have defined \'setup\' column first; try running set_configurations.')
configs = np.unique(self['setup'].data).tolist()
if 'None' in configs:
configs.remove('None') # Ignore frames with undefined configurations
n_cfg = len(configs)
# TODO: Science frames can only have one calibration group
# Assign everything from the same configuration to the same
# calibration group; this needs to have dtype=object, otherwise
# any changes to the strings will be truncated at 4 characters.
self.table['calib'] = np.full(len(self), 'None', dtype=object)
for i in range(n_cfg):
self['calib'][(self['setup'] == configs[i]) & (self['framebit'] > 0)] = str(i)
# Allow some frame types to be used in all calibration groups
# (like biases and darks)
if global_frames is not None:
if 'frametype' not in self.keys():
msgs.error('To set global frames, types must have been defined; '
'run get_frame_types.')
calibs = '0' if n_cfg == 1 else ','.join(np.arange(n_cfg).astype(str))
for ftype in global_frames:
indx = np.where(self.find_frames(ftype))[0]
for i in indx:
self['calib'][i] = calibs
# Set the bits based on the string representation of the groups
self._set_calib_group_bits()
# Check that the groups are valid
self._check_calib_groups()
def find_frames(self, ftype, calib_ID=None, index=False):
"""
Find the rows with the associated frame type.
If the index is provided, the frames must also be matched to the
relevant science frame.
Args:
ftype (str):
The frame type identifier. See the keys for
:class:`pypeit.core.framematch.FrameTypeBitMask`. If
set to the string 'None', this returns all frames
without a known type.
calib_ID (:obj:`int`, optional):
Index of the calibration group that it must match. If None,
any row of the specified frame type is included.
index (:obj:`bool`, optional):
Return an array of 0-indexed indices instead of a
boolean array.
Returns:
numpy.ndarray: A boolean array, or an integer array if
index=True, with the rows that contain the frames of the
requested type.
Raises:
PypeItError:
Raised if the `framebit` column is not set in the table.
"""
if 'framebit' not in self.keys():
msgs.error('Frame types are not set. First run get_frame_types.')
if ftype == 'None':
return self['framebit'] == 0
# Select frames
indx = self.type_bitmask.flagged(self['framebit'], ftype)
if calib_ID is not None:
# Select frames in the same calibration group
indx &= self.find_calib_group(calib_ID)
# Return
return np.where(indx)[0] if index else indx
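    # Illustrative usage (assumes frame types and calibration groups are set;
    # `fitstbl` is a hypothetical PypeItMetaData instance):
    #
    #   is_arc = fitstbl.find_frames('arc', calib_ID=0)                 # boolean mask
    #   arc_rows = fitstbl.find_frames('arc', calib_ID=0, index=True)   # row indices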
def find_frame_files(self, ftype, calib_ID=None):
"""
Return the list of files with a given frame type.
The frames must also match the science frame index, if it is
provided.
Args:
ftype (str):
The frame type identifier. See the keys for
:class:`pypeit.core.framematch.FrameTypeBitMask`.
calib_ID (:obj:`int`, optional):
Index of the calibration group that it must match. If None,
any row of the specified frame type is included.
Returns:
list: List of file paths that match the frame type and
science frame ID, if the latter is provided.
"""
return self.frame_paths(self.find_frames(ftype, calib_ID=calib_ID))
def frame_paths(self, indx):
"""
Return the full paths to one or more frames.
Args:
indx (:obj:`int`, array-like):
One or more 0-indexed rows in the table with the frames
to return. Can be an array of indices or a boolean
array of the correct length.
Returns:
list: List of the full paths of one or more frames.
"""
if isinstance(indx, (int,np.integer)):
return os.path.join(self['directory'][indx], self['filename'][indx])
return [os.path.join(d,f) for d,f in zip(self['directory'][indx], self['filename'][indx])]
def set_frame_types(self, type_bits, merge=True):
"""
Set and return a Table with the frame types and bits.
Args:
type_bits (numpy.ndarray):
Integer bitmask with the frame types. The length must
match the existing number of table rows.
merge (:obj:`bool`, optional):
Merge the types and bits into the existing table. This
will *overwrite* any existing columns.
Returns:
`astropy.table.Table`: Table with two columns, the frame
type name and bits.
"""
# Making Columns to pad string array
ftype_colmA = table.Column(self.type_bitmask.type_names(type_bits), name='frametype')
# KLUDGE ME
#
# TODO: It would be good to get around this. Is it related to
# this change?
# http://docs.astropy.org/en/stable/table/access_table.html#bytestring-columns-in-python-3
#
# See also:
#
# http://docs.astropy.org/en/stable/api/astropy.table.Table.html#astropy.table.Table.convert_bytestring_to_unicode
#
# Or we can force type_names() in bitmask to always return the
# correct type...
if int(str(ftype_colmA.dtype)[2:]) < 9:
ftype_colm = table.Column(self.type_bitmask.type_names(type_bits), dtype='U9',
name='frametype')
else:
ftype_colm = ftype_colmA
fbits_colm = table.Column(type_bits, name='framebit')
t = table.Table([ftype_colm, fbits_colm])
if merge:
self['frametype'] = t['frametype']
self['framebit'] = t['framebit']
return t
def edit_frame_type(self, indx, frame_type, append=False):
"""
Edit the frame type by hand.
Args:
indx (:obj:`int`):
The 0-indexed row in the table to edit
frame_type (:obj:`str`, :obj:`list`):
One or more frame types to append/overwrite.
append (:obj:`bool`, optional):
Append the frame type. If False, all existing frame
                types are overwritten by the provided type.
"""
if not append:
self['framebit'][indx] = 0
self['framebit'][indx] = self.type_bitmask.turn_on(self['framebit'][indx], flag=frame_type)
self['frametype'][indx] = self.type_bitmask.type_names(self['framebit'][indx])
def get_frame_types(self, flag_unknown=False, user=None, merge=True):
"""
Generate a table of frame types from the input metadata object.
.. todo::
- Here's where we could add a SPIT option.
Args:
flag_unknown (:obj:`bool`, optional):
Instead of crashing out if there are unidentified files,
leave without a type and continue.
user (:obj:`dict`, optional):
A dictionary with the types designated by the user. The
file name and type are expected to be the key and value
of the dictionary, respectively. The number of keys
therefore *must* match the number of files in
:attr:`table`. For frames that have multiple types, the
types should be provided as a string with
comma-separated types.
merge (:obj:`bool`, optional):
                Merge the frame typing into the existing table.
Returns:
:obj:`astropy.table.Table`: A Table with two columns, the
type names and the type bits. See
:class:`pypeit.core.framematch.FrameTypeBitMask` for the
allowed frame types.
"""
# Checks
if 'frametype' in self.keys() or 'framebit' in self.keys():
msgs.warn('Removing existing frametype and framebit columns.')
if 'frametype' in self.keys():
del self.table['frametype']
if 'framebit' in self.keys():
del self.table['framebit']
# # TODO: This needs to be moved into each Spectrograph
# if useIDname and 'idname' not in self.keys():
# raise ValueError('idname is not set in table; cannot use it for file typing.')
# Start
msgs.info("Typing files")
type_bits = np.zeros(len(self), dtype=self.type_bitmask.minimum_dtype())
# Use the user-defined frame types from the input dictionary
if user is not None:
if len(user.keys()) != len(self):
raise ValueError('The user-provided dictionary does not match table length.')
msgs.info('Using user-provided frame types.')
for ifile,ftypes in user.items():
indx = self['filename'] == ifile
type_bits[indx] = self.type_bitmask.turn_on(type_bits[indx], flag=ftypes.split(','))
return self.set_frame_types(type_bits, merge=merge)
# Loop over the frame types
for i, ftype in enumerate(self.type_bitmask.keys()):
# # Initialize: Flag frames with the correct ID name or start by
# # flagging all as true
# indx = self['idname'] == self.spectrograph.idname(ftype) if useIDname \
# else np.ones(len(self), dtype=bool)
# Include a combination of instrument-specific checks using
# combinations of the full set of metadata
exprng = self.par['scienceframe']['exprng'] if ftype == 'science' \
else self.par['calibrations']['{0}frame'.format(ftype)]['exprng']
# TODO: Use & or | ? Using idname above gets overwritten by
            # this if the frames fail to meet the other checks in this call.
# indx &= self.spectrograph.check_frame_type(ftype, self.table, exprng=exprng)
indx = self.spectrograph.check_frame_type(ftype, self.table, exprng=exprng)
# Turn on the relevant bits
type_bits[indx] = self.type_bitmask.turn_on(type_bits[indx], flag=ftype)
# Find the nearest standard star to each science frame
# TODO: Should this be 'standard' or 'science' or both?
if 'ra' not in self.keys() or 'dec' not in self.keys():
msgs.warn('Cannot associate standard with science frames without sky coordinates.')
else:
# TODO: Do we want to do this here?
indx = self.type_bitmask.flagged(type_bits, flag='standard')
for b, f, ra, dec in zip(type_bits[indx], self['filename'][indx], self['ra'][indx],
self['dec'][indx]):
if ra == 'None' or dec == 'None':
msgs.warn('RA and DEC must not be None for file:' + msgs.newline() + f)
msgs.warn('The above file could be a twilight flat frame that was'
+ msgs.newline() + 'missed by the automatic identification.')
b = self.type_bitmask.turn_off(b, flag='standard')
continue
# If an object exists within 20 arcmins of a listed standard,
# then it is probably a standard star
foundstd = flux_calib.find_standard_file(ra, dec, check=True)
b = self.type_bitmask.turn_off(b, flag='science' if foundstd else 'standard')
# Find the files without any types
indx = np.logical_not(self.type_bitmask.flagged(type_bits))
if np.any(indx):
msgs.info("Couldn't identify the following files:")
for f in self['filename'][indx]:
msgs.info(f)
if not flag_unknown:
msgs.error("Check these files before continuing")
# Finish up (note that this is called above if user is not None!)
msgs.info("Typing completed!")
return self.set_frame_types(type_bits, merge=merge)
def set_pypeit_cols(self, write_bkg_pairs=False, write_manual=False):
"""
Generate the list of columns to be included in the fitstbl
(nearly the complete list).
Args:
write_bkg_pairs (:obj:`bool`, optional):
Add additional ``PypeIt`` columns for calib, comb_id
and bkg_id
write_manual (:obj:`bool`, optional):
Add additional ``PypeIt`` columns for manual extraction
Returns:
`numpy.ndarray`_: Array of columns to be used in the fits
            table.
"""
# Columns for output
columns = self.spectrograph.pypeit_file_keys()
extras = []
# comb, bkg columns
if write_bkg_pairs:
extras += ['calib', 'comb_id', 'bkg_id']
# manual
if write_manual:
extras += ['manual']
for key in extras:
if key not in columns:
columns += [key]
# Take only those present
output_cols = np.array(columns)
return output_cols[np.isin(output_cols, self.keys())].tolist()
def set_combination_groups(self, assign_objects=True):
"""
Set combination groups.
.. note::
:attr:`table` is edited in place.
This function can be used to initialize the combination group
and background group columns, and/or to initialize the combination
groups to the set of objects (science or standard frames) to a
unique integer.
If the 'comb_id' or 'bkg_id' columns do not exist, they're set
to -1.
Args:
assign_objects (:obj:`bool`, optional):
If all of 'comb_id' values are less than 0 (meaning
they're unassigned), the combination groups are set to
be unique for each standard and science frame.
"""
if 'comb_id' not in self.keys():
self['comb_id'] = -1
if 'bkg_id' not in self.keys():
self['bkg_id'] = -1
if assign_objects and np.all(self['comb_id'] < 0):
# find_frames will throw an exception if framebit is not
# set...
sci_std_idx = np.where(np.any([self.find_frames('science'),
self.find_frames('standard')], axis=0))[0]
self['comb_id'][sci_std_idx] = np.arange(len(sci_std_idx), dtype=int) + 1
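        # Illustrative outcome: with three science/standard frames and no prior
        # assignments, 'comb_id' becomes [1, 2, 3] for those rows while every
        # other frame keeps comb_id = bkg_id = -1.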
def set_user_added_columns(self):
"""
Set columns that the user *might* add
.. note::
:attr:`table` is edited in place.
This function can be used to initialize columns
that the user might add
"""
if 'manual' not in self.keys():
self['manual'] = ''
def write_sorted(self, ofile, overwrite=True, ignore=None,
write_bkg_pairs=False, write_manual=False):
"""
Write the sorted file.
The sorted file lists all the unique instrument configurations
(setups) and the frames associated with each configuration. The
output data table is identical to the pypeit file output.
.. todo::
- This is for backwards compatibility, but we should
consider reformatting/removing it.
Args:
ofile (:obj:`str`):
Name for the output sorted file.
overwrite (:obj:`bool`, optional):
Overwrite any existing file with the same name.
ignore (:obj:`list`, optional):
Ignore configurations in the provided list.
write_bkg_pairs (:obj:`bool`, optional):
Add additional ``PypeIt`` columns for calib, comb_id
and bkg_id
write_manual (:obj:`bool`, optional):
Add additional ``PypeIt`` columns for manual extraction
Raises:
PypeItError:
                Raised if the 'setup' column hasn't been defined.
"""
if 'setup' not in self.keys():
msgs.error('Cannot write sorted instrument configuration table without \'setup\' '
'column; run set_configurations.')
if os.path.isfile(ofile) and not overwrite:
            msgs.error('{0} already exists. Use overwrite=True to overwrite.'.format(ofile))
# Grab output columns
output_cols = self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs,
write_manual=write_manual)
cfgs = self.unique_configurations(copy=ignore is not None)
if ignore is not None:
for key in cfgs.keys():
if key in ignore:
del cfgs[key]
# Construct file
ff = open(ofile, 'w')
for setup in cfgs.keys():
# Get the subtable of frames taken in this configuration
indx = self['setup'] == setup
if not np.any(indx):
continue
subtbl = self.table[output_cols][indx]
# Write the file
ff.write('##########################################################\n')
ff.write('Setup {:s}\n'.format(setup))
ff.write('\n'.join(dict_to_lines(cfgs[setup], level=1)) + '\n')
ff.write('#---------------------------------------------------------\n')
mjd = subtbl['mjd'].copy()
# Deal with possibly None mjds if there were corrupt header cards
mjd[mjd == None] = -99999.0
isort = np.argsort(mjd)
subtbl = subtbl[isort]
subtbl.write(ff, format='ascii.fixed_width')
ff.write('##end\n')
ff.close()
# TODO: Do we need a calib file?
def write_calib(self, ofile, overwrite=True, ignore=None):
"""
Write the calib file.
The calib file provides the unique instrument configurations
(setups) and the association of each frame from that
configuration with a given calibration group.
.. todo::
- This is for backwards compatibility, but we should
consider reformatting/removing it.
- This is complicated by allowing some frame types to have
no association with an instrument configuration
- This is primarily used for QA now; but could probably use the pypeit file instead
Args:
ofile (:obj:`str`):
Name for the output sorted file.
overwrite (:obj:`bool`, optional):
Overwrite any existing file with the same name.
ignore (:obj:`list`, optional):
Ignore calibration groups in the provided list.
Raises:
PypeItError:
Raised if the 'setup' or 'calibbit' columns haven't been
defined.
"""
if 'setup' not in self.keys() or 'calibbit' not in self.keys():
msgs.error('Cannot write calibration groups without \'setup\' and \'calibbit\' '
'columns; run set_configurations and set_calibration_groups.')
if os.path.isfile(ofile) and not overwrite:
            msgs.error('{0} already exists. Use overwrite=True to overwrite.'.format(ofile))
# Construct the setups dictionary
cfg = self.unique_configurations(copy=True, rm_none=True)
# TODO: We should edit the relevant follow-on code so that we
# don't have to do these gymnastics. Or better yet, just stop
# producing/using the *.calib file.
_cfg = {}
for setup in cfg.keys():
_cfg[setup] = {}
_cfg[setup]['--'] = deepcopy(cfg[setup])
cfg = _cfg
# Iterate through the calibration bit names as these are the root of the
# MasterFrames and QA
for icbit in np.unique(self['calibbit'].data):
cbit = int(icbit) # for yaml
# Skip this group
if ignore is not None and cbit in ignore:
continue
# Find the frames in this group
#in_group = self.find_calib_group(i)
in_cbit = self['calibbit'] == cbit
# Find the unique configurations in this group, ignoring any
# undefined ('None') configurations
#setup = np.unique(self['setup'][in_group]).tolist()
setup = np.unique(self['setup'][in_cbit]).tolist()
if 'None' in setup:
setup.remove('None')
# Make sure that each calibration group should only contain
# frames from a single configuration
if len(setup) != 1:
msgs.error('Each calibration group must be from one and only one instrument '
'configuration with a valid letter identifier; i.e., the '
'configuration cannot be None.')
# Find the frames of each type in this group
cfg[setup[0]][cbit] = {}
for key in self.type_bitmask.keys():
#ftype_in_group = self.find_frames(key) & in_group
ftype_in_group = self.find_frames(key) & in_cbit
cfg[setup[0]][cbit][key] = [ os.path.join(d,f)
for d,f in zip(self['directory'][ftype_in_group],
self['filename'][ftype_in_group])]
# Write it
ff = open(ofile, 'w')
ff.write(yaml.dump(utils.yamlify(cfg)))
ff.close()
def write_pypeit(self, output_path=None, cfg_lines=None,
write_bkg_pairs=False, write_manual=False,
configs=None):
"""
Write a pypeit file in data-table format.
The pypeit file is the main configuration file for PypeIt,
configuring the control-flow and algorithmic parameters and
listing the data files to read. This function writes the
columns selected by the
:func:`pypeit.spectrographs.spectrograph.Spectrograph.pypeit_file_keys`,
which can be specific to each instrument.
Args:
output_path (:obj:`str`, optional):
Root path for the output pypeit files. If None, set
to current directory. If the output directory does
not exist, it is created.
cfg_lines (:obj:`list`, optional):
The list of configuration lines to include in the file.
If None are provided, the vanilla configuration is
included.
write_bkg_pairs (:obj:`bool`, optional):
When constructing the
:class:`pypeit.metadata.PypeItMetaData` object, include
two columns called `comb_id` and `bkg_id` that identify
object and background frame pairs.
write_manual (:obj:`bool`, optional):
Add additional ``PypeIt`` columns for manual extraction
configs (:obj:`str`, :obj:`list`, optional):
One or more strings used to select the configurations
to include in the returned objects. If ``'all'``,
pass back all configurations. Otherwise, only return
the configurations matched to this provided string or
list of strings (e.g., ['A','C']). See
:attr:`configs`.
Raises:
PypeItError:
                Raised if the 'setup' column isn't defined.
Returns:
:obj:`list`: List of ``PypeIt`` files generated.
"""
# Set output path
if output_path is None:
output_path = os.getcwd()
# Find unique configurations, always ignoring any 'None'
# configurations...
cfg = self.unique_configurations(copy=True, rm_none=True)
# Get the setups to write
if configs is None or configs == 'all' or configs == ['all']:
cfg_keys = list(cfg.keys())
else:
_configs = configs if isinstance(configs, list) else [configs]
cfg_keys = [key for key in cfg.keys() if key in _configs]
if len(cfg_keys) == 0:
msgs.error('No setups to write!')
# Grab output columns
output_cols = self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs,
write_manual=write_manual)
# Write the pypeit files
ofiles = [None]*len(cfg_keys)
for j,setup in enumerate(cfg_keys):
# Create the output directory
root = '{0}_{1}'.format(self.spectrograph.name, setup)
odir = os.path.join(output_path, root)
if not os.path.isdir(odir):
os.makedirs(odir)
# Create the output file name
ofiles[j] = os.path.join(odir, '{0}.pypeit'.format(root))
# Get the setup lines
setup_lines = dict_to_lines({'Setup {0}'.format(setup):
utils.yamlify(cfg[setup])}, level=1)
# Get the paths
in_cfg = self['setup'] == setup
if not np.any(in_cfg):
continue
paths = np.unique(self['directory'][in_cfg]).tolist()
# Get the data lines
subtbl = self.table[output_cols][in_cfg]
subtbl.sort(['frametype','filename'])
with io.StringIO() as ff:
subtbl.write(ff, format='ascii.fixed_width')
data_lines = ff.getvalue().split('\n')[:-1]
# Write the file
make_pypeit_file(ofiles[j], self.spectrograph.name, [], cfg_lines=cfg_lines,
setup_lines=setup_lines, sorted_files=data_lines, paths=paths)
# Return
return ofiles
def write(self, output=None, rows=None, columns=None, sort_col=None, overwrite=False,
header=None):
"""
Write the metadata either to a file or to the screen.
The method allows you to set the columns to print and which column to
use for sorting.
Args:
output (:obj:`str`, optional):
Output signature or file name. If None, the table contents
are printed to the screen. If ``'table'``, the table that
would have been printed/written to disk is returned.
Otherwise, the string is interpreted as the name of an ascii
file to which to write the table contents.
rows (`numpy.ndarray`_, optional):
A boolean vector selecting the rows of the table to write. If
None, all rows are written. Shape must match the number of
the rows in the table.
columns (:obj:`str`, :obj:`list`, optional):
A list of columns to include in the output file. Can be
provided as a list directly or as a comma-separated string.
                If None or ``'all'``, all columns are written; if
``'pypeit'``, the columns are the same as those included in
the pypeit file. Each selected column must be a valid pypeit
metadata keyword, specific to :attr:`spectrograph`.
Additional valid keywords, depending on the processing level
of the metadata table, are directory, filename, frametype,
framebit, setup, calib, and calibbit.
sort_col (:obj:`str`, optional):
Name of the column to use for sorting the output. If
None, the table is printed in its current state.
overwrite (:obj:`bool`, optional):
Overwrite any existing file; otherwise raise an
exception.
header (:obj:`str`, :obj:`list`, optional):
                One or more strings to write to the top of the file, one
string per file line; ``# `` is added to the beginning of
each string. Ignored if ``output`` does not specify an output
file.
Returns:
`astropy.table.Table`: The table object that would have been
written/printed if ``output == 'table'``. Otherwise, the method
always returns None.
Raises:
ValueError:
Raised if the columns to include are not valid, or if the
column to use for sorting is not valid.
FileExistsError:
Raised if overwrite is False and the file exists.
"""
# Check the file can be written (this is here because the spectrograph
# needs to be defined first)
ofile = None if output in [None, 'table'] else output
if ofile is not None and os.path.isfile(ofile) and not overwrite:
raise FileExistsError(f'{ofile} already exists; set flag to overwrite.')
# Check the rows input
if rows is not None and len(rows) != len(self.table):
raise ValueError('Boolean vector selecting output rows has incorrect length.')
# Get the columns to return
if columns in [None, 'all']:
tbl_cols = list(self.keys())
elif columns == 'pypeit':
tbl_cols = self.set_pypeit_cols(write_bkg_pairs=True)
else:
all_cols = list(self.keys())
tbl_cols = columns if isinstance(columns, list) else columns.split(',')
badcol = [col not in all_cols for col in tbl_cols]
if np.any(badcol):
raise ValueError('The following columns are not valid: {0}'.format(
', '.join(tbl_cols[badcol])))
# Make sure the basic parameters are the first few columns; do them in
# reverse order so I can always insert at the beginning of the list
for col in ['framebit', 'frametype', 'filename', 'directory']:
if col not in tbl_cols:
continue
indx = np.where([t == col for t in tbl_cols])[0][0]
if indx != 0:
tbl_cols.insert(0, tbl_cols.pop(indx))
# Make sure the dithers and combination and background IDs are the last
# few columns
ncol = len(tbl_cols)
for col in ['dithpat', 'dithpos', 'dithoff', 'calib', 'comb_id', 'bkg_id']:
if col not in tbl_cols:
continue
indx = np.where([t == col for t in tbl_cols])[0][0]
if indx != ncol-1:
tbl_cols.insert(ncol-1, tbl_cols.pop(indx))
# Copy the internal table so that it is unaltered
output_tbl = self.table.copy()
# Select the output rows if a vector was provided
if rows is not None:
output_tbl = output_tbl[rows]
# Select and sort the data by a given column
if sort_col is not None:
if sort_col not in self.keys():
raise ValueError(f'Cannot sort by {sort_col}. Not a valid column.')
# Ignore any NoneTypes
indx = output_tbl[sort_col] != None
is_None = np.logical_not(indx)
srt = np.append(np.where(is_None)[0],
np.where(indx)[0][np.argsort(output_tbl[sort_col][indx].data)])
output_tbl = output_tbl[tbl_cols][srt]
else:
output_tbl = output_tbl[tbl_cols]
if output == 'table':
# Instead of writing, just return the modified table
return output_tbl
# Always write the table in ascii format
with io.StringIO() as ff:
output_tbl.write(ff, format='ascii.fixed_width')
data_lines = ff.getvalue().split('\n')[:-1]
if ofile is None:
# Output file not defined so just print it
print('\n'.join(data_lines))
return None
# Write the output to an ascii file
with open(ofile, 'w') as f:
if header is not None:
_header = header if isinstance(header, list) else [header]
for h in _header:
f.write(f'# {h}\n')
f.write('\n')
f.write('\n'.join(data_lines))
f.write('\n')
# Just to be explicit that the method returns None when writing to a
# file...
return None
def find_calib_group(self, grp):
"""
Find all the frames associated with the provided calibration group.
Args:
grp (:obj:`int`):
The calibration group integer.
Returns:
numpy.ndarray: Boolean array selecting those frames in the
table included in the selected calibration group.
Raises:
PypeItError:
Raised if the 'calibbit' column is not defined.
"""
if 'calibbit' not in self.keys():
msgs.error('Calibration groups are not set. First run set_calibration_groups.')
return self.calib_bitmask.flagged(self['calibbit'].data, grp)
def find_frame_calib_groups(self, row):
"""
Find the calibration groups associated with a specific frame.
"""
return self.calib_bitmask.flagged_bits(self['calibbit'][row])
# TODO: Is there a reason why this is not an attribute of
# PypeItMetaData?
def row_match_config(row, config, spectrograph):
"""
Queries whether a row from the fitstbl matches the
input configuration
Args:
row (astropy.table.Row): From fitstbl
config (dict): Defines the configuration
spectrograph (pypeit.spectrographs.spectrograph.Spectrograph):
Used to grab the rtol value for float meta (e.g. dispangle)
Returns:
bool: True if the row matches the input configuration
"""
# Loop on keys in config
match = []
for k in config.keys():
# Deal with floating configs (e.g. grating angle)
if isinstance(config[k], float):
if row[k] is None:
match.append(False)
elif np.abs(config[k]-row[k])/config[k] < spectrograph.meta[k]['rtol']:
match.append(True)
else:
match.append(False)
else:
# The np.all allows for arrays in the Table (e.g. binning)
match.append(np.all(config[k] == row[k]))
# Check
return np.all(match)
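# Added illustrative note (not part of the PypeIt source; the key name and values below
# are hypothetical): for a float metadata key such as 'dispangle', the relative-tolerance
# branch above amounts to
#     abs(config['dispangle'] - row['dispangle']) / config['dispangle'] < rtol
# so with config=10.0, row=10.001 and rtol=1e-3 the relative difference is 1e-4 < 1e-3
# and the row counts as a match for that key.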
|
[
"pypeit.core.framematch.FrameTypeBitMask",
"astropy.table.Table",
"pypeit.io.dict_to_lines",
"pypeit.core.parse.str2list",
"numpy.logical_not",
"pypeit.msgs.newline",
"numpy.isin",
"numpy.argsort",
"numpy.array",
"pypeit.core.meta.convert_radec",
"copy.deepcopy",
"numpy.arange",
"pypeit.msgs.error",
"numpy.where",
"pypeit.utils.yamlify",
"numpy.asarray",
"os.path.split",
"pypeit.msgs.warn",
"os.path.isdir",
"io.StringIO",
"numpy.abs",
"pypeit.core.flux_calib.find_standard_file",
"pypeit.msgs.info",
"numpy.any",
"os.path.isfile",
"astropy.table.Column",
"pypeit.par.util.make_pypeit_file",
"pypeit.core.meta.get_meta_data_model",
"numpy.atleast_1d",
"numpy.unique",
"os.makedirs",
"datetime.datetime.strptime",
"os.path.join",
"os.getcwd",
"astropy.time.Time",
"numpy.sum",
"os.path.basename",
"numpy.all",
"datetime.datetime.strftime"
] |
[((78727, 78740), 'numpy.all', 'np.all', (['match'], {}), '(match)\n', (78733, 78740), True, 'import numpy as np\n'), ((4634, 4663), 'pypeit.core.framematch.FrameTypeBitMask', 'framematch.FrameTypeBitMask', ([], {}), '()\n', (4661, 4663), False, 'from pypeit.core import framematch\n'), ((11930, 11956), 'pypeit.core.meta.get_meta_data_model', 'meta.get_meta_data_model', ([], {}), '()\n', (11954, 11956), False, 'from pypeit.core import meta\n'), ((19255, 19296), 'astropy.time.Time', 'time.Time', (["self['mjd'][row]"], {'format': '"""mjd"""'}), "(self['mjd'][row], format='mjd')\n", (19264, 19296), False, 'from astropy import table, coordinates, time, units\n'), ((19877, 19911), 'astropy.time.Time', 'time.Time', (['_obstime'], {'format': '"""isot"""'}), "(_obstime, format='isot')\n", (19886, 19911), False, 'from astropy import table, coordinates, time, units\n'), ((19928, 19990), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['tiso.value', '"""%Y-%m-%dT%H:%M:%S.%f"""'], {}), "(tiso.value, '%Y-%m-%dT%H:%M:%S.%f')\n", (19954, 19990), False, 'import datetime\n'), ((24686, 24729), 'numpy.unique', 'np.unique', (["self['setup']"], {'return_index': '(True)'}), "(self['setup'], return_index=True)\n", (24695, 24729), True, 'import numpy as np\n'), ((29498, 29561), 'pypeit.msgs.info', 'msgs.info', (['"""Using metadata to determine unique configurations."""'], {}), "('Using metadata to determine unique configurations.')\n", (29507, 29561), False, 'from pypeit import msgs\n'), ((38774, 38786), 'numpy.all', 'np.all', (['good'], {}), '(good)\n', (38780, 38786), True, 'import numpy as np\n'), ((39245, 39259), 'pypeit.msgs.warn', 'msgs.warn', (['msg'], {}), '(msg)\n', (39254, 39259), False, 'from pypeit import msgs\n'), ((50007, 50047), 'astropy.table.Column', 'table.Column', (['type_bits'], {'name': '"""framebit"""'}), "(type_bits, name='framebit')\n", (50019, 50047), False, 'from astropy import table, coordinates, time, units\n'), ((50060, 50097), 'astropy.table.Table', 'table.Table', (['[ftype_colm, fbits_colm]'], {}), '([ftype_colm, fbits_colm])\n', (50071, 50097), False, 'from astropy import table, coordinates, time, units\n'), ((52805, 52830), 'pypeit.msgs.info', 'msgs.info', (['"""Typing files"""'], {}), "('Typing files')\n", (52814, 52830), False, 'from pypeit import msgs\n'), ((56013, 56025), 'numpy.any', 'np.any', (['indx'], {}), '(indx)\n', (56019, 56025), True, 'import numpy as np\n'), ((56351, 56381), 'pypeit.msgs.info', 'msgs.info', (['"""Typing completed!"""'], {}), "('Typing completed!')\n", (56360, 56381), False, 'from pypeit import msgs\n'), ((57480, 57497), 'numpy.array', 'np.array', (['columns'], {}), '(columns)\n', (57488, 57497), True, 'import numpy as np\n'), ((64257, 64289), 'numpy.unique', 'np.unique', (["self['calibbit'].data"], {}), "(self['calibbit'].data)\n", (64266, 64289), True, 'import numpy as np\n'), ((4251, 4368), 'pypeit.msgs.warn', 'msgs.warn', (['"""Both data and files are None in the instantiation of PypeItMetaData. The table will be empty!"""'], {}), "(\n 'Both data and files are None in the instantiation of PypeItMetaData. 
The table will be empty!'\n )\n", (4260, 4368), False, 'from pypeit import msgs\n'), ((7604, 7624), 'os.path.split', 'os.path.split', (['ifile'], {}), '(ifile)\n', (7617, 7624), False, 'import os\n'), ((8888, 8924), 'astropy.time.Time', 'time.Time', (["data['mjd']"], {'format': '"""mjd"""'}), "(data['mjd'], format='mjd')\n", (8897, 8924), False, 'from astropy import table, coordinates, time, units\n'), ((18587, 18722), 'pypeit.msgs.error', 'msgs.error', (['"""Cannot provide master key string without setup and calibbit; run set_configurations and set_calibration_groups."""'], {}), "(\n 'Cannot provide master key string without setup and calibbit; run set_configurations and set_calibration_groups.'\n )\n", (18597, 18722), False, 'from pypeit import msgs\n'), ((20268, 20312), 'datetime.datetime.strftime', 'datetime.datetime.strftime', (['dtime', '"""%Y%m%dT"""'], {}), "(dtime, '%Y%m%dT')\n", (20294, 20312), False, 'import datetime\n'), ((21439, 21542), 'pypeit.msgs.error', 'msgs.error', (['"""Cannot provide instrument setup without \'setup\' column; run set_configurations."""'], {}), '(\n "Cannot provide instrument setup without \'setup\' column; run set_configurations."\n )\n', (21449, 21542), False, 'from pypeit import msgs\n'), ((24568, 24629), 'pypeit.msgs.error', 'msgs.error', (['"""Cannot get setup names; run set_configurations."""'], {}), "('Cannot get setup names; run set_configurations.')\n", (24578, 24629), False, 'from pypeit import msgs\n'), ((25006, 25028), 'numpy.atleast_1d', 'np.atleast_1d', (['configs'], {}), '(configs)\n', (25019, 25028), True, 'import numpy as np\n'), ((25280, 25305), 'numpy.isin', 'np.isin', (['setups', '_configs'], {}), '(setups, _configs)\n', (25287, 25305), True, 'import numpy as np\n'), ((26261, 26283), 'copy.deepcopy', 'deepcopy', (['self.configs'], {}), '(self.configs)\n', (26269, 26283), False, 'from copy import deepcopy\n'), ((28799, 28869), 'pypeit.msgs.info', 'msgs.info', (['"""Setup column already set. Finding unique configurations."""'], {}), "('Setup column already set. 
Finding unique configurations.')\n", (28808, 28869), False, 'from pypeit import msgs\n'), ((28895, 28938), 'numpy.unique', 'np.unique', (["self['setup']"], {'return_index': '(True)'}), "(self['setup'], return_index=True)\n", (28904, 28938), True, 'import numpy as np\n'), ((30324, 30380), 'pypeit.msgs.error', 'msgs.error', (['"""No frames to use to define configurations!"""'], {}), "('No frames to use to define configurations!')\n", (30334, 30380), False, 'from pypeit import msgs\n'), ((31053, 31118), 'pypeit.msgs.info', 'msgs.info', (['"""All files assumed to be from a single configuration."""'], {}), "('All files assumed to be from a single configuration.')\n", (31062, 31118), False, 'from pypeit import msgs\n'), ((34828, 34845), 'numpy.any', 'np.any', (['not_setup'], {}), '(not_setup)\n', (34834, 34845), True, 'import numpy as np\n'), ((35233, 35338), 'pypeit.msgs.error', 'msgs.error', (['"""To account for ignored frames, types must have been defined; run get_frame_types."""'], {}), "(\n 'To account for ignored frames, types must have been defined; run get_frame_types.'\n )\n", (35243, 35338), False, 'from pypeit import msgs\n'), ((38596, 38631), 'numpy.isin', 'np.isin', (['self[key]', 'cfg_limits[key]'], {}), '(self[key], cfg_limits[key])\n', (38603, 38631), True, 'import numpy as np\n'), ((40190, 40208), 'numpy.arange', 'np.arange', (['ngroups'], {}), '(ngroups)\n', (40199, 40208), True, 'import numpy as np\n'), ((40379, 40420), 'pypeit.core.parse.str2list', 'parse.str2list', (["self['calib'][i]", 'ngroups'], {}), "(self['calib'][i], ngroups)\n", (40393, 40420), False, 'from pypeit.core import parse\n'), ((43833, 43923), 'pypeit.msgs.error', 'msgs.error', (['"""Must have defined \'setup\' column first; try running set_configurations."""'], {}), '(\n "Must have defined \'setup\' column first; try running set_configurations.")\n', (43843, 43923), False, 'from pypeit import msgs\n'), ((46578, 46644), 'pypeit.msgs.error', 'msgs.error', (['"""Frame types are not set. First run get_frame_types."""'], {}), "('Frame types are not set. 
First run get_frame_types.')\n", (46588, 46644), False, 'from pypeit import msgs\n'), ((48316, 48377), 'os.path.join', 'os.path.join', (["self['directory'][indx]", "self['filename'][indx]"], {}), "(self['directory'][indx], self['filename'][indx])\n", (48328, 48377), False, 'import os\n'), ((48394, 48412), 'os.path.join', 'os.path.join', (['d', 'f'], {}), '(d, f)\n', (48406, 48412), False, 'import os\n'), ((52350, 52412), 'pypeit.msgs.warn', 'msgs.warn', (['"""Removing existing frametype and framebit columns."""'], {}), "('Removing existing frametype and framebit columns.')\n", (52359, 52412), False, 'from pypeit import msgs\n'), ((53167, 53212), 'pypeit.msgs.info', 'msgs.info', (['"""Using user-provided frame types."""'], {}), "('Using user-provided frame types.')\n", (53176, 53212), False, 'from pypeit import msgs\n'), ((54788, 54876), 'pypeit.msgs.warn', 'msgs.warn', (['"""Cannot associate standard with science frames without sky coordinates."""'], {}), "(\n 'Cannot associate standard with science frames without sky coordinates.')\n", (54797, 54876), False, 'from pypeit import msgs\n'), ((56039, 56090), 'pypeit.msgs.info', 'msgs.info', (['"""Couldn\'t identify the following files:"""'], {}), '("Couldn\'t identify the following files:")\n', (56048, 56090), False, 'from pypeit import msgs\n'), ((58525, 58552), 'numpy.all', 'np.all', (["(self['comb_id'] < 0)"], {}), "(self['comb_id'] < 0)\n", (58531, 58552), True, 'import numpy as np\n'), ((60486, 60608), 'pypeit.msgs.error', 'msgs.error', (['"""Cannot write sorted instrument configuration table without \'setup\' column; run set_configurations."""'], {}), '(\n "Cannot write sorted instrument configuration table without \'setup\' column; run set_configurations."\n )\n', (60496, 60608), False, 'from pypeit import msgs\n'), ((60639, 60660), 'os.path.isfile', 'os.path.isfile', (['ofile'], {}), '(ofile)\n', (60653, 60660), False, 'import os\n'), ((61966, 61981), 'numpy.argsort', 'np.argsort', (['mjd'], {}), '(mjd)\n', (61976, 61981), True, 'import numpy as np\n'), ((63363, 63509), 'pypeit.msgs.error', 'msgs.error', (['"""Cannot write calibration groups without \'setup\' and \'calibbit\' columns; run set_configurations and set_calibration_groups."""'], {}), '(\n "Cannot write calibration groups without \'setup\' and \'calibbit\' columns; run set_configurations and set_calibration_groups."\n )\n', (63373, 63509), False, 'from pypeit import msgs\n'), ((63542, 63563), 'os.path.isfile', 'os.path.isfile', (['ofile'], {}), '(ofile)\n', (63556, 63563), False, 'import os\n'), ((64082, 64102), 'copy.deepcopy', 'deepcopy', (['cfg[setup]'], {}), '(cfg[setup])\n', (64090, 64102), False, 'from copy import deepcopy\n'), ((68106, 68117), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (68115, 68117), False, 'import os\n'), ((68626, 68659), 'pypeit.msgs.error', 'msgs.error', (['"""No setups to write!"""'], {}), "('No setups to write!')\n", (68636, 68659), False, 'from pypeit import msgs\n'), ((69081, 69112), 'os.path.join', 'os.path.join', (['output_path', 'root'], {}), '(output_path, root)\n', (69093, 69112), False, 'import os\n'), ((69989, 70132), 'pypeit.par.util.make_pypeit_file', 'make_pypeit_file', (['ofiles[j]', 'self.spectrograph.name', '[]'], {'cfg_lines': 'cfg_lines', 'setup_lines': 'setup_lines', 'sorted_files': 'data_lines', 'paths': 'paths'}), '(ofiles[j], self.spectrograph.name, [], cfg_lines=cfg_lines,\n setup_lines=setup_lines, sorted_files=data_lines, paths=paths)\n', (70005, 70132), False, 'from pypeit.par.util import make_pypeit_file\n'), ((73155, 
73176), 'os.path.isfile', 'os.path.isfile', (['ofile'], {}), '(ofile)\n', (73169, 73176), False, 'import os\n'), ((75475, 75495), 'numpy.logical_not', 'np.logical_not', (['indx'], {}), '(indx)\n', (75489, 75495), True, 'import numpy as np\n'), ((75938, 75951), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (75949, 75951), False, 'import io\n'), ((77248, 77333), 'pypeit.msgs.error', 'msgs.error', (['"""Calibration groups are not set. First run set_calibration_groups."""'], {}), "('Calibration groups are not set. First run set_calibration_groups.'\n )\n", (77258, 77333), False, 'from pypeit import msgs\n'), ((8970, 8993), 'numpy.asarray', 'np.asarray', (["data['mjd']"], {}), "(data['mjd'])\n", (8980, 8993), True, 'import numpy as np\n'), ((9018, 9046), 'numpy.asarray', 'np.asarray', (["data['filename']"], {}), "(data['filename'])\n", (9028, 9046), True, 'import numpy as np\n'), ((9395, 9409), 'pypeit.msgs.warn', 'msgs.warn', (['msg'], {}), '(msg)\n', (9404, 9409), False, 'from pypeit import msgs\n'), ((24853, 24876), 'numpy.isin', 'np.isin', (['setups', 'ignore'], {}), '(setups, ignore)\n', (24860, 24876), True, 'import numpy as np\n'), ((28990, 29004), 'numpy.sum', 'np.sum', (['ignore'], {}), '(ignore)\n', (28996, 29004), True, 'import numpy as np\n'), ((29868, 29955), 'pypeit.msgs.error', 'msgs.error', (['"""To ignore frames, types must have been defined; run get_frame_types."""'], {}), "(\n 'To ignore frames, types must have been defined; run get_frame_types.')\n", (29878, 29955), False, 'from pypeit import msgs\n'), ((36676, 36719), 'numpy.unique', 'np.unique', (['self.table[metakey][in_cfg].data'], {}), '(self.table[metakey][in_cfg].data)\n', (36685, 36719), True, 'import numpy as np\n'), ((37263, 37302), 'numpy.isin', 'np.isin', (['self.table[metakey]', 'uniq_meta'], {}), '(self.table[metakey], uniq_meta)\n', (37270, 37302), True, 'import numpy as np\n'), ((38651, 38663), 'numpy.all', 'np.all', (['indx'], {}), '(indx)\n', (38657, 38663), True, 'import numpy as np\n'), ((39130, 39150), 'numpy.logical_not', 'np.logical_not', (['good'], {}), '(good)\n', (39144, 39150), True, 'import numpy as np\n'), ((41171, 41256), 'pypeit.msgs.error', 'msgs.error', (['"""Science frames can only be assigned to a single calibration group."""'], {}), "('Science frames can only be assigned to a single calibration group.'\n )\n", (41181, 41256), False, 'from pypeit import msgs\n'), ((43939, 43968), 'numpy.unique', 'np.unique', (["self['setup'].data"], {}), "(self['setup'].data)\n", (43948, 43968), True, 'import numpy as np\n'), ((44811, 44902), 'pypeit.msgs.error', 'msgs.error', (['"""To set global frames, types must have been defined; run get_frame_types."""'], {}), "(\n 'To set global frames, types must have been defined; run get_frame_types.')\n", (44821, 44902), False, 'from pypeit import msgs\n'), ((46981, 46995), 'numpy.where', 'np.where', (['indx'], {}), '(indx)\n', (46989, 46995), True, 'import numpy as np\n'), ((55741, 55791), 'pypeit.core.flux_calib.find_standard_file', 'flux_calib.find_standard_file', (['ra', 'dec'], {'check': '(True)'}), '(ra, dec, check=True)\n', (55770, 55791), False, 'from pypeit.core import flux_calib\n'), ((56152, 56164), 'pypeit.msgs.info', 'msgs.info', (['f'], {}), '(f)\n', (56161, 56164), False, 'from pypeit import msgs\n'), ((56214, 56263), 'pypeit.msgs.error', 'msgs.error', (['"""Check these files before continuing"""'], {}), "('Check these files before continuing')\n", (56224, 56263), False, 'from pypeit import msgs\n'), ((61373, 61385), 'numpy.any', 'np.any', 
(['indx'], {}), '(indx)\n', (61379, 61385), True, 'import numpy as np\n'), ((65071, 65244), 'pypeit.msgs.error', 'msgs.error', (['"""Each calibration group must be from one and only one instrument configuration with a valid letter identifier; i.e., the configuration cannot be None."""'], {}), "(\n 'Each calibration group must be from one and only one instrument configuration with a valid letter identifier; i.e., the configuration cannot be None.'\n )\n", (65081, 65244), False, 'from pypeit import msgs\n'), ((65906, 65924), 'pypeit.utils.yamlify', 'utils.yamlify', (['cfg'], {}), '(cfg)\n', (65919, 65924), False, 'from pypeit import utils\n'), ((69132, 69151), 'os.path.isdir', 'os.path.isdir', (['odir'], {}), '(odir)\n', (69145, 69151), False, 'import os\n'), ((69169, 69186), 'os.makedirs', 'os.makedirs', (['odir'], {}), '(odir)\n', (69180, 69186), False, 'import os\n'), ((69546, 69560), 'numpy.any', 'np.any', (['in_cfg'], {}), '(in_cfg)\n', (69552, 69560), True, 'import numpy as np\n'), ((69806, 69819), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (69817, 69819), False, 'import io\n'), ((73898, 73912), 'numpy.any', 'np.any', (['badcol'], {}), '(badcol)\n', (73904, 73912), True, 'import numpy as np\n'), ((78675, 78702), 'numpy.all', 'np.all', (['(config[k] == row[k])'], {}), '(config[k] == row[k])\n', (78681, 78702), True, 'import numpy as np\n'), ((7206, 7229), 'os.path.basename', 'os.path.basename', (['ifile'], {}), '(ifile)\n', (7222, 7229), False, 'import os\n'), ((7279, 7420), 'pypeit.msgs.error', 'msgs.error', (['"""File name list does not match user-provided metadata table. See usrdata argument of instantiation of PypeItMetaData."""'], {}), "(\n 'File name list does not match user-provided metadata table. See usrdata argument of instantiation of PypeItMetaData.'\n )\n", (7289, 7420), False, 'from pypeit import msgs\n'), ((12309, 12346), 'numpy.where', 'np.where', (["(f == self.table['filename'])"], {}), "(f == self.table['filename'])\n", (12317, 12346), True, 'import numpy as np\n'), ((13414, 13489), 'pypeit.core.meta.convert_radec', 'meta.convert_radec', (["usrdata['ra'][~nones].data", "usrdata['dec'][~nones].data"], {}), "(usrdata['ra'][~nones].data, usrdata['dec'][~nones].data)\n", (13432, 13489), False, 'from pypeit.core import meta\n'), ((36279, 36291), 'numpy.any', 'np.any', (['indx'], {}), '(indx)\n', (36285, 36291), True, 'import numpy as np\n'), ((64789, 64822), 'numpy.unique', 'np.unique', (["self['setup'][in_cbit]"], {}), "(self['setup'][in_cbit])\n", (64798, 64822), True, 'import numpy as np\n'), ((65616, 65634), 'os.path.join', 'os.path.join', (['d', 'f'], {}), '(d, f)\n', (65628, 65634), False, 'import os\n'), ((69418, 69443), 'pypeit.utils.yamlify', 'utils.yamlify', (['cfg[setup]'], {}), '(cfg[setup])\n', (69431, 69443), False, 'from pypeit import utils\n'), ((69607, 69643), 'numpy.unique', 'np.unique', (["self['directory'][in_cfg]"], {}), "(self['directory'][in_cfg])\n", (69616, 69643), True, 'import numpy as np\n'), ((74368, 74408), 'numpy.where', 'np.where', (['[(t == col) for t in tbl_cols]'], {}), '([(t == col) for t in tbl_cols])\n', (74376, 74408), True, 'import numpy as np\n'), ((74790, 74830), 'numpy.where', 'np.where', (['[(t == col) for t in tbl_cols]'], {}), '([(t == col) for t in tbl_cols])\n', (74798, 74830), True, 'import numpy as np\n'), ((75524, 75541), 'numpy.where', 'np.where', (['is_None'], {}), '(is_None)\n', (75532, 75541), True, 'import numpy as np\n'), ((75592, 75635), 'numpy.argsort', 'np.argsort', (['output_tbl[sort_col][indx].data'], {}), 
'(output_tbl[sort_col][indx].data)\n', (75602, 75635), True, 'import numpy as np\n'), ((8574, 8594), 'os.path.split', 'os.path.split', (['ifile'], {}), '(ifile)\n', (8587, 8594), False, 'import os\n'), ((29126, 29140), 'numpy.sum', 'np.sum', (['ignore'], {}), '(ignore)\n', (29132, 29140), True, 'import numpy as np\n'), ((61659, 61694), 'pypeit.io.dict_to_lines', 'dict_to_lines', (['cfgs[setup]'], {'level': '(1)'}), '(cfgs[setup], level=1)\n', (61672, 61694), False, 'from pypeit.io import dict_to_lines\n'), ((75574, 75588), 'numpy.where', 'np.where', (['indx'], {}), '(indx)\n', (75582, 75588), True, 'import numpy as np\n'), ((78409, 78435), 'numpy.abs', 'np.abs', (['(config[k] - row[k])'], {}), '(config[k] - row[k])\n', (78415, 78435), True, 'import numpy as np\n'), ((44982, 44998), 'numpy.arange', 'np.arange', (['n_cfg'], {}), '(n_cfg)\n', (44991, 44998), True, 'import numpy as np\n'), ((55282, 55296), 'pypeit.msgs.newline', 'msgs.newline', ([], {}), '()\n', (55294, 55296), False, 'from pypeit import msgs\n'), ((55421, 55435), 'pypeit.msgs.newline', 'msgs.newline', ([], {}), '()\n', (55433, 55435), False, 'from pypeit import msgs\n')]
|
import rospy
from sensor_msgs.msg import Image
from std_msgs.msg import String
from cv_bridge import CvBridge
import cv2
import numpy as np
import tensorflow as tf
import classify_image
class RosTensorFlow():
def __init__(self):
classify_image.maybe_download_and_extract()
self._session = tf.Session()
classify_image.create_graph()
self._cv_bridge = CvBridge()
self._sub = rospy.Subscriber('/usb_cam/image_raw', Image, self.callback, queue_size=1)
self._pub = rospy.Publisher('result', String, queue_size=1)
self.score_threshold = rospy.get_param('~score_threshold', 0.1)
self.use_top_k = rospy.get_param('~use_top_k', 5)
def callback(self, image_msg):
cv_image = self._cv_bridge.imgmsg_to_cv2(image_msg, "bgr8")
# copy from
# classify_image.py
image_data = cv2.imencode('.jpg', cv_image)[1].tostring()
# Creates graph from saved GraphDef.
softmax_tensor = self._session.graph.get_tensor_by_name('softmax:0')
predictions = self._session.run(
softmax_tensor, {'DecodeJpeg/contents:0': image_data})
predictions = np.squeeze(predictions)
# Creates node ID --> English string lookup.
node_lookup = classify_image.NodeLookup()
top_k = predictions.argsort()[-self.use_top_k:][::-1]
for node_id in top_k:
human_string = node_lookup.id_to_string(node_id)
score = predictions[node_id]
if score > self.score_threshold:
rospy.loginfo('%s (score = %.5f)' % (human_string, score))
self._pub.publish(human_string)
def main(self):
rospy.spin()
if __name__ == '__main__':
classify_image.setup_args()
rospy.init_node('rostensorflow')
tensor = RosTensorFlow()
tensor.main()
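# Usage note (added; assumes a standard ROS setup, not taken from the source): this node
# expects images published on /usb_cam/image_raw (e.g. from the usb_cam package) and
# publishes the top classification string on the 'result' topic, e.g.
#   rosrun <your_package> <this_script>.py
#   rostopic echo /result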
|
[
"rospy.Publisher",
"rospy.Subscriber",
"cv2.imencode",
"rospy.init_node",
"tensorflow.Session",
"rospy.get_param",
"numpy.squeeze",
"cv_bridge.CvBridge",
"rospy.spin",
"classify_image.setup_args",
"classify_image.NodeLookup",
"classify_image.create_graph",
"classify_image.maybe_download_and_extract",
"rospy.loginfo"
] |
[((1726, 1753), 'classify_image.setup_args', 'classify_image.setup_args', ([], {}), '()\n', (1751, 1753), False, 'import classify_image\n'), ((1758, 1790), 'rospy.init_node', 'rospy.init_node', (['"""rostensorflow"""'], {}), "('rostensorflow')\n", (1773, 1790), False, 'import rospy\n'), ((243, 286), 'classify_image.maybe_download_and_extract', 'classify_image.maybe_download_and_extract', ([], {}), '()\n', (284, 286), False, 'import classify_image\n'), ((311, 323), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (321, 323), True, 'import tensorflow as tf\n'), ((332, 361), 'classify_image.create_graph', 'classify_image.create_graph', ([], {}), '()\n', (359, 361), False, 'import classify_image\n'), ((388, 398), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (396, 398), False, 'from cv_bridge import CvBridge\n'), ((420, 494), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/usb_cam/image_raw"""', 'Image', 'self.callback'], {'queue_size': '(1)'}), "('/usb_cam/image_raw', Image, self.callback, queue_size=1)\n", (436, 494), False, 'import rospy\n'), ((515, 562), 'rospy.Publisher', 'rospy.Publisher', (['"""result"""', 'String'], {'queue_size': '(1)'}), "('result', String, queue_size=1)\n", (530, 562), False, 'import rospy\n'), ((594, 634), 'rospy.get_param', 'rospy.get_param', (['"""~score_threshold"""', '(0.1)'], {}), "('~score_threshold', 0.1)\n", (609, 634), False, 'import rospy\n'), ((660, 692), 'rospy.get_param', 'rospy.get_param', (['"""~use_top_k"""', '(5)'], {}), "('~use_top_k', 5)\n", (675, 692), False, 'import rospy\n'), ((1163, 1186), 'numpy.squeeze', 'np.squeeze', (['predictions'], {}), '(predictions)\n', (1173, 1186), True, 'import numpy as np\n'), ((1262, 1289), 'classify_image.NodeLookup', 'classify_image.NodeLookup', ([], {}), '()\n', (1287, 1289), False, 'import classify_image\n'), ((1681, 1693), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (1691, 1693), False, 'import rospy\n'), ((1545, 1603), 'rospy.loginfo', 'rospy.loginfo', (["('%s (score = %.5f)' % (human_string, score))"], {}), "('%s (score = %.5f)' % (human_string, score))\n", (1558, 1603), False, 'import rospy\n'), ((866, 896), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'cv_image'], {}), "('.jpg', cv_image)\n", (878, 896), False, 'import cv2\n')]
|
import numpy as np
def segment_Y(Y, **params):
Y_segments = params.get("Y_segments")
Y_quantile = params.get("Y_quantile")
print("segmenting Y")
Y = Y.values.reshape(-1)
Y_quantile = np.quantile(Y, Y_quantile, axis = 0)
bigger_mask = (Y > Y_quantile).copy()
smaller_mask = (Y <= Y_quantile).copy()
Y[bigger_mask] = 1
Y[smaller_mask] = 0
Y = Y.astype(int)
return Y
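# Minimal usage sketch (added; illustrative values, not from the original module):
# binarize a pandas Series at its 0.9 quantile. Note that Y_segments is read but unused
# by the current implementation.
#
#   import pandas as pd
#   y = pd.Series([1.0, 2.0, 3.0, 10.0])
#   labels = segment_Y(y, Y_segments=2, Y_quantile=0.9)
#   # labels should be array([0, 0, 0, 1]): only the value above the 0.9 quantile (7.9) maps to 1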
|
[
"numpy.quantile"
] |
[((191, 225), 'numpy.quantile', 'np.quantile', (['Y', 'Y_quantile'], {'axis': '(0)'}), '(Y, Y_quantile, axis=0)\n', (202, 225), True, 'import numpy as np\n')]
|
import numpy
def lax_friedrichs(cons_minus, cons_plus, simulation, tl):
alpha = tl.grid.dx / tl.dt
flux = numpy.zeros_like(cons_minus)
prim_minus, aux_minus = simulation.model.cons2all(cons_minus, tl.prim)
prim_plus, aux_plus = simulation.model.cons2all(cons_plus , tl.prim)
f_minus = simulation.model.flux(cons_minus, prim_minus, aux_minus)
f_plus = simulation.model.flux(cons_plus, prim_plus, aux_plus )
flux[:, 1:-1] = 0.5 * ( (f_plus[:,0:-2] + f_minus[:,1:-1]) + \
alpha * (cons_plus[:,0:-2] - cons_minus[:,1:-1]) )
return flux
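# Added note (a reading of the update above in standard finite-volume notation): each
# interior interface flux is the average of the physical fluxes of the two adjacent
# reconstructed states plus a dissipation term proportional to their jump,
#     F = 0.5 * ( f(q_L) + f(q_R) + alpha * (q_L - q_R) )
# where q_L is cons_plus from the left cell, q_R is cons_minus from the right cell, and
# alpha = dx/dt plays the role of the bounding wave speed (the Lax-Friedrichs choice).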
def upwind(cons_minus, cons_plus, simulation, patch):
flux = numpy.zeros_like(cons_minus)
flux[:, 1:-1] = simulation.model.riemann_problem_flux(cons_plus [:, 0:-2],
cons_minus[:, 1:-1])
return flux
|
[
"numpy.zeros_like"
] |
[((115, 143), 'numpy.zeros_like', 'numpy.zeros_like', (['cons_minus'], {}), '(cons_minus)\n', (131, 143), False, 'import numpy\n'), ((666, 694), 'numpy.zeros_like', 'numpy.zeros_like', (['cons_minus'], {}), '(cons_minus)\n', (682, 694), False, 'import numpy\n')]
|
import numpy as np
import random
from collections import namedtuple
def generate_prob_matrix(n):
matrix = np.random.rand(n, n)
for i in range(n):
matrix[i][i] = 0
for i in range(n):
matrix[i] = (1/np.sum(matrix[i]))*matrix[i]
return matrix
def categorical(p):
return np.random.choice(len(p), 1, p=p)[0]
Drone = namedtuple('Drone', 'speed probability')
Site = namedtuple('Site', 'location')
class System:
def __init__(self, sites, drones):
self.sites = {}
self.drones = {}
n = len(sites)
for i, drone in enumerate(drones):
self.drones[i] = drone
for i, site in enumerate(sites):
self.sites[i] = site
distance = np.zeros([n, n])
for i in range(n):
for j in range(n):
if i < j:
x = np.subtract(sites[i], sites[j])
d = np.linalg.norm(x)
distance[i][j] = d
distance[j][i] = d
self.distance = distance
def get_site(self, site_id):
return self.sites[site_id]
def get_drone(self, drone_id):
return self.drones[drone_id]
def compute_path_distance(self, path):
n = len(path)
d = 0
for i in range(n - 1):
d += self.distance[path[i]][path[i + 1]]
return d
def compute_path_time(self, path, drone_id):
d = self.compute_path_distance(path)
return d/self.get_drone(drone_id).speed
def generate_path_of_length(self, length, drone_id):
path = []
P = self.get_drone(drone_id).probability
num_sites = len(self.sites)
s = categorical([1/num_sites]*num_sites)
path.append(s)
site = s
for i in range(length):
site = categorical(P[site])
path.append(site)
return path
def generate_path(self, s, t, drone_id):
path = [s]
P = self.get_drone(drone_id).probability
site = categorical(P[s])
path.append(site)
while site != t:
site = categorical(P[site])
path.append(site)
return path
@staticmethod
def generate_random_system(n, k):
locations = np.random.rand(n, 2)
sites = []
for i in locations:
sites.append(Site(i))
drones = []
for i in range(k):
speed = abs(random.random())
probability = generate_prob_matrix(n)
drones.append(Drone(speed, probability))
return System(sites, drones)
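# Minimal usage sketch (added; illustrative, not from the original module):
#
#   system = System.generate_random_system(n=5, k=2)        # 5 sites, 2 drones
#   path = system.generate_path_of_length(10, drone_id=0)    # random 11-site walk for drone 0
#   t = system.compute_path_time(path, drone_id=0)           # travel time along that walk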
def _compute_arrival_times(system, path, drone_id):
    """Cumulative arrival events (drone_id, from_site, to_site, time) along a path."""
    arrival_times = []
    t = 0
    for i in range(len(path) - 1):
        t += system.compute_path_time(path[i:i+2], drone_id=drone_id)
        arrival_times.append((drone_id, path[i], path[i+1], t))
    return arrival_times
def _generate_arrival_times(system, num_drones, length):
arrival_times = [[] for _ in range(len(system.sites))]
events = []
    for i in range(num_drones):
        # Sample one random patrol path per drone and collect its arrival events.
        path = system.generate_path_of_length(length, i)
        events.extend(_compute_arrival_times(system, path, i))
def get_key(item):
return item[3]
events = sorted(events, key=get_key)
for event in events:
drone_id = event[0]
site_id = event[2]
time = event[3]
arrival_times[site_id].append((drone_id, time))
return arrival_times
def compute_cost(system, n):
    # n is interpreted here as the number of hops in each drone's sampled patrol path
    arrival_times = _generate_arrival_times(system, len(system.drones), n)
interarrival_times = [[] for _ in range(len(system.sites))]
for i in range(len(arrival_times)):
arrivals = arrival_times[i]
for j in range(len(arrivals) - 1):
interarrival_times[i].append(arrivals[j+1][1] - arrivals[j][1])
interarrival_avgs = [compute_average(i) for i in interarrival_times]
return max(interarrival_avgs)
def compute_average(data):
return (1/len(data))*sum(data)
|
[
"collections.namedtuple",
"numpy.random.rand",
"numpy.subtract",
"numpy.sum",
"numpy.zeros",
"numpy.linalg.norm",
"random.random"
] |
[((357, 397), 'collections.namedtuple', 'namedtuple', (['"""Drone"""', '"""speed probability"""'], {}), "('Drone', 'speed probability')\n", (367, 397), False, 'from collections import namedtuple\n'), ((405, 435), 'collections.namedtuple', 'namedtuple', (['"""Site"""', '"""location"""'], {}), "('Site', 'location')\n", (415, 435), False, 'from collections import namedtuple\n'), ((113, 133), 'numpy.random.rand', 'np.random.rand', (['n', 'n'], {}), '(n, n)\n', (127, 133), True, 'import numpy as np\n'), ((738, 754), 'numpy.zeros', 'np.zeros', (['[n, n]'], {}), '([n, n])\n', (746, 754), True, 'import numpy as np\n'), ((2252, 2272), 'numpy.random.rand', 'np.random.rand', (['n', '(2)'], {}), '(n, 2)\n', (2266, 2272), True, 'import numpy as np\n'), ((230, 247), 'numpy.sum', 'np.sum', (['matrix[i]'], {}), '(matrix[i])\n', (236, 247), True, 'import numpy as np\n'), ((2426, 2441), 'random.random', 'random.random', ([], {}), '()\n', (2439, 2441), False, 'import random\n'), ((863, 894), 'numpy.subtract', 'np.subtract', (['sites[i]', 'sites[j]'], {}), '(sites[i], sites[j])\n', (874, 894), True, 'import numpy as np\n'), ((919, 936), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (933, 936), True, 'import numpy as np\n')]
|
#-*- coding:utf-8 -*-
# &Author AnFany
# Import the two implementations being compared
import Kmeans_AnFany as K_Af # AnFany
import Kmeans_Sklearn as K_Sk # Sklearn
import matplotlib.pyplot as plt
from pylab import mpl  # for displaying Chinese text in plots
mpl.rcParams['font.sans-serif'] = ['FangSong']  # set the Chinese font to FangSong
mpl.rcParams['axes.unicode_minus'] = False
import numpy as np
# Generate a dataset with sklearn
from sklearn.datasets import make_blobs
X, Y = make_blobs(n_samples=600, centers=6, n_features=2)
# Plot the scatter diagram of the training data
def fig_scatter(exdata, eydata, titl='训练数据散点图', co=['r', 'g', 'k', 'b', 'y', 'm'], marker=['o','^','H','v','d','>']):
typeclass = sorted(list(set(eydata)))
for ii in range(len(typeclass)):
datax = exdata[eydata == typeclass[ii]]
plt.scatter(datax[:, 0], datax[:, -1], c=co[ii], s=50, marker=marker[ii])
plt.title(titl)
#plt.legend(['%d类'%i for i in typeclass], bbox_to_anchor=(1.2, 0.9))
plt.xlabel('特征1')
plt.ylabel('特征2')
# Run the different implementations
# AnFany
kresult = K_Af.op_kmeans(X, countcen=6)
# Sklearn
sk = K_Sk.KMeans(init='k-means++', n_clusters=6, n_init=10)
train = sk.fit(X)
result = sk.predict(X)
skru = K_Sk.trans(result)
# Plot the scatter diagram of the clusters produced by each algorithm
def sca(Xdata, Center, signdict, co=['r', 'g', 'y', 'b', 'c', 'm'], marker=['o','^','H','s','d','*'], titl = 'AnFany 结果'):
du = 1
for jj in signdict:
xdata = Xdata[signdict[jj]]
        plt.scatter(xdata[:, 0], xdata[:, -1], c=co[jj], s=50, marker=marker[jj], label='%d类' % jj)  # plot the sample points of each cluster
for ss in Center:
if du:
            plt.scatter(ss[0], ss[1], c='k', s=100, marker='8', label='类别中心')  # plot the cluster centers
du = 0
else:
            plt.scatter(ss[0], ss[1], c='k', s=100, marker='8')  # plot the cluster centers
plt.legend(bbox_to_anchor=(1.2, 1))
plt.title(titl)
plt.xlabel('特征1')
plt.ylabel('特征2')
# Define the Euclidean distance
def dis(sample, center):
cen = np.array([center])
sample = np.array(sample)
if len(sample) != 0:
usb = np.sum((sample - cen) ** 2, axis=1) ** 0.5
return usb
else:
return 0
# Compute the cost of the final clustering result
def Cost(Xdata, typedict):
center = {}
for kk in typedict:
        center[kk] = np.mean(Xdata[typedict[kk]], axis=0)  # cluster mean
cio = 0
for cc in typedict:
cio += np.sum(dis(Xdata[typedict[cc]], center[cc]))
return cio
# Display the final results
plt.subplot(2, 2, 1)
fig_scatter(X, Y)
plt.subplot(2, 2, 2)
sca(X, kresult[0], kresult[2])
plt.subplot(2, 2, 3)
sca(X, train.cluster_centers_, skru, titl='Sklearn 结果')
plt.subplot(2, 2, 4)
plt.axis('off')
plt.text(0.3, 0.6, 'AnFany 最终的分类成本值为:%.5f'%Cost(X, kresult[2]))
plt.text(0.3, 0.3, 'Sklearn 最终的分类成本值为:%.5f'%Cost(X, skru))
plt.show()
|
[
"numpy.mean",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"sklearn.datasets.make_blobs",
"numpy.array",
"numpy.sum",
"Kmeans_AnFany.op_kmeans",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.axis",
"Kmeans_Sklearn.KMeans",
"matplotlib.pyplot.subplot",
"Kmeans_Sklearn.trans",
"matplotlib.pyplot.show"
] |
[((393, 443), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'n_samples': '(600)', 'centers': '(6)', 'n_features': '(2)'}), '(n_samples=600, centers=6, n_features=2)\n', (403, 443), False, 'from sklearn.datasets import make_blobs\n'), ((964, 993), 'Kmeans_AnFany.op_kmeans', 'K_Af.op_kmeans', (['X'], {'countcen': '(6)'}), '(X, countcen=6)\n', (978, 993), True, 'import Kmeans_AnFany as K_Af\n'), ((1015, 1069), 'Kmeans_Sklearn.KMeans', 'K_Sk.KMeans', ([], {'init': '"""k-means++"""', 'n_clusters': '(6)', 'n_init': '(10)'}), "(init='k-means++', n_clusters=6, n_init=10)\n", (1026, 1069), True, 'import Kmeans_Sklearn as K_Sk\n'), ((1123, 1141), 'Kmeans_Sklearn.trans', 'K_Sk.trans', (['result'], {}), '(result)\n', (1133, 1141), True, 'import Kmeans_Sklearn as K_Sk\n'), ((2335, 2355), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (2346, 2355), True, 'import matplotlib.pyplot as plt\n'), ((2378, 2398), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (2389, 2398), True, 'import matplotlib.pyplot as plt\n'), ((2434, 2454), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (2445, 2454), True, 'import matplotlib.pyplot as plt\n'), ((2515, 2535), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (2526, 2535), True, 'import matplotlib.pyplot as plt\n'), ((2537, 2552), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2545, 2552), True, 'import matplotlib.pyplot as plt\n'), ((2681, 2691), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2689, 2691), True, 'import matplotlib.pyplot as plt\n'), ((792, 807), 'matplotlib.pyplot.title', 'plt.title', (['titl'], {}), '(titl)\n', (801, 807), True, 'import matplotlib.pyplot as plt\n'), ((887, 904), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""特征1"""'], {}), "('特征1')\n", (897, 904), True, 'import matplotlib.pyplot as plt\n'), ((910, 927), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""特征2"""'], {}), "('特征2')\n", (920, 927), True, 'import matplotlib.pyplot as plt\n'), ((1718, 1753), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.2, 1)'}), '(bbox_to_anchor=(1.2, 1))\n', (1728, 1753), True, 'import matplotlib.pyplot as plt\n'), ((1759, 1774), 'matplotlib.pyplot.title', 'plt.title', (['titl'], {}), '(titl)\n', (1768, 1774), True, 'import matplotlib.pyplot as plt\n'), ((1780, 1797), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""特征1"""'], {}), "('特征1')\n", (1790, 1797), True, 'import matplotlib.pyplot as plt\n'), ((1803, 1820), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""特征2"""'], {}), "('特征2')\n", (1813, 1820), True, 'import matplotlib.pyplot as plt\n'), ((1872, 1890), 'numpy.array', 'np.array', (['[center]'], {}), '([center])\n', (1880, 1890), True, 'import numpy as np\n'), ((1905, 1921), 'numpy.array', 'np.array', (['sample'], {}), '(sample)\n', (1913, 1921), True, 'import numpy as np\n'), ((713, 786), 'matplotlib.pyplot.scatter', 'plt.scatter', (['datax[:, 0]', 'datax[:, -1]'], {'c': 'co[ii]', 's': '(50)', 'marker': 'marker[ii]'}), '(datax[:, 0], datax[:, -1], c=co[ii], s=50, marker=marker[ii])\n', (724, 786), True, 'import matplotlib.pyplot as plt\n'), ((1370, 1465), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xdata[:, 0]', 'xdata[:, -1]'], {'c': 'co[jj]', 's': '(50)', 'marker': 'marker[jj]', 'label': "('%d类' % jj)"}), "(xdata[:, 0], xdata[:, -1], c=co[jj], s=50, marker=marker[jj],\n label='%d类' % jj)\n", (1381, 1465), True, 'import 
matplotlib.pyplot as plt\n'), ((2164, 2200), 'numpy.mean', 'np.mean', (['Xdata[typedict[kk]]'], {'axis': '(0)'}), '(Xdata[typedict[kk]], axis=0)\n', (2171, 2200), True, 'import numpy as np\n'), ((1525, 1590), 'matplotlib.pyplot.scatter', 'plt.scatter', (['ss[0]', 'ss[1]'], {'c': '"""k"""', 's': '(100)', 'marker': '"""8"""', 'label': '"""类别中心"""'}), "(ss[0], ss[1], c='k', s=100, marker='8', label='类别中心')\n", (1536, 1590), True, 'import matplotlib.pyplot as plt\n'), ((1648, 1699), 'matplotlib.pyplot.scatter', 'plt.scatter', (['ss[0]', 'ss[1]'], {'c': '"""k"""', 's': '(100)', 'marker': '"""8"""'}), "(ss[0], ss[1], c='k', s=100, marker='8')\n", (1659, 1699), True, 'import matplotlib.pyplot as plt\n'), ((1963, 1998), 'numpy.sum', 'np.sum', (['((sample - cen) ** 2)'], {'axis': '(1)'}), '((sample - cen) ** 2, axis=1)\n', (1969, 1998), True, 'import numpy as np\n')]
|
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
"""
| This file contains SDC utility functions related to the typing compilation phase
"""
import numpy
import numba
import sdc
from numba import types
from numba.core.errors import TypingError
from numba.np import numpy_support
from sdc.datatypes.indexes import *
from sdc.str_arr_type import string_array_type, StringArrayType
from sdc.datatypes.categorical.types import Categorical
sdc_old_index_types = (types.Array, StringArrayType, )
sdc_pandas_index_types = (
EmptyIndexType,
PositionalIndexType,
RangeIndexType,
Int64IndexType,
MultiIndexType,
) + sdc_old_index_types
sdc_indexes_range_like = (
PositionalIndexType,
RangeIndexType,
)
# TO-DO: support caching of data allocated for range indexes at request for .values
sdc_indexes_wo_values_cache = (
EmptyIndexType,
PositionalIndexType,
RangeIndexType,
)
sdc_pandas_df_column_types = (
types.Array,
StringArrayType,
Categorical,
)
class TypeChecker:
"""
Validate object type and raise TypingError if the type is invalid, e.g.:
Method nsmallest(). The object n
given: bool
expected: int
"""
msg_template = '{} The object {}\n given: {}\n expected: {}'
def __init__(self, func_name):
"""
Parameters
----------
func_name: :obj:`str`
            name of the function where type checking is performed
"""
self.func_name = func_name
def raise_exc(self, data, expected_types, name=''):
"""
Raise exception with unified message
Parameters
----------
data: :obj:`any`
real type of the data
expected_types: :obj:`str`
            expected types, inserted directly into the exception message
name: :obj:`str`
name of the parameter
"""
msg = self.msg_template.format(self.func_name, name, data, expected_types)
raise TypingError(msg)
def check(self, data, accepted_type, name=''):
"""
Check data type belongs to specified type
Parameters
----------
data: :obj:`any`
real type of the data
accepted_type: :obj:`type`
accepted type
name: :obj:`str`
name of the parameter
"""
if not isinstance(data, accepted_type):
self.raise_exc(data, accepted_type.__name__, name=name)
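# Added usage sketch (illustrative; the method name and parameter below are hypothetical,
# not taken from the SDC source):
#
#   ty_checker = TypeChecker('Method nsmallest().')
#   ty_checker.check(n, types.Integer, name='n')
#   # raises numba.core.errors.TypingError with the unified message if n is not an Integer type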
class SDCLimitation(Exception):
"""Exception to be raised in case of SDC limitation"""
pass
def kwsparams2list(params):
"""Convert parameters dict to a list of string of a format 'key=value'"""
return ['{}={}'.format(k, v) for k, v in params.items()]
def sigparams2list(param_names, defaults):
"""Creates a list of strings of a format 'key=value' from parameter names and default values"""
return [(f'{param}' if param not in defaults else f'{param}={defaults[param]}') for param in param_names]
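# Added examples (illustrative values):
#   kwsparams2list({'skipna': True, 'axis': 0})   # -> ['skipna=True', 'axis=0']
#   sigparams2list(['self', 'n'], {'n': 5})       # -> ['self', 'n=5']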
def has_literal_value(var, value):
"""Used during typing to check that variable var is a Numba literal value equal to value"""
if not isinstance(var, types.Literal):
return False
if value is None:
return isinstance(var, types.NoneType) or var.literal_value is value
elif isinstance(value, type(bool)):
return var.literal_value is value
else:
return var.literal_value == value
def has_python_value(var, value):
"""Used during typing to check that variable var was resolved as Python type and has specific value"""
if not isinstance(var, type(value)):
return False
if value is None or isinstance(value, type(bool)):
return var is value
else:
return var == value
def is_default(var, value):
return has_literal_value(var, value) or has_python_value(var, value) or isinstance(var, types.Omitted)
def check_is_numeric_array(type_var):
"""Used during typing to check that type_var is a numeric numpy arrays"""
return check_is_array_of_dtype(type_var, types.Number)
def check_index_is_numeric(ty_series):
"""Used during typing to check that series has numeric index"""
return isinstance(ty_series.index.dtype, types.Number)
def check_types_comparable(ty_left, ty_right):
"""Used during typing to check that specified types can be compared"""
if hasattr(ty_left, 'dtype'):
ty_left = ty_left.dtype
if hasattr(ty_right, 'dtype'):
ty_right = ty_right.dtype
# add the rest of supported types here
if isinstance(ty_left, types.Number):
return isinstance(ty_right, types.Number)
if isinstance(ty_left, types.UnicodeType):
return isinstance(ty_right, types.UnicodeType)
if isinstance(ty_left, types.Boolean):
return isinstance(ty_right, types.Boolean)
if isinstance(ty_left, (types.Tuple, types.UniTuple)):
# FIXME: just for now to unblock compilation
return ty_left == ty_right
return False
def check_arrays_comparable(ty_left, ty_right):
"""Used during typing to check that underlying arrays of specified types can be compared"""
return ((ty_left == string_array_type and ty_right == string_array_type)
or (check_is_numeric_array(ty_left) and check_is_numeric_array(ty_right)))
def check_is_array_of_dtype(type_var, dtype):
"""Used during typing to check that type_var is a numeric numpy array of specific dtype"""
return isinstance(type_var, types.Array) and isinstance(type_var.dtype, dtype)
def find_common_dtype_from_numpy_dtypes(array_types, scalar_types):
"""Used to find common numba dtype for a sequences of numba dtypes each representing some numpy dtype"""
np_array_dtypes = [numpy_support.as_dtype(dtype) for dtype in array_types]
np_scalar_dtypes = [numpy_support.as_dtype(dtype) for dtype in scalar_types]
np_common_dtype = numpy.find_common_type(np_array_dtypes, np_scalar_dtypes)
numba_common_dtype = numpy_support.from_dtype(np_common_dtype)
return numba_common_dtype
def find_index_common_dtype(left, right):
"""Used to find common dtype for indexes of two series and verify if index dtypes are equal"""
left_index_dtype = left.dtype
right_index_dtype = right.dtype
index_dtypes_match = left_index_dtype == right_index_dtype
if not index_dtypes_match:
numba_index_common_dtype = find_common_dtype_from_numpy_dtypes(
[left_index_dtype, right_index_dtype], [])
else:
numba_index_common_dtype = left_index_dtype
return index_dtypes_match, numba_index_common_dtype
def gen_impl_generator(codegen, impl_name):
"""Generate generator of an implementation"""
def _df_impl_generator(*args, **kwargs):
func_text, global_vars = codegen(*args, **kwargs)
loc_vars = {}
exec(func_text, global_vars, loc_vars)
_impl = loc_vars[impl_name]
return _impl
return _df_impl_generator
def check_signed_integer(ty):
return isinstance(ty, types.Integer) and ty.signed
def _check_dtype_param_type(dtype):
""" Returns True is dtype is a valid type for dtype parameter and False otherwise.
Used in RangeIndex ctor and other methods that take dtype parameter. """
valid_dtype_types = (types.NoneType, types.Omitted, types.UnicodeType, types.NumberClass)
return isinstance(dtype, valid_dtype_types) or dtype is None
|
[
"numba.np.numpy_support.from_dtype",
"numba.np.numpy_support.as_dtype",
"numba.core.errors.TypingError",
"numpy.find_common_type"
] |
[((7400, 7457), 'numpy.find_common_type', 'numpy.find_common_type', (['np_array_dtypes', 'np_scalar_dtypes'], {}), '(np_array_dtypes, np_scalar_dtypes)\n', (7422, 7457), False, 'import numpy\n'), ((7483, 7524), 'numba.np.numpy_support.from_dtype', 'numpy_support.from_dtype', (['np_common_dtype'], {}), '(np_common_dtype)\n', (7507, 7524), False, 'from numba.np import numpy_support\n'), ((3501, 3517), 'numba.core.errors.TypingError', 'TypingError', (['msg'], {}), '(msg)\n', (3512, 3517), False, 'from numba.core.errors import TypingError\n'), ((7241, 7270), 'numba.np.numpy_support.as_dtype', 'numpy_support.as_dtype', (['dtype'], {}), '(dtype)\n', (7263, 7270), False, 'from numba.np import numpy_support\n'), ((7321, 7350), 'numba.np.numpy_support.as_dtype', 'numpy_support.as_dtype', (['dtype'], {}), '(dtype)\n', (7343, 7350), False, 'from numba.np import numpy_support\n')]
|
import argparse
import numpy as np
import os
import sys
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from saliency.visualizer.smiles_visualizer import SmilesVisualizer
def visualize(dir_path):
parent_dir = os.path.dirname(dir_path)
saliency_vanilla = np.load(os.path.join(dir_path, "saliency_vanilla.npy"))
saliency_smooth = np.load(os.path.join(dir_path, "saliency_smooth.npy"))
saliency_bayes = np.load(os.path.join(dir_path, "saliency_bayes.npy"))
visualizer = SmilesVisualizer()
os.makedirs(os.path.join(parent_dir, "result_vanilla"), exist_ok=True)
os.makedirs(os.path.join(parent_dir, "result_smooth"), exist_ok=True)
os.makedirs(os.path.join(parent_dir, "result_bayes"), exist_ok=True)
test_idx = np.load(os.path.join(dir_path, "test_idx.npy"))
answer = np.load(os.path.join(dir_path, "answer.npy"))
output = np.load(os.path.join(dir_path, "output.npy"))
smiles_all = np.load(os.path.join(parent_dir, "smiles.npy"))
def calc_range(saliency):
vmax = float('-inf')
vmin = float('inf')
for v in saliency:
vmax = max(vmax, np.max(v))
vmin = min(vmin, np.min(v))
return vmin, vmax
v_range_vanilla = calc_range(saliency_vanilla)
v_range_smooth = calc_range(saliency_smooth)
v_range_bayes = calc_range(saliency_bayes)
def get_scaler(v_range):
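        # Normalizes saliency: positive values are divided by the global max and negative ones by |min|,
        # so the scaled values fall into [-1, 1].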
def scaler(saliency_):
saliency = np.copy(saliency_)
minv, maxv = v_range
if maxv == minv:
saliency = np.zeros_like(saliency)
else:
pos = saliency >= 0.0
saliency[pos] = saliency[pos]/maxv
nega = saliency < 0.0
saliency[nega] = saliency[nega]/(np.abs(minv))
return saliency
return scaler
scaler_vanilla = get_scaler(v_range_vanilla)
scaler_smooth = get_scaler(v_range_smooth)
scaler_bayes = get_scaler(v_range_bayes)
def color(x):
if x > 0:
# Red for positive value
return 1., 1. - x, 1. - x
else:
# Blue for negative value
x *= -1
return 1. - x, 1. - x, 1.
for i, id in enumerate(test_idx):
smiles = smiles_all[id]
out = output[i]
ans = answer[i]
# legend = "t:{}, p:{}".format(ans, out)
legend = ''
ext = '.png' # '.svg'
# visualizer.visualize(
# saliency_vanilla[id], smiles, save_filepath=os.path.join(parent_dir, "result_vanilla", str(id) + ext),
# visualize_ratio=1.0, legend=legend, scaler=scaler_vanilla, color_fn=color)
# visualizer.visualize(
# saliency_smooth[id], smiles, save_filepath=os.path.join(parent_dir, "result_smooth", str(id) + ext),
# visualize_ratio=1.0, legend=legend, scaler=scaler_smooth, color_fn=color)
visualizer.visualize(
saliency_bayes[id], smiles, save_filepath=os.path.join(parent_dir, "result_bayes", str(id) + ext),
visualize_ratio=1.0, legend=legend, scaler=scaler_bayes, color_fn=color)
def plot_result(prediction, answer, save_filepath='result.png'):
plt.scatter(prediction, answer, marker='.')
plt.plot([-100, 100], [-100, 100], c='r')
max_v = max(np.max(prediction), np.max(answer))
min_v = min(np.min(prediction), np.min(answer))
plt.xlim([min_v-0.1, max_v+0.1])
plt.xlabel("prediction")
plt.ylim([min_v-0.1, max_v+0.1])
plt.ylabel("ground truth")
plt.savefig(save_filepath)
plt.close()
def main():
parser = argparse.ArgumentParser(
description='Regression with own dataset.')
parser.add_argument('--dirpath', '-d', type=str, default='./results/M_30_3_32_32')
args = parser.parse_args()
path = args.dirpath
n_split = 5
output = []
answer = []
for i in range(n_split):
suffix = str(i) + "-" + str(n_split)
output.append(np.load(os.path.join(path, suffix, "output.npy")))
answer.append(np.load(os.path.join(path, suffix, "answer.npy")))
output = np.concatenate(output)
answer = np.concatenate(answer)
plot_result(output, answer, save_filepath=os.path.join(path, "result.png"))
for i in range(n_split):
suffix = str(i) + "-" + str(n_split)
print(suffix)
visualize(os.path.join(path, suffix))
if __name__ == '__main__':
main()
|
[
"matplotlib.pyplot.ylabel",
"argparse.ArgumentParser",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
"matplotlib.pyplot.close",
"matplotlib.pyplot.scatter",
"numpy.min",
"numpy.concatenate",
"matplotlib.pyplot.ylim",
"numpy.abs",
"matplotlib.pyplot.savefig",
"matplotlib.use",
"os.path.dirname",
"matplotlib.pyplot.xlim",
"numpy.copy",
"os.path.join",
"saliency.visualizer.smiles_visualizer.SmilesVisualizer",
"os.path.abspath",
"numpy.zeros_like"
] |
[((76, 97), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (90, 97), False, 'import matplotlib\n'), ((336, 361), 'os.path.dirname', 'os.path.dirname', (['dir_path'], {}), '(dir_path)\n', (351, 361), False, 'import os\n'), ((611, 629), 'saliency.visualizer.smiles_visualizer.SmilesVisualizer', 'SmilesVisualizer', ([], {}), '()\n', (627, 629), False, 'from saliency.visualizer.smiles_visualizer import SmilesVisualizer\n'), ((3296, 3339), 'matplotlib.pyplot.scatter', 'plt.scatter', (['prediction', 'answer'], {'marker': '"""."""'}), "(prediction, answer, marker='.')\n", (3307, 3339), True, 'import matplotlib.pyplot as plt\n'), ((3344, 3385), 'matplotlib.pyplot.plot', 'plt.plot', (['[-100, 100]', '[-100, 100]'], {'c': '"""r"""'}), "([-100, 100], [-100, 100], c='r')\n", (3352, 3385), True, 'import matplotlib.pyplot as plt\n'), ((3494, 3530), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[min_v - 0.1, max_v + 0.1]'], {}), '([min_v - 0.1, max_v + 0.1])\n', (3502, 3530), True, 'import matplotlib.pyplot as plt\n'), ((3531, 3555), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""prediction"""'], {}), "('prediction')\n", (3541, 3555), True, 'import matplotlib.pyplot as plt\n'), ((3560, 3596), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[min_v - 0.1, max_v + 0.1]'], {}), '([min_v - 0.1, max_v + 0.1])\n', (3568, 3596), True, 'import matplotlib.pyplot as plt\n'), ((3597, 3623), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ground truth"""'], {}), "('ground truth')\n", (3607, 3623), True, 'import matplotlib.pyplot as plt\n'), ((3628, 3654), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_filepath'], {}), '(save_filepath)\n', (3639, 3654), True, 'import matplotlib.pyplot as plt\n'), ((3659, 3670), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3668, 3670), True, 'import matplotlib.pyplot as plt\n'), ((3698, 3765), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Regression with own dataset."""'}), "(description='Regression with own dataset.')\n", (3721, 3765), False, 'import argparse\n'), ((4198, 4220), 'numpy.concatenate', 'np.concatenate', (['output'], {}), '(output)\n', (4212, 4220), True, 'import numpy as np\n'), ((4234, 4256), 'numpy.concatenate', 'np.concatenate', (['answer'], {}), '(answer)\n', (4248, 4256), True, 'import numpy as np\n'), ((393, 439), 'os.path.join', 'os.path.join', (['dir_path', '"""saliency_vanilla.npy"""'], {}), "(dir_path, 'saliency_vanilla.npy')\n", (405, 439), False, 'import os\n'), ((471, 516), 'os.path.join', 'os.path.join', (['dir_path', '"""saliency_smooth.npy"""'], {}), "(dir_path, 'saliency_smooth.npy')\n", (483, 516), False, 'import os\n'), ((547, 591), 'os.path.join', 'os.path.join', (['dir_path', '"""saliency_bayes.npy"""'], {}), "(dir_path, 'saliency_bayes.npy')\n", (559, 591), False, 'import os\n'), ((646, 688), 'os.path.join', 'os.path.join', (['parent_dir', '"""result_vanilla"""'], {}), "(parent_dir, 'result_vanilla')\n", (658, 688), False, 'import os\n'), ((721, 762), 'os.path.join', 'os.path.join', (['parent_dir', '"""result_smooth"""'], {}), "(parent_dir, 'result_smooth')\n", (733, 762), False, 'import os\n'), ((795, 835), 'os.path.join', 'os.path.join', (['parent_dir', '"""result_bayes"""'], {}), "(parent_dir, 'result_bayes')\n", (807, 835), False, 'import os\n'), ((876, 914), 'os.path.join', 'os.path.join', (['dir_path', '"""test_idx.npy"""'], {}), "(dir_path, 'test_idx.npy')\n", (888, 914), False, 'import os\n'), ((937, 973), 'os.path.join', 'os.path.join', (['dir_path', '"""answer.npy"""'], {}), 
"(dir_path, 'answer.npy')\n", (949, 973), False, 'import os\n'), ((996, 1032), 'os.path.join', 'os.path.join', (['dir_path', '"""output.npy"""'], {}), "(dir_path, 'output.npy')\n", (1008, 1032), False, 'import os\n'), ((1060, 1098), 'os.path.join', 'os.path.join', (['parent_dir', '"""smiles.npy"""'], {}), "(parent_dir, 'smiles.npy')\n", (1072, 1098), False, 'import os\n'), ((3402, 3420), 'numpy.max', 'np.max', (['prediction'], {}), '(prediction)\n', (3408, 3420), True, 'import numpy as np\n'), ((3422, 3436), 'numpy.max', 'np.max', (['answer'], {}), '(answer)\n', (3428, 3436), True, 'import numpy as np\n'), ((3454, 3472), 'numpy.min', 'np.min', (['prediction'], {}), '(prediction)\n', (3460, 3472), True, 'import numpy as np\n'), ((3474, 3488), 'numpy.min', 'np.min', (['answer'], {}), '(answer)\n', (3480, 3488), True, 'import numpy as np\n'), ((1553, 1571), 'numpy.copy', 'np.copy', (['saliency_'], {}), '(saliency_)\n', (1560, 1571), True, 'import numpy as np\n'), ((4304, 4336), 'os.path.join', 'os.path.join', (['path', '"""result.png"""'], {}), "(path, 'result.png')\n", (4316, 4336), False, 'import os\n'), ((4452, 4478), 'os.path.join', 'os.path.join', (['path', 'suffix'], {}), '(path, suffix)\n', (4464, 4478), False, 'import os\n'), ((195, 220), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (210, 220), False, 'import os\n'), ((1244, 1253), 'numpy.max', 'np.max', (['v'], {}), '(v)\n', (1250, 1253), True, 'import numpy as np\n'), ((1284, 1293), 'numpy.min', 'np.min', (['v'], {}), '(v)\n', (1290, 1293), True, 'import numpy as np\n'), ((1661, 1684), 'numpy.zeros_like', 'np.zeros_like', (['saliency'], {}), '(saliency)\n', (1674, 1684), True, 'import numpy as np\n'), ((4069, 4109), 'os.path.join', 'os.path.join', (['path', 'suffix', '"""output.npy"""'], {}), "(path, suffix, 'output.npy')\n", (4081, 4109), False, 'import os\n'), ((4142, 4182), 'os.path.join', 'os.path.join', (['path', 'suffix', '"""answer.npy"""'], {}), "(path, suffix, 'answer.npy')\n", (4154, 4182), False, 'import os\n'), ((1879, 1891), 'numpy.abs', 'np.abs', (['minv'], {}), '(minv)\n', (1885, 1891), True, 'import numpy as np\n')]
|
import numpy as np
from PySide2.QtCore import QSignalBlocker, Signal
from PySide2.QtWidgets import QGridLayout, QWidget
from hexrd.ui.scientificspinbox import ScientificDoubleSpinBox
DEFAULT_ENABLED_STYLE_SHEET = 'background-color: white'
DEFAULT_DISABLED_STYLE_SHEET = 'background-color: #F0F0F0'
INVALID_MATRIX_STYLE_SHEET = 'background-color: red'
class MatrixEditor(QWidget):
data_modified = Signal()
def __init__(self, data, parent=None):
super().__init__(parent)
self._data = data
# If this is not None, then only the elements present in the
# list (as (i, j) items) will be enabled.
self._enabled_elements = None
# If this is set, it will be called every time the data updates
# to apply equality constraints.
self._apply_constraints_func = None
# Whether or not the matrix is currently invalid
self.matrix_invalid = False
# Reason the matrix is currently invalid
self.matrix_invalid_reason = ''
self.setLayout(QGridLayout())
self.add_spin_boxes()
self.update_gui()
def add_spin_boxes(self):
layout = self.layout()
for i in range(self.rows):
for j in range(self.cols):
sb = self.create_spin_box()
layout.addWidget(sb, i, j)
def create_spin_box(self):
sb = ScientificDoubleSpinBox()
sb.setKeyboardTracking(False)
sb.valueChanged.connect(self.element_modified)
return sb
def element_modified(self):
self.update_data()
@property
def data(self):
return self._data
@data.setter
def data(self, v):
if not np.array_equal(self._data, v):
if self._data.shape != v.shape:
msg = (f'Shape {v.shape} does not match original shape '
f'{self._data.shape}')
raise AttributeError(msg)
self._data = v
self.reset_disabled_values()
self.update_gui()
@property
def rows(self):
return self.data.shape[0]
@property
def cols(self):
return self.data.shape[1]
def update_data(self):
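        # Copy GUI values into the existing array in place so external references to self.data stay valid.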
self.data[:] = self.gui_data
self.apply_constraints()
self.data_modified.emit()
def update_gui(self):
self.gui_data = self.data
@property
def gui_data(self):
row_range = range(self.rows)
col_range = range(self.cols)
return [[self.gui_value(i, j) for j in col_range] for i in row_range]
@gui_data.setter
def gui_data(self, v):
blockers = [QSignalBlocker(w) for w in self.all_widgets] # noqa: F841
for i in range(self.rows):
for j in range(self.cols):
self.set_gui_value(i, j, v[i][j])
@property
def all_widgets(self):
row_range = range(self.rows)
col_range = range(self.cols)
return [self.widget(i, j) for j in col_range for i in row_range]
@property
def enabled_widgets(self):
widgets = []
for i in range(self.rows):
for j in range(self.cols):
if (i, j) in self.enabled_elements:
widgets.append(self.widget(i, j))
return widgets
def widget(self, row, col):
return self.layout().itemAtPosition(row, col).widget()
def gui_value(self, row, col):
return self.widget(row, col).value()
def set_gui_value(self, row, col, val):
self.widget(row, col).setValue(val)
def set_matrix_invalid(self, s):
self.matrix_invalid = True
self.matrix_invalid_reason = s
self.update_tooltips()
self.update_enable_states()
def set_matrix_valid(self):
self.matrix_invalid = False
self.matrix_invalid_reason = ''
self.update_tooltips()
self.update_enable_states()
def update_tooltips(self):
if self.matrix_invalid:
tooltip = self.matrix_invalid_reason
else:
tooltip = ''
for w in self.enabled_widgets:
w.setToolTip(tooltip)
def update_enable_states(self):
enable_all = self.enabled_elements is None
for i in range(self.rows):
for j in range(self.cols):
w = self.widget(i, j)
enable = enable_all or (i, j) in self.enabled_elements
w.setEnabled(enable)
enabled_str = 'enabled' if enable else 'disabled'
style_sheet = getattr(self, f'{enabled_str}_style_sheet')
w.setStyleSheet(style_sheet)
def reset_disabled_values(self):
# Resets all disabled values to zero, then applies constraints
for i in range(self.rows):
for j in range(self.cols):
if not self.widget(i, j).isEnabled():
self.data[i, j] = 0.0
self.apply_constraints()
self.update_gui()
@property
def enabled_style_sheet(self):
if self.matrix_invalid:
return INVALID_MATRIX_STYLE_SHEET
return DEFAULT_ENABLED_STYLE_SHEET
@property
def disabled_style_sheet(self):
return DEFAULT_DISABLED_STYLE_SHEET
@property
def enabled_elements(self):
return self._enabled_elements
@enabled_elements.setter
def enabled_elements(self, v):
if self._enabled_elements != v:
self._enabled_elements = v
self.update_enable_states()
self.reset_disabled_values()
@property
def apply_constraints_func(self):
return self._apply_constraints_func
@apply_constraints_func.setter
def apply_constraints_func(self, v):
if self._apply_constraints_func != v:
self._apply_constraints_func = v
self.apply_constraints()
def apply_constraints(self):
if (func := self.apply_constraints_func) is None:
return
func(self.data)
self.update_gui()
if __name__ == '__main__':
import sys
from PySide2.QtWidgets import QApplication, QDialog, QVBoxLayout
if len(sys.argv) < 2:
sys.exit('Usage: <script> <matrix_size>')
rows, cols = [int(x) for x in sys.argv[1].split('x')]
data = np.ones((rows, cols))
app = QApplication(sys.argv)
dialog = QDialog()
layout = QVBoxLayout()
dialog.setLayout(layout)
editor = MatrixEditor(data)
layout.addWidget(editor)
# def constraints(x):
# x[2][2] = x[1][1]
# editor.enabled_elements = [(1, 1), (3, 4)]
# editor.apply_constraints_func = constraints
def on_data_modified():
print(f'Data modified: {editor.data}')
editor.data_modified.connect(on_data_modified)
dialog.finished.connect(app.quit)
dialog.show()
app.exec_()
|
[
"hexrd.ui.scientificspinbox.ScientificDoubleSpinBox",
"PySide2.QtWidgets.QGridLayout",
"numpy.ones",
"PySide2.QtCore.QSignalBlocker",
"PySide2.QtCore.Signal",
"PySide2.QtWidgets.QApplication",
"numpy.array_equal",
"sys.exit",
"PySide2.QtWidgets.QVBoxLayout",
"PySide2.QtWidgets.QDialog"
] |
[((407, 415), 'PySide2.QtCore.Signal', 'Signal', ([], {}), '()\n', (413, 415), False, 'from PySide2.QtCore import QSignalBlocker, Signal\n'), ((6248, 6269), 'numpy.ones', 'np.ones', (['(rows, cols)'], {}), '((rows, cols))\n', (6255, 6269), True, 'import numpy as np\n'), ((6281, 6303), 'PySide2.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (6293, 6303), False, 'from PySide2.QtWidgets import QApplication, QDialog, QVBoxLayout\n'), ((6317, 6326), 'PySide2.QtWidgets.QDialog', 'QDialog', ([], {}), '()\n', (6324, 6326), False, 'from PySide2.QtWidgets import QApplication, QDialog, QVBoxLayout\n'), ((6340, 6353), 'PySide2.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (6351, 6353), False, 'from PySide2.QtWidgets import QApplication, QDialog, QVBoxLayout\n'), ((1383, 1408), 'hexrd.ui.scientificspinbox.ScientificDoubleSpinBox', 'ScientificDoubleSpinBox', ([], {}), '()\n', (1406, 1408), False, 'from hexrd.ui.scientificspinbox import ScientificDoubleSpinBox\n'), ((6136, 6177), 'sys.exit', 'sys.exit', (['"""Usage: <script> <matrix_size>"""'], {}), "('Usage: <script> <matrix_size>')\n", (6144, 6177), False, 'import sys\n'), ((1044, 1057), 'PySide2.QtWidgets.QGridLayout', 'QGridLayout', ([], {}), '()\n', (1055, 1057), False, 'from PySide2.QtWidgets import QGridLayout, QWidget\n'), ((1697, 1726), 'numpy.array_equal', 'np.array_equal', (['self._data', 'v'], {}), '(self._data, v)\n', (1711, 1726), True, 'import numpy as np\n'), ((2623, 2640), 'PySide2.QtCore.QSignalBlocker', 'QSignalBlocker', (['w'], {}), '(w)\n', (2637, 2640), False, 'from PySide2.QtCore import QSignalBlocker, Signal\n')]
|
import sqlite3
from random import randint, choice
import numpy as np
conn = sqlite3.connect('ej.db')
c = conn.cursor()
# OBTAINING MAXIMUM, MINIMUM AND AVERAGE SIZES #
c.execute('SELECT MAX(alto) FROM features')
resultado = c.fetchone()
if resultado:
altoMax = resultado[0]
c.execute('SELECT MIN(alto) FROM features')
resultado = c.fetchone()
if resultado:
altoMin = resultado[0]
altoProm = abs((altoMax + altoMin) / 2)
#print altoMax , altoProm , altoMin
arrAlto = [altoMax , altoProm , altoMin]
c.execute('SELECT MAX(ancho) FROM features')
resultado = c.fetchone()
if resultado:
anchoMax = resultado[0]
c.execute('SELECT MIN(ancho) FROM features')
resultado = c.fetchone()
if resultado:
anchoMin = resultado[0]
anchoProm = abs((anchoMax + anchoMin) / 2)
anchoMaxProm = abs((anchoMax + anchoProm) / 2)
anchoMinProm = abs((anchoMin + anchoProm) / 2)
arrAncho = [anchoMax, anchoMaxProm, anchoProm, anchoMinProm, anchoMin]
#### CREATING NEGATIVE CLASSES
for i in range(0,3):
for j in range(0,5):
for _ in range(10):
negAncho = arrAncho[j]
negAlto = arrAlto[i]
rand_alto_max = int(negAlto * 1.5)
rand_alto_min = int(negAlto * 0.5)
r3 = rand_alto_max * 2
rand_ancho_max = int(negAncho*1.5)
rand_ancho_min = int(negAncho*0.5)
r33 = rand_ancho_max * 2
f1 = choice([np.random.randint(1, rand_alto_min), np.random.randint(rand_alto_max, r3)])
f2 = choice([np.random.randint(1, rand_ancho_min), np.random.randint(rand_ancho_max, r33)])
c.execute("insert into features (ancho, alto, area, clase) values (?, ?, ?, ?)",
(f2, f1, f2*f1, 0))
conn.commit()
conn.close()
|
[
"numpy.random.randint",
"sqlite3.connect"
] |
[((78, 102), 'sqlite3.connect', 'sqlite3.connect', (['"""ej.db"""'], {}), "('ej.db')\n", (93, 102), False, 'import sqlite3\n'), ((1413, 1448), 'numpy.random.randint', 'np.random.randint', (['(1)', 'rand_alto_min'], {}), '(1, rand_alto_min)\n', (1430, 1448), True, 'import numpy as np\n'), ((1450, 1486), 'numpy.random.randint', 'np.random.randint', (['rand_alto_max', 'r3'], {}), '(rand_alto_max, r3)\n', (1467, 1486), True, 'import numpy as np\n'), ((1514, 1550), 'numpy.random.randint', 'np.random.randint', (['(1)', 'rand_ancho_min'], {}), '(1, rand_ancho_min)\n', (1531, 1550), True, 'import numpy as np\n'), ((1552, 1590), 'numpy.random.randint', 'np.random.randint', (['rand_ancho_max', 'r33'], {}), '(rand_ancho_max, r33)\n', (1569, 1590), True, 'import numpy as np\n')]
|
from itertools import count
import numpy as np
class Particle(object):
"""Object containing all the properties for a single particle"""
_ids = count(0)
def __init__(self, main_data=None, x=np.zeros(2)):
self.id = next(self._ids)
self.main_data = main_data
self.x = np.array(x)
self.v = np.zeros(2)
self.a = np.zeros(2)
self.D = 0
self.rho = main_data.rho0
self.P = 0
self.m = main_data.dx ** 2 * main_data.rho0 # initial mass depends on the initial particle spacing
self.boundary = False # Particle by default is not on the boundary
# For predictor corrector
self.prev_x = np.array(x)
self.prev_v = np.zeros(2)
self.prev_rho = main_data.rho0
def calc_index(self):
"""Calculates the 2D integer index for the particle's location in the search grid"""
# Calculates the bucket coordinates
self.list_num = np.array((self.x - self.main_data.min_x) /
(2.0 * self.main_data.h), int)
def B(self):
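        # Equation-of-state stiffness constant B = rho0 * c0**2 / gamma (Tait-style EOS), used by update_P.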
return (self.main_data.rho0 * self.main_data.c0 ** 2) / self.main_data.gamma
def update_P(self):
"""
Equation of state
System is assumed slightly compressible
"""
rho0 = self.main_data.rho0
gamma = self.main_data.gamma
self.P = self.B() * ((self.rho / rho0)**gamma - 1)
def set_main_data(self, main_data):
self.main_data = main_data
def set_x(self, x):
self.x = x
self.calc_index()
def set_v(self, v):
self.v = v
def set_a(self, a):
self.a = a
def set_D(self, D):
self.D = D
def set_rho(self, rho):
self.rho = rho
self.update_P()
    def set_m(self, m):  # renamed from m() to avoid clashing with the mass attribute set in __init__
self.m = m
def list_attributes(self):
x_s = "position: " + str(self.x) + ", "
v_s = "velocity: " + str(self.v) + ", "
a_s = "acceleration: " + str(self.a) + ", "
D_s = "derivative of density: " + str(self.D) + ", "
rho_s = "density: " + str(self.rho) + ", "
m_s = "mass: " + str(self.m) + ", "
P_s = "pressure: " + str(self.P) + ", "
boundary_s = "is boundary: " + str(self.boundary)
return [x_s + v_s + a_s + D_s + rho_s + m_s + P_s + boundary_s]
|
[
"numpy.array",
"numpy.zeros",
"itertools.count"
] |
[((154, 162), 'itertools.count', 'count', (['(0)'], {}), '(0)\n', (159, 162), False, 'from itertools import count\n'), ((205, 216), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (213, 216), True, 'import numpy as np\n'), ((305, 316), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (313, 316), True, 'import numpy as np\n'), ((334, 345), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (342, 345), True, 'import numpy as np\n'), ((363, 374), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (371, 374), True, 'import numpy as np\n'), ((687, 698), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (695, 698), True, 'import numpy as np\n'), ((721, 732), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (729, 732), True, 'import numpy as np\n'), ((960, 1033), 'numpy.array', 'np.array', (['((self.x - self.main_data.min_x) / (2.0 * self.main_data.h))', 'int'], {}), '((self.x - self.main_data.min_x) / (2.0 * self.main_data.h), int)\n', (968, 1033), True, 'import numpy as np\n')]
|
import os
import glob
import cv2
import numpy as np
import torch
from torchvision.transforms import transforms
from natsort import natsorted
from models import resmasking_dropout1
from utils.datasets.fer2013dataset import EMOTION_DICT
from barez import show
transform = transforms.Compose(
[
transforms.ToPILImage(),
transforms.ToTensor(),
]
)
def activations_mask(tensor):
tensor = torch.squeeze(tensor, 0)
tensor = torch.mean(tensor, 0)
tensor = tensor.detach().cpu().numpy()
tensor = np.maximum(tensor, 0)
tensor = cv2.resize(tensor, (224, 224))
tensor = tensor - np.min(tensor)
tensor = tensor / np.max(tensor)
heatmap = cv2.applyColorMap(np.uint8(255 * tensor), cv2.COLORMAP_JET)
return heatmap
model = resmasking_dropout1(3, 7)
# state = torch.load('./saved/checkpoints/resmasking_dropout1_rot30_2019Nov17_14.33')
state = torch.load("./saved/checkpoints/Z_resmasking_dropout1_rot30_2019Nov30_13.32")
model.load_state_dict(state["net"])
model.cuda()
model.eval()
for image_path in natsorted(
glob.glob("/home/z/research/bkemo/images/**/*.png", recursive=True)
):
image_name = os.path.basename(image_path)
print(image_name)
# image_path = '/home/z/research/bkemo/images/disgust/0.0_dc10a3_1976_0.png'
image = cv2.imread(image_path)
image = cv2.resize(image, (224, 224))
tensor = transform(image)
tensor = torch.unsqueeze(tensor, 0)
tensor = tensor.cuda()
# output = model(tensor)
x = model.conv1(tensor) # 112
x = model.bn1(x)
x = model.relu(x)
x = model.maxpool(x) # 56
x = model.layer1(x) # 56
m = model.mask1(x)
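    # Residual masking: features are rescaled by (1 + mask) after each stage.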
x = x * (1 + m)
x = model.layer2(x) # 28
m = model.mask2(x)
x = x * (1 + m)
x = model.layer3(x) # 14
heat_1 = activations_mask(x)
m = model.mask3(x)
x = x * (1 + m)
# heat_2 = activations_mask(m)
x = model.layer4(x) # 7
m = model.mask4(x)
x = x * (1 + m)
x = model.avgpool(x)
x = torch.flatten(x, 1)
output = model.fc(x)
# print(np.sum(heat_1 - heat_2))
# show(np.concatenate((image, heat_1, heat_2), axis=1))
cv2.imwrite(
"./masking_provements/{}".format(image_name),
np.concatenate((image, heat_1), axis=1),
)
# np.concatenate((image, heat_1, heat_2), axis=1))
# output = output.cpu().numpy()
# print(EMOTION_DICT[torch.argmax(output, 1).item()])
|
[
"torchvision.transforms.transforms.ToPILImage",
"models.resmasking_dropout1",
"numpy.uint8",
"cv2.resize",
"torch.mean",
"torch.unsqueeze",
"torch.load",
"numpy.min",
"torch.flatten",
"numpy.max",
"torchvision.transforms.transforms.ToTensor",
"os.path.basename",
"numpy.concatenate",
"torch.squeeze",
"numpy.maximum",
"cv2.imread",
"glob.glob"
] |
[((774, 799), 'models.resmasking_dropout1', 'resmasking_dropout1', (['(3)', '(7)'], {}), '(3, 7)\n', (793, 799), False, 'from models import resmasking_dropout1\n'), ((894, 971), 'torch.load', 'torch.load', (['"""./saved/checkpoints/Z_resmasking_dropout1_rot30_2019Nov30_13.32"""'], {}), "('./saved/checkpoints/Z_resmasking_dropout1_rot30_2019Nov30_13.32')\n", (904, 971), False, 'import torch\n'), ((414, 438), 'torch.squeeze', 'torch.squeeze', (['tensor', '(0)'], {}), '(tensor, 0)\n', (427, 438), False, 'import torch\n'), ((452, 473), 'torch.mean', 'torch.mean', (['tensor', '(0)'], {}), '(tensor, 0)\n', (462, 473), False, 'import torch\n'), ((530, 551), 'numpy.maximum', 'np.maximum', (['tensor', '(0)'], {}), '(tensor, 0)\n', (540, 551), True, 'import numpy as np\n'), ((565, 595), 'cv2.resize', 'cv2.resize', (['tensor', '(224, 224)'], {}), '(tensor, (224, 224))\n', (575, 595), False, 'import cv2\n'), ((1068, 1135), 'glob.glob', 'glob.glob', (['"""/home/z/research/bkemo/images/**/*.png"""'], {'recursive': '(True)'}), "('/home/z/research/bkemo/images/**/*.png', recursive=True)\n", (1077, 1135), False, 'import glob\n'), ((1156, 1184), 'os.path.basename', 'os.path.basename', (['image_path'], {}), '(image_path)\n', (1172, 1184), False, 'import os\n'), ((1300, 1322), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (1310, 1322), False, 'import cv2\n'), ((1335, 1364), 'cv2.resize', 'cv2.resize', (['image', '(224, 224)'], {}), '(image, (224, 224))\n', (1345, 1364), False, 'import cv2\n'), ((1408, 1434), 'torch.unsqueeze', 'torch.unsqueeze', (['tensor', '(0)'], {}), '(tensor, 0)\n', (1423, 1434), False, 'import torch\n'), ((1999, 2018), 'torch.flatten', 'torch.flatten', (['x', '(1)'], {}), '(x, 1)\n', (2012, 2018), False, 'import torch\n'), ((305, 328), 'torchvision.transforms.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (326, 328), False, 'from torchvision.transforms import transforms\n'), ((338, 359), 'torchvision.transforms.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (357, 359), False, 'from torchvision.transforms import transforms\n'), ((618, 632), 'numpy.min', 'np.min', (['tensor'], {}), '(tensor)\n', (624, 632), True, 'import numpy as np\n'), ((655, 669), 'numpy.max', 'np.max', (['tensor'], {}), '(tensor)\n', (661, 669), True, 'import numpy as np\n'), ((703, 725), 'numpy.uint8', 'np.uint8', (['(255 * tensor)'], {}), '(255 * tensor)\n', (711, 725), True, 'import numpy as np\n'), ((2223, 2262), 'numpy.concatenate', 'np.concatenate', (['(image, heat_1)'], {'axis': '(1)'}), '((image, heat_1), axis=1)\n', (2237, 2262), True, 'import numpy as np\n')]
|
import os
import numpy as np
import tensorflow as tf
from image_quality.utils import utils
class TrainDataGenerator(tf.keras.utils.Sequence):
    '''Inherits from the Keras Sequence base object; allows the use of multiprocessing in .fit_generator'''
def __init__(self, samples, img_dir, batch_size, n_classes, basenet_preprocess,
img_load_dims=(256, 256), img_crop_dims=(224, 224), shuffle=True):
self.samples = samples
self.img_dir = img_dir
self.batch_size = batch_size
self.n_classes = n_classes
self.basenet_preprocess = basenet_preprocess # Keras basenet specific preprocessing function
self.img_load_dims = img_load_dims # dimensions that images get resized into when loaded
self.img_crop_dims = img_crop_dims # dimensions that images get randomly cropped to
self.shuffle = shuffle
self.on_epoch_end() # call ensures that samples are shuffled in first epoch if shuffle is set to True
def __len__(self):
return int(np.ceil(len(self.samples) / self.batch_size)) # number of batches per epoch
def __getitem__(self, index):
batch_indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size] # get batch indexes
batch_samples = [self.samples[i] for i in batch_indexes] # get batch samples
X, y = self.__data_generator(batch_samples)
return X, y
def on_epoch_end(self):
self.indexes = np.arange(len(self.samples))
if self.shuffle is True:
np.random.shuffle(self.indexes)
def __data_generator(self, batch_samples):
# initialize images and labels tensors for faster processing
X = np.empty((len(batch_samples), *self.img_crop_dims, 3))
y = np.empty((len(batch_samples), self.n_classes))
for i, sample in enumerate(batch_samples):
# load and randomly augment image
img_file = os.path.join(self.img_dir, '{}'.format(sample['image_id']))
img = utils.load_image(img_file, self.img_load_dims)
if img is not None:
img = utils.random_crop(img, self.img_crop_dims)
img = utils.random_horizontal_flip(img)
X[i, ] = img
# normalize labels
y[i, ] = utils.normalize_labels(sample['label'])
# apply basenet specific preprocessing
# input is 4D numpy array of RGB values within [0, 255]
X = self.basenet_preprocess(X)
return X, y
class TestDataGenerator(tf.keras.utils.Sequence):
    '''Inherits from the Keras Sequence base object; allows the use of multiprocessing in .fit_generator'''
def __init__(self, samples, img_dir, batch_size, n_classes, basenet_preprocess,
img_load_dims=(224, 224)):
self.samples = samples
self.img_dir = img_dir
self.batch_size = batch_size
self.n_classes = n_classes
self.basenet_preprocess = basenet_preprocess # Keras basenet specific preprocessing function
self.img_load_dims = img_load_dims # dimensions that images get resized into when loaded
self.on_epoch_end() # call ensures that samples are shuffled in first epoch if shuffle is set to True
def __len__(self):
return int(np.ceil(len(self.samples) / self.batch_size)) # number of batches per epoch
def __getitem__(self, index):
batch_indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size] # get batch indexes
batch_samples = [self.samples[i] for i in batch_indexes] # get batch samples
X, y = self.__data_generator(batch_samples)
return X, y
def on_epoch_end(self):
self.indexes = np.arange(len(self.samples))
def __data_generator(self, batch_samples):
# initialize images and labels tensors for faster processing
X = np.empty((len(batch_samples), *self.img_load_dims, 3))
y = np.empty((len(batch_samples), self.n_classes))
for i, sample in enumerate(batch_samples):
# load and randomly augment image
img_file = os.path.join(self.img_dir, '{}'.format(sample['image_id']))
img = utils.load_image(img_file, self.img_load_dims)
if img is not None:
X[i, ] = img
# normalize labels
if sample.get('label') is not None:
y[i, ] = utils.normalize_labels(sample['label'])
# apply basenet specific preprocessing
# input is 4D numpy array of RGB values within [0, 255]
X = self.basenet_preprocess(X)
return X, y
|
[
"image_quality.utils.utils.random_crop",
"image_quality.utils.utils.load_image",
"image_quality.utils.utils.random_horizontal_flip",
"image_quality.utils.utils.normalize_labels",
"numpy.random.shuffle"
] |
[((1440, 1471), 'numpy.random.shuffle', 'np.random.shuffle', (['self.indexes'], {}), '(self.indexes)\n', (1457, 1471), True, 'import numpy as np\n'), ((1878, 1924), 'image_quality.utils.utils.load_image', 'utils.load_image', (['img_file', 'self.img_load_dims'], {}), '(img_file, self.img_load_dims)\n', (1894, 1924), False, 'from image_quality.utils import utils\n'), ((2118, 2157), 'image_quality.utils.utils.normalize_labels', 'utils.normalize_labels', (["sample['label']"], {}), "(sample['label'])\n", (2140, 2157), False, 'from image_quality.utils import utils\n'), ((3876, 3922), 'image_quality.utils.utils.load_image', 'utils.load_image', (['img_file', 'self.img_load_dims'], {}), '(img_file, self.img_load_dims)\n', (3892, 3922), False, 'from image_quality.utils import utils\n'), ((1965, 2007), 'image_quality.utils.utils.random_crop', 'utils.random_crop', (['img', 'self.img_crop_dims'], {}), '(img, self.img_crop_dims)\n', (1982, 2007), False, 'from image_quality.utils import utils\n'), ((2022, 2055), 'image_quality.utils.utils.random_horizontal_flip', 'utils.random_horizontal_flip', (['img'], {}), '(img)\n', (2050, 2055), False, 'from image_quality.utils import utils\n'), ((4055, 4094), 'image_quality.utils.utils.normalize_labels', 'utils.normalize_labels', (["sample['label']"], {}), "(sample['label'])\n", (4077, 4094), False, 'from image_quality.utils import utils\n')]
|
# -*- coding: utf-8 -*-
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from qiskit.quantum_info.operators.channel import Choi, PTM, Kraus, Chi, SuperOp
import numpy as np
from qat.comm.quops.ttypes import QuantumChannel, RepresentationType
from qat.comm.datamodel.ttypes import Matrix, ComplexNumber
def array_to_matrix(array):
"""
    Transform a two-dimensional numpy array to a myqlm Matrix.
    Args:
        array: (ndarray) a two-dimensional numpy array
    Returns:
        (Matrix): a myqlm Matrix
    """
    assert len(array.shape) == 2, "The array must be two-dimensional"
data = []
for arr in array:
for elem in arr:
data.append(ComplexNumber(np.real(elem), np.imag(elem)))
matri = Matrix(array.shape[0], array.shape[1], data)
return matri
def qiskit_to_qchannel(representation):
"""
    Create a myqlm representation of a quantum channel from a qiskit representation
of a quantum channel.
Args:
representation: (Kraus|Choi|Chi|SuperOp|PTM) qiskit representation of a quantum channel.
Returns:
(QuantumChannel): myqlm representation of a quantum channel.
"""
qchannel = None
qiskit_data = representation.data
# Find what representation it is.
# Then create the corresponding matrix (kraus_ops|basis|matrix)from the data
# of the representation.
# Finally, create the QuantumChannel with the RepresentationType, the arity
# (got from the qiskit representation) and the matrix.
if isinstance(representation, Kraus):
kraus_ops = []
for arr in qiskit_data:
kraus_ops.append(array_to_matrix(arr))
qchannel = QuantumChannel(
representation=RepresentationType.KRAUS,
arity=representation.num_qubits,
kraus_ops=kraus_ops)
elif isinstance(representation, Chi):
basis = []
basis.append(array_to_matrix(qiskit_data))
qchannel = QuantumChannel(
representation=RepresentationType.CHI,
arity=representation.num_qubits,
basis=basis)
elif isinstance(representation, SuperOp):
basis = []
basis.append(array_to_matrix(qiskit_data))
qchannel = QuantumChannel(
representation=RepresentationType.SUPEROP,
arity=representation.num_qubits,
basis=basis)
elif isinstance(representation, PTM):
matri = array_to_matrix(qiskit_data)
qchannel = QuantumChannel(
representation=RepresentationType.PTM,
arity=representation.num_qubits,
matrix=matri)
elif isinstance(representation, Choi):
matri = array_to_matrix(qiskit_data)
qchannel = QuantumChannel(
representation=RepresentationType.CHOI,
arity=representation.num_qubits,
matrix=matri)
return qchannel
def qchannel_to_qiskit(representation):
"""
    Create a qiskit representation of a quantum channel from a myqlm representation
of a quantum channel.
Args:
representation: (QuantumChannel) myqlm representation of a quantum channel.
Returns:
(Kraus|Choi|Chi|SuperOp|PTM): qiskit representation of a quantum channel.
"""
rep = representation.representation
# Find what representation it is.
# Then create the corresponding matrix and shape it like qiskit is expecting it.
# Finally, create the qiskit representation from that matrix.
if rep in (RepresentationType.PTM, RepresentationType.CHOI):
matri = representation.matrix
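        # The myqlm Matrix stores complex entries in a flat list of ComplexNumber(re, im);
        # rebuild real and imaginary parts into a complex ndarray below.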
data_re = []
data_im = []
for i in range(matri.nRows):
for j in range(matri.nCols):
data_re.append(matri.data[i * matri.nRows + j].re + 0.j)
data_im.append(matri.data[i * matri.nRows + j].im)
data = np.array(data_re)
data.imag = np.array(data_im)
data = data.reshape((matri.nRows, matri.nCols))
return PTM(data) if (rep == RepresentationType.PTM) else Choi(data)
if rep in (RepresentationType.CHI, RepresentationType.SUPEROP):
final_data = []
for matri in representation.basis:
data_re = []
data_im = []
for i in range(matri.nRows):
for j in range(matri.nCols):
data_re.append(matri.data[i * matri.nRows + j].re + 0.j)
data_im.append(matri.data[i * matri.nRows + j].im)
data = np.array(data_re)
data.imag = np.array(data_im)
data = data.reshape((matri.nRows, matri.nCols))
final_data.append(data)
if rep == RepresentationType.CHI:
return Chi(final_data) if len(final_data) > 1 else Chi(final_data[0])
return SuperOp(final_data) if len(final_data) > 1 else SuperOp(final_data[0])
if rep == RepresentationType.KRAUS:
final_data = []
for matri in representation.kraus_ops:
data_re = []
data_im = []
for i in range(matri.nRows):
for j in range(matri.nCols):
data_re.append(matri.data[i * matri.nRows + j].re + 0.j)
data_im.append(matri.data[i * matri.nRows + j].im)
data = np.array(data_re)
data.imag = np.array(data_im)
data = data.reshape((matri.nRows, matri.nCols))
final_data.append(data)
return Kraus(final_data)
return None
|
[
"qiskit.quantum_info.operators.channel.Chi",
"qiskit.quantum_info.operators.channel.Kraus",
"qiskit.quantum_info.operators.channel.Choi",
"qiskit.quantum_info.operators.channel.PTM",
"qat.comm.quops.ttypes.QuantumChannel",
"numpy.array",
"qiskit.quantum_info.operators.channel.SuperOp",
"numpy.real",
"qat.comm.datamodel.ttypes.Matrix",
"numpy.imag"
] |
[((1512, 1556), 'qat.comm.datamodel.ttypes.Matrix', 'Matrix', (['array.shape[0]', 'array.shape[1]', 'data'], {}), '(array.shape[0], array.shape[1], data)\n', (1518, 1556), False, 'from qat.comm.datamodel.ttypes import Matrix, ComplexNumber\n'), ((2444, 2558), 'qat.comm.quops.ttypes.QuantumChannel', 'QuantumChannel', ([], {'representation': 'RepresentationType.KRAUS', 'arity': 'representation.num_qubits', 'kraus_ops': 'kraus_ops'}), '(representation=RepresentationType.KRAUS, arity=\n representation.num_qubits, kraus_ops=kraus_ops)\n', (2458, 2558), False, 'from qat.comm.quops.ttypes import QuantumChannel, RepresentationType\n'), ((4610, 4627), 'numpy.array', 'np.array', (['data_re'], {}), '(data_re)\n', (4618, 4627), True, 'import numpy as np\n'), ((4648, 4665), 'numpy.array', 'np.array', (['data_im'], {}), '(data_im)\n', (4656, 4665), True, 'import numpy as np\n'), ((6187, 6204), 'qiskit.quantum_info.operators.channel.Kraus', 'Kraus', (['final_data'], {}), '(final_data)\n', (6192, 6204), False, 'from qiskit.quantum_info.operators.channel import Choi, PTM, Kraus, Chi, SuperOp\n'), ((2722, 2826), 'qat.comm.quops.ttypes.QuantumChannel', 'QuantumChannel', ([], {'representation': 'RepresentationType.CHI', 'arity': 'representation.num_qubits', 'basis': 'basis'}), '(representation=RepresentationType.CHI, arity=representation.\n num_qubits, basis=basis)\n', (2736, 2826), False, 'from qat.comm.quops.ttypes import QuantumChannel, RepresentationType\n'), ((4737, 4746), 'qiskit.quantum_info.operators.channel.PTM', 'PTM', (['data'], {}), '(data)\n', (4740, 4746), False, 'from qiskit.quantum_info.operators.channel import Choi, PTM, Kraus, Chi, SuperOp\n'), ((4787, 4797), 'qiskit.quantum_info.operators.channel.Choi', 'Choi', (['data'], {}), '(data)\n', (4791, 4797), False, 'from qiskit.quantum_info.operators.channel import Choi, PTM, Kraus, Chi, SuperOp\n'), ((5236, 5253), 'numpy.array', 'np.array', (['data_re'], {}), '(data_re)\n', (5244, 5253), True, 'import numpy as np\n'), ((5278, 5295), 'numpy.array', 'np.array', (['data_im'], {}), '(data_im)\n', (5286, 5295), True, 'import numpy as np\n'), ((5531, 5550), 'qiskit.quantum_info.operators.channel.SuperOp', 'SuperOp', (['final_data'], {}), '(final_data)\n', (5538, 5550), False, 'from qiskit.quantum_info.operators.channel import Choi, PTM, Kraus, Chi, SuperOp\n'), ((5579, 5601), 'qiskit.quantum_info.operators.channel.SuperOp', 'SuperOp', (['final_data[0]'], {}), '(final_data[0])\n', (5586, 5601), False, 'from qiskit.quantum_info.operators.channel import Choi, PTM, Kraus, Chi, SuperOp\n'), ((6016, 6033), 'numpy.array', 'np.array', (['data_re'], {}), '(data_re)\n', (6024, 6033), True, 'import numpy as np\n'), ((6058, 6075), 'numpy.array', 'np.array', (['data_im'], {}), '(data_im)\n', (6066, 6075), True, 'import numpy as np\n'), ((2994, 3102), 'qat.comm.quops.ttypes.QuantumChannel', 'QuantumChannel', ([], {'representation': 'RepresentationType.SUPEROP', 'arity': 'representation.num_qubits', 'basis': 'basis'}), '(representation=RepresentationType.SUPEROP, arity=\n representation.num_qubits, basis=basis)\n', (3008, 3102), False, 'from qat.comm.quops.ttypes import QuantumChannel, RepresentationType\n'), ((5453, 5468), 'qiskit.quantum_info.operators.channel.Chi', 'Chi', (['final_data'], {}), '(final_data)\n', (5456, 5468), False, 'from qiskit.quantum_info.operators.channel import Choi, PTM, Kraus, Chi, SuperOp\n'), ((5497, 5515), 'qiskit.quantum_info.operators.channel.Chi', 'Chi', (['final_data[0]'], {}), '(final_data[0])\n', (5500, 5515), False, 'from 
qiskit.quantum_info.operators.channel import Choi, PTM, Kraus, Chi, SuperOp\n'), ((1469, 1482), 'numpy.real', 'np.real', (['elem'], {}), '(elem)\n', (1476, 1482), True, 'import numpy as np\n'), ((1484, 1497), 'numpy.imag', 'np.imag', (['elem'], {}), '(elem)\n', (1491, 1497), True, 'import numpy as np\n'), ((3241, 3346), 'qat.comm.quops.ttypes.QuantumChannel', 'QuantumChannel', ([], {'representation': 'RepresentationType.PTM', 'arity': 'representation.num_qubits', 'matrix': 'matri'}), '(representation=RepresentationType.PTM, arity=representation.\n num_qubits, matrix=matri)\n', (3255, 3346), False, 'from qat.comm.quops.ttypes import QuantumChannel, RepresentationType\n'), ((3486, 3592), 'qat.comm.quops.ttypes.QuantumChannel', 'QuantumChannel', ([], {'representation': 'RepresentationType.CHOI', 'arity': 'representation.num_qubits', 'matrix': 'matri'}), '(representation=RepresentationType.CHOI, arity=representation\n .num_qubits, matrix=matri)\n', (3500, 3592), False, 'from qat.comm.quops.ttypes import QuantumChannel, RepresentationType\n')]
|
#!/usr/bin/env python3
"""
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from openvino.runtime import Core, get_version
import cv2 as cv
import numpy as np
import logging as log
from time import perf_counter
import sys
from argparse import ArgumentParser, SUPPRESS
from pathlib import Path
sys.path.append(str(Path(__file__).resolve().parents[2] / 'common/python'))
sys.path.append(str(Path(__file__).resolve().parents[2] / 'common/python/openvino/model_zoo'))
import monitors
from images_capture import open_images_capture
from model_api.performance_metrics import PerformanceMetrics
log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.DEBUG, stream=sys.stdout)
def build_arg():
parser = ArgumentParser(add_help=False)
in_args = parser.add_argument_group('Options')
in_args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Help with the script.')
in_args.add_argument("-m", "--model", help="Required. Path to .xml file with pre-trained model.",
required=True, type=Path)
in_args.add_argument("-d", "--device",
help="Optional. Specify target device for infer: CPU, GPU, HDDL or MYRIAD. "
"Default: CPU",
default="CPU", type=str)
in_args.add_argument('-i', "--input", required=True,
help='Required. An input to process. The input must be a single image, '
'a folder of images, video file or camera id.')
in_args.add_argument('--loop', default=False, action='store_true',
help='Optional. Enable reading the input in a loop.')
in_args.add_argument('-o', '--output', required=False,
help='Optional. Name of the output file(s) to save.')
in_args.add_argument('-limit', '--output_limit', required=False, default=1000, type=int,
help='Optional. Number of frames to store in output. '
'If 0 is set, all frames are stored.')
in_args.add_argument("--no_show", help="Optional. Don't show output.",
action='store_true', default=False)
in_args.add_argument("-u", "--utilization_monitors", default="", type=str,
help="Optional. List of monitors to show initially.")
return parser
def main(args):
cap = open_images_capture(args.input, args.loop)
log.info('OpenVINO Inference Engine')
log.info('\tbuild: {}'.format(get_version()))
core = Core()
log.info('Reading model {}'.format(args.model))
model = core.read_model(args.model, args.model.with_suffix(".bin"))
input_tensor_name = 'data_l'
input_shape = model.input(input_tensor_name).shape
assert input_shape[1] == 1, "Expected model input shape with 1 channel"
inputs = {}
for input in model.inputs:
inputs[input.get_any_name()] = np.zeros(input.shape)
assert len(model.outputs) == 1, "Expected number of outputs is equal 1"
compiled_model = core.compile_model(model, device_name=args.device)
infer_request = compiled_model.create_infer_request()
log.info('The model {} is loaded to {}'.format(args.model, args.device))
_, _, h_in, w_in = input_shape
frames_processed = 0
imshow_size = (640, 480)
graph_size = (imshow_size[0] // 2, imshow_size[1] // 4)
presenter = monitors.Presenter(args.utilization_monitors, imshow_size[1] * 2 - graph_size[1], graph_size)
metrics = PerformanceMetrics()
video_writer = cv.VideoWriter()
if args.output and not video_writer.open(args.output, cv.VideoWriter_fourcc(*'MJPG'),
cap.fps(), (imshow_size[0] * 2, imshow_size[1] * 2)):
raise RuntimeError("Can't open video writer")
start_time = perf_counter()
original_frame = cap.read()
if original_frame is None:
raise RuntimeError("Can't read an image from the input")
while original_frame is not None:
(h_orig, w_orig) = original_frame.shape[:2]
if original_frame.shape[2] > 1:
frame = cv.cvtColor(cv.cvtColor(original_frame, cv.COLOR_BGR2GRAY), cv.COLOR_GRAY2RGB)
else:
frame = cv.cvtColor(original_frame, cv.COLOR_GRAY2RGB)
img_rgb = frame.astype(np.float32) / 255
img_lab = cv.cvtColor(img_rgb, cv.COLOR_RGB2Lab)
img_l_rs = cv.resize(img_lab.copy(), (w_in, h_in))[:, :, 0]
inputs[input_tensor_name] = np.expand_dims(img_l_rs, axis=[0, 1])
res = next(iter(infer_request.infer(inputs).values()))
update_res = np.squeeze(res)
out = update_res.transpose((1, 2, 0))
out = cv.resize(out, (w_orig, h_orig))
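        # Recombine the original-resolution L channel with the predicted ab channels, then convert back to BGR.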
img_lab_out = np.concatenate((img_lab[:, :, 0][:, :, np.newaxis], out), axis=2)
img_bgr_out = np.clip(cv.cvtColor(img_lab_out, cv.COLOR_Lab2BGR), 0, 1)
original_image = cv.resize(original_frame, imshow_size)
grayscale_image = cv.resize(frame, imshow_size)
colorize_image = (cv.resize(img_bgr_out, imshow_size) * 255).astype(np.uint8)
lab_image = cv.resize(img_lab_out, imshow_size).astype(np.uint8)
original_image = cv.putText(original_image, 'Original', (25, 50),
cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv.LINE_AA)
grayscale_image = cv.putText(grayscale_image, 'Grayscale', (25, 50),
cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv.LINE_AA)
colorize_image = cv.putText(colorize_image, 'Colorize', (25, 50),
cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv.LINE_AA)
lab_image = cv.putText(lab_image, 'LAB interpretation', (25, 50),
cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv.LINE_AA)
ir_image = [cv.hconcat([original_image, grayscale_image]),
cv.hconcat([lab_image, colorize_image])]
final_image = cv.vconcat(ir_image)
metrics.update(start_time, final_image)
frames_processed += 1
if video_writer.isOpened() and (args.output_limit <= 0 or frames_processed <= args.output_limit):
video_writer.write(final_image)
presenter.drawGraphs(final_image)
if not args.no_show:
cv.imshow('Colorization Demo', final_image)
key = cv.waitKey(1)
if key in {ord("q"), ord("Q"), 27}:
break
presenter.handleKey(key)
start_time = perf_counter()
original_frame = cap.read()
metrics.log_total()
for rep in presenter.reportMeans():
log.info(rep)
if __name__ == "__main__":
args = build_arg().parse_args()
sys.exit(main(args) or 0)
|
[
"cv2.vconcat",
"model_api.performance_metrics.PerformanceMetrics",
"images_capture.open_images_capture",
"cv2.imshow",
"logging.info",
"argparse.ArgumentParser",
"pathlib.Path",
"time.perf_counter",
"cv2.VideoWriter",
"numpy.concatenate",
"cv2.VideoWriter_fourcc",
"cv2.waitKey",
"numpy.squeeze",
"cv2.putText",
"cv2.cvtColor",
"cv2.resize",
"cv2.hconcat",
"logging.basicConfig",
"openvino.runtime.Core",
"numpy.zeros",
"numpy.expand_dims",
"monitors.Presenter",
"openvino.runtime.get_version"
] |
[((1125, 1220), 'logging.basicConfig', 'log.basicConfig', ([], {'format': '"""[ %(levelname)s ] %(message)s"""', 'level': 'log.DEBUG', 'stream': 'sys.stdout'}), "(format='[ %(levelname)s ] %(message)s', level=log.DEBUG,\n stream=sys.stdout)\n", (1140, 1220), True, 'import logging as log\n'), ((1249, 1279), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'add_help': '(False)'}), '(add_help=False)\n', (1263, 1279), False, 'from argparse import ArgumentParser, SUPPRESS\n'), ((2931, 2973), 'images_capture.open_images_capture', 'open_images_capture', (['args.input', 'args.loop'], {}), '(args.input, args.loop)\n', (2950, 2973), False, 'from images_capture import open_images_capture\n'), ((2979, 3016), 'logging.info', 'log.info', (['"""OpenVINO Inference Engine"""'], {}), "('OpenVINO Inference Engine')\n", (2987, 3016), True, 'import logging as log\n'), ((3078, 3084), 'openvino.runtime.Core', 'Core', ([], {}), '()\n', (3082, 3084), False, 'from openvino.runtime import Core, get_version\n'), ((3936, 4033), 'monitors.Presenter', 'monitors.Presenter', (['args.utilization_monitors', '(imshow_size[1] * 2 - graph_size[1])', 'graph_size'], {}), '(args.utilization_monitors, imshow_size[1] * 2 -\n graph_size[1], graph_size)\n', (3954, 4033), False, 'import monitors\n'), ((4044, 4064), 'model_api.performance_metrics.PerformanceMetrics', 'PerformanceMetrics', ([], {}), '()\n', (4062, 4064), False, 'from model_api.performance_metrics import PerformanceMetrics\n'), ((4085, 4101), 'cv2.VideoWriter', 'cv.VideoWriter', ([], {}), '()\n', (4099, 4101), True, 'import cv2 as cv\n'), ((4363, 4377), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (4375, 4377), False, 'from time import perf_counter\n'), ((3462, 3483), 'numpy.zeros', 'np.zeros', (['input.shape'], {}), '(input.shape)\n', (3470, 3483), True, 'import numpy as np\n'), ((4886, 4924), 'cv2.cvtColor', 'cv.cvtColor', (['img_rgb', 'cv.COLOR_RGB2Lab'], {}), '(img_rgb, cv.COLOR_RGB2Lab)\n', (4897, 4924), True, 'import cv2 as cv\n'), ((5030, 5067), 'numpy.expand_dims', 'np.expand_dims', (['img_l_rs'], {'axis': '[0, 1]'}), '(img_l_rs, axis=[0, 1])\n', (5044, 5067), True, 'import numpy as np\n'), ((5154, 5169), 'numpy.squeeze', 'np.squeeze', (['res'], {}), '(res)\n', (5164, 5169), True, 'import numpy as np\n'), ((5231, 5263), 'cv2.resize', 'cv.resize', (['out', '(w_orig, h_orig)'], {}), '(out, (w_orig, h_orig))\n', (5240, 5263), True, 'import cv2 as cv\n'), ((5286, 5351), 'numpy.concatenate', 'np.concatenate', (['(img_lab[:, :, 0][:, :, np.newaxis], out)'], {'axis': '(2)'}), '((img_lab[:, :, 0][:, :, np.newaxis], out), axis=2)\n', (5300, 5351), True, 'import numpy as np\n'), ((5458, 5496), 'cv2.resize', 'cv.resize', (['original_frame', 'imshow_size'], {}), '(original_frame, imshow_size)\n', (5467, 5496), True, 'import cv2 as cv\n'), ((5523, 5552), 'cv2.resize', 'cv.resize', (['frame', 'imshow_size'], {}), '(frame, imshow_size)\n', (5532, 5552), True, 'import cv2 as cv\n'), ((5738, 5846), 'cv2.putText', 'cv.putText', (['original_image', '"""Original"""', '(25, 50)', 'cv.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 0, 255)', '(2)', 'cv.LINE_AA'], {}), "(original_image, 'Original', (25, 50), cv.FONT_HERSHEY_SIMPLEX, 1,\n (0, 0, 255), 2, cv.LINE_AA)\n", (5748, 5846), True, 'import cv2 as cv\n'), ((5905, 6015), 'cv2.putText', 'cv.putText', (['grayscale_image', '"""Grayscale"""', '(25, 50)', 'cv.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 0, 255)', '(2)', 'cv.LINE_AA'], {}), "(grayscale_image, 'Grayscale', (25, 50), cv.FONT_HERSHEY_SIMPLEX,\n 1, (0, 0, 255), 2, cv.LINE_AA)\n", 
(5915, 6015), True, 'import cv2 as cv\n'), ((6074, 6182), 'cv2.putText', 'cv.putText', (['colorize_image', '"""Colorize"""', '(25, 50)', 'cv.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 0, 255)', '(2)', 'cv.LINE_AA'], {}), "(colorize_image, 'Colorize', (25, 50), cv.FONT_HERSHEY_SIMPLEX, 1,\n (0, 0, 255), 2, cv.LINE_AA)\n", (6084, 6182), True, 'import cv2 as cv\n'), ((6235, 6349), 'cv2.putText', 'cv.putText', (['lab_image', '"""LAB interpretation"""', '(25, 50)', 'cv.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 0, 255)', '(2)', 'cv.LINE_AA'], {}), "(lab_image, 'LAB interpretation', (25, 50), cv.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv.LINE_AA)\n", (6245, 6349), True, 'import cv2 as cv\n'), ((6527, 6547), 'cv2.vconcat', 'cv.vconcat', (['ir_image'], {}), '(ir_image)\n', (6537, 6547), True, 'import cv2 as cv\n'), ((7066, 7080), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (7078, 7080), False, 'from time import perf_counter\n'), ((7190, 7203), 'logging.info', 'log.info', (['rep'], {}), '(rep)\n', (7198, 7203), True, 'import logging as log\n'), ((3051, 3064), 'openvino.runtime.get_version', 'get_version', ([], {}), '()\n', (3062, 3064), False, 'from openvino.runtime import Core, get_version\n'), ((4771, 4817), 'cv2.cvtColor', 'cv.cvtColor', (['original_frame', 'cv.COLOR_GRAY2RGB'], {}), '(original_frame, cv.COLOR_GRAY2RGB)\n', (4782, 4817), True, 'import cv2 as cv\n'), ((5382, 5424), 'cv2.cvtColor', 'cv.cvtColor', (['img_lab_out', 'cv.COLOR_Lab2BGR'], {}), '(img_lab_out, cv.COLOR_Lab2BGR)\n', (5393, 5424), True, 'import cv2 as cv\n'), ((6397, 6442), 'cv2.hconcat', 'cv.hconcat', (['[original_image, grayscale_image]'], {}), '([original_image, grayscale_image])\n', (6407, 6442), True, 'import cv2 as cv\n'), ((6464, 6503), 'cv2.hconcat', 'cv.hconcat', (['[lab_image, colorize_image]'], {}), '([lab_image, colorize_image])\n', (6474, 6503), True, 'import cv2 as cv\n'), ((6862, 6905), 'cv2.imshow', 'cv.imshow', (['"""Colorization Demo"""', 'final_image'], {}), "('Colorization Demo', final_image)\n", (6871, 6905), True, 'import cv2 as cv\n'), ((6924, 6937), 'cv2.waitKey', 'cv.waitKey', (['(1)'], {}), '(1)\n', (6934, 6937), True, 'import cv2 as cv\n'), ((4160, 4190), 'cv2.VideoWriter_fourcc', 'cv.VideoWriter_fourcc', (["*'MJPG'"], {}), "(*'MJPG')\n", (4181, 4190), True, 'import cv2 as cv\n'), ((4670, 4716), 'cv2.cvtColor', 'cv.cvtColor', (['original_frame', 'cv.COLOR_BGR2GRAY'], {}), '(original_frame, cv.COLOR_BGR2GRAY)\n', (4681, 4716), True, 'import cv2 as cv\n'), ((5659, 5694), 'cv2.resize', 'cv.resize', (['img_lab_out', 'imshow_size'], {}), '(img_lab_out, imshow_size)\n', (5668, 5694), True, 'import cv2 as cv\n'), ((5579, 5614), 'cv2.resize', 'cv.resize', (['img_bgr_out', 'imshow_size'], {}), '(img_bgr_out, imshow_size)\n', (5588, 5614), True, 'import cv2 as cv\n'), ((848, 862), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (852, 862), False, 'from pathlib import Path\n'), ((924, 938), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (928, 938), False, 'from pathlib import Path\n')]
|
import math
import imageio
import cv2 as cv
import numpy as np
import transformer
def fix_rotation(img):
img_copy = img.copy()
img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
rows, cols = img.shape
img = cv.adaptiveThreshold(img, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY_INV, 15, 9)
kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))
img = cv.morphologyEx(img, cv.MORPH_OPEN, kernel)
img = cv.medianBlur(img, 3)
contours, hierarchy = cv.findContours(img, cv.RETR_LIST, cv.CHAIN_APPROX_NONE)
roi = max(contours, key=cv.contourArea)
x, y, w, h = cv.boundingRect(roi)
corners = [[x, y], [x + w, y], [x, y + h], [x + w, y + h]]
src = np.float32(corners)
# src = np.reshape(src, (len(src), 1, 2))
# perimeter = cv.arcLength(src, True)
# corners = cv.approxPolyDP(src, perimeter // 10, True)
# corners = np.vstack(corners)
dst = np.float32([[0, 0], [cols, 0], [0, rows], [cols, rows]])
matrix = cv.getPerspectiveTransform(src, dst)
rotated_img = cv.warpPerspective(img_copy, matrix, (cols, rows))
cv.imshow('', rotated_img)
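# Expected radial distances of the hole circles (same millimetre scale as R in the main loop);
# each detected hole centre is scored against the nearest of these radii.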
D1 = 105
D2 = 175
D3 = 275
if __name__ == "__main__":
cap = cv.VideoCapture('samples/delta.mp4')
if not cap.isOpened():
raise IOError("Video was not opened!")
mse = 0
count = 0
reader = imageio.get_reader('samples/delta.mp4')
fps = reader.get_meta_data()['fps']
writer = imageio.get_writer('samples/result.mp4', fps=fps)
while True:
res, frame = cap.read()
if not res:
break
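        # Per-frame accumulators: sum of squared radial errors and number of holes scored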
mean_error = 0
holes_count = 0
img = frame.copy()
cv.imshow('dfa', img)
frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
frame_copy = frame.copy()
# frame = cv.adaptiveThreshold(frame, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY_INV, 15, 9)
# kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))
# frame = cv.morphologyEx(frame, cv.MORPH_OPEN, kernel)
# frame = cv.medianBlur(frame, 3)
# contours, hierarchy = cv.findContours(frame, cv.RETR_LIST, cv.CHAIN_APPROX_NONE)
# roi = max(contours, key=cv.contourArea)
# x, y, w, h = cv.boundingRect(roi)
x, y, w, h = 115, 0, 445, 360
img = img[y: y+h, x: x+w]
img = transformer.rotate_along_axis(img, theta=40)
frame_copy = frame_copy[y: y+h, x: x+w]
frame_copy = transformer.rotate_along_axis(frame_copy, theta=40)
# cv.imshow('', frame_copy)
# cv.rectangle(frame_copy, (x, y), (x + w, y + h), (0, 255, 0), 2)
# cv.drawContours(frame_copy, roi, -1, (0, 0, 255), 2)
# res, mask = cv.threshold(frame_copy, 0, 255, cv.THRESH_BINARY)
# frame_copy = cv.bitwise_and(frame_copy, frame_copy, mask=mask)
# corners = cv.goodFeaturesToTrack(frame_copy, 1000, 0.0001, 1)
# corners = list(sorted(corners, key=lambda x: x[0][1]))
# print(corners[-1], corners[-2])
# print()
# corners = np.array([[38, 293], [407, 293]])
# for item in corners:
# # x, y = map(int, item.ravel())
# x, y = item
# cv.circle(img, (x, y), 5, (0, 0, 255), -1)
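        # Perspective-correct the tilted view: keep the top corners fixed and map two
        # hand-picked reference points near the bottom of the frame onto the bottom edge of the crop.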
src = np.float32([[0, 0], [w, 0], [38, 293], [407, 293]])
dst = np.float32([[0, 0], [w, 0], [30, h], [w - 30, h]])
matrix = cv.getPerspectiveTransform(src, dst)
img = cv.warpPerspective(img, matrix, (w, h))
cv.imshow('', img)
img_copy = img.copy()
img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
img = cv.adaptiveThreshold(img, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY_INV, 15, 9)
kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))
img = cv.morphologyEx(img, cv.MORPH_OPEN, kernel)
img = cv.medianBlur(img, 3)
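        # Plate centre and radius in pixels (origin, r) and physical radius R in mm;
        # used below to convert detected hole centres from pixels to millimetres.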
origin = (w // 2 + 4, h // 2 + 2)
o1, o2 = origin
r = w // 2 + 1
ORIGIN = (0, 0)
R = 300 # mm
contours, hierarchy = cv.findContours(img, cv.RETR_LIST, cv.CHAIN_APPROX_NONE)
contours = list(filter(lambda x: 50 < cv.contourArea(x) < 175, contours))
factor = 0.1
smooth_contours = []
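        # Keep only contours whose 4-point polygonal approximation has a plausible hole area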
for i in range(len(contours)):
epsilon = factor * cv.arcLength(contours[i], True)
approx = cv.approxPolyDP(contours[i], epsilon, True)
x, y, width, height = cv.boundingRect(approx)
area = width*height
if len(approx) == 4 and 75 < area < 200:
smooth_contours.append(contours[i])
center, radius = cv.minEnclosingCircle(approx)
radius = int(radius)
center = tuple(map(int, center))
x, y = center
X = ((x - o1) * R) / r
Y = ((y - o2) * R) / r
X, Y = round(X, 2), round(Y, 2)
cv.circle(img_copy, center, radius, (0, 255, 0), 2)
cv.putText(img_copy, str((X, Y)), center, cv.FONT_HERSHEY_SIMPLEX, 0.3, (255, 0, 255, 255), 1, cv.LINE_AA)
e1, e2, e3 = map(lambda d: abs(math.hypot(X, Y) - d), [D1, D2, D3])
error = min(e1, e2, e3)
if error < 10:
mean_error += error ** 2
holes_count += 1
cv.circle(img_copy, origin, 4, (0, 0, 255), -1)
# cv.line(img_copy, origin, (origin[0], origin[1]), (255, 0, 255), 2)
        # Guard against frames with no detected holes to avoid division by zero
        if holes_count:
            mean_error /= holes_count
            mse += mean_error
            count += 1
cv.imshow("Final", img_copy)
writer.append_data(img_copy)
# cv.imshow("Chg", img)
if cv.waitKey(30) == 27:
break
print("E:", mse / count, "N:", count)
writer.close()
cap.release()
cv.destroyAllWindows()
|
[
"cv2.imshow",
"cv2.warpPerspective",
"cv2.destroyAllWindows",
"cv2.approxPolyDP",
"math.hypot",
"imageio.get_writer",
"imageio.get_reader",
"cv2.arcLength",
"cv2.medianBlur",
"cv2.contourArea",
"cv2.waitKey",
"cv2.getPerspectiveTransform",
"cv2.minEnclosingCircle",
"cv2.morphologyEx",
"cv2.circle",
"cv2.cvtColor",
"transformer.rotate_along_axis",
"cv2.adaptiveThreshold",
"cv2.VideoCapture",
"cv2.findContours",
"cv2.getStructuringElement",
"numpy.float32",
"cv2.boundingRect"
] |
[((142, 177), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_BGR2GRAY'], {}), '(img, cv.COLOR_BGR2GRAY)\n', (153, 177), True, 'import cv2 as cv\n'), ((215, 306), 'cv2.adaptiveThreshold', 'cv.adaptiveThreshold', (['img', '(255)', 'cv.ADAPTIVE_THRESH_MEAN_C', 'cv.THRESH_BINARY_INV', '(15)', '(9)'], {}), '(img, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.\n THRESH_BINARY_INV, 15, 9)\n', (235, 306), True, 'import cv2 as cv\n'), ((316, 366), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_ELLIPSE', '(3, 3)'], {}), '(cv.MORPH_ELLIPSE, (3, 3))\n', (340, 366), True, 'import cv2 as cv\n'), ((377, 420), 'cv2.morphologyEx', 'cv.morphologyEx', (['img', 'cv.MORPH_OPEN', 'kernel'], {}), '(img, cv.MORPH_OPEN, kernel)\n', (392, 420), True, 'import cv2 as cv\n'), ((432, 453), 'cv2.medianBlur', 'cv.medianBlur', (['img', '(3)'], {}), '(img, 3)\n', (445, 453), True, 'import cv2 as cv\n'), ((481, 537), 'cv2.findContours', 'cv.findContours', (['img', 'cv.RETR_LIST', 'cv.CHAIN_APPROX_NONE'], {}), '(img, cv.RETR_LIST, cv.CHAIN_APPROX_NONE)\n', (496, 537), True, 'import cv2 as cv\n'), ((604, 624), 'cv2.boundingRect', 'cv.boundingRect', (['roi'], {}), '(roi)\n', (619, 624), True, 'import cv2 as cv\n'), ((698, 717), 'numpy.float32', 'np.float32', (['corners'], {}), '(corners)\n', (708, 717), True, 'import numpy as np\n'), ((912, 968), 'numpy.float32', 'np.float32', (['[[0, 0], [cols, 0], [0, rows], [cols, rows]]'], {}), '([[0, 0], [cols, 0], [0, rows], [cols, rows]])\n', (922, 968), True, 'import numpy as np\n'), ((983, 1019), 'cv2.getPerspectiveTransform', 'cv.getPerspectiveTransform', (['src', 'dst'], {}), '(src, dst)\n', (1009, 1019), True, 'import cv2 as cv\n'), ((1038, 1088), 'cv2.warpPerspective', 'cv.warpPerspective', (['img_copy', 'matrix', '(cols, rows)'], {}), '(img_copy, matrix, (cols, rows))\n', (1056, 1088), True, 'import cv2 as cv\n'), ((1093, 1119), 'cv2.imshow', 'cv.imshow', (['""""""', 'rotated_img'], {}), "('', rotated_img)\n", (1102, 1119), True, 'import cv2 as cv\n'), ((1187, 1223), 'cv2.VideoCapture', 'cv.VideoCapture', (['"""samples/delta.mp4"""'], {}), "('samples/delta.mp4')\n", (1202, 1223), True, 'import cv2 as cv\n'), ((1339, 1378), 'imageio.get_reader', 'imageio.get_reader', (['"""samples/delta.mp4"""'], {}), "('samples/delta.mp4')\n", (1357, 1378), False, 'import imageio\n'), ((1432, 1481), 'imageio.get_writer', 'imageio.get_writer', (['"""samples/result.mp4"""'], {'fps': 'fps'}), "('samples/result.mp4', fps=fps)\n", (1450, 1481), False, 'import imageio\n'), ((5765, 5787), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (5785, 5787), True, 'import cv2 as cv\n'), ((1653, 1674), 'cv2.imshow', 'cv.imshow', (['"""dfa"""', 'img'], {}), "('dfa', img)\n", (1662, 1674), True, 'import cv2 as cv\n'), ((1691, 1728), 'cv2.cvtColor', 'cv.cvtColor', (['frame', 'cv.COLOR_BGR2GRAY'], {}), '(frame, cv.COLOR_BGR2GRAY)\n', (1702, 1728), True, 'import cv2 as cv\n'), ((2320, 2364), 'transformer.rotate_along_axis', 'transformer.rotate_along_axis', (['img'], {'theta': '(40)'}), '(img, theta=40)\n', (2349, 2364), False, 'import transformer\n'), ((2434, 2485), 'transformer.rotate_along_axis', 'transformer.rotate_along_axis', (['frame_copy'], {'theta': '(40)'}), '(frame_copy, theta=40)\n', (2463, 2485), False, 'import transformer\n'), ((3234, 3285), 'numpy.float32', 'np.float32', (['[[0, 0], [w, 0], [38, 293], [407, 293]]'], {}), '([[0, 0], [w, 0], [38, 293], [407, 293]])\n', (3244, 3285), True, 'import numpy as np\n'), ((3300, 3350), 'numpy.float32', 'np.float32', (['[[0, 0], [w, 
0], [30, h], [w - 30, h]]'], {}), '([[0, 0], [w, 0], [30, h], [w - 30, h]])\n', (3310, 3350), True, 'import numpy as np\n'), ((3368, 3404), 'cv2.getPerspectiveTransform', 'cv.getPerspectiveTransform', (['src', 'dst'], {}), '(src, dst)\n', (3394, 3404), True, 'import cv2 as cv\n'), ((3419, 3458), 'cv2.warpPerspective', 'cv.warpPerspective', (['img', 'matrix', '(w, h)'], {}), '(img, matrix, (w, h))\n', (3437, 3458), True, 'import cv2 as cv\n'), ((3467, 3485), 'cv2.imshow', 'cv.imshow', (['""""""', 'img'], {}), "('', img)\n", (3476, 3485), True, 'import cv2 as cv\n'), ((3531, 3566), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_BGR2GRAY'], {}), '(img, cv.COLOR_BGR2GRAY)\n', (3542, 3566), True, 'import cv2 as cv\n'), ((3581, 3672), 'cv2.adaptiveThreshold', 'cv.adaptiveThreshold', (['img', '(255)', 'cv.ADAPTIVE_THRESH_MEAN_C', 'cv.THRESH_BINARY_INV', '(15)', '(9)'], {}), '(img, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.\n THRESH_BINARY_INV, 15, 9)\n', (3601, 3672), True, 'import cv2 as cv\n'), ((3686, 3736), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_ELLIPSE', '(3, 3)'], {}), '(cv.MORPH_ELLIPSE, (3, 3))\n', (3710, 3736), True, 'import cv2 as cv\n'), ((3751, 3794), 'cv2.morphologyEx', 'cv.morphologyEx', (['img', 'cv.MORPH_OPEN', 'kernel'], {}), '(img, cv.MORPH_OPEN, kernel)\n', (3766, 3794), True, 'import cv2 as cv\n'), ((3809, 3830), 'cv2.medianBlur', 'cv.medianBlur', (['img', '(3)'], {}), '(img, 3)\n', (3822, 3830), True, 'import cv2 as cv\n'), ((3998, 4054), 'cv2.findContours', 'cv.findContours', (['img', 'cv.RETR_LIST', 'cv.CHAIN_APPROX_NONE'], {}), '(img, cv.RETR_LIST, cv.CHAIN_APPROX_NONE)\n', (4013, 4054), True, 'import cv2 as cv\n'), ((5318, 5365), 'cv2.circle', 'cv.circle', (['img_copy', 'origin', '(4)', '(0, 0, 255)', '(-1)'], {}), '(img_copy, origin, 4, (0, 0, 255), -1)\n', (5327, 5365), True, 'import cv2 as cv\n'), ((5532, 5560), 'cv2.imshow', 'cv.imshow', (['"""Final"""', 'img_copy'], {}), "('Final', img_copy)\n", (5541, 5560), True, 'import cv2 as cv\n'), ((4319, 4362), 'cv2.approxPolyDP', 'cv.approxPolyDP', (['contours[i]', 'epsilon', '(True)'], {}), '(contours[i], epsilon, True)\n', (4334, 4362), True, 'import cv2 as cv\n'), ((4410, 4433), 'cv2.boundingRect', 'cv.boundingRect', (['approx'], {}), '(approx)\n', (4425, 4433), True, 'import cv2 as cv\n'), ((5641, 5655), 'cv2.waitKey', 'cv.waitKey', (['(30)'], {}), '(30)\n', (5651, 5655), True, 'import cv2 as cv\n'), ((4266, 4297), 'cv2.arcLength', 'cv.arcLength', (['contours[i]', '(True)'], {}), '(contours[i], True)\n', (4278, 4297), True, 'import cv2 as cv\n'), ((4605, 4634), 'cv2.minEnclosingCircle', 'cv.minEnclosingCircle', (['approx'], {}), '(approx)\n', (4626, 4634), True, 'import cv2 as cv\n'), ((4896, 4947), 'cv2.circle', 'cv.circle', (['img_copy', 'center', 'radius', '(0, 255, 0)', '(2)'], {}), '(img_copy, center, radius, (0, 255, 0), 2)\n', (4905, 4947), True, 'import cv2 as cv\n'), ((4101, 4118), 'cv2.contourArea', 'cv.contourArea', (['x'], {}), '(x)\n', (4115, 4118), True, 'import cv2 as cv\n'), ((5119, 5135), 'math.hypot', 'math.hypot', (['X', 'Y'], {}), '(X, Y)\n', (5129, 5135), False, 'import math\n')]
|
# This is the code to train the xgboost model with cross-validation for each unique room in the dataset.
# Models are dumped into ./models and results are dumped into two csv files in the current working directory.
import argparse
import json
import math
import os
import pickle
import warnings
from typing import Tuple
import numpy as np
import pandas as pd
import xgboost as xgb
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
from imblearn.over_sampling import SMOTE
from numpy.random import RandomState
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.utils import compute_sample_weight
from tqdm import tqdm
from xgboost import DMatrix, cv
# Set up an argument parser to decide the metric function
parser = argparse.ArgumentParser()
parser.add_argument("--metric", choices=['R2', 'RMSE'], type=str, required=False, default='R2',
help="The evaluation metric you want to use to train the XGBoost model")
parser.add_argument("--log", choices=[0, 1, 100], type=int, required=False, default=0,
help="Whether to print out the training progress")
parser.add_argument("--SMOTE", choices=[0, 1], type=int, required=False, default=1, help="Whether use the SMOTE or not")
parser.add_argument("--SMOGN", choices=[0, 1], type=int, required=False, default=0, help="Whether use the SMOGN or not")
parser.add_argument("--SampleWeight", choices=[0, 1], type=int, required=False, default=0,
help="Whether use the sample weight")
args = parser.parse_args()
# Ignore all the warnings and set pandas to display every column and row every time we print a dataframe
warnings.filterwarnings('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
assert args.SMOTE != args.SMOGN, "Can't use SMOTE and SMOGN at the same time!"
# Load the data with a positive AC electricity consumption value, and drop the time data as we don't need them
data = pd.read_csv("summer_data_compiled.csv", index_col=0)
data = data[data.AC > 0].drop(['Time', 'Date', 'Hour'], axis=1).reset_index(drop=True)
# Create some directory to store the models and future analysis figures.
# log_folder_name = "Test_{}_{}".format(args.metric, datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
log_folder_name = "Test_R2_HYPEROPT"
log_folder_name = log_folder_name + "_SMOTE" if args.SMOTE else log_folder_name
log_folder_name = log_folder_name + "_SMOGN" if args.SMOGN else log_folder_name
log_folder_name = log_folder_name + "_SW" if args.SampleWeight else log_folder_name
previous_parameter_folder = "Test_R2_HYPEROPT"
assert log_folder_name != previous_parameter_folder, "Previous folder name exists"
if not os.path.exists('./{}/'.format(log_folder_name)):
os.mkdir('./{}'.format(log_folder_name))
os.mkdir('./{}/models/'.format(log_folder_name))
os.mkdir('./{}/trntst_models/'.format(log_folder_name))
# Define our evaluation functions
def RMSE(predt: np.ndarray, dtrain: DMatrix) -> Tuple[str, float]:
truth_value = dtrain.get_label()
    root_squared_error = math.sqrt(mean_squared_error(truth_value, predt))
    return "RMSE", root_squared_error
def R2(predt: np.ndarray, dtrain: DMatrix) -> Tuple[str, float]:
truth_value = dtrain.get_label()
r2_value = r2_score(truth_value, predt)
return "R2", r2_value
def fobjective(space):
param_dict_tunning = {'max_depth': int(space['max_depth']),
'learning_rate': space['learning_rate'],
'colsample_bytree': space['colsample_bytree'],
'min_child_weight': int(space['min_child_weight']),
'reg_alpha': int(space['reg_alpha']),
'reg_lambda': space['reg_lambda'],
'subsample': space['subsample'],
'min_split_loss': space['min_split_loss'],
'objective': 'reg:squarederror'}
xgb_cv_result = xgb.cv(dtrain=data_matrix, params=param_dict_tunning, nfold=5,
early_stopping_rounds=30, as_pandas=True, num_boost_round=200,
seed=seed, metrics='rmse', maximize=False, shuffle=True)
return {"loss": (xgb_cv_result["test-rmse-mean"]).tail(1).iloc[0], "status": STATUS_OK}
eval_dict = {'RMSE': RMSE, 'R2': R2}
print("Start Training The Models")
# Create two dataframes to store the result during the training and after the training.
error_csv = pd.DataFrame(
columns=['room', 'train-{}-mean'.format(args.metric), 'train-{}-std'.format(args.metric), 'train-rmse-mean',
'train-rmse-std', 'test-{}-mean'.format(args.metric), 'test-{}-std'.format(args.metric), 'test-rmse-mean',
'test-rmse-std'])
prediction_csv = pd.DataFrame(columns=['room', 'observation', 'prediction'])
room_list = data['Location'].unique()
# ranging through all the rooms and do the training and cross-validation for each room.
for room in tqdm(room_list):
seed = 2030 + room
    # Five rooms (309, 312, 826, 917, 1001) have low-quality data, so we skip them manually
if room == 309 or room == 312 or room == 826 or room == 917 or room == 1001:
continue
# We extract the data of particular room and run the SMOTE algorithm on it.
room_data = data[data.Location == room].drop(['Location'], axis=1).reset_index(drop=True)
if args.SMOTE:
        # Label the AC data with a 0.75 threshold: AC above 0.75 is marked 1, otherwise 0. Split into X and y
room_data['SMOTE_split'] = (room_data['AC'] > 0.75).astype('int')
X = room_data.drop(['SMOTE_split'], axis=1)
y = room_data['SMOTE_split']
# Run the SMOTE algorithm and retrieve the result.
model_smote = SMOTE(random_state=621, k_neighbors=3)
room_data_smote, smote_split = model_smote.fit_resample(X, y)
# concat the result from SMOTE and split the result into X and y for training.
room_data_smote = pd.concat([room_data_smote, smote_split], axis=1)
y = room_data_smote['AC']
X = room_data_smote.drop(['AC', 'SMOTE_split'], axis=1)
elif args.SMOGN:
if len(room_data) < 500:
room_data['SMOTE_split'] = (room_data['AC'] > 0.75).astype('int')
X = room_data.drop(['SMOTE_split'], axis=1)
y = room_data['SMOTE_split']
# Run the SMOTE algorithm and retrieve the result.
model_smote = SMOTE(random_state=621, k_neighbors=3)
room_data_smote, smote_split = model_smote.fit_resample(X, y)
# concat the result from SMOTE and split the result into X and y for training.
room_data_smote = pd.concat([room_data_smote, smote_split], axis=1)
y = room_data_smote['AC']
X = room_data_smote.drop(['AC', 'SMOTE_split'], axis=1)
else:
room_data = pd.read_csv('./SMOGN_processed/{}.csv'.format(room), index_col=0)
y = room_data['AC']
X = room_data.drop(['AC'], axis=1)
else:
y = pd.DataFrame(room_data['AC'].fillna(method='pad'))
X = room_data.drop(['AC'], axis=1).fillna(method='pad')
if args.SampleWeight:
class_sample = pd.cut(y, bins=15)
weight = compute_sample_weight(class_weight="balanced", y=class_sample)
X = X.to_numpy()
# Build another full data matrix for the built-in cross validation function to work.
data_matrix = DMatrix(data=X, label=y, weight=weight) if args.SampleWeight else DMatrix(data=X, label=y)
# Cross_validation with hyper-parameter tuning
space = {'max_depth': hp.quniform("max_depth", 3, 10, 1),
'learning_rate': hp.uniform("learning_rate", 0.1, 3),
'colsample_bytree': hp.uniform("colsample_bytree", 0.5, 1),
'min_child_weight': hp.quniform("min_child_weight", 1, 20, 1),
'reg_alpha': hp.quniform("reg_alpha", 0, 100, 1),
'reg_lambda': hp.uniform("reg_lambda", 0, 2),
'subsample': hp.uniform("subsample", 0.5, 1),
'min_split_loss': hp.uniform("min_split_loss", 0, 9)}
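    # Reuse hyper-parameters tuned in a previous run if they exist; otherwise run the hyperopt search.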
if os.path.exists('./{}/models/{}_parameter.npy'.format(previous_parameter_folder, room)):
best_param_dict = np.load('./{}/models/{}_parameter.npy'.format(previous_parameter_folder, room),
allow_pickle=True).item()
np.save('./{}/models/{}_parameter.npy'.format(log_folder_name, room), best_param_dict)
else:
trials = Trials()
best_hyperparams = fmin(fn=fobjective, space=space, algo=tpe.suggest, max_evals=400, trials=trials,
rstate=RandomState(seed))
# setup our training parameters and a model variable as model checkpoint
best_param_dict = {'objective': 'reg:squarederror', 'max_depth': int(best_hyperparams['max_depth']),
'reg_alpha': best_hyperparams['reg_alpha'], 'reg_lambda': best_hyperparams['reg_lambda'],
'min_child_weight': best_hyperparams['min_child_weight'],
'colsample_bytree': best_hyperparams['colsample_bytree'],
'learning_rate': best_hyperparams['learning_rate'],
'subsample': best_hyperparams['subsample'],
'min_split_loss': best_hyperparams['min_split_loss']}
np.save('./{}/models/{}_parameter.npy'.format(log_folder_name, room), best_param_dict)
    # Use the built-in cv function to do the cross-validation with five folds; this returns the per-round results.
xgb_cv_result = cv(dtrain=data_matrix, params=best_param_dict, nfold=5,
early_stopping_rounds=30, as_pandas=True, num_boost_round=200,
seed=seed, shuffle=True, feval=eval_dict[args.metric], maximize=True)
xgb_cv_result['room'] = room
error_csv.loc[len(error_csv)] = xgb_cv_result.loc[len(xgb_cv_result) - 1]
    # Use one train/test split for plotting, and save both ground-truth and prediction values into the dataframe
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=seed)
d_train = DMatrix(X_train, label=y_train)
d_test = DMatrix(X_test, label=y_test)
watchlist = [(d_test, 'eval'), (d_train, 'train')]
xgb_model_train_test = xgb.train(params=best_param_dict, dtrain=d_train, num_boost_round=200, evals=watchlist,
verbose_eval=args.log, xgb_model=None, feval=eval_dict[args.metric], maximize=True)
prediction = np.array(xgb_model_train_test.predict(d_test)).tolist()
real = np.array(y_test).tolist()
prediction_csv.loc[len(prediction_csv)] = {'room': room, 'observation': json.dumps(real),
'prediction': json.dumps(prediction)}
# Dump the error dataframes into csv files.
error_csv.to_csv('./{}/error.csv'.format(log_folder_name), index=False)
prediction_csv.to_csv('./{}/prediction.csv'.format(log_folder_name), index=False)
    # Develop a model using the whole original dataset, and save the model
xgb_model_full = xgb.train(params=best_param_dict, dtrain=data_matrix, num_boost_round=200, evals=watchlist,
verbose_eval=args.log, xgb_model=None, feval=eval_dict[args.metric], maximize=True)
# Save all the models we trained for future use
pickle.dump(xgb_model_train_test, open('./{}/trntst_models/{}.pickle.bat'.format(log_folder_name, room), 'wb'))
pickle.dump(xgb_model_full, open('./{}/models/{}.pickle.bat'.format(log_folder_name, room), 'wb'))
print("Training finished!")
|
[
"pandas.read_csv",
"numpy.array",
"xgboost.DMatrix",
"sklearn.metrics.r2_score",
"numpy.random.RandomState",
"argparse.ArgumentParser",
"xgboost.train",
"json.dumps",
"pandas.set_option",
"xgboost.cv",
"pandas.DataFrame",
"sklearn.model_selection.train_test_split",
"hyperopt.hp.quniform",
"sklearn.metrics.mean_squared_error",
"hyperopt.hp.uniform",
"sklearn.utils.compute_sample_weight",
"warnings.filterwarnings",
"hyperopt.Trials",
"imblearn.over_sampling.SMOTE",
"tqdm.tqdm",
"pandas.cut",
"pandas.concat"
] |
[((792, 817), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (815, 817), False, 'import argparse\n'), ((1688, 1721), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (1711, 1721), False, 'import warnings\n'), ((1722, 1764), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', 'None'], {}), "('display.max_columns', None)\n", (1735, 1764), True, 'import pandas as pd\n'), ((1765, 1804), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', 'None'], {}), "('display.max_rows', None)\n", (1778, 1804), True, 'import pandas as pd\n'), ((2004, 2056), 'pandas.read_csv', 'pd.read_csv', (['"""summer_data_compiled.csv"""'], {'index_col': '(0)'}), "('summer_data_compiled.csv', index_col=0)\n", (2015, 2056), True, 'import pandas as pd\n'), ((4810, 4869), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['room', 'observation', 'prediction']"}), "(columns=['room', 'observation', 'prediction'])\n", (4822, 4869), True, 'import pandas as pd\n'), ((5010, 5025), 'tqdm.tqdm', 'tqdm', (['room_list'], {}), '(room_list)\n', (5014, 5025), False, 'from tqdm import tqdm\n'), ((3315, 3343), 'sklearn.metrics.r2_score', 'r2_score', (['truth_value', 'predt'], {}), '(truth_value, predt)\n', (3323, 3343), False, 'from sklearn.metrics import r2_score, mean_squared_error\n'), ((4010, 4201), 'xgboost.cv', 'xgb.cv', ([], {'dtrain': 'data_matrix', 'params': 'param_dict_tunning', 'nfold': '(5)', 'early_stopping_rounds': '(30)', 'as_pandas': '(True)', 'num_boost_round': '(200)', 'seed': 'seed', 'metrics': '"""rmse"""', 'maximize': '(False)', 'shuffle': '(True)'}), "(dtrain=data_matrix, params=param_dict_tunning, nfold=5,\n early_stopping_rounds=30, as_pandas=True, num_boost_round=200, seed=\n seed, metrics='rmse', maximize=False, shuffle=True)\n", (4016, 4201), True, 'import xgboost as xgb\n'), ((9618, 9815), 'xgboost.cv', 'cv', ([], {'dtrain': 'data_matrix', 'params': 'best_param_dict', 'nfold': '(5)', 'early_stopping_rounds': '(30)', 'as_pandas': '(True)', 'num_boost_round': '(200)', 'seed': 'seed', 'shuffle': '(True)', 'feval': 'eval_dict[args.metric]', 'maximize': '(True)'}), '(dtrain=data_matrix, params=best_param_dict, nfold=5,\n early_stopping_rounds=30, as_pandas=True, num_boost_round=200, seed=\n seed, shuffle=True, feval=eval_dict[args.metric], maximize=True)\n', (9620, 9815), False, 'from xgboost import DMatrix, cv\n'), ((10116, 10172), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': 'seed'}), '(X, y, test_size=0.2, random_state=seed)\n', (10132, 10172), False, 'from sklearn.model_selection import train_test_split\n'), ((10187, 10218), 'xgboost.DMatrix', 'DMatrix', (['X_train'], {'label': 'y_train'}), '(X_train, label=y_train)\n', (10194, 10218), False, 'from xgboost import DMatrix, cv\n'), ((10232, 10261), 'xgboost.DMatrix', 'DMatrix', (['X_test'], {'label': 'y_test'}), '(X_test, label=y_test)\n', (10239, 10261), False, 'from xgboost import DMatrix, cv\n'), ((10346, 10526), 'xgboost.train', 'xgb.train', ([], {'params': 'best_param_dict', 'dtrain': 'd_train', 'num_boost_round': '(200)', 'evals': 'watchlist', 'verbose_eval': 'args.log', 'xgb_model': 'None', 'feval': 'eval_dict[args.metric]', 'maximize': '(True)'}), '(params=best_param_dict, dtrain=d_train, num_boost_round=200,\n evals=watchlist, verbose_eval=args.log, xgb_model=None, feval=eval_dict\n [args.metric], maximize=True)\n', (10355, 10526), True, 'import xgboost as xgb\n'), ((11154, 11338), 
'xgboost.train', 'xgb.train', ([], {'params': 'best_param_dict', 'dtrain': 'data_matrix', 'num_boost_round': '(200)', 'evals': 'watchlist', 'verbose_eval': 'args.log', 'xgb_model': 'None', 'feval': 'eval_dict[args.metric]', 'maximize': '(True)'}), '(params=best_param_dict, dtrain=data_matrix, num_boost_round=200,\n evals=watchlist, verbose_eval=args.log, xgb_model=None, feval=eval_dict\n [args.metric], maximize=True)\n', (11163, 11338), True, 'import xgboost as xgb\n'), ((3119, 3157), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['truth_value', 'predt'], {}), '(truth_value, predt)\n', (3137, 3157), False, 'from sklearn.metrics import r2_score, mean_squared_error\n'), ((5767, 5805), 'imblearn.over_sampling.SMOTE', 'SMOTE', ([], {'random_state': '(621)', 'k_neighbors': '(3)'}), '(random_state=621, k_neighbors=3)\n', (5772, 5805), False, 'from imblearn.over_sampling import SMOTE\n'), ((5990, 6039), 'pandas.concat', 'pd.concat', (['[room_data_smote, smote_split]'], {'axis': '(1)'}), '([room_data_smote, smote_split], axis=1)\n', (5999, 6039), True, 'import pandas as pd\n'), ((7218, 7236), 'pandas.cut', 'pd.cut', (['y'], {'bins': '(15)'}), '(y, bins=15)\n', (7224, 7236), True, 'import pandas as pd\n'), ((7254, 7316), 'sklearn.utils.compute_sample_weight', 'compute_sample_weight', ([], {'class_weight': '"""balanced"""', 'y': 'class_sample'}), "(class_weight='balanced', y=class_sample)\n", (7275, 7316), False, 'from sklearn.utils import compute_sample_weight\n'), ((7447, 7486), 'xgboost.DMatrix', 'DMatrix', ([], {'data': 'X', 'label': 'y', 'weight': 'weight'}), '(data=X, label=y, weight=weight)\n', (7454, 7486), False, 'from xgboost import DMatrix, cv\n'), ((7513, 7537), 'xgboost.DMatrix', 'DMatrix', ([], {'data': 'X', 'label': 'y'}), '(data=X, label=y)\n', (7520, 7537), False, 'from xgboost import DMatrix, cv\n'), ((7616, 7650), 'hyperopt.hp.quniform', 'hp.quniform', (['"""max_depth"""', '(3)', '(10)', '(1)'], {}), "('max_depth', 3, 10, 1)\n", (7627, 7650), False, 'from hyperopt import fmin, tpe, hp, STATUS_OK, Trials\n'), ((7682, 7717), 'hyperopt.hp.uniform', 'hp.uniform', (['"""learning_rate"""', '(0.1)', '(3)'], {}), "('learning_rate', 0.1, 3)\n", (7692, 7717), False, 'from hyperopt import fmin, tpe, hp, STATUS_OK, Trials\n'), ((7752, 7790), 'hyperopt.hp.uniform', 'hp.uniform', (['"""colsample_bytree"""', '(0.5)', '(1)'], {}), "('colsample_bytree', 0.5, 1)\n", (7762, 7790), False, 'from hyperopt import fmin, tpe, hp, STATUS_OK, Trials\n'), ((7825, 7866), 'hyperopt.hp.quniform', 'hp.quniform', (['"""min_child_weight"""', '(1)', '(20)', '(1)'], {}), "('min_child_weight', 1, 20, 1)\n", (7836, 7866), False, 'from hyperopt import fmin, tpe, hp, STATUS_OK, Trials\n'), ((7894, 7929), 'hyperopt.hp.quniform', 'hp.quniform', (['"""reg_alpha"""', '(0)', '(100)', '(1)'], {}), "('reg_alpha', 0, 100, 1)\n", (7905, 7929), False, 'from hyperopt import fmin, tpe, hp, STATUS_OK, Trials\n'), ((7958, 7988), 'hyperopt.hp.uniform', 'hp.uniform', (['"""reg_lambda"""', '(0)', '(2)'], {}), "('reg_lambda', 0, 2)\n", (7968, 7988), False, 'from hyperopt import fmin, tpe, hp, STATUS_OK, Trials\n'), ((8016, 8047), 'hyperopt.hp.uniform', 'hp.uniform', (['"""subsample"""', '(0.5)', '(1)'], {}), "('subsample', 0.5, 1)\n", (8026, 8047), False, 'from hyperopt import fmin, tpe, hp, STATUS_OK, Trials\n'), ((8080, 8114), 'hyperopt.hp.uniform', 'hp.uniform', (['"""min_split_loss"""', '(0)', '(9)'], {}), "('min_split_loss', 0, 9)\n", (8090, 8114), False, 'from hyperopt import fmin, tpe, hp, STATUS_OK, Trials\n'), 
((8500, 8508), 'hyperopt.Trials', 'Trials', ([], {}), '()\n', (8506, 8508), False, 'from hyperopt import fmin, tpe, hp, STATUS_OK, Trials\n'), ((10743, 10759), 'json.dumps', 'json.dumps', (['real'], {}), '(real)\n', (10753, 10759), False, 'import json\n'), ((10822, 10844), 'json.dumps', 'json.dumps', (['prediction'], {}), '(prediction)\n', (10832, 10844), False, 'import json\n'), ((10640, 10656), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (10648, 10656), True, 'import numpy as np\n'), ((6457, 6495), 'imblearn.over_sampling.SMOTE', 'SMOTE', ([], {'random_state': '(621)', 'k_neighbors': '(3)'}), '(random_state=621, k_neighbors=3)\n', (6462, 6495), False, 'from imblearn.over_sampling import SMOTE\n'), ((6692, 6741), 'pandas.concat', 'pd.concat', (['[room_data_smote, smote_split]'], {'axis': '(1)'}), '([room_data_smote, smote_split], axis=1)\n', (6701, 6741), True, 'import pandas as pd\n'), ((8656, 8673), 'numpy.random.RandomState', 'RandomState', (['seed'], {}), '(seed)\n', (8667, 8673), False, 'from numpy.random import RandomState\n')]
|
import numpy as np
from pyad.nn import NeuralNet
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
np.random.seed(0)
data = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(
data.data, data.target, train_size=0.8, random_state=0
)
nn = NeuralNet(loss_fn='cross_entropy')
nn.add_layer(X_train.shape[1], 100, activation='linear')
nn.add_layer(100, 100, activation='logistic')
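# Output layer needs one unit per class (labels run from 0 to max, hence 1 + max)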
nn.add_layer(100, 1 + np.max(y_train), activation='linear')
nn.train(
X_train, y_train, X_test, y_test,
batch_size=1, learning_rate=1e-3, epochs=20
)
print('Predictions:', nn.predict(X_test))
|
[
"sklearn.model_selection.train_test_split",
"sklearn.datasets.load_breast_cancer",
"numpy.max",
"pyad.nn.NeuralNet",
"numpy.random.seed"
] |
[((151, 168), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (165, 168), True, 'import numpy as np\n'), ((176, 196), 'sklearn.datasets.load_breast_cancer', 'load_breast_cancer', ([], {}), '()\n', (194, 196), False, 'from sklearn.datasets import load_breast_cancer\n'), ((233, 305), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data.data', 'data.target'], {'train_size': '(0.8)', 'random_state': '(0)'}), '(data.data, data.target, train_size=0.8, random_state=0)\n', (249, 305), False, 'from sklearn.model_selection import train_test_split\n'), ((318, 352), 'pyad.nn.NeuralNet', 'NeuralNet', ([], {'loss_fn': '"""cross_entropy"""'}), "(loss_fn='cross_entropy')\n", (327, 352), False, 'from pyad.nn import NeuralNet\n'), ((478, 493), 'numpy.max', 'np.max', (['y_train'], {}), '(y_train)\n', (484, 493), True, 'import numpy as np\n')]
|
# required modules
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib import cm
from matplotlib.colors import Normalize
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.animation import FuncAnimation
# two-dimensional version
def plot_mse_loss_surface_2d(fig, ax, x, y, v=0.0, l2=0.0, w1_range=(-2, 2), w2_range=(2, -2)):
# create weight space
n_w = 100
w1 = np.linspace(w1_range[0], w1_range[1], num=n_w) # weight 1
w2 = np.linspace(w2_range[0], w2_range[1], num=n_w) # weight 2
ws_x, ws_y = np.meshgrid(w1, w2)
cost_ws = np.zeros((n_w, n_w)) # initialize cost matrix
# Fill the cost matrix for each combination of weights
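    # Loss: squared error of the scalar linear network y_pred = w1*w2*x, plus optional
    # L2 weight decay (l2) and an optional penalty (v) on the product w1*w2.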
for i in range(n_w):
for j in range(n_w):
y_pred = ws_x[i, j] * ws_y[i, j] * x
y_true = y
cost_ws[i, j] = 0.5 * (y_true - y_pred)**2 + \
0.5 * l2 * (ws_x[i, j]**2 + ws_y[i, j]**2) + 0.5 * v * (ws_x[i, j]*ws_y[i, j])**2
# compute gradients
dy, dx = np.gradient(cost_ws)
# plot vector space
skip = (slice(None, None, 5), slice(None, None, 5))
# fig, ax = plt.subplots(figsize=(8, 8))
#ax.contour(ws_x, ws_y, cost_ws, 200)
im = ax.imshow(cost_ws, extent=[ws_x.min(), ws_x.max(
), ws_y.min(), ws_y.max()], cmap=cm.coolwarm)
ax.quiver(ws_x[skip], ws_y[skip], -dx[skip], dy[skip], cost_ws[skip])
cbar = fig.colorbar(im, ax=ax)
# ax.set(aspect=1, title='Loss Surface')
cbar.ax.set_ylabel('$Loss$', fontsize=15)
ax.set_xlabel('$w_1$', fontsize=15)
ax.set_ylabel('$w_2$', fontsize=15)
# ax.grid()
# add saddle point
ax.scatter(0, 0, label='Saddle point', c='red', marker='*')
# ax.scatter(0,0, c='black', marker=r'$\rightarrow$', label='Negative gradient')
settings = (x, y, v, l2, w1_range, w2_range)
return ax, settings
# three-dimensional version
def plot_mse_loss_surface_3d(ax, x, y, v=0.0, l2=0.0, w1_range=(-2, 2), w2_range=(2, -2), angle=30):
# create weight space
n_w = 100
w1 = np.linspace(w1_range[0], w1_range[1], num=n_w) # weight 1
w2 = np.linspace(w2_range[0], w2_range[1], num=n_w) # weight 2
ws_x, ws_y = np.meshgrid(w1, w2)
cost_ws = np.zeros((n_w, n_w)) # initialize cost matrix
# Fill the cost matrix for each combination of weights
for i in range(n_w):
for j in range(n_w):
y_pred = ws_x[i, j] * ws_y[i, j] * x
y_true = y
cost_ws[i, j] = 0.5 * (y_true - y_pred)**2 + \
0.5 * l2 * (ws_x[i, j]**2 + ws_y[i, j]**2) + 0.5 * v * (ws_x[i, j]*ws_y[i, j])**2
X = ws_x
Y = ws_y
Z = cost_ws
#fig, ax = plt.subplots(figsize=(8, 8))
#ax = fig.add_subplot(1,1,1, projection='3d')
# fourth dimention - colormap
# create colormap according to x-value (can use any 50x50 array)
color_dimension = Z # change to desired fourth dimension
minn, maxx = color_dimension.min(), color_dimension.max()
norm = Normalize(minn, maxx)
m = plt.cm.ScalarMappable(norm=norm, cmap='jet')
m.set_array([])
fcolors = m.to_rgba(color_dimension)
# plot
# fig = plt.figure(figsize=(8, 8))
# ax = fig.gca(projection='3d')
ax.set_zlim(0, 50)
ax.plot([0], [0], 'ro', c='red', marker='*', label='Saddle point')
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, facecolors=fcolors,
vmin=minn, vmax=maxx, shade=False, alpha=1)
ax.set_xlabel('$w_1$', fontsize=20)
ax.set_ylabel('$w_2$', fontsize=20)
ax.set_zlabel('$Loss$', fontsize=20)
settings = (x, y, v, l2, w1_range, w2_range)
ax.view_init(angle, 10)
return ax, settings
def plot_global_minimum_manifold_2d(ax, settings):
    # retrieve cached settings
x, y, v, l2, w1_range, w2_range = settings
n_w = 1000
man_w1 = np.linspace(w1_range[0], w1_range[1], num=n_w)
man_w2 = np.linspace(w2_range[0], w2_range[1], num=n_w)
man_ws_x, man_ws_y = np.meshgrid(man_w1, man_w2)
loss = 0.5 * y *(1 - man_ws_x * man_ws_y * x)**2 + \
0.5 * l2 * (man_ws_x**2 + man_ws_y**2) + 0.5 * v * (man_ws_x * man_ws_y)**2
min_loss = np.min(loss)
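    # Keep weight pairs whose loss lies within a small tolerance of the global minimum;
    # these points trace the manifold of global minima in weight space.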
manifold_indices = loss < min_loss + 1e-5
manifold_x = man_ws_x[manifold_indices]
manifold_y = man_ws_y[manifold_indices]
# plot manifold of global minima
ax.scatter(manifold_y, manifold_x, s=0.1, c='cyan',
label='Manifold of global minima')
def plot_global_minimum_manifold_3d(ax, settings):
    # retrieve cached settings
x, y, v, l2, w1_range, w2_range = settings
n_w = 1000
man_w1 = np.linspace(w1_range[0], w1_range[1], num=n_w)
man_w2 = np.linspace(w2_range[0], w2_range[1], num=n_w)
man_ws_x, man_ws_y = np.meshgrid(man_w1, man_w2)
loss = 0.5 * y * (1 - man_ws_x * man_ws_y * x)**2 + \
0.5 * l2 * (man_ws_x**2 + man_ws_y**2) + 0.5 * v * (man_ws_x*man_ws_y)**2
min_loss = np.min(loss)
manifold_indices = loss < min_loss + 1e-5
manifold_x = man_ws_x[manifold_indices]
manifold_y = man_ws_y[manifold_indices]
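    # Insert NaNs where the manifold jumps between branches so no spurious line is drawn across the gap.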
pos = np.where(np.abs(np.diff(manifold_y)) >= 0.1)[0]+1
x = np.insert(manifold_x, pos, np.nan)
y = np.insert(manifold_y, pos, np.nan)
# plot manifold of global minima
#ax.scatter(manifold_y, manifold_x, 0, s=0.5, c='cyan',
# label='Manifold of global minima')
ax.plot(y, x, c='cyan',
label='Manifold of global minima')
def plot_optimiser_trajectory_2d(ax, weights, **kwargs):
w1_vals = weights['w1']
w2_vals = weights['w2']
ax.plot(w1_vals, w2_vals, **kwargs)
def plot_optimiser_trajectory_3d(ax, settings, weights, **kwargs):
x, y, v, l2, _, _ = settings
w1_vals = np.array(weights['w1'])
w2_vals = np.array(weights['w2'])
loss = 0.5 * y * (1 - w1_vals * w2_vals * x)**2 + \
0.5 * l2 * (w1_vals**2 + w2_vals**2) + 0.5 * v * (w1_vals*w2_vals)**2
ax.plot(w1_vals, w2_vals, loss, **kwargs)
def plot_optimiser_trajectory(x, y, weights, dim='2d', angle=45, manifold=False, **kwargs):
    # The surface plotters expect a figure/axes to draw on, so create them here.
    if dim == '3d':
        fig = plt.figure(figsize=(8, 8))
        ax = fig.add_subplot(1, 1, 1, projection='3d')
        ax, settings = plot_mse_loss_surface_3d(ax, x, y, angle=angle)
        if manifold:
            plot_global_minimum_manifold_3d(ax, settings)
        plot_optimiser_trajectory_3d(ax, settings, weights, **kwargs)
    else:
        fig, ax = plt.subplots(figsize=(8, 8))
        ax, settings = plot_mse_loss_surface_2d(fig, ax, x, y)
        if manifold:
            plot_global_minimum_manifold_2d(ax, settings)
        plot_optimiser_trajectory_2d(ax, weights, **kwargs)
def plot_weight_norm(ax, weights, **kwargs):
w1_vals = np.array(weights['w1'])
w2_vals = np.array(weights['w2'])
epochs = np.arange(0, len(w1_vals), 1)
norms = np.sqrt(w1_vals**2 + w2_vals**2)
ax.set_xlabel('Epoch', fontsize=12)
ax.set_ylabel('Weight norm', fontsize=12)
ax.plot(epochs, norms, linewidth=2.0, **kwargs)
def animate_optimiser_trajectory_2d(i, ax, weights, **kwargs):
w1_vals = weights['w1']
w2_vals = weights['w2']
ax.plot(w1_vals[:i], w2_vals[:i], **kwargs)
return ax
def animate_optimiser_trajectory_3d(i, ax, settings, weights, **kwargs):
x, y, v, l2, _, _ = settings
w1_vals = np.array(weights['w1'])
w2_vals = np.array(weights['w2'])
loss = 0.5 * y * (1 - w1_vals * w2_vals * x)**2 + \
0.5 * l2 * (w1_vals**2 + w2_vals**2) + 0.5 * v * (w1_vals*w2_vals)**2
ax.plot(w1_vals[:i], w2_vals[:i], loss[:i], **kwargs)
return ax
def plot_optimiser_loss(x, y, v, l2, weights, **kwargs):
loss = []
epoch = np.arange(0, len(weights['w1']))
for w1, w2 in zip(weights['w1'], weights['w2']):
loss_val = 0.5 * y * (1 - w1 * w2 * x)**2 + 0.5 * l2 * (w1**2 + w2**2) + 0.5 * v * (w1 * w2)**2
loss.append(loss_val)
plt.plot(epoch, loss, **kwargs)
plt.xlabel('Epoch')
plt.ylabel('Loss')
def plot_interpolated_trajectory_2d(ax, w1_a, w2_a, w1_b, w2_b, start=0, end=1, **kwargs):
alpha = np.arange(start, end, 0.001)
w1_path = []
w2_path = []
for a in alpha:
ww1 = (1 - a) * w1_a + a * w1_b
ww2 = (1 - a) * w2_a + a * w2_b
w1_path.append(ww1)
w2_path.append(ww2)
ax.plot(w1_path, w2_path, **kwargs)
def plot_interpolated_trajectory_3d(ax, settings, w1_a, w2_a, w1_b, w2_b, start=0, end=1, **kwargs):
    x, y, v, l2, _, _ = settings
alpha = np.arange(start, end, 0.001)
w1_path = []
w2_path = []
loss = []
for a in alpha:
ww1 = (1 - a) * w1_a + a * w1_b
ww2 = (1 - a) * w2_a + a * w2_b
loss_val = 0.5 * (y - ww1 * ww2 * x)**2 + 0.5 * l2 * (ww1**2 + ww2**2)
loss.append(loss_val)
w1_path.append(ww1)
w2_path.append(ww2)
ax.plot(w1_path, w2_path, loss, **kwargs)
def plot_interpolated_loss(x, y, w1_a, w2_a, w1_b, w2_b, start=0, end=1, l2=0.0, **kwargs):
alpha = np.arange(start, end, 0.001)
interpolated_loss = []
for a in alpha:
ww1 = (1 - a) * w1_a + a * w1_b
ww2 = (1 - a) * w2_a + a * w2_b
loss_val = 0.5 * (y - ww1 * ww2 * x)**2 + 0.5 * l2 * (ww1**2 + ww2**2)
interpolated_loss.append(loss_val)
plt.plot(alpha, interpolated_loss, **kwargs)
plt.xlabel(r'$\alpha$')
plt.ylabel('Loss')
def plot_learning_dynamics(ax, weights, **kwargs):
epoch = np.arange(0, len(weights['w1']))
scores = []
for w1, w2 in zip(weights['w1'], weights['w2']):
scores.append(w1 * w2)
ax.plot(epoch, scores, **kwargs)
def animate_learning_dynamics(i, ax, weights, y, **kwargs):
n_epoch = len(weights['w1'])
epoch = np.arange(1, n_epoch)
scores = []
for w1, w2 in zip(weights['w1'], weights['w2']):
scores.append(w1 * w2)
ax.set_xlim((1, n_epoch))
ax.set_ylim((0, y))
ax.set_xlabel('Epoch', fontsize=15)
    ax.set_ylabel(r'$w_2 \cdot w_1$', fontsize=15)
ax.plot(epoch[:i], scores[:i], **kwargs)
return ax
def animate_learning(weights, save=False, name='anim'):
gs = gridspec.GridSpec(2, 4)
gs.update(wspace=0.5)
fig = plt.figure(figsize=(12, 8))
ax1 = fig.add_subplot(gs[0, :2], )
ax2 = fig.add_subplot(gs[0, 2:], projection='3d')
ax3 = fig.add_subplot(gs[1, 1:3])
# ax1 = fig.add_subplot(2, 2, 1)
# ax2 = fig.add_subplot(2, 2, 2, projection = '3d')
# ax3 = fig.add_subplot(2, 2, 3)
# ax4 = fig.add_subplot(2, 2, 4)
    ax1, settings = plot_mse_loss_surface_2d(fig, ax1, 1, 1)
ax2, settings = plot_mse_loss_surface_3d(ax2, 1, 1, angle=60)
plot_global_minimum_manifold_2d(ax1, settings)
plot_global_minimum_manifold_3d(ax2, settings)
def update(i):
        # Signatures: the 2d animator takes (i, ax, weights, **kwargs); the 3d one takes (i, ax, settings, weights, **kwargs)
        animate_optimiser_trajectory_2d(
            i, ax1, weights, label='Gradient descent')
        animate_optimiser_trajectory_3d(
            i, ax2, settings, weights, label='Gradient descent')
animate_learning_dynamics(i, ax3, weights, 1)
# animate_weight_norm(i, ax4, scalarNet.history)
# suncAnimation will call the 'update' function for each frame
anim = FuncAnimation(fig, update, frames=100, interval=5, save_count=50)
# HTML(anim.to_html5_video())
if save:
anim.save(name + '.gif', dpi=80, writer='imagemagick')
plt.show()
|
[
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"numpy.array",
"numpy.gradient",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.diff",
"numpy.linspace",
"matplotlib.gridspec.GridSpec",
"numpy.min",
"numpy.meshgrid",
"matplotlib.pyplot.cm.ScalarMappable",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.show",
"numpy.insert",
"matplotlib.animation.FuncAnimation",
"numpy.zeros",
"matplotlib.pyplot.figure"
] |
[((434, 480), 'numpy.linspace', 'np.linspace', (['w1_range[0]', 'w1_range[1]'], {'num': 'n_w'}), '(w1_range[0], w1_range[1], num=n_w)\n', (445, 480), True, 'import numpy as np\n'), ((502, 548), 'numpy.linspace', 'np.linspace', (['w2_range[0]', 'w2_range[1]'], {'num': 'n_w'}), '(w2_range[0], w2_range[1], num=n_w)\n', (513, 548), True, 'import numpy as np\n'), ((578, 597), 'numpy.meshgrid', 'np.meshgrid', (['w1', 'w2'], {}), '(w1, w2)\n', (589, 597), True, 'import numpy as np\n'), ((612, 632), 'numpy.zeros', 'np.zeros', (['(n_w, n_w)'], {}), '((n_w, n_w))\n', (620, 632), True, 'import numpy as np\n'), ((1040, 1060), 'numpy.gradient', 'np.gradient', (['cost_ws'], {}), '(cost_ws)\n', (1051, 1060), True, 'import numpy as np\n'), ((2062, 2108), 'numpy.linspace', 'np.linspace', (['w1_range[0]', 'w1_range[1]'], {'num': 'n_w'}), '(w1_range[0], w1_range[1], num=n_w)\n', (2073, 2108), True, 'import numpy as np\n'), ((2130, 2176), 'numpy.linspace', 'np.linspace', (['w2_range[0]', 'w2_range[1]'], {'num': 'n_w'}), '(w2_range[0], w2_range[1], num=n_w)\n', (2141, 2176), True, 'import numpy as np\n'), ((2206, 2225), 'numpy.meshgrid', 'np.meshgrid', (['w1', 'w2'], {}), '(w1, w2)\n', (2217, 2225), True, 'import numpy as np\n'), ((2240, 2260), 'numpy.zeros', 'np.zeros', (['(n_w, n_w)'], {}), '((n_w, n_w))\n', (2248, 2260), True, 'import numpy as np\n'), ((3007, 3028), 'matplotlib.colors.Normalize', 'Normalize', (['minn', 'maxx'], {}), '(minn, maxx)\n', (3016, 3028), False, 'from matplotlib.colors import Normalize\n'), ((3037, 3081), 'matplotlib.pyplot.cm.ScalarMappable', 'plt.cm.ScalarMappable', ([], {'norm': 'norm', 'cmap': '"""jet"""'}), "(norm=norm, cmap='jet')\n", (3058, 3081), True, 'import matplotlib.pyplot as plt\n'), ((3843, 3889), 'numpy.linspace', 'np.linspace', (['w1_range[0]', 'w1_range[1]'], {'num': 'n_w'}), '(w1_range[0], w1_range[1], num=n_w)\n', (3854, 3889), True, 'import numpy as np\n'), ((3903, 3949), 'numpy.linspace', 'np.linspace', (['w2_range[0]', 'w2_range[1]'], {'num': 'n_w'}), '(w2_range[0], w2_range[1], num=n_w)\n', (3914, 3949), True, 'import numpy as np\n'), ((3975, 4002), 'numpy.meshgrid', 'np.meshgrid', (['man_w1', 'man_w2'], {}), '(man_w1, man_w2)\n', (3986, 4002), True, 'import numpy as np\n'), ((4159, 4171), 'numpy.min', 'np.min', (['loss'], {}), '(loss)\n', (4165, 4171), True, 'import numpy as np\n'), ((4609, 4655), 'numpy.linspace', 'np.linspace', (['w1_range[0]', 'w1_range[1]'], {'num': 'n_w'}), '(w1_range[0], w1_range[1], num=n_w)\n', (4620, 4655), True, 'import numpy as np\n'), ((4669, 4715), 'numpy.linspace', 'np.linspace', (['w2_range[0]', 'w2_range[1]'], {'num': 'n_w'}), '(w2_range[0], w2_range[1], num=n_w)\n', (4680, 4715), True, 'import numpy as np\n'), ((4741, 4768), 'numpy.meshgrid', 'np.meshgrid', (['man_w1', 'man_w2'], {}), '(man_w1, man_w2)\n', (4752, 4768), True, 'import numpy as np\n'), ((4924, 4936), 'numpy.min', 'np.min', (['loss'], {}), '(loss)\n', (4930, 4936), True, 'import numpy as np\n'), ((5140, 5174), 'numpy.insert', 'np.insert', (['manifold_x', 'pos', 'np.nan'], {}), '(manifold_x, pos, np.nan)\n', (5149, 5174), True, 'import numpy as np\n'), ((5183, 5217), 'numpy.insert', 'np.insert', (['manifold_y', 'pos', 'np.nan'], {}), '(manifold_y, pos, np.nan)\n', (5192, 5217), True, 'import numpy as np\n'), ((5717, 5740), 'numpy.array', 'np.array', (["weights['w1']"], {}), "(weights['w1'])\n", (5725, 5740), True, 'import numpy as np\n'), ((5755, 5778), 'numpy.array', 'np.array', (["weights['w2']"], {}), "(weights['w2'])\n", (5763, 5778), True, 'import numpy as 
np\n'), ((6553, 6576), 'numpy.array', 'np.array', (["weights['w1']"], {}), "(weights['w1'])\n", (6561, 6576), True, 'import numpy as np\n'), ((6591, 6614), 'numpy.array', 'np.array', (["weights['w2']"], {}), "(weights['w2'])\n", (6599, 6614), True, 'import numpy as np\n'), ((6670, 6706), 'numpy.sqrt', 'np.sqrt', (['(w1_vals ** 2 + w2_vals ** 2)'], {}), '(w1_vals ** 2 + w2_vals ** 2)\n', (6677, 6706), True, 'import numpy as np\n'), ((7146, 7169), 'numpy.array', 'np.array', (["weights['w1']"], {}), "(weights['w1'])\n", (7154, 7169), True, 'import numpy as np\n'), ((7184, 7207), 'numpy.array', 'np.array', (["weights['w2']"], {}), "(weights['w2'])\n", (7192, 7207), True, 'import numpy as np\n'), ((7723, 7754), 'matplotlib.pyplot.plot', 'plt.plot', (['epoch', 'loss'], {}), '(epoch, loss, **kwargs)\n', (7731, 7754), True, 'import matplotlib.pyplot as plt\n'), ((7759, 7778), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (7769, 7778), True, 'import matplotlib.pyplot as plt\n'), ((7783, 7801), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (7793, 7801), True, 'import matplotlib.pyplot as plt\n'), ((7907, 7935), 'numpy.arange', 'np.arange', (['start', 'end', '(0.001)'], {}), '(start, end, 0.001)\n', (7916, 7935), True, 'import numpy as np\n'), ((8307, 8335), 'numpy.arange', 'np.arange', (['start', 'end', '(0.001)'], {}), '(start, end, 0.001)\n', (8316, 8335), True, 'import numpy as np\n'), ((8793, 8821), 'numpy.arange', 'np.arange', (['start', 'end', '(0.001)'], {}), '(start, end, 0.001)\n', (8802, 8821), True, 'import numpy as np\n'), ((9075, 9119), 'matplotlib.pyplot.plot', 'plt.plot', (['alpha', 'interpolated_loss'], {}), '(alpha, interpolated_loss, **kwargs)\n', (9083, 9119), True, 'import matplotlib.pyplot as plt\n'), ((9124, 9147), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\alpha$"""'], {}), "('$\\\\alpha$')\n", (9134, 9147), True, 'import matplotlib.pyplot as plt\n'), ((9152, 9170), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (9162, 9170), True, 'import matplotlib.pyplot as plt\n'), ((9513, 9534), 'numpy.arange', 'np.arange', (['(1)', 'n_epoch'], {}), '(1, n_epoch)\n', (9522, 9534), True, 'import numpy as np\n'), ((9905, 9928), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(2)', '(4)'], {}), '(2, 4)\n', (9922, 9928), True, 'import matplotlib.gridspec as gridspec\n'), ((9966, 9993), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (9976, 9993), True, 'import matplotlib.pyplot as plt\n'), ((10929, 10994), 'matplotlib.animation.FuncAnimation', 'FuncAnimation', (['fig', 'update'], {'frames': '(100)', 'interval': '(5)', 'save_count': '(50)'}), '(fig, update, frames=100, interval=5, save_count=50)\n', (10942, 10994), False, 'from matplotlib.animation import FuncAnimation\n'), ((11110, 11120), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11118, 11120), True, 'import matplotlib.pyplot as plt\n'), ((5098, 5117), 'numpy.diff', 'np.diff', (['manifold_y'], {}), '(manifold_y)\n', (5105, 5117), True, 'import numpy as np\n')]
|
# Copyright 2019 Google LLC
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test layers from qconvolutional.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from numpy.testing import assert_allclose
import pytest
import tempfile
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.backend import clear_session
from qkeras import binary
from qkeras import ternary
from qkeras import QActivation
from qkeras import QDense
from qkeras import QConv1D
from qkeras import QConv2D
from qkeras import QSeparableConv2D
from qkeras import quantized_bits
from qkeras import quantized_relu
from qkeras.utils import model_save_quantized_weights
from qkeras.utils import quantized_model_from_json
from qkeras.utils import load_qmodel
from qkeras import print_qstats
from qkeras import extract_model_operations
# TODO(hzhuang):
# qoctave_conv test
# qbatchnorm test
def test_qnetwork():
x = x_in = Input((28, 28, 1), name='input')
x = QSeparableConv2D(
32, (2, 2),
strides=(2, 2),
depthwise_quantizer=binary(alpha=1.0),
pointwise_quantizer=quantized_bits(4, 0, 1, alpha=1.0),
depthwise_activation=quantized_bits(6, 2, 1, alpha=1.0),
bias_quantizer=quantized_bits(4, 0, 1),
name='conv2d_0_m')(
x)
x = QActivation('quantized_relu(6,2,1)', name='act0_m')(x)
x = QConv2D(
64, (3, 3),
strides=(2, 2),
kernel_quantizer=ternary(alpha=1.0),
bias_quantizer=quantized_bits(4, 0, 1),
name='conv2d_1_m',
activation=quantized_relu(6, 3, 1))(
x)
x = QConv2D(
64, (2, 2),
strides=(2, 2),
kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0),
bias_quantizer=quantized_bits(4, 0, 1),
name='conv2d_2_m')(
x)
x = QActivation('quantized_relu(6,4,1)', name='act2_m')(x)
x = Flatten(name='flatten')(x)
x = QDense(
10,
kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0),
bias_quantizer=quantized_bits(4, 0, 1),
name='dense')(
x)
x = Activation('softmax', name='softmax')(x)
model = Model(inputs=[x_in], outputs=[x])
# reload the model to ensure saving/loading works
json_string = model.to_json()
clear_session()
model = quantized_model_from_json(json_string)
# generate same output for weights
np.random.seed(42)
for layer in model.layers:
all_weights = []
for i, weights in enumerate(layer.get_weights()):
input_size = np.prod(layer.input.shape.as_list()[1:])
if input_size is None:
input_size = 576 * 10 # to avoid learning sizes
shape = weights.shape
assert input_size > 0, 'input size for {} {}'.format(layer.name, i)
# he normal initialization with a scale factor of 2.0
all_weights.append(
10.0 * np.random.normal(0.0, np.sqrt(2.0 / input_size), shape))
if all_weights:
layer.set_weights(all_weights)
# apply quantizer to weights
model_save_quantized_weights(model)
all_weights = []
for layer in model.layers:
for i, weights in enumerate(layer.get_weights()):
w = np.sum(weights)
all_weights.append(w)
all_weights = np.array(all_weights)
# test_qnetwork_weight_quantization
all_weights_signature = np.array(
[2., -6.75, -0.625, -2., -0.25, -56., 1.125, -1.625, -1.125])
assert all_weights.size == all_weights_signature.size
assert np.all(all_weights == all_weights_signature)
# test_qnetwork_forward:
expected_output = np.array(
[[0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,
0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00],
[0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,
0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00],
[0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,
0.e+00, 0.e+00, 0.e+00, 6.e-08, 1.e+00],
[0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,
0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00],
[ 0.e+00 ,0.e+00, 0.e+00, 0.e+00, 0.e+00,
0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00],
[0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,
0.e+00, 0.e+00, 0.e+00, 5.e-07, 1.e+00],
[0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,
        0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00],
[0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00,
        0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00],
[0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00,
0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00],
[0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,
1.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00]]).astype(np.float16)
inputs = 2 * np.random.rand(10, 28, 28, 1)
actual_output = model.predict(inputs).astype(np.float16)
assert_allclose(actual_output, expected_output, rtol=1e-4)
def test_qconv1d():
np.random.seed(33)
x = Input((4, 4,))
y = QConv1D(
2, 1,
kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0),
bias_quantizer=quantized_bits(4, 0, 1),
name='qconv1d')(
x)
model = Model(inputs=x, outputs=y)
# Extract model operations
model_ops = extract_model_operations(model)
# Assertion about the number of operations for this Conv1D layer
assert model_ops['qconv1d']['number_of_operations'] == 32
# Print qstats to make sure it works with Conv1D layer
print_qstats(model)
# reload the model to ensure saving/loading works
# json_string = model.to_json()
# clear_session()
# model = quantized_model_from_json(json_string)
for layer in model.layers:
all_weights = []
for i, weights in enumerate(layer.get_weights()):
input_size = np.prod(layer.input.shape.as_list()[1:])
if input_size is None:
input_size = 10 * 10
shape = weights.shape
assert input_size > 0, 'input size for {} {}'.format(layer.name, i)
all_weights.append(
10.0 * np.random.normal(0.0, np.sqrt(2.0 / input_size), shape))
if all_weights:
layer.set_weights(all_weights)
# Save the model as an h5 file using Keras's model.save()
fd, fname = tempfile.mkstemp('.h5')
model.save(fname)
del model # Delete the existing model
# Return a compiled model identical to the previous one
model = load_qmodel(fname)
# Clean the created h5 file after loading the model
os.close(fd)
os.remove(fname)
# apply quantizer to weights
model_save_quantized_weights(model)
inputs = np.random.rand(2, 4, 4)
p = model.predict(inputs).astype(np.float16)
y = np.array([[[-2.441, 3.816], [-3.807, -1.426], [-2.684, -1.317],
[-1.659, 0.9834]],
[[-4.99, 1.139], [-2.559, -1.216], [-2.285, 1.905],
[-2.652, -0.467]]]).astype(np.float16)
assert np.all(p == y)
if __name__ == '__main__':
pytest.main([__file__])
|
[
"qkeras.quantized_bits",
"numpy.sqrt",
"numpy.random.rand",
"qkeras.utils.quantized_model_from_json",
"qkeras.QActivation",
"numpy.array",
"tensorflow.keras.backend.clear_session",
"qkeras.extract_model_operations",
"os.remove",
"tensorflow.keras.layers.Input",
"qkeras.binary",
"numpy.testing.assert_allclose",
"pytest.main",
"numpy.random.seed",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Activation",
"qkeras.utils.model_save_quantized_weights",
"os.close",
"qkeras.utils.load_qmodel",
"tensorflow.keras.layers.Flatten",
"tempfile.mkstemp",
"qkeras.print_qstats",
"qkeras.quantized_relu",
"numpy.sum",
"qkeras.ternary",
"numpy.all"
] |
[((1761, 1793), 'tensorflow.keras.layers.Input', 'Input', (['(28, 28, 1)'], {'name': '"""input"""'}), "((28, 28, 1), name='input')\n", (1766, 1793), False, 'from tensorflow.keras.layers import Input\n'), ((2913, 2946), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[x_in]', 'outputs': '[x]'}), '(inputs=[x_in], outputs=[x])\n', (2918, 2946), False, 'from tensorflow.keras.models import Model\n'), ((3034, 3049), 'tensorflow.keras.backend.clear_session', 'clear_session', ([], {}), '()\n', (3047, 3049), False, 'from tensorflow.keras.backend import clear_session\n'), ((3060, 3098), 'qkeras.utils.quantized_model_from_json', 'quantized_model_from_json', (['json_string'], {}), '(json_string)\n', (3085, 3098), False, 'from qkeras.utils import quantized_model_from_json\n'), ((3139, 3157), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (3153, 3157), True, 'import numpy as np\n'), ((3761, 3796), 'qkeras.utils.model_save_quantized_weights', 'model_save_quantized_weights', (['model'], {}), '(model)\n', (3789, 3796), False, 'from qkeras.utils import model_save_quantized_weights\n'), ((3972, 3993), 'numpy.array', 'np.array', (['all_weights'], {}), '(all_weights)\n', (3980, 3993), True, 'import numpy as np\n'), ((4059, 4132), 'numpy.array', 'np.array', (['[2.0, -6.75, -0.625, -2.0, -0.25, -56.0, 1.125, -1.625, -1.125]'], {}), '([2.0, -6.75, -0.625, -2.0, -0.25, -56.0, 1.125, -1.625, -1.125])\n', (4067, 4132), True, 'import numpy as np\n'), ((4203, 4247), 'numpy.all', 'np.all', (['(all_weights == all_weights_signature)'], {}), '(all_weights == all_weights_signature)\n', (4209, 4247), True, 'import numpy as np\n'), ((5385, 5445), 'numpy.testing.assert_allclose', 'assert_allclose', (['actual_output', 'expected_output'], {'rtol': '(0.0001)'}), '(actual_output, expected_output, rtol=0.0001)\n', (5400, 5445), False, 'from numpy.testing import assert_allclose\n'), ((5468, 5486), 'numpy.random.seed', 'np.random.seed', (['(33)'], {}), '(33)\n', (5482, 5486), True, 'import numpy as np\n'), ((5493, 5506), 'tensorflow.keras.layers.Input', 'Input', (['(4, 4)'], {}), '((4, 4))\n', (5498, 5506), False, 'from tensorflow.keras.layers import Input\n'), ((5686, 5712), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'x', 'outputs': 'y'}), '(inputs=x, outputs=y)\n', (5691, 5712), False, 'from tensorflow.keras.models import Model\n'), ((5757, 5788), 'qkeras.extract_model_operations', 'extract_model_operations', (['model'], {}), '(model)\n', (5781, 5788), False, 'from qkeras import extract_model_operations\n'), ((5977, 5996), 'qkeras.print_qstats', 'print_qstats', (['model'], {}), '(model)\n', (5989, 5996), False, 'from qkeras import print_qstats\n'), ((6711, 6734), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".h5"""'], {}), "('.h5')\n", (6727, 6734), False, 'import tempfile\n'), ((6865, 6883), 'qkeras.utils.load_qmodel', 'load_qmodel', (['fname'], {}), '(fname)\n', (6876, 6883), False, 'from qkeras.utils import load_qmodel\n'), ((6941, 6953), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (6949, 6953), False, 'import os\n'), ((6956, 6972), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (6965, 6972), False, 'import os\n'), ((7007, 7042), 'qkeras.utils.model_save_quantized_weights', 'model_save_quantized_weights', (['model'], {}), '(model)\n', (7035, 7042), False, 'from qkeras.utils import model_save_quantized_weights\n'), ((7055, 7078), 'numpy.random.rand', 'np.random.rand', (['(2)', '(4)', '(4)'], {}), '(2, 4, 4)\n', (7069, 7078), True, 'import numpy as np\n'), ((7365, 7379), 
'numpy.all', 'np.all', (['(p == y)'], {}), '(p == y)\n', (7371, 7379), True, 'import numpy as np\n'), ((7411, 7434), 'pytest.main', 'pytest.main', (['[__file__]'], {}), '([__file__])\n', (7422, 7434), False, 'import pytest\n'), ((2119, 2170), 'qkeras.QActivation', 'QActivation', (['"""quantized_relu(6,2,1)"""'], {'name': '"""act0_m"""'}), "('quantized_relu(6,2,1)', name='act0_m')\n", (2130, 2170), False, 'from qkeras import QActivation\n'), ((2604, 2655), 'qkeras.QActivation', 'QActivation', (['"""quantized_relu(6,4,1)"""'], {'name': '"""act2_m"""'}), "('quantized_relu(6,4,1)', name='act2_m')\n", (2615, 2655), False, 'from qkeras import QActivation\n'), ((2665, 2688), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {'name': '"""flatten"""'}), "(name='flatten')\n", (2672, 2688), False, 'from tensorflow.keras.layers import Flatten\n'), ((2861, 2898), 'tensorflow.keras.layers.Activation', 'Activation', (['"""softmax"""'], {'name': '"""softmax"""'}), "('softmax', name='softmax')\n", (2871, 2898), False, 'from tensorflow.keras.layers import Activation\n'), ((5294, 5323), 'numpy.random.rand', 'np.random.rand', (['(10)', '(28)', '(28)', '(1)'], {}), '(10, 28, 28, 1)\n', (5308, 5323), True, 'import numpy as np\n'), ((3911, 3926), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (3917, 3926), True, 'import numpy as np\n'), ((4296, 4863), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 6e-08, 1.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], [0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 5e-07, 1.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, \n 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, \n 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0,\n 1.0, 0.0, 0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], [0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 6e-08, 1.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, \n 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], [0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 5e-07, 1.0], [0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0], [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, \n 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0]])\n', (4304, 4863), True, 'import numpy as np\n'), ((7132, 7296), 'numpy.array', 'np.array', (['[[[-2.441, 3.816], [-3.807, -1.426], [-2.684, -1.317], [-1.659, 0.9834]], [\n [-4.99, 1.139], [-2.559, -1.216], [-2.285, 1.905], [-2.652, -0.467]]]'], {}), '([[[-2.441, 3.816], [-3.807, -1.426], [-2.684, -1.317], [-1.659, \n 0.9834]], [[-4.99, 1.139], [-2.559, -1.216], [-2.285, 1.905], [-2.652, \n -0.467]]])\n', (7140, 7296), True, 'import numpy as np\n'), ((1884, 1901), 'qkeras.binary', 'binary', ([], {'alpha': '(1.0)'}), '(alpha=1.0)\n', (1890, 1901), False, 'from qkeras import binary\n'), ((1929, 1963), 'qkeras.quantized_bits', 'quantized_bits', (['(4)', '(0)', '(1)'], {'alpha': '(1.0)'}), '(4, 0, 1, alpha=1.0)\n', (1943, 1963), False, 'from qkeras import quantized_bits\n'), ((1992, 2026), 'qkeras.quantized_bits', 'quantized_bits', (['(6)', '(2)', '(1)'], {'alpha': '(1.0)'}), '(6, 2, 1, alpha=1.0)\n', (2006, 2026), False, 'from qkeras import quantized_bits\n'), ((2049, 2072), 'qkeras.quantized_bits', 'quantized_bits', (['(4)', '(0)', '(1)'], {}), '(4, 
0, 1)\n', (2063, 2072), False, 'from qkeras import quantized_bits\n'), ((2252, 2270), 'qkeras.ternary', 'ternary', ([], {'alpha': '(1.0)'}), '(alpha=1.0)\n', (2259, 2270), False, 'from qkeras import ternary\n'), ((2293, 2316), 'qkeras.quantized_bits', 'quantized_bits', (['(4)', '(0)', '(1)'], {}), '(4, 0, 1)\n', (2307, 2316), False, 'from qkeras import quantized_bits\n'), ((2360, 2383), 'qkeras.quantized_relu', 'quantized_relu', (['(6)', '(3)', '(1)'], {}), '(6, 3, 1)\n', (2374, 2383), False, 'from qkeras import quantized_relu\n'), ((2477, 2511), 'qkeras.quantized_bits', 'quantized_bits', (['(6)', '(2)', '(1)'], {'alpha': '(1.0)'}), '(6, 2, 1, alpha=1.0)\n', (2491, 2511), False, 'from qkeras import quantized_bits\n'), ((2534, 2557), 'qkeras.quantized_bits', 'quantized_bits', (['(4)', '(0)', '(1)'], {}), '(4, 0, 1)\n', (2548, 2557), False, 'from qkeras import quantized_bits\n'), ((2739, 2773), 'qkeras.quantized_bits', 'quantized_bits', (['(6)', '(2)', '(1)'], {'alpha': '(1.0)'}), '(6, 2, 1, alpha=1.0)\n', (2753, 2773), False, 'from qkeras import quantized_bits\n'), ((2796, 2819), 'qkeras.quantized_bits', 'quantized_bits', (['(4)', '(0)', '(1)'], {}), '(4, 0, 1)\n', (2810, 2819), False, 'from qkeras import quantized_bits\n'), ((5558, 5592), 'qkeras.quantized_bits', 'quantized_bits', (['(6)', '(2)', '(1)'], {'alpha': '(1.0)'}), '(6, 2, 1, alpha=1.0)\n', (5572, 5592), False, 'from qkeras import quantized_bits\n'), ((5615, 5638), 'qkeras.quantized_bits', 'quantized_bits', (['(4)', '(0)', '(1)'], {}), '(4, 0, 1)\n', (5629, 5638), False, 'from qkeras import quantized_bits\n'), ((3635, 3660), 'numpy.sqrt', 'np.sqrt', (['(2.0 / input_size)'], {}), '(2.0 / input_size)\n', (3642, 3660), True, 'import numpy as np\n'), ((6545, 6570), 'numpy.sqrt', 'np.sqrt', (['(2.0 / input_size)'], {}), '(2.0 / input_size)\n', (6552, 6570), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# License: BSD-3 (https://tldrlegal.com/license/bsd-3-clause-license-(revised))
# Copyright (c) 2016-2021, <NAME>; Luczywo, Nadia
# All rights reserved.
# =============================================================================
# DOCS
# =============================================================================
"""Functionalities for remove negatives from criteria.
In addition to the main functionality, an MCDA agnostic function is offered
to push negatives values on an array along an arbitrary axis.
"""
# =============================================================================
# IMPORTS
# =============================================================================
import numpy as np
from ..core import SKCMatrixAndWeightTransformerABC
from ..utils import doc_inherit
# =============================================================================
# FUNCTIONS
# =============================================================================
def push_negatives(arr, axis=None):
r"""Increment the array until all the valuer are sean >= 0.
If an array has negative values this function increment the values
proportionally to made all the array positive along an axis.
.. math::
\overline{X}_{ij} =
\begin{cases}
X_{ij} + min_{X_{ij}} & \text{if } X_{ij} < 0\\
X_{ij} & \text{otherwise}
\end{cases}
Parameters
----------
    arr: :py:class:`numpy.ndarray` like.
        An array with values.
    axis : :py:class:`int`, optional
        Axis along which to operate. By default, flattened input is used.
Returns
-------
:py:class:`numpy.ndarray`
array with all values >= 0.
Examples
--------
.. code-block:: pycon
>>> from skcriteria.preprocess import push_negatives
>>> mtx = [[1, 2], [3, 4]]
>>> mtx_lt0 = [[-1, 2], [3, 4]] # has a negative value
        >>> push_negatives(mtx)  # an array without negatives is not affected
array([[1, 2],
[3, 4]])
        # the whole array is incremented by 1 to eliminate the negative value
>>> push_negatives(mtx_lt0)
array([[0, 3],
[4, 5]])
# by column only the first one (with the negative value) is affected
>>> push_negatives(mtx_lt0, axis=0)
array([[0, 2],
[4, 4]])
# by row only the first row (with the negative value) is affected
>>> push_negatives(mtx_lt0, axis=1)
array([[0, 3],
[3, 4]])
"""
arr = np.asarray(arr)
mins = np.min(arr, axis=axis, keepdims=True)
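    # delta keeps the (negative) minimum only where the minimum along the axis
    # is below zero, so "arr - delta" shifts exactly those slices up and leaves
    # already non-negative slices unchanged.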
delta = (mins < 0) * mins
return arr - delta
class PushNegatives(SKCMatrixAndWeightTransformerABC):
r"""Increment the matrix/weights until all the valuer are sean >= 0.
If the matrix/weights has negative values this function increment the
values proportionally to made all the matrix/weights positive along an
axis.
.. math::
\overline{X}_{ij} =
\begin{cases}
X_{ij} + min_{X_{ij}} & \text{if } X_{ij} < 0\\
X_{ij} & \text{otherwise}
\end{cases}
"""
@doc_inherit(SKCMatrixAndWeightTransformerABC._transform_weights)
def _transform_weights(self, weights):
return push_negatives(weights, axis=None)
@doc_inherit(SKCMatrixAndWeightTransformerABC._transform_matrix)
def _transform_matrix(self, matrix):
return push_negatives(matrix, axis=0)
|
[
"numpy.asarray",
"numpy.min"
] |
[((2605, 2620), 'numpy.asarray', 'np.asarray', (['arr'], {}), '(arr)\n', (2615, 2620), True, 'import numpy as np\n'), ((2632, 2669), 'numpy.min', 'np.min', (['arr'], {'axis': 'axis', 'keepdims': '(True)'}), '(arr, axis=axis, keepdims=True)\n', (2638, 2669), True, 'import numpy as np\n')]
|
import numpy as np
from treelas import post_order, TreeInstance
def test_demo_3x7_postord():
parent = np.array([0, 4, 5, 0, 3, 4, 7, 8, 5, 6, 7, 8,
9, 14, 17, 12, 15, 16, 19, 16, 17])
po = post_order(parent, include_root=True)
expect = np.array([12, 11, 19, 20, 21, 14, 15, 18, 17, 16, 13,
10, 7, 8, 9, 3, 6, 2, 5, 4, 1], dtype='i4') - 1
assert (po == expect).all()
def test_demo_3x7():
y = np.fromstring("0.62 0.73 0.71 1.5 1.17 0.43 1.08 0.62 " +
"1.73 0.95 1.46 1.6 1.16 0.38 0.9 0.32 " +
"-0.48 0.95 1.08 0.02 0.4", sep=" ")
parent = np.array([0, 4, 5, 0, 3, 4, 7, 8, 5, 6, 7, 8,
9, 14, 17, 12, 15, 16, 19, 16, 17])
lam = 1.0
prob = TreeInstance(y, parent, lam=lam)
assert prob.root == 0
assert prob.parent.dtype == np.int32
prob.solve()
assert abs(prob.x.mean() - prob.y.mean()) < 1e-15
assert len(np.unique(prob.x)) == 2
assert max(np.abs(prob.dual[2:]) - lam) < 1e-12
assert max(np.abs(prob.gamma)) < 1e-15
|
[
"numpy.abs",
"numpy.unique",
"treelas.post_order",
"treelas.TreeInstance",
"numpy.array",
"numpy.fromstring"
] |
[((108, 193), 'numpy.array', 'np.array', (['[0, 4, 5, 0, 3, 4, 7, 8, 5, 6, 7, 8, 9, 14, 17, 12, 15, 16, 19, 16, 17]'], {}), '([0, 4, 5, 0, 3, 4, 7, 8, 5, 6, 7, 8, 9, 14, 17, 12, 15, 16, 19, 16,\n 17])\n', (116, 193), True, 'import numpy as np\n'), ((222, 259), 'treelas.post_order', 'post_order', (['parent'], {'include_root': '(True)'}), '(parent, include_root=True)\n', (232, 259), False, 'from treelas import post_order, TreeInstance\n'), ((461, 606), 'numpy.fromstring', 'np.fromstring', (["('0.62 0.73 0.71 1.5 1.17 0.43 1.08 0.62 ' +\n '1.73 0.95 1.46 1.6 1.16 0.38 0.9 0.32 ' + '-0.48 0.95 1.08 0.02 0.4')"], {'sep': '""" """'}), "('0.62 0.73 0.71 1.5 1.17 0.43 1.08 0.62 ' +\n '1.73 0.95 1.46 1.6 1.16 0.38 0.9 0.32 ' + '-0.48 0.95 1.08 0.02 0.4',\n sep=' ')\n", (474, 606), True, 'import numpy as np\n'), ((656, 741), 'numpy.array', 'np.array', (['[0, 4, 5, 0, 3, 4, 7, 8, 5, 6, 7, 8, 9, 14, 17, 12, 15, 16, 19, 16, 17]'], {}), '([0, 4, 5, 0, 3, 4, 7, 8, 5, 6, 7, 8, 9, 14, 17, 12, 15, 16, 19, 16,\n 17])\n', (664, 741), True, 'import numpy as np\n'), ((786, 818), 'treelas.TreeInstance', 'TreeInstance', (['y', 'parent'], {'lam': 'lam'}), '(y, parent, lam=lam)\n', (798, 818), False, 'from treelas import post_order, TreeInstance\n'), ((273, 374), 'numpy.array', 'np.array', (['[12, 11, 19, 20, 21, 14, 15, 18, 17, 16, 13, 10, 7, 8, 9, 3, 6, 2, 5, 4, 1]'], {'dtype': '"""i4"""'}), "([12, 11, 19, 20, 21, 14, 15, 18, 17, 16, 13, 10, 7, 8, 9, 3, 6, 2,\n 5, 4, 1], dtype='i4')\n", (281, 374), True, 'import numpy as np\n'), ((972, 989), 'numpy.unique', 'np.unique', (['prob.x'], {}), '(prob.x)\n', (981, 989), True, 'import numpy as np\n'), ((1063, 1081), 'numpy.abs', 'np.abs', (['prob.gamma'], {}), '(prob.gamma)\n', (1069, 1081), True, 'import numpy as np\n'), ((1011, 1032), 'numpy.abs', 'np.abs', (['prob.dual[2:]'], {}), '(prob.dual[2:])\n', (1017, 1032), True, 'import numpy as np\n')]
|
from tqdm import tqdm
import pandas as pd
import numpy as np, argparse, time, pickle, random, os, datetime
import torch
import torch.optim as optim
from model import MaskedNLLLoss, BC_LSTM
from dataloader import MELDDataLoader
from sklearn.metrics import f1_score, confusion_matrix, accuracy_score, classification_report
def setup_seed(seed):
""" Manually Fix the random seed to get deterministic results.
"""
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
    torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
def train_or_eval_model(model, loss_function, dataloader, epoch, optimizer=None, mode='train'):
losses, preds, labels, masks, losses_sense = [], [], [], [], []
max_sequence_len = []
    assert mode != 'train' or optimizer is not None
if mode == 'train':
model.train()
else:
model.eval()
with tqdm(dataloader) as td:
for data in td:
if mode == 'train':
optimizer.zero_grad()
textf, acouf, mask, label = [d.cuda() for d in data[:-1]] if args.cuda else data[:-1]
log_prob, _ = model(textf, None, acouf, None, mask)
lp_ = log_prob.transpose(0,1).contiguous().view(-1, log_prob.size()[2]) # batch*seq_len, n_classes
labels_ = label.view(-1) # batch*seq_len
loss = loss_function(lp_, labels_, mask)
pred_ = torch.argmax(lp_,1) # batch*seq_len
preds.append(pred_.data.cpu().numpy())
labels.append(labels_.data.cpu().numpy())
masks.append(mask.view(-1).cpu().numpy())
losses.append(loss.item()*masks[-1].sum())
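            # weight each batch loss by its number of valid (unmasked) steps so
            # the epoch-level average computed below is a per-step average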
if mode == 'train':
total_loss = loss
total_loss.backward()
optimizer.step()
if preds!=[]:
preds = np.concatenate(preds)
labels = np.concatenate(labels)
masks = np.concatenate(masks)
else:
return float('nan'), float('nan'), float('nan'), [], [], [], float('nan'),[]
avg_loss = round(np.sum(losses)/np.sum(masks), 4)
avg_sense_loss = round(np.sum(losses_sense)/np.sum(masks), 4)
avg_accuracy = round(accuracy_score(labels,preds, sample_weight=masks)*100, 2)
avg_fscore = round(f1_score(labels,preds, sample_weight=masks, average='weighted')*100, 2)
if mode == 'test':
class_report = classification_report(labels, preds, sample_weight=masks, target_names=['neutral', 'surprise', 'fear', 'sadness', 'joy', 'disgust', 'anger'], digits=6)
print(class_report)
return avg_loss, avg_accuracy, labels, preds, masks, [avg_fscore]
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--num_workers', type=int, default=0,
help='num workers of loading data')
# dataloader settings
parser.add_argument('--batch-size', type=int, default=32, metavar='BS', help='batch size')
parser.add_argument('--data_path', type=str, default='../TextCnn/dataset/MELD_features_raw.pkl')
# model settings.
parser.add_argument('--attention_type', type=str, default='general2')
parser.add_argument('--utterance_dim', type=int, default=600,
help='embedding dims to use')
parser.add_argument('--emotion_state_dim', type=int, default=100)
parser.add_argument('--hidden_layer_dim', type=int, default=100)
parser.add_argument('--dropout', type=float, default=0.25)
parser.add_argument('--n_classes', type=int, default=7)
# late fusion module.
parser.add_argument('--lateFusionModule', type=str, default='concat')
parser.add_argument('--input_features', type=tuple, default=(100, 300))
parser.add_argument('--pre_fusion_hidden_dims', type=tuple, default=(24, 7))
parser.add_argument('--pre_fusion_dropout', type=float, default=0.4)
parser.add_argument('--post_fusion_dropout', type=float, default=0.3)
# train settings.
parser.add_argument('--lr', type=float, default=1e-4, metavar='LR', help='learning rate')
parser.add_argument('--l2', type=float, default=1e-5, metavar='L2', help='L2 regularization weight')
parser.add_argument('--epochs', type=int, default=100, metavar='E', help='number of epochs')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
args.cuda = torch.cuda.is_available()
if args.cuda:
print('Running on GPU')
else:
print('Running on CPU')
for seed in [1, 11, 111, 1111, 11111]:
setup_seed(seed)
args.seed = seed
print(args)
model = BC_LSTM(args)
print('MELD BC_LSTM MODULE ...')
if args.cuda:
model.cuda()
loss_weights = torch.FloatTensor([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
loss_function = MaskedNLLLoss(loss_weights.cuda() if args.cuda else loss_weights)
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2)
lf = open('logs/cnn_meld_logs.txt', 'a')
dataloader = MELDDataLoader(args)
valid_losses, valid_fscores = [], []
test_fscores, test_accuracys, test_losses = [], [], []
best_loss, best_label, best_pred, best_mask = None, None, None, None
for e in range(args.epochs):
start_time = time.time()
train_loss, train_acc, _, _, _, train_fscore = train_or_eval_model(model, loss_function, dataloader['train'], e, optimizer, mode='train')
valid_loss, valid_acc, _, _, _, valid_fscore = train_or_eval_model(model, loss_function, dataloader['valid'], e, mode='valid')
test_loss, test_acc, test_label, test_pred, test_mask, test_fscore = train_or_eval_model(model, loss_function, dataloader['test'], e, mode='test')
valid_losses.append(valid_loss)
valid_fscores.append(valid_fscore)
test_losses.append(test_loss)
test_accuracys.append(test_acc)
test_fscores.append(test_fscore)
x = 'epoch: {}, train_loss: {}, acc: {}, fscore: {}, valid_loss: {}, acc: {}, fscore: {}, test_loss: {}, acc: {}, fscore: {}, time: {} sec'.format(e+1, train_loss, train_acc, train_fscore, valid_loss, valid_acc, valid_fscore, test_loss, test_acc, test_fscore, round(time.time()-start_time, 2))
print (x)
lf.write(x + '\n')
valid_fscores = np.array(valid_fscores).transpose()
test_fscores = np.array(test_fscores).transpose() # [1, epoches]
test_accuracys = np.array(test_accuracys).transpose() # [epoches]
f1_score1 = test_fscores[0][np.argmin(valid_losses)]
acc_score1 = test_accuracys[np.argmin(valid_losses)]
f1_score2 = test_fscores[0][np.argmax(valid_fscores[0])]
acc_score2 = test_accuracys[np.argmax(valid_fscores[0])]
scores = [acc_score1, f1_score1, acc_score2, f1_score2]
scores = [str(item) for item in scores]
print ('Test Scores: Weighted F1')
print('@Best Valid Loss: Test Acc: {}, Test F1: {}'.format(acc_score1, f1_score1))
print('@Best Valid F1: Test Acc: {}, Test F1: {}'.format(acc_score2, f1_score2))
rf = open('results/cnn_meld_results.txt', 'a')
rf.write('\t'.join(scores) + '\t' + str(args) + '\n')
rf.close()
|
[
"sklearn.metrics.classification_report",
"numpy.array",
"torch.cuda.is_available",
"argparse.ArgumentParser",
"model.BC_LSTM",
"dataloader.MELDDataLoader",
"numpy.random.seed",
"numpy.concatenate",
"numpy.argmin",
"torch.argmax",
"numpy.argmax",
"time.time",
"sklearn.metrics.accuracy_score",
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"sklearn.metrics.f1_score",
"tqdm.tqdm",
"random.seed",
"numpy.sum",
"torch.cuda.manual_seed",
"torch.FloatTensor"
] |
[((427, 450), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (444, 450), False, 'import torch\n'), ((455, 483), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (477, 483), False, 'import torch\n'), ((488, 520), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (514, 520), False, 'import torch\n'), ((525, 545), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (539, 545), True, 'import numpy as np, argparse, time, pickle, random, os, datetime\n'), ((550, 567), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (561, 567), False, 'import numpy as np, argparse, time, pickle, random, os, datetime\n'), ((2807, 2832), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2830, 2832), False, 'import numpy as np, argparse, time, pickle, random, os, datetime\n'), ((4486, 4511), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4509, 4511), False, 'import torch\n'), ((978, 994), 'tqdm.tqdm', 'tqdm', (['dataloader'], {}), '(dataloader)\n', (982, 994), False, 'from tqdm import tqdm\n'), ((1982, 2003), 'numpy.concatenate', 'np.concatenate', (['preds'], {}), '(preds)\n', (1996, 2003), True, 'import numpy as np, argparse, time, pickle, random, os, datetime\n'), ((2021, 2043), 'numpy.concatenate', 'np.concatenate', (['labels'], {}), '(labels)\n', (2035, 2043), True, 'import numpy as np, argparse, time, pickle, random, os, datetime\n'), ((2061, 2082), 'numpy.concatenate', 'np.concatenate', (['masks'], {}), '(masks)\n', (2075, 2082), True, 'import numpy as np, argparse, time, pickle, random, os, datetime\n'), ((2524, 2684), 'sklearn.metrics.classification_report', 'classification_report', (['labels', 'preds'], {'sample_weight': 'masks', 'target_names': "['neutral', 'surprise', 'fear', 'sadness', 'joy', 'disgust', 'anger']", 'digits': '(6)'}), "(labels, preds, sample_weight=masks, target_names=[\n 'neutral', 'surprise', 'fear', 'sadness', 'joy', 'disgust', 'anger'],\n digits=6)\n", (2545, 2684), False, 'from sklearn.metrics import f1_score, confusion_matrix, accuracy_score, classification_report\n'), ((4744, 4757), 'model.BC_LSTM', 'BC_LSTM', (['args'], {}), '(args)\n', (4751, 4757), False, 'from model import MaskedNLLLoss, BC_LSTM\n'), ((4879, 4933), 'torch.FloatTensor', 'torch.FloatTensor', (['[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])\n', (4896, 4933), False, 'import torch\n'), ((5207, 5227), 'dataloader.MELDDataLoader', 'MELDDataLoader', (['args'], {}), '(args)\n', (5221, 5227), False, 'from dataloader import MELDDataLoader\n'), ((1541, 1561), 'torch.argmax', 'torch.argmax', (['lp_', '(1)'], {}), '(lp_, 1)\n', (1553, 1561), False, 'import torch\n'), ((2200, 2214), 'numpy.sum', 'np.sum', (['losses'], {}), '(losses)\n', (2206, 2214), True, 'import numpy as np, argparse, time, pickle, random, os, datetime\n'), ((2215, 2228), 'numpy.sum', 'np.sum', (['masks'], {}), '(masks)\n', (2221, 2228), True, 'import numpy as np, argparse, time, pickle, random, os, datetime\n'), ((2260, 2280), 'numpy.sum', 'np.sum', (['losses_sense'], {}), '(losses_sense)\n', (2266, 2280), True, 'import numpy as np, argparse, time, pickle, random, os, datetime\n'), ((2281, 2294), 'numpy.sum', 'np.sum', (['masks'], {}), '(masks)\n', (2287, 2294), True, 'import numpy as np, argparse, time, pickle, random, os, datetime\n'), ((2325, 2375), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['labels', 'preds'], {'sample_weight': 'masks'}), 
'(labels, preds, sample_weight=masks)\n', (2339, 2375), False, 'from sklearn.metrics import f1_score, confusion_matrix, accuracy_score, classification_report\n'), ((2406, 2470), 'sklearn.metrics.f1_score', 'f1_score', (['labels', 'preds'], {'sample_weight': 'masks', 'average': '"""weighted"""'}), "(labels, preds, sample_weight=masks, average='weighted')\n", (2414, 2470), False, 'from sklearn.metrics import f1_score, confusion_matrix, accuracy_score, classification_report\n'), ((5478, 5489), 'time.time', 'time.time', ([], {}), '()\n', (5487, 5489), False, 'import numpy as np, argparse, time, pickle, random, os, datetime\n'), ((6811, 6834), 'numpy.argmin', 'np.argmin', (['valid_losses'], {}), '(valid_losses)\n', (6820, 6834), True, 'import numpy as np, argparse, time, pickle, random, os, datetime\n'), ((6872, 6895), 'numpy.argmin', 'np.argmin', (['valid_losses'], {}), '(valid_losses)\n', (6881, 6895), True, 'import numpy as np, argparse, time, pickle, random, os, datetime\n'), ((6933, 6960), 'numpy.argmax', 'np.argmax', (['valid_fscores[0]'], {}), '(valid_fscores[0])\n', (6942, 6960), True, 'import numpy as np, argparse, time, pickle, random, os, datetime\n'), ((6998, 7025), 'numpy.argmax', 'np.argmax', (['valid_fscores[0]'], {}), '(valid_fscores[0])\n', (7007, 7025), True, 'import numpy as np, argparse, time, pickle, random, os, datetime\n'), ((6591, 6614), 'numpy.array', 'np.array', (['valid_fscores'], {}), '(valid_fscores)\n', (6599, 6614), True, 'import numpy as np, argparse, time, pickle, random, os, datetime\n'), ((6650, 6672), 'numpy.array', 'np.array', (['test_fscores'], {}), '(test_fscores)\n', (6658, 6672), True, 'import numpy as np, argparse, time, pickle, random, os, datetime\n'), ((6725, 6749), 'numpy.array', 'np.array', (['test_accuracys'], {}), '(test_accuracys)\n', (6733, 6749), True, 'import numpy as np, argparse, time, pickle, random, os, datetime\n'), ((6472, 6483), 'time.time', 'time.time', ([], {}), '()\n', (6481, 6483), False, 'import numpy as np, argparse, time, pickle, random, os, datetime\n')]
|
import logging
from typing import List, Callable
import numpy as np
from pyquaternion import Quaternion
from pyrep import PyRep
from pyrep.errors import IKError
from pyrep.objects import Dummy, Object
from rlbench import utils
from rlbench.action_modes import ArmActionMode, ActionMode
from rlbench.backend.exceptions import BoundaryError, WaypointError
from rlbench.backend.observation import Observation
from rlbench.backend.robot import Robot
from rlbench.backend.scene import Scene
from rlbench.backend.task import Task
from rlbench.demo import Demo
from rlbench.observation_config import ObservationConfig
_TORQUE_MAX_VEL = 9999
_DT = 0.05
_MAX_RESET_ATTEMPTS = 40
_MAX_DEMO_ATTEMPTS = 10
class InvalidActionError(Exception):
pass
class TaskEnvironmentError(Exception):
pass
class TaskEnvironment(object):
def __init__(self, pyrep: PyRep, robot: Robot, scene: Scene, task: Task,
action_mode: ActionMode, dataset_root: str,
obs_config: ObservationConfig,
static_positions: bool = False,
attach_grasped_objects: bool = True):
self._pyrep = pyrep
self._robot = robot
self._scene = scene
self._task = task
self._variation_number = 0
self._action_mode = action_mode
self._dataset_root = dataset_root
self._obs_config = obs_config
self._static_positions = static_positions
self._attach_grasped_objects = attach_grasped_objects
self._reset_called = False
self._prev_ee_velocity = None
self._enable_path_observations = False
self._scene.load(self._task)
self._pyrep.start()
self._target_workspace_check = Dummy.create()
self._last_e = None
def get_name(self) -> str:
return self._task.get_name()
def sample_variation(self) -> int:
self._variation_number = np.random.randint(
0, self._task.variation_count())
return self._variation_number
def set_variation(self, v: int) -> None:
if v >= self.variation_count():
raise TaskEnvironmentError(
'Requested variation %d, but there are only %d variations.' % (
v, self.variation_count()))
self._variation_number = v
def variation_count(self) -> int:
return self._task.variation_count()
def reset(self) -> (List[str], Observation):
self._scene.reset()
try:
desc = self._scene.init_episode(
self._variation_number, max_attempts=_MAX_RESET_ATTEMPTS,
randomly_place=not self._static_positions)
except (BoundaryError, WaypointError) as e:
raise TaskEnvironmentError(
'Could not place the task %s in the scene. This should not '
'happen, please raise an issues on this task.'
% self._task.get_name()) from e
self._reset_called = True
# redundancy resolution
self._last_e = None
# Returns a list of descriptions and the first observation
return desc, self._scene.get_observation()
def get_observation(self) -> Observation:
return self._scene.get_observation()
def get_joint_upper_velocity_limits(self):
return self._robot.arm.get_joint_upper_velocity_limits()
def get_all_graspable_objects(self):
return self._task.get_graspable_objects()
def get_robot_visuals(self):
return self._robot.arm.get_visuals()
def get_all_graspable_object_positions(self, relative_to_cameras=False):
""" returns the positions of all graspable object relative to all enabled cameras """
objects = self._task.get_graspable_objects()
positions = []
for ob in objects:
            if relative_to_cameras:
positions.append(self._scene.get_object_position_relative_to_cameras(ob))
else:
positions.append({"left_shoulder_camera": ob.get_position(),
"right_shoulder_camera": ob.get_position(),
"front_camera": ob.get_position(),
"wrist_camera": ob.get_position()})
return positions
def get_all_graspable_object_poses(self, relative_to_cameras=False):
""" returns the pose of all graspable object relative to all enabled cameras """
objects = self._task.get_graspable_objects()
poses = []
for ob in objects:
if relative_to_cameras:
poses.append(self._scene.get_object_pose_relative_to_cameras(ob))
else:
poses.append({"left_shoulder_camera": ob.get_pose(),
"right_shoulder_camera": ob.get_pose(),
"front_camera": ob.get_pose(),
"wrist_camera": ob.get_pose()})
return poses
def _assert_action_space(self, action, expected_shape):
if np.shape(action) != expected_shape:
raise RuntimeError(
'Expected the action shape to be: %s, but was shape: %s' % (
str(expected_shape), str(np.shape(action))))
def _assert_unit_quaternion(self, quat):
if not np.isclose(np.linalg.norm(quat), 1.0):
raise RuntimeError('Action contained non unit quaternion!')
def _torque_action(self, action):
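        # Torque control via the usual CoppeliaSim/PyRep trick: saturate the
        # joint target velocities and bound each joint force at |action|, so
        # the motors apply (at most) the requested torque magnitudes.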
self._robot.arm.set_joint_target_velocities(
[(_TORQUE_MAX_VEL if t < 0 else -_TORQUE_MAX_VEL)
for t in action])
self._robot.arm.set_joint_forces(np.abs(action))
def _ee_action(self, action, relative_to=None):
self._assert_unit_quaternion(action[3:])
try:
joint_positions = self._robot.arm.solve_ik(
action[:3], quaternion=action[3:], relative_to=relative_to)
self._robot.arm.set_joint_target_positions(joint_positions)
except IKError as e:
raise InvalidActionError('Could not find a path.') from e
done = False
prev_values = None
# Move until reached target joint positions or until we stop moving
        # (e.g. when we collide with something)
while not done:
self._scene.step()
cur_positions = self._robot.arm.get_joint_positions()
reached = np.allclose(cur_positions, joint_positions, atol=0.01)
not_moving = False
if prev_values is not None:
not_moving = np.allclose(
cur_positions, prev_values, atol=0.001)
prev_values = cur_positions
done = reached or not_moving
def _path_action(self, action, relative_to=None):
self._assert_unit_quaternion(action[3:])
try:
# Check if the target is in the workspace; if not, then quick reject
# Only checks position, not rotation
pos_to_check = action[:3]
if relative_to is not None:
self._target_workspace_check.set_position(
pos_to_check, relative_to)
pos_to_check = self._target_workspace_check.get_position()
valid = self._scene.check_target_in_workspace(pos_to_check)
if not valid:
raise InvalidActionError('Target is outside of workspace.')
path = self._robot.arm.get_path(
action[:3], quaternion=action[3:], ignore_collisions=True,
relative_to=relative_to)
done = False
observations = []
while not done:
done = path.step()
self._scene.step()
if self._enable_path_observations:
observations.append(self._scene.get_observation())
success, terminate = self._task.success()
# If the task succeeds while traversing path, then break early
if success:
break
observations.append(self._scene.get_observation())
return observations
except IKError as e:
raise InvalidActionError('Could not find a path.') from e
def step(self, action, camcorder=None) -> (Observation, int, bool):
# returns observation, reward, done, info
if not self._reset_called:
raise RuntimeError(
"Call 'reset' before calling 'step' on a task.")
# action should contain 1 extra value for gripper open close state
arm_action = np.array(action[:-1])
ee_action = action[-1]
        if not 0.0 <= ee_action <= 1.0:
raise ValueError('Gripper action expected to be within 0 and 1.')
# Discretize the gripper action
current_ee = (1.0 if self._robot.gripper.get_open_amount()[0] > 0.9 else 0.0)
if ee_action > 0.5:
ee_action = 1.0
elif ee_action < 0.5:
ee_action = 0.0
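        # If the discretized gripper command flips the current open/close
        # state, hold the arm still for this step (zero joint action); the
        # gripper itself is actuated in a loop further below.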
if current_ee != ee_action:
arm_action = np.array([0.0]*7)
if self._action_mode.arm == ArmActionMode.ABS_JOINT_VELOCITY:
self._assert_action_space(arm_action,
(len(self._robot.arm.joints),))
self._robot.arm.set_joint_target_velocities(arm_action)
self._scene.step()
# if needed save some images
if camcorder:
obs = self._scene.get_observation()
camcorder.save(obs, self.get_robot_visuals(), self.get_all_graspable_objects())
elif self._action_mode.arm == ArmActionMode.DELTA_JOINT_VELOCITY:
self._assert_action_space(arm_action,
(len(self._robot.arm.joints),))
cur = np.array(self._robot.arm.get_joint_velocities())
self._robot.arm.set_joint_target_velocities(cur + arm_action)
self._scene.step()
elif self._action_mode.arm == ArmActionMode.ABS_JOINT_POSITION:
self._assert_action_space(arm_action,
(len(self._robot.arm.joints),))
self._robot.arm.set_joint_target_positions(arm_action)
self._scene.step()
elif self._action_mode.arm == ArmActionMode.DELTA_JOINT_POSITION:
self._assert_action_space(arm_action,
(len(self._robot.arm.joints),))
cur = np.array(self._robot.arm.get_joint_positions())
self._robot.arm.set_joint_target_positions(cur + arm_action)
self._scene.step()
elif self._action_mode.arm == ArmActionMode.ABS_JOINT_TORQUE:
self._assert_action_space(
arm_action, (len(self._robot.arm.joints),))
self._torque_action(arm_action)
self._scene.step()
elif self._action_mode.arm == ArmActionMode.DELTA_JOINT_TORQUE:
cur = np.array(self._robot.arm.get_joint_forces())
new_action = cur + arm_action
self._torque_action(new_action)
self._scene.step()
elif self._action_mode.arm == ArmActionMode.ABS_EE_POSE_WORLD_FRAME:
self._assert_action_space(arm_action, (7,))
self._ee_action(list(arm_action))
elif self._action_mode.arm == ArmActionMode.ABS_EE_POSE_PLAN_WORLD_FRAME:
self._assert_action_space(arm_action, (7,))
self._path_observations = []
self._path_observations = self._path_action(list(arm_action))
elif self._action_mode.arm == ArmActionMode.DELTA_EE_POSE_PLAN_WORLD_FRAME:
self._assert_action_space(arm_action, (7,))
a_x, a_y, a_z, a_qx, a_qy, a_qz, a_qw = arm_action
x, y, z, qx, qy, qz, qw = self._robot.arm.get_tip().get_pose()
new_rot = Quaternion(a_qw, a_qx, a_qy, a_qz) * Quaternion(qw, qx,
qy, qz)
qw, qx, qy, qz = list(new_rot)
new_pose = [a_x + x, a_y + y, a_z + z] + [qx, qy, qz, qw]
self._path_observations = []
self._path_observations = self._path_action(list(new_pose))
elif self._action_mode.arm == ArmActionMode.DELTA_EE_POSE_WORLD_FRAME:
self._assert_action_space(arm_action, (7,))
a_x, a_y, a_z, a_qx, a_qy, a_qz, a_qw = arm_action
x, y, z, qx, qy, qz, qw = self._robot.arm.get_tip().get_pose()
new_rot = Quaternion(a_qw, a_qx, a_qy, a_qz) * Quaternion(
qw, qx, qy, qz)
qw, qx, qy, qz = list(new_rot)
new_pose = [a_x + x, a_y + y, a_z + z] + [qx, qy, qz, qw]
self._ee_action(list(new_pose))
elif self._action_mode.arm == ArmActionMode.EE_POSE_EE_FRAME:
self._assert_action_space(arm_action, (7,))
self._ee_action(
list(arm_action), relative_to=self._robot.arm.get_tip())
elif self._action_mode.arm == ArmActionMode.EE_POSE_PLAN_EE_FRAME:
self._assert_action_space(arm_action, (7,))
self._path_observations = []
self._path_observations = self._path_action(
list(arm_action), relative_to=self._robot.arm.get_tip())
else:
raise RuntimeError('Unrecognised action mode.')
if current_ee != ee_action:
done = False
while not done:
done = self._robot.gripper.actuate(ee_action, velocity=0.2)
self._pyrep.step()
self._task.step()
# if needed save some images
if camcorder:
obs = self._scene.get_observation()
camcorder.save(obs, self.get_robot_visuals(), self.get_all_graspable_objects())
if ee_action == 0.0 and self._attach_grasped_objects:
                # If gripper close action, then check for grasp.
for g_obj in self._task.get_graspable_objects():
self._robot.gripper.grasp(g_obj)
else:
                # If gripper open action, then release any grasp.
self._robot.gripper.release()
success, terminate = self._task.success()
task_reward = self._task.reward()
reward = float(success) if task_reward is None else task_reward
return self._scene.get_observation(), reward, terminate
def resolve_redundancy_joint_velocities(self, actions, setup):
"""
Resolves redundant self-motion into the nullspace without changing the gripper tip position
:param actions:
Current actions without redundancy resolution.
:param setup:
Setup for redundancy resolution defining the mode, weighting etc.
:return: Array of joint velocities, which move the robot's tip according to the provided actions yet push
the joint position towards a reference position.
"""
# get the Jacobian
J = self._robot.arm.get_jacobian()
J = np.transpose(J)
J = np.flip(J)
J = J[-3:]
# compute the pseudo inverse
J_plus = np.linalg.pinv(J)
# weighting
if type(setup["W"]) is list:
W = np.array(setup["W"])
elif setup["W"] is None:
# use default weighting later
W = None
else:
raise TypeError("Unsupported type %s for weighting vector." % type(setup["W"]))
# compute the error
if setup["mode"] == "reference_position":
dL, L = self.get_loss_reference_position(setup["ref_position"], W)
elif setup["mode"] == "collision_avoidance":
dL, L = self.get_loss_collision_avoidance(W, setup)
# compute the joint velocities
q_dot_redundancy = setup["alpha"] * np.matmul((np.identity(len(self._robot.arm.joints)) - np.matmul(J_plus, J)), dL)
        # the provided Jacobian seems to be inaccurate, resulting in slight movement of the ee. This is why
        # the velocities are set to 0 once the error stops changing much.
e = dL
if setup["cut-off_error"] is not None:
if self._last_e is not None:
e_dot = np.sum(np.abs(e - self._last_e))
if self._last_e is not None and e_dot < setup["cut-off_error"]:
q_dot_redundancy = np.array([0.0] * 7)
self._last_e = e
else:
self._last_e = e
return actions - q_dot_redundancy, L
def get_loss_reference_position(self, ref_pos, W):
"""
        Calculates the summed squared error between the current and the reference configuration as well as
        its partial derivatives with respect to all q's for redundancy resolution.
-> L(q) = 1/2 sum_{i=1}^N w_i (q_i - \tilde{q}_i)^2
:param ref_pos:
Reference position.
:param W:
Weighting vector.
:return:
            1: The partial derivatives of the summed squared error between the current and the
            reference configuration -> \nabla_q L(q)
            2: Summed squared error between the current and the reference configuration. -> L(q)
"""
if W is None:
# default weighting
W = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
e = (self._robot.arm.get_joint_positions() - ref_pos)
return e * W, 0.5*np.dot(e,e*W)
def get_loss_collision_avoidance(self, W, setup):
"""
        Calculates the loss as well as the respective partial derivatives for redundancy resolution with
        collision avoidance. This only works with tasks that include exactly one obstacle!
L(q) = \sum_{i=1}^N d(q)^{-1}
:param W:
Weighting vector.
:return:
            1: The partial derivatives of the loss above. -> \nabla_q L(q)
            2: The loss shown above. -> L(q)
"""
# get the position of the object
p_obs = self._task.obstacle.get_position() + np.array([0, 0, 0.33]) - self._robot.arm.joints[0].get_position()
#p_obs = self._task.obstacle.get_position()
p_obs = np.append(p_obs, [1])
# get the transformation matrices, their derivatives, and the positions of the links
A_1, A_2, A_3, A_4, A_5, A_6, A_7 = self._robot.get_transformation_matrices()
dA_1, dA_2, dA_3, dA_4, dA_5, dA_6, dA_7 = self._robot.get_transformation_matrices_derivatives()
p_1, p_2, p_3, p_4, p_5, p_6, p_7 = self._robot.get_link_positions_in_ref_frames()
# we use reciprocal of the distance between each link and an obstacle as our Loss
# the chain rule delivers: d/dq L = (p_i^0 (q_1,..., q_i) - p_obs)^T * d/dq (p_i^0 (q_1,..., q_i) - p_obs)
# where p_i^0 = (\prod_{j=1}^i A_j^{j-1}(q_j)) * p_i
# as the left side of d/dq L is used often, let's calculate it in advance
d_1_T = np.transpose(A_1.dot(p_1) - p_obs)
d_2_T = np.transpose(A_1.dot(A_2).dot(p_2) - p_obs)
d_3_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(p_3) - p_obs)
d_4_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(A_4).dot(p_4) - p_obs)
d_5_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(p_5) - p_obs)
d_6_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6) - p_obs)
d_7_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7) - p_obs)
# now we can calculate the derivatives in each dimension
dq_1 = -np.matmul(d_1_T, dA_1.dot(p_1)) + \
-np.matmul(d_2_T, dA_1.dot(A_2).dot(p_2)) + \
-np.matmul(d_3_T, dA_1.dot(A_2).dot(A_3).dot(p_3)) + \
-np.matmul(d_4_T, dA_1.dot(A_2).dot(A_3).dot(A_4).dot(p_4)) + \
-np.matmul(d_5_T, dA_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(p_5)) + \
-np.matmul(d_6_T, dA_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6)) + \
-np.matmul(d_7_T, dA_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7))
dq_2 = -np.matmul(d_2_T, A_1.dot(dA_2).dot(p_2)) + \
-np.matmul(d_3_T, A_1.dot(dA_2).dot(A_3).dot(p_3)) + \
-np.matmul(d_4_T, A_1.dot(dA_2).dot(A_3).dot(A_4).dot(p_4)) + \
-np.matmul(d_5_T, A_1.dot(dA_2).dot(A_3).dot(A_4).dot(A_5).dot(p_5)) + \
-np.matmul(d_6_T, A_1.dot(dA_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6)) + \
-np.matmul(d_7_T, A_1.dot(dA_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7))
dq_3 = -np.matmul(d_3_T, A_1.dot(A_2).dot(dA_3).dot(p_3)) + \
-np.matmul(d_4_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(p_4)) + \
-np.matmul(d_5_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(A_5).dot(p_5)) + \
-np.matmul(d_6_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6)) + \
-np.matmul(d_7_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7))
dq_4 = -np.matmul(d_4_T, A_1.dot(A_2).dot(A_3).dot(dA_4).dot(p_4)) + \
-np.matmul(d_5_T, A_1.dot(A_2).dot(A_3).dot(dA_4).dot(A_5).dot(p_5)) + \
-np.matmul(d_6_T, A_1.dot(A_2).dot(A_3).dot(dA_4).dot(A_5).dot(A_6).dot(p_6)) + \
-np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(dA_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7))
dq_5 = -np.matmul(d_5_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(dA_5).dot(p_5)) + \
-np.matmul(d_6_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(dA_5).dot(A_6).dot(p_6)) + \
-np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(dA_5).dot(A_6).dot(A_7).dot(p_7))
dq_6 = -np.matmul(d_6_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(dA_6).dot(p_6)) + \
-np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(dA_6).dot(A_7).dot(p_7))
dq_7 = -np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(dA_7).dot(p_7))
if W is None:
# default weighting vector -> based on the reciprocal of the distance. The greater the distance the smaller
# the weight. That is, it is concentrated on close objects.
W = np.array([1 / np.sum(np.square(d_1_T)),
1 / np.sum(np.square(d_2_T)) ,
1 / np.sum(np.square(d_3_T)) ,
1 / np.sum(np.square(d_4_T)) ,
1 / np.sum(np.square(d_5_T)) ,
1 / np.sum(np.square(d_6_T)) ,
1 / np.sum(np.square(d_7_T)) ]) * 0.1
# --- scaling to keep distance to joint limits ---
# get the minimum distance of each joint to its limit
joint_positions = np.array([j.get_joint_position() for j in self._robot.arm.joints])
lower_joint_limits = np.array(setup["lower_joint_pos_limit"])
upper_joint_limits = np.array(setup["upper_joint_pos_limit"])
min_j_distances = [np.minimum(u-j, j-l) for l,u,j in zip(lower_joint_limits, upper_joint_limits,
joint_positions)]
# start scaling down error when joint limit is 15° away.
# Scaling is done linearly from 0 to 1 for 0° <= d <= 15°
rad_thres = 15*(np.pi/180)
W *= np.array([ np.minimum((1/rad_thres)*d, 1.0) for d in min_j_distances])
        # concatenate the derivatives into a vector and apply the weighting
dL = np.array([dq_1, dq_2, dq_3, dq_4, dq_5, dq_6, dq_7])*W
# calculate the loss
L = np.sqrt(np.dot(d_1_T, d_1_T))*W[0] \
+ np.sqrt(np.dot(d_2_T, d_2_T))*W[1] \
+ np.sqrt(np.dot(d_3_T, d_3_T))*W[2] \
+ np.sqrt(np.dot(d_4_T, d_4_T))*W[3] \
+ np.sqrt(np.dot(d_5_T, d_5_T))*W[4] \
+ np.sqrt(np.dot(d_6_T, d_6_T))*W[5] \
+ np.sqrt(np.dot(d_7_T, d_7_T))*W[6]
return dL, L
def enable_path_observations(self, value: bool) -> None:
if (self._action_mode.arm != ArmActionMode.DELTA_EE_POSE_PLAN_WORLD_FRAME and
self._action_mode.arm != ArmActionMode.ABS_EE_POSE_PLAN_WORLD_FRAME and
self._action_mode.arm != ArmActionMode.EE_POSE_PLAN_EE_FRAME):
raise RuntimeError('Only available in DELTA_EE_POSE_PLAN or '
'ABS_EE_POSE_PLAN action mode.')
self._enable_path_observations = value
def get_path_observations(self):
if (self._action_mode.arm != ArmActionMode.DELTA_EE_POSE_PLAN_WORLD_FRAME and
self._action_mode.arm != ArmActionMode.ABS_EE_POSE_PLAN_WORLD_FRAME and
self._action_mode.arm != ArmActionMode.EE_POSE_PLAN_EE_FRAME):
raise RuntimeError('Only available in DELTA_EE_POSE_PLAN or '
'ABS_EE_POSE_PLAN action mode.')
return self._path_observations
def get_demos(self, amount: int, live_demos: bool = False,
image_paths: bool = False,
callable_each_step: Callable[[Observation], None] = None,
max_attempts: int = _MAX_DEMO_ATTEMPTS,
) -> List[Demo]:
"""Negative means all demos"""
if not live_demos and (self._dataset_root is None
or len(self._dataset_root) == 0):
raise RuntimeError(
"Can't ask for a stored demo when no dataset root provided.")
if not live_demos:
if self._dataset_root is None or len(self._dataset_root) == 0:
raise RuntimeError(
"Can't ask for stored demo when no dataset root provided.")
demos = utils.get_stored_demos(
amount, image_paths, self._dataset_root, self._variation_number,
self._task.get_name(), self._obs_config)
else:
ctr_loop = self._robot.arm.joints[0].is_control_loop_enabled()
self._robot.arm.set_control_loop_enabled(True)
demos = self._get_live_demos(
amount, callable_each_step, max_attempts)
self._robot.arm.set_control_loop_enabled(ctr_loop)
return demos
def _get_live_demos(self, amount: int,
callable_each_step: Callable[
[Observation], None] = None,
max_attempts: int = _MAX_DEMO_ATTEMPTS) -> List[Demo]:
demos = []
for i in range(amount):
attempts = max_attempts
while attempts > 0:
random_seed = np.random.get_state()
self.reset()
logging.info('Collecting demo %d' % i)
try:
demo = self._scene.get_demo(
callable_each_step=callable_each_step)
demo.random_seed = random_seed
demos.append(demo)
break
except Exception as e:
attempts -= 1
logging.info('Bad demo. ' + str(e))
if attempts <= 0:
raise RuntimeError(
'Could not collect demos. Maybe a problem with the task?')
return demos
def reset_to_demo(self, demo: Demo) -> (List[str], Observation):
demo.restore_state()
return self.reset()
|
[
"numpy.flip",
"numpy.abs",
"numpy.allclose",
"numpy.random.get_state",
"numpy.linalg.pinv",
"numpy.minimum",
"pyquaternion.Quaternion",
"numpy.square",
"pyrep.objects.Dummy.create",
"numpy.array",
"numpy.append",
"numpy.dot",
"numpy.matmul",
"numpy.linalg.norm",
"numpy.shape",
"numpy.transpose",
"logging.info"
] |
[((1721, 1735), 'pyrep.objects.Dummy.create', 'Dummy.create', ([], {}), '()\n', (1733, 1735), False, 'from pyrep.objects import Dummy, Object\n'), ((8536, 8557), 'numpy.array', 'np.array', (['action[:-1]'], {}), '(action[:-1])\n', (8544, 8557), True, 'import numpy as np\n'), ((14997, 15012), 'numpy.transpose', 'np.transpose', (['J'], {}), '(J)\n', (15009, 15012), True, 'import numpy as np\n'), ((15025, 15035), 'numpy.flip', 'np.flip', (['J'], {}), '(J)\n', (15032, 15035), True, 'import numpy as np\n'), ((15110, 15127), 'numpy.linalg.pinv', 'np.linalg.pinv', (['J'], {}), '(J)\n', (15124, 15127), True, 'import numpy as np\n'), ((18089, 18110), 'numpy.append', 'np.append', (['p_obs', '[1]'], {}), '(p_obs, [1])\n', (18098, 18110), True, 'import numpy as np\n'), ((22729, 22769), 'numpy.array', 'np.array', (["setup['lower_joint_pos_limit']"], {}), "(setup['lower_joint_pos_limit'])\n", (22737, 22769), True, 'import numpy as np\n'), ((22799, 22839), 'numpy.array', 'np.array', (["setup['upper_joint_pos_limit']"], {}), "(setup['upper_joint_pos_limit'])\n", (22807, 22839), True, 'import numpy as np\n'), ((5012, 5028), 'numpy.shape', 'np.shape', (['action'], {}), '(action)\n', (5020, 5028), True, 'import numpy as np\n'), ((5620, 5634), 'numpy.abs', 'np.abs', (['action'], {}), '(action)\n', (5626, 5634), True, 'import numpy as np\n'), ((6368, 6422), 'numpy.allclose', 'np.allclose', (['cur_positions', 'joint_positions'], {'atol': '(0.01)'}), '(cur_positions, joint_positions, atol=0.01)\n', (6379, 6422), True, 'import numpy as np\n'), ((9006, 9025), 'numpy.array', 'np.array', (['([0.0] * 7)'], {}), '([0.0] * 7)\n', (9014, 9025), True, 'import numpy as np\n'), ((15202, 15222), 'numpy.array', 'np.array', (["setup['W']"], {}), "(setup['W'])\n", (15210, 15222), True, 'import numpy as np\n'), ((17229, 17274), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])\n', (17237, 17274), True, 'import numpy as np\n'), ((22867, 22891), 'numpy.minimum', 'np.minimum', (['(u - j)', '(j - l)'], {}), '(u - j, j - l)\n', (22877, 22891), True, 'import numpy as np\n'), ((23356, 23408), 'numpy.array', 'np.array', (['[dq_1, dq_2, dq_3, dq_4, dq_5, dq_6, dq_7]'], {}), '([dq_1, dq_2, dq_3, dq_4, dq_5, dq_6, dq_7])\n', (23364, 23408), True, 'import numpy as np\n'), ((5294, 5314), 'numpy.linalg.norm', 'np.linalg.norm', (['quat'], {}), '(quat)\n', (5308, 5314), True, 'import numpy as np\n'), ((6523, 6574), 'numpy.allclose', 'np.allclose', (['cur_positions', 'prev_values'], {'atol': '(0.001)'}), '(cur_positions, prev_values, atol=0.001)\n', (6534, 6574), True, 'import numpy as np\n'), ((16317, 16336), 'numpy.array', 'np.array', (['([0.0] * 7)'], {}), '([0.0] * 7)\n', (16325, 16336), True, 'import numpy as np\n'), ((17363, 17379), 'numpy.dot', 'np.dot', (['e', '(e * W)'], {}), '(e, e * W)\n', (17369, 17379), True, 'import numpy as np\n'), ((17954, 17976), 'numpy.array', 'np.array', (['[0, 0, 0.33]'], {}), '([0, 0, 0.33])\n', (17962, 17976), True, 'import numpy as np\n'), ((23216, 23250), 'numpy.minimum', 'np.minimum', (['(1 / rad_thres * d)', '(1.0)'], {}), '(1 / rad_thres * d, 1.0)\n', (23226, 23250), True, 'import numpy as np\n'), ((26437, 26458), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (26456, 26458), True, 'import numpy as np\n'), ((26504, 26542), 'logging.info', 'logging.info', (["('Collecting demo %d' % i)"], {}), "('Collecting demo %d' % i)\n", (26516, 26542), False, 'import logging\n'), ((15838, 15858), 'numpy.matmul', 'np.matmul', 
(['J_plus', 'J'], {}), '(J_plus, J)\n', (15847, 15858), True, 'import numpy as np\n'), ((16180, 16204), 'numpy.abs', 'np.abs', (['(e - self._last_e)'], {}), '(e - self._last_e)\n', (16186, 16204), True, 'import numpy as np\n'), ((23761, 23781), 'numpy.dot', 'np.dot', (['d_7_T', 'd_7_T'], {}), '(d_7_T, d_7_T)\n', (23767, 23781), True, 'import numpy as np\n'), ((23711, 23731), 'numpy.dot', 'np.dot', (['d_6_T', 'd_6_T'], {}), '(d_6_T, d_6_T)\n', (23717, 23731), True, 'import numpy as np\n'), ((5202, 5218), 'numpy.shape', 'np.shape', (['action'], {}), '(action)\n', (5210, 5218), True, 'import numpy as np\n'), ((23661, 23681), 'numpy.dot', 'np.dot', (['d_5_T', 'd_5_T'], {}), '(d_5_T, d_5_T)\n', (23667, 23681), True, 'import numpy as np\n'), ((22117, 22133), 'numpy.square', 'np.square', (['d_1_T'], {}), '(d_1_T)\n', (22126, 22133), True, 'import numpy as np\n'), ((22173, 22189), 'numpy.square', 'np.square', (['d_2_T'], {}), '(d_2_T)\n', (22182, 22189), True, 'import numpy as np\n'), ((22230, 22246), 'numpy.square', 'np.square', (['d_3_T'], {}), '(d_3_T)\n', (22239, 22246), True, 'import numpy as np\n'), ((22287, 22303), 'numpy.square', 'np.square', (['d_4_T'], {}), '(d_4_T)\n', (22296, 22303), True, 'import numpy as np\n'), ((22344, 22360), 'numpy.square', 'np.square', (['d_5_T'], {}), '(d_5_T)\n', (22353, 22360), True, 'import numpy as np\n'), ((22401, 22417), 'numpy.square', 'np.square', (['d_6_T'], {}), '(d_6_T)\n', (22410, 22417), True, 'import numpy as np\n'), ((22458, 22474), 'numpy.square', 'np.square', (['d_7_T'], {}), '(d_7_T)\n', (22467, 22474), True, 'import numpy as np\n'), ((23611, 23631), 'numpy.dot', 'np.dot', (['d_4_T', 'd_4_T'], {}), '(d_4_T, d_4_T)\n', (23617, 23631), True, 'import numpy as np\n'), ((23561, 23581), 'numpy.dot', 'np.dot', (['d_3_T', 'd_3_T'], {}), '(d_3_T, d_3_T)\n', (23567, 23581), True, 'import numpy as np\n'), ((23461, 23481), 'numpy.dot', 'np.dot', (['d_1_T', 'd_1_T'], {}), '(d_1_T, d_1_T)\n', (23467, 23481), True, 'import numpy as np\n'), ((23511, 23531), 'numpy.dot', 'np.dot', (['d_2_T', 'd_2_T'], {}), '(d_2_T, d_2_T)\n', (23517, 23531), True, 'import numpy as np\n'), ((11795, 11829), 'pyquaternion.Quaternion', 'Quaternion', (['a_qw', 'a_qx', 'a_qy', 'a_qz'], {}), '(a_qw, a_qx, a_qy, a_qz)\n', (11805, 11829), False, 'from pyquaternion import Quaternion\n'), ((11832, 11858), 'pyquaternion.Quaternion', 'Quaternion', (['qw', 'qx', 'qy', 'qz'], {}), '(qw, qx, qy, qz)\n', (11842, 11858), False, 'from pyquaternion import Quaternion\n'), ((12452, 12486), 'pyquaternion.Quaternion', 'Quaternion', (['a_qw', 'a_qx', 'a_qy', 'a_qz'], {}), '(a_qw, a_qx, a_qy, a_qz)\n', (12462, 12486), False, 'from pyquaternion import Quaternion\n'), ((12489, 12515), 'pyquaternion.Quaternion', 'Quaternion', (['qw', 'qx', 'qy', 'qz'], {}), '(qw, qx, qy, qz)\n', (12499, 12515), False, 'from pyquaternion import Quaternion\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 <NAME> <<EMAIL>>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import unittest
import numpy as np
from gensim.corpora.mmcorpus import MmCorpus
from gensim.models import rpmodel
from gensim import matutils
from gensim.test.utils import datapath, get_tmpfile
class TestRpModel(unittest.TestCase):
def setUp(self):
self.corpus = MmCorpus(datapath('testcorpus.mm'))
def test_transform(self):
# create the transformation model
        # HACK: set a fixed seed so that we always get the same random matrix (and can compare against expected results)
np.random.seed(13)
model = rpmodel.RpModel(self.corpus, num_topics=2)
# transform one document
doc = list(self.corpus)[0]
transformed = model[doc]
vec = matutils.sparse2full(transformed, 2) # convert to dense vector, for easier equality tests
expected = np.array([-0.70710677, 0.70710677])
self.assertTrue(np.allclose(vec, expected)) # transformed entries must be equal up to sign
def test_persistence(self):
fname = get_tmpfile('gensim_models.tst')
model = rpmodel.RpModel(self.corpus, num_topics=2)
model.save(fname)
model2 = rpmodel.RpModel.load(fname)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(np.allclose(model.projection, model2.projection))
tstvec = []
self.assertTrue(np.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def test_persistence_compressed(self):
fname = get_tmpfile('gensim_models.tst.gz')
model = rpmodel.RpModel(self.corpus, num_topics=2)
model.save(fname)
model2 = rpmodel.RpModel.load(fname, mmap=None)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(np.allclose(model.projection, model2.projection))
tstvec = []
self.assertTrue(np.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
|
[
"logging.basicConfig",
"gensim.models.rpmodel.RpModel.load",
"gensim.matutils.sparse2full",
"numpy.allclose",
"gensim.test.utils.get_tmpfile",
"gensim.models.rpmodel.RpModel",
"numpy.array",
"numpy.random.seed",
"gensim.test.utils.datapath",
"unittest.main"
] |
[((2218, 2314), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s : %(levelname)s : %(message)s"""', 'level': 'logging.DEBUG'}), "(format='%(asctime)s : %(levelname)s : %(message)s',\n level=logging.DEBUG)\n", (2237, 2314), False, 'import logging\n'), ((2315, 2330), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2328, 2330), False, 'import unittest\n'), ((779, 797), 'numpy.random.seed', 'np.random.seed', (['(13)'], {}), '(13)\n', (793, 797), True, 'import numpy as np\n'), ((814, 856), 'gensim.models.rpmodel.RpModel', 'rpmodel.RpModel', (['self.corpus'], {'num_topics': '(2)'}), '(self.corpus, num_topics=2)\n', (829, 856), False, 'from gensim.models import rpmodel\n'), ((973, 1009), 'gensim.matutils.sparse2full', 'matutils.sparse2full', (['transformed', '(2)'], {}), '(transformed, 2)\n', (993, 1009), False, 'from gensim import matutils\n'), ((1084, 1119), 'numpy.array', 'np.array', (['[-0.70710677, 0.70710677]'], {}), '([-0.70710677, 0.70710677])\n', (1092, 1119), True, 'import numpy as np\n'), ((1269, 1301), 'gensim.test.utils.get_tmpfile', 'get_tmpfile', (['"""gensim_models.tst"""'], {}), "('gensim_models.tst')\n", (1280, 1301), False, 'from gensim.test.utils import datapath, get_tmpfile\n'), ((1318, 1360), 'gensim.models.rpmodel.RpModel', 'rpmodel.RpModel', (['self.corpus'], {'num_topics': '(2)'}), '(self.corpus, num_topics=2)\n', (1333, 1360), False, 'from gensim.models import rpmodel\n'), ((1404, 1431), 'gensim.models.rpmodel.RpModel.load', 'rpmodel.RpModel.load', (['fname'], {}), '(fname)\n', (1424, 1431), False, 'from gensim.models import rpmodel\n'), ((1750, 1785), 'gensim.test.utils.get_tmpfile', 'get_tmpfile', (['"""gensim_models.tst.gz"""'], {}), "('gensim_models.tst.gz')\n", (1761, 1785), False, 'from gensim.test.utils import datapath, get_tmpfile\n'), ((1802, 1844), 'gensim.models.rpmodel.RpModel', 'rpmodel.RpModel', (['self.corpus'], {'num_topics': '(2)'}), '(self.corpus, num_topics=2)\n', (1817, 1844), False, 'from gensim.models import rpmodel\n'), ((1888, 1926), 'gensim.models.rpmodel.RpModel.load', 'rpmodel.RpModel.load', (['fname'], {'mmap': 'None'}), '(fname, mmap=None)\n', (1908, 1926), False, 'from gensim.models import rpmodel\n'), ((552, 577), 'gensim.test.utils.datapath', 'datapath', (['"""testcorpus.mm"""'], {}), "('testcorpus.mm')\n", (560, 577), False, 'from gensim.test.utils import datapath, get_tmpfile\n'), ((1144, 1170), 'numpy.allclose', 'np.allclose', (['vec', 'expected'], {}), '(vec, expected)\n', (1155, 1170), True, 'import numpy as np\n'), ((1518, 1566), 'numpy.allclose', 'np.allclose', (['model.projection', 'model2.projection'], {}), '(model.projection, model2.projection)\n', (1529, 1566), True, 'import numpy as np\n'), ((1612, 1654), 'numpy.allclose', 'np.allclose', (['model[tstvec]', 'model2[tstvec]'], {}), '(model[tstvec], model2[tstvec])\n', (1623, 1654), True, 'import numpy as np\n'), ((2013, 2061), 'numpy.allclose', 'np.allclose', (['model.projection', 'model2.projection'], {}), '(model.projection, model2.projection)\n', (2024, 2061), True, 'import numpy as np\n'), ((2107, 2149), 'numpy.allclose', 'np.allclose', (['model[tstvec]', 'model2[tstvec]'], {}), '(model[tstvec], model2[tstvec])\n', (2118, 2149), True, 'import numpy as np\n')]
|
import os
import h5py
import nibabel as nb
import numpy as np
import torch
import torch.utils.data as data
from torchvision import transforms
import utils.preprocessor as preprocessor
# transform_train = transforms.Compose([
# transforms.RandomCrop(200, padding=56),
# transforms.ToTensor(),
# ])
class ImdbData(data.Dataset):
def __init__(self, X, y, w, transforms=None):
self.X = X if len(X.shape) == 4 else X[:, np.newaxis, :, :]
self.y = y
self.w = w
self.transforms = transforms
def __getitem__(self, index):
img = torch.from_numpy(self.X[index])
label = torch.from_numpy(self.y[index])
weight = torch.from_numpy(self.w[index])
return img, label, weight
def __len__(self):
return len(self.y)
def get_imdb_dataset(data_params):
data_train = h5py.File(os.path.join(data_params['data_dir'], data_params['train_data_file']), 'r')
label_train = h5py.File(os.path.join(data_params['data_dir'], data_params['train_label_file']), 'r')
class_weight_train = h5py.File(os.path.join(data_params['data_dir'], data_params['train_class_weights_file']), 'r')
weight_train = h5py.File(os.path.join(data_params['data_dir'], data_params['train_weights_file']), 'r')
data_test = h5py.File(os.path.join(data_params['data_dir'], data_params['test_data_file']), 'r')
label_test = h5py.File(os.path.join(data_params['data_dir'], data_params['test_label_file']), 'r')
class_weight_test = h5py.File(os.path.join(data_params['data_dir'], data_params['test_class_weights_file']), 'r')
weight_test = h5py.File(os.path.join(data_params['data_dir'], data_params['test_weights_file']), 'r')
return (ImdbData(data_train['data'][()], label_train['label'][()], class_weight_train['class_weights'][()]),
ImdbData(data_test['data'][()], label_test['label'][()], class_weight_test['class_weights'][()]))
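# Note on the expected HDF5 layout (inferred from the keys used above): each file is
# assumed to hold a single dataset named 'data', 'label', 'class_weights' or 'weights'
# respectively, indexed per slice. The 'weights' files are opened here but their
# contents are not passed on to ImdbData.
# Illustrative use with a PyTorch DataLoader (a minimal sketch; batch size and the
# shuffle flag are arbitrary):
#
#   train_set, test_set = get_imdb_dataset(data_params)
#   train_loader = data.DataLoader(train_set, batch_size=4, shuffle=True)
#   for img, label, weight in train_loader:
#       ...  # feed the batch to the model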
def load_dataset(file_paths,
orientation,
remap_config,
return_weights=False,
reduce_slices=False,
remove_black=False):
print("Loading and preprocessing data...")
volume_list, labelmap_list, headers, class_weights_list, weights_list = [], [], [], [], []
for file_path in file_paths:
volume, labelmap, class_weights, weights, header = load_and_preprocess(file_path, orientation,
remap_config=remap_config,
reduce_slices=reduce_slices,
remove_black=remove_black,
return_weights=return_weights)
volume_list.append(volume)
labelmap_list.append(labelmap)
if return_weights:
class_weights_list.append(class_weights)
weights_list.append(weights)
headers.append(header)
print("#", end='', flush=True)
print("100%", flush=True)
if return_weights:
return volume_list, labelmap_list, class_weights_list, weights_list, headers
else:
return volume_list, labelmap_list, headers
def load_and_preprocess(file_path, orientation, remap_config, reduce_slices=False,
remove_black=False,
return_weights=False):
volume, labelmap, header = load_data(file_path, orientation)
volume, labelmap, class_weights, weights = preprocess(volume, labelmap, remap_config=remap_config,
reduce_slices=reduce_slices,
remove_black=remove_black,
return_weights=return_weights)
return volume, labelmap, class_weights, weights, header
def load_and_preprocess_eval(file_path, orientation, notlabel=True):
volume_nifty = nb.load(file_path[0])
header = volume_nifty.header
volume = volume_nifty.get_fdata()
if notlabel:
volume = (volume - np.min(volume)) / (np.max(volume) - np.min(volume))
else:
volume = np.round(volume)
if orientation == "COR":
volume = volume.transpose((2, 0, 1))
elif orientation == "AXI":
volume = volume.transpose((1, 2, 0))
return volume, header
def load_data(file_path, orientation):
volume_nifty, labelmap_nifty = nb.load(file_path[0]), nb.load(file_path[1])
volume, labelmap = volume_nifty.get_fdata(), labelmap_nifty.get_fdata()
volume = (volume - np.min(volume)) / (np.max(volume) - np.min(volume))
volume, labelmap = preprocessor.rotate_orientation(volume, labelmap, orientation)
return volume, labelmap, volume_nifty.header
def preprocess(volume, labelmap, remap_config, reduce_slices=False, remove_black=False, return_weights=False):
if reduce_slices:
volume, labelmap = preprocessor.reduce_slices(volume, labelmap)
if remap_config:
labelmap = preprocessor.remap_labels(labelmap, remap_config)
if remove_black:
volume, labelmap = preprocessor.remove_black(volume, labelmap)
if return_weights:
class_weights, weights = preprocessor.estimate_weights_mfb(labelmap)
return volume, labelmap, class_weights, weights
else:
return volume, labelmap, None, None
# def load_file_paths(data_dir, label_dir, volumes_txt_file=None):
# """
# This function returns the file paths combined as a list where each element is a 2 element tuple, 0th being data and 1st being label.
# It should be modified to suit the need of the project
# :param data_dir: Directory which contains the data files
# :param label_dir: Directory which contains the label files
# :param volumes_txt_file: (Optional) Path to the a csv file, when provided only these data points will be read
# :return: list of file paths as string
# """
#
# volume_exclude_list = ['IXI290', 'IXI423']
# if volumes_txt_file:
# with open(volumes_txt_file) as file_handle:
# volumes_to_use = file_handle.read().splitlines()
# else:
# volumes_to_use = [name for name in os.listdir(data_dir) if
# name.startswith('IXI') and name not in volume_exclude_list]
#
# file_paths = [
# [os.path.join(data_dir, vol, 'mri/orig.mgz'), os.path.join(label_dir, vol, 'mri/aseg.auto_noCCseg.mgz')]
# for
# vol in volumes_to_use]
# return file_paths
def load_file_paths(data_dir, label_dir, data_id, volumes_txt_file=None):
"""
    This function returns the file paths combined as a list where each element is a two-element list, the 0th entry being the data path and the 1st being the label path.
    It should be modified to suit the needs of the project.
    A short illustrative usage sketch follows this function.
    :param data_dir: Directory which contains the data files
    :param label_dir: Directory which contains the label files
    :param data_id: A flag indicating the dataset name (MALC, ADNI, CANDI, IBSR or BORIS) so the files are read correctly
    :param volumes_txt_file: (Optional) Path to a csv file; when provided, only these data points will be read
    :return: list of file paths as strings
"""
if volumes_txt_file:
with open(volumes_txt_file) as file_handle:
volumes_to_use = file_handle.read().splitlines()
else:
volumes_to_use = [name for name in os.listdir(data_dir)]
if data_id == "MALC":
file_paths = [
[os.path.join(data_dir, vol, 'mri/orig.mgz'), os.path.join(label_dir, vol + '_glm.mgz')]
for
vol in volumes_to_use]
elif data_id == "ADNI":
file_paths = [
[os.path.join(data_dir, vol, 'orig.mgz'), os.path.join(label_dir, vol, 'Lab_con.mgz')]
for
vol in volumes_to_use]
elif data_id == "CANDI":
file_paths = [
[os.path.join(data_dir, vol + '/' + vol + '_1.mgz'),
os.path.join(label_dir, vol + '/' + vol + '_1_seg.mgz')]
for
vol in volumes_to_use]
elif data_id == "IBSR":
file_paths = [
[os.path.join(data_dir, vol, 'mri/orig.mgz'), os.path.join(label_dir, vol + '_map.nii.gz')]
for
vol in volumes_to_use]
elif data_id == "BORIS": #BORIS
file_paths = [
[os.path.join(data_dir, vol), os.path.join(label_dir, vol.replace('.nii', '_seg.nii'))]
for
vol in volumes_to_use]
else:
raise ValueError("Invalid entry, valid options are MALC, ADNI, CANDI and IBSR")
return file_paths
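# Illustrative usage (a minimal sketch; the directory layout, volumes file and the
# remap_config value below are hypothetical and depend on how the data is organised):
#
#   file_paths = load_file_paths('/data/MALC/images', '/data/MALC/labels', 'MALC',
#                                volumes_txt_file='/data/MALC/train_volumes.txt')
#   volumes, labelmaps, headers = load_dataset(file_paths, orientation='COR',
#                                              remap_config='Neo', return_weights=False)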
def load_file_paths_eval(data_dir, volumes_txt_file, dir_struct):
"""
    This function returns the file paths combined as a list where each element is a single-element list holding the data path (no label paths are returned here).
    It should be modified to suit the needs of the project.
    :param data_dir: Directory which contains the data files
    :param volumes_txt_file: Path to a csv file; when provided, only these data points will be read
    :param dir_struct: Directory structure of the data: 'FS' (FreeSurfer style), 'Linear' or 'part_FS'
    :return: list of file paths as strings
"""
with open(volumes_txt_file) as file_handle:
volumes_to_use = file_handle.read().splitlines()
if dir_struct == "FS":
file_paths = [
[os.path.join(data_dir, vol, 'mri/orig.mgz')]
for
vol in volumes_to_use]
elif dir_struct == "Linear":
file_paths = [
[os.path.join(data_dir, vol)]
for
vol in volumes_to_use]
elif dir_struct == "part_FS":
file_paths = [
[os.path.join(data_dir, vol, 'orig.mgz')]
for
vol in volumes_to_use]
else:
raise ValueError("Invalid entry, valid options are FS and Linear")
return file_paths
|
[
"os.listdir",
"nibabel.load",
"os.path.join",
"torch.from_numpy",
"utils.preprocessor.estimate_weights_mfb",
"utils.preprocessor.remap_labels",
"numpy.max",
"utils.preprocessor.rotate_orientation",
"numpy.min",
"utils.preprocessor.reduce_slices",
"utils.preprocessor.remove_black",
"numpy.round"
] |
[((4062, 4083), 'nibabel.load', 'nb.load', (['file_path[0]'], {}), '(file_path[0])\n', (4069, 4083), True, 'import nibabel as nb\n'), ((4766, 4828), 'utils.preprocessor.rotate_orientation', 'preprocessor.rotate_orientation', (['volume', 'labelmap', 'orientation'], {}), '(volume, labelmap, orientation)\n', (4797, 4828), True, 'import utils.preprocessor as preprocessor\n'), ((582, 613), 'torch.from_numpy', 'torch.from_numpy', (['self.X[index]'], {}), '(self.X[index])\n', (598, 613), False, 'import torch\n'), ((630, 661), 'torch.from_numpy', 'torch.from_numpy', (['self.y[index]'], {}), '(self.y[index])\n', (646, 661), False, 'import torch\n'), ((679, 710), 'torch.from_numpy', 'torch.from_numpy', (['self.w[index]'], {}), '(self.w[index])\n', (695, 710), False, 'import torch\n'), ((860, 929), 'os.path.join', 'os.path.join', (["data_params['data_dir']", "data_params['train_data_file']"], {}), "(data_params['data_dir'], data_params['train_data_file'])\n", (872, 929), False, 'import os\n'), ((964, 1034), 'os.path.join', 'os.path.join', (["data_params['data_dir']", "data_params['train_label_file']"], {}), "(data_params['data_dir'], data_params['train_label_file'])\n", (976, 1034), False, 'import os\n'), ((1076, 1154), 'os.path.join', 'os.path.join', (["data_params['data_dir']", "data_params['train_class_weights_file']"], {}), "(data_params['data_dir'], data_params['train_class_weights_file'])\n", (1088, 1154), False, 'import os\n'), ((1190, 1262), 'os.path.join', 'os.path.join', (["data_params['data_dir']", "data_params['train_weights_file']"], {}), "(data_params['data_dir'], data_params['train_weights_file'])\n", (1202, 1262), False, 'import os\n'), ((1296, 1364), 'os.path.join', 'os.path.join', (["data_params['data_dir']", "data_params['test_data_file']"], {}), "(data_params['data_dir'], data_params['test_data_file'])\n", (1308, 1364), False, 'import os\n'), ((1398, 1467), 'os.path.join', 'os.path.join', (["data_params['data_dir']", "data_params['test_label_file']"], {}), "(data_params['data_dir'], data_params['test_label_file'])\n", (1410, 1467), False, 'import os\n'), ((1508, 1585), 'os.path.join', 'os.path.join', (["data_params['data_dir']", "data_params['test_class_weights_file']"], {}), "(data_params['data_dir'], data_params['test_class_weights_file'])\n", (1520, 1585), False, 'import os\n'), ((1620, 1691), 'os.path.join', 'os.path.join', (["data_params['data_dir']", "data_params['test_weights_file']"], {}), "(data_params['data_dir'], data_params['test_weights_file'])\n", (1632, 1691), False, 'import os\n'), ((4278, 4294), 'numpy.round', 'np.round', (['volume'], {}), '(volume)\n', (4286, 4294), True, 'import numpy as np\n'), ((4547, 4568), 'nibabel.load', 'nb.load', (['file_path[0]'], {}), '(file_path[0])\n', (4554, 4568), True, 'import nibabel as nb\n'), ((4570, 4591), 'nibabel.load', 'nb.load', (['file_path[1]'], {}), '(file_path[1])\n', (4577, 4591), True, 'import nibabel as nb\n'), ((5040, 5084), 'utils.preprocessor.reduce_slices', 'preprocessor.reduce_slices', (['volume', 'labelmap'], {}), '(volume, labelmap)\n', (5066, 5084), True, 'import utils.preprocessor as preprocessor\n'), ((5126, 5175), 'utils.preprocessor.remap_labels', 'preprocessor.remap_labels', (['labelmap', 'remap_config'], {}), '(labelmap, remap_config)\n', (5151, 5175), True, 'import utils.preprocessor as preprocessor\n'), ((5225, 5268), 'utils.preprocessor.remove_black', 'preprocessor.remove_black', (['volume', 'labelmap'], {}), '(volume, labelmap)\n', (5250, 5268), True, 'import utils.preprocessor as preprocessor\n'), 
((5326, 5369), 'utils.preprocessor.estimate_weights_mfb', 'preprocessor.estimate_weights_mfb', (['labelmap'], {}), '(labelmap)\n', (5359, 5369), True, 'import utils.preprocessor as preprocessor\n'), ((4691, 4705), 'numpy.min', 'np.min', (['volume'], {}), '(volume)\n', (4697, 4705), True, 'import numpy as np\n'), ((4710, 4724), 'numpy.max', 'np.max', (['volume'], {}), '(volume)\n', (4716, 4724), True, 'import numpy as np\n'), ((4727, 4741), 'numpy.min', 'np.min', (['volume'], {}), '(volume)\n', (4733, 4741), True, 'import numpy as np\n'), ((4199, 4213), 'numpy.min', 'np.min', (['volume'], {}), '(volume)\n', (4205, 4213), True, 'import numpy as np\n'), ((4218, 4232), 'numpy.max', 'np.max', (['volume'], {}), '(volume)\n', (4224, 4232), True, 'import numpy as np\n'), ((4235, 4249), 'numpy.min', 'np.min', (['volume'], {}), '(volume)\n', (4241, 4249), True, 'import numpy as np\n'), ((7469, 7489), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (7479, 7489), False, 'import os\n'), ((7554, 7597), 'os.path.join', 'os.path.join', (['data_dir', 'vol', '"""mri/orig.mgz"""'], {}), "(data_dir, vol, 'mri/orig.mgz')\n", (7566, 7597), False, 'import os\n'), ((7599, 7640), 'os.path.join', 'os.path.join', (['label_dir', "(vol + '_glm.mgz')"], {}), "(label_dir, vol + '_glm.mgz')\n", (7611, 7640), False, 'import os\n'), ((9395, 9438), 'os.path.join', 'os.path.join', (['data_dir', 'vol', '"""mri/orig.mgz"""'], {}), "(data_dir, vol, 'mri/orig.mgz')\n", (9407, 9438), False, 'import os\n'), ((7757, 7796), 'os.path.join', 'os.path.join', (['data_dir', 'vol', '"""orig.mgz"""'], {}), "(data_dir, vol, 'orig.mgz')\n", (7769, 7796), False, 'import os\n'), ((7798, 7841), 'os.path.join', 'os.path.join', (['label_dir', 'vol', '"""Lab_con.mgz"""'], {}), "(label_dir, vol, 'Lab_con.mgz')\n", (7810, 7841), False, 'import os\n'), ((9560, 9587), 'os.path.join', 'os.path.join', (['data_dir', 'vol'], {}), '(data_dir, vol)\n', (9572, 9587), False, 'import os\n'), ((7959, 8009), 'os.path.join', 'os.path.join', (['data_dir', "(vol + '/' + vol + '_1.mgz')"], {}), "(data_dir, vol + '/' + vol + '_1.mgz')\n", (7971, 8009), False, 'import os\n'), ((8024, 8079), 'os.path.join', 'os.path.join', (['label_dir', "(vol + '/' + vol + '_1_seg.mgz')"], {}), "(label_dir, vol + '/' + vol + '_1_seg.mgz')\n", (8036, 8079), False, 'import os\n'), ((9710, 9749), 'os.path.join', 'os.path.join', (['data_dir', 'vol', '"""orig.mgz"""'], {}), "(data_dir, vol, 'orig.mgz')\n", (9722, 9749), False, 'import os\n'), ((8196, 8239), 'os.path.join', 'os.path.join', (['data_dir', 'vol', '"""mri/orig.mgz"""'], {}), "(data_dir, vol, 'mri/orig.mgz')\n", (8208, 8239), False, 'import os\n'), ((8241, 8285), 'os.path.join', 'os.path.join', (['label_dir', "(vol + '_map.nii.gz')"], {}), "(label_dir, vol + '_map.nii.gz')\n", (8253, 8285), False, 'import os\n'), ((8410, 8437), 'os.path.join', 'os.path.join', (['data_dir', 'vol'], {}), '(data_dir, vol)\n', (8422, 8437), False, 'import os\n')]
|
import copy
import numpy as np
import open3d as o3d
from tqdm import tqdm
from scipy import stats
import utils_o3d as utils
def remove_ground_plane(pcd, z_thresh=-2.7):
cropped = copy.deepcopy(pcd)
cropped_points = np.array(cropped.points)
cropped_points = cropped_points[cropped_points[:, -1] > z_thresh]
pcd_final = o3d.geometry.PointCloud()
pcd_final.points = o3d.utility.Vector3dVector(cropped_points)
return pcd_final
def remove_y_plane(pcd, y_thresh=5):
cropped = copy.deepcopy(pcd)
cropped_points = np.array(cropped.points)
cropped_points = cropped_points[cropped_points[:, 0] < y_thresh]
cropped_points[:, -1] = -cropped_points[:, -1]
pcd_final = o3d.geometry.PointCloud()
pcd_final.points = o3d.utility.Vector3dVector(cropped_points)
return pcd_final
def compute_features(pcd, voxel_size, normals_nn=100, features_nn=120, downsample=True):
normals_radius = voxel_size * 2
features_radius = voxel_size * 4
# Downsample the point cloud using Voxel grids
if downsample:
print(':: Input size:', np.array(pcd.points).shape)
pcd_down = utils.downsample_point_cloud(pcd, voxel_size)
print(':: Downsample with a voxel size %.3f' % voxel_size)
print(':: Downsample size', np.array(pcd_down.points).shape)
else: pcd_down = copy.deepcopy(pcd)
# Estimate normals
print(':: Estimate normal with search radius %.3f' % normals_radius)
pcd_down.estimate_normals(
o3d.geometry.KDTreeSearchParamHybrid(radius=normals_radius, max_nn=normals_nn))
# Compute FPFH features
print(':: Compute FPFH feature with search radius %.3f' % features_radius)
features = o3d.registration.compute_fpfh_feature(pcd_down,
o3d.geometry.KDTreeSearchParamHybrid(radius=features_radius, max_nn=features_nn))
return pcd_down, features
def match_features(pcd0, pcd1, feature0, feature1, thresh=None, display=False):
pcd0, pcd1 = copy.deepcopy(pcd0), copy.deepcopy(pcd1)
print(':: Input size 0:', np.array(pcd0.points).shape)
print(':: Input size 1:', np.array(pcd1.points).shape)
print(':: Features size 0:', np.array(feature0.data).shape)
print(':: Features size 1:', np.array(feature1.data).shape)
utils.paint_uniform_color(pcd0, color=[1, 0.706, 0])
utils.paint_uniform_color(pcd1, color=[0, 0.651, 0.929])
scores, indices = [], []
fpfh_tree = o3d.geometry.KDTreeFlann(feature1)
for i in tqdm(range(len(pcd0.points)), desc=':: Feature Matching'):
[_, idx, _] = fpfh_tree.search_knn_vector_xd(feature0.data[:, i], 1)
scores.append(np.linalg.norm(pcd0.points[i] - pcd1.points[idx[0]]))
indices.append([i, idx[0]])
scores, indices = np.array(scores), np.array(indices)
median = np.median(scores)
if thresh is None: thresh = median
inliers_idx = np.where(scores <= thresh)[0]
pcd0_idx = indices[inliers_idx, 0]
pcd1_idx = indices[inliers_idx, 1]
print(':: Score stats: Min=%0.3f, Max=%0.3f, Median=%0.3f, N<Thresh=%d' % (
np.min(scores), np.max(scores), median, len(inliers_idx)))
if display:
for i, j in zip(pcd0_idx, pcd1_idx):
pcd0.colors[i] = [1, 0, 0]
pcd1.colors[j] = [1, 0, 0]
utils.display([pcd0, pcd1])
return pcd0_idx, pcd1_idx
def estimate_scale(pcd0, pcd1, pcd0_idx, pcd1_idx, top_percent=1.0,
ransac_iters=5000, sample_size=50):
points0 = np.asarray(pcd0.points)[pcd0_idx]
points1 = np.asarray(pcd1.points)[pcd1_idx]
mean0 = np.mean(points0, axis=0)
mean1 = np.mean(points1, axis=0)
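    # RANSAC-style scale estimation (descriptive note): each iteration draws a random
    # subset of the matched points and computes
    #   scale = sqrt( mean_i ||points1_i - mean1||^2 / mean_i ||points0_i - mean0||^2 ),
    # i.e. the ratio of RMS spreads about the full-set centroids; the mode over all
    # iterations is returned as the final estimate.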
top_count = int(top_percent * len(pcd0_idx))
assert top_count > sample_size, 'top_count <= sample_size'
scales = []
for i in tqdm(range(ransac_iters), desc=':: Scale Estimation RANSAC'):
args = np.random.choice(top_count, sample_size, replace=False)
points0_r = points0[args]
points1_r = points1[args]
score0 = np.sum((points0_r - mean0) ** 2, axis=1)
score1 = np.sum((points1_r - mean1) ** 2, axis=1)
scale = np.sqrt(np.mean(score1) / np.mean(score0))
scales.append(scale)
best_scale = stats.mode(scales)[0][0]
print(':: Estimated scale:', best_scale)
return best_scale
def global_registration(source_down, target_down, source_fpfh, target_fpfh, voxel_size,
distance_threshold=1.0, num_iters=4000000, num_val_iters=500):
print(':: Distance threshold %.3f' % distance_threshold)
result = o3d.registration.registration_ransac_based_on_feature_matching(
source_down, target_down, source_fpfh, target_fpfh, distance_threshold,
o3d.registration.TransformationEstimationPointToPoint(False), 4, [
o3d.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9),
o3d.registration.CorrespondenceCheckerBasedOnDistance(
distance_threshold)
], o3d.registration.RANSACConvergenceCriteria(num_iters, num_val_iters))
return result
def fast_global_registration(source_down, target_down,
source_fpfh, target_fpfh, voxel_size):
distance_threshold = 1.0
result = o3d.registration.registration_fast_based_on_feature_matching(
source_down, target_down, source_fpfh, target_fpfh,
o3d.registration.FastGlobalRegistrationOption(
maximum_correspondence_distance=distance_threshold))
return result
def refine_registration(source, target, source_fpfh, target_fpfh, initial_result, voxel_size):
distance_threshold = 0.1
print(':: Distance threshold %.3f' % distance_threshold)
result = o3d.registration.registration_icp(
source, target, distance_threshold, initial_result.transformation,
o3d.registration.TransformationEstimationPointToPlane())
return result
def registration(pcd0, pcd1, feature1, feature2, voxel_size, method='global'):
if method == 'global':
print('\nRANSAC global registration on scaled point clouds...')
initial_result = global_registration(pcd0, pcd1, feature1, feature2, voxel_size)
elif method == 'fast_global':
print('\nFast global registration on scaled point clouds...')
initial_result = fast_global_registration(pcd0, pcd1, feature1, feature2, voxel_size)
else:
print(':: Registration method not supported')
return
print(':: Initial registration results:')
print(initial_result)
print('\nDisplaying initial result...')
draw_registration_result(pcd0, pcd1, initial_result.transformation)
print('\nRefine registration...')
result = refine_registration(pcd0, pcd1, feature1, feature2, initial_result, voxel_size)
print(':: Final registration results:')
print(result)
return result
def draw_registration_result(source, target, transformation):
source_temp = copy.deepcopy(source)
target_temp = copy.deepcopy(target)
source_temp.paint_uniform_color([1, 0.706, 0])
target_temp.paint_uniform_color([0, 0.651, 0.929])
source_temp.transform(transformation)
o3d.visualization.draw_geometries([source_temp, target_temp])
def run():
voxel_size = 0.2
dso_scale = 0.03
pcd_lidar = o3d.io.read_point_cloud('../maps/scans/scan_050.pcd')
pcd_lidar = remove_ground_plane(pcd_lidar)
pcd_dso = o3d.io.read_point_cloud('../maps/dso_map_cleaned.pcd')
pcd_dso = remove_ground_plane(pcd_dso, z_thresh=4.5)
pcd_dso = remove_y_plane(pcd_dso, y_thresh=0.2)
# pcd_dso = utils.scale_point_cloud(pcd_dso, dso_scale).rotate([0.5, 0.5, 0.5]).translate([10, 20, 30])
# Ground plane removal results
# utils.display(pcds=[pcd_lidar, pcd_dso], colors=[[1, 0.706, 0], [0, 0.651, 0.929]])
# utils.display(pcds=[pcd_dso], colors=[[0, 0.651, 0.929]])
# return
print('\nComputing FPFH features for lidar point cloud...')
pcd_lidar_down, features_lidar = compute_features(pcd_lidar, voxel_size=voxel_size)
print('\nComputing FPFH features for DSO point cloud...')
pcd_dso_down, features_dso = compute_features(pcd_dso, voxel_size=voxel_size * (dso_scale if dso_scale < 1 else 1))
print('\nMatching FPFH features...')
pcd_lidar_idx, pcd_dso_idx = match_features(pcd_lidar_down, pcd_dso_down,
features_lidar, features_dso, thresh=None)
print('\nEstimating scale using matches...')
scale = estimate_scale(pcd_lidar_down, pcd_dso_down, pcd_lidar_idx, pcd_dso_idx)
    scale = 0.06  # NOTE: hard-coded override of the estimated scale above (hand-tuned value)
print('\nCorrecting scale...')
pcd_dso_scaled = utils.scale_point_cloud(pcd_dso, 1.0 / scale)
utils.display(pcds=[pcd_lidar, pcd_dso_scaled], colors=[[1, 0.706, 0], [0, 0.651, 0.929]])
# return
# Registration
pcd_dso_scaled_down, features_dso_scaled = compute_features(
pcd_dso_scaled, voxel_size=voxel_size)
result = registration(pcd_lidar_down, pcd_dso_scaled_down, features_lidar,
features_dso_scaled, voxel_size, method='global')
print('\nDisplaying result...')
draw_registration_result(pcd_lidar, pcd_dso_scaled, result.transformation)
if __name__ == '__main__':
run()
|
[
"open3d.registration.TransformationEstimationPointToPlane",
"numpy.array",
"copy.deepcopy",
"numpy.linalg.norm",
"numpy.mean",
"numpy.where",
"numpy.asarray",
"numpy.max",
"open3d.registration.RANSACConvergenceCriteria",
"numpy.min",
"open3d.geometry.KDTreeFlann",
"open3d.registration.CorrespondenceCheckerBasedOnEdgeLength",
"utils_o3d.downsample_point_cloud",
"numpy.random.choice",
"open3d.geometry.KDTreeSearchParamHybrid",
"open3d.visualization.draw_geometries",
"open3d.io.read_point_cloud",
"open3d.registration.TransformationEstimationPointToPoint",
"utils_o3d.display",
"open3d.utility.Vector3dVector",
"numpy.median",
"open3d.registration.FastGlobalRegistrationOption",
"scipy.stats.mode",
"utils_o3d.scale_point_cloud",
"numpy.sum",
"open3d.geometry.PointCloud",
"utils_o3d.paint_uniform_color",
"open3d.registration.CorrespondenceCheckerBasedOnDistance"
] |
[((187, 205), 'copy.deepcopy', 'copy.deepcopy', (['pcd'], {}), '(pcd)\n', (200, 205), False, 'import copy\n'), ((227, 251), 'numpy.array', 'np.array', (['cropped.points'], {}), '(cropped.points)\n', (235, 251), True, 'import numpy as np\n'), ((338, 363), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (361, 363), True, 'import open3d as o3d\n'), ((387, 429), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['cropped_points'], {}), '(cropped_points)\n', (413, 429), True, 'import open3d as o3d\n'), ((504, 522), 'copy.deepcopy', 'copy.deepcopy', (['pcd'], {}), '(pcd)\n', (517, 522), False, 'import copy\n'), ((544, 568), 'numpy.array', 'np.array', (['cropped.points'], {}), '(cropped.points)\n', (552, 568), True, 'import numpy as np\n'), ((705, 730), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (728, 730), True, 'import open3d as o3d\n'), ((754, 796), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['cropped_points'], {}), '(cropped_points)\n', (780, 796), True, 'import open3d as o3d\n'), ((2258, 2310), 'utils_o3d.paint_uniform_color', 'utils.paint_uniform_color', (['pcd0'], {'color': '[1, 0.706, 0]'}), '(pcd0, color=[1, 0.706, 0])\n', (2283, 2310), True, 'import utils_o3d as utils\n'), ((2315, 2371), 'utils_o3d.paint_uniform_color', 'utils.paint_uniform_color', (['pcd1'], {'color': '[0, 0.651, 0.929]'}), '(pcd1, color=[0, 0.651, 0.929])\n', (2340, 2371), True, 'import utils_o3d as utils\n'), ((2418, 2452), 'open3d.geometry.KDTreeFlann', 'o3d.geometry.KDTreeFlann', (['feature1'], {}), '(feature1)\n', (2442, 2452), True, 'import open3d as o3d\n'), ((2786, 2803), 'numpy.median', 'np.median', (['scores'], {}), '(scores)\n', (2795, 2803), True, 'import numpy as np\n'), ((3542, 3566), 'numpy.mean', 'np.mean', (['points0'], {'axis': '(0)'}), '(points0, axis=0)\n', (3549, 3566), True, 'import numpy as np\n'), ((3579, 3603), 'numpy.mean', 'np.mean', (['points1'], {'axis': '(0)'}), '(points1, axis=0)\n', (3586, 3603), True, 'import numpy as np\n'), ((6823, 6844), 'copy.deepcopy', 'copy.deepcopy', (['source'], {}), '(source)\n', (6836, 6844), False, 'import copy\n'), ((6863, 6884), 'copy.deepcopy', 'copy.deepcopy', (['target'], {}), '(target)\n', (6876, 6884), False, 'import copy\n'), ((7037, 7098), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['[source_temp, target_temp]'], {}), '([source_temp, target_temp])\n', (7070, 7098), True, 'import open3d as o3d\n'), ((7171, 7224), 'open3d.io.read_point_cloud', 'o3d.io.read_point_cloud', (['"""../maps/scans/scan_050.pcd"""'], {}), "('../maps/scans/scan_050.pcd')\n", (7194, 7224), True, 'import open3d as o3d\n'), ((7287, 7341), 'open3d.io.read_point_cloud', 'o3d.io.read_point_cloud', (['"""../maps/dso_map_cleaned.pcd"""'], {}), "('../maps/dso_map_cleaned.pcd')\n", (7310, 7341), True, 'import open3d as o3d\n'), ((8478, 8523), 'utils_o3d.scale_point_cloud', 'utils.scale_point_cloud', (['pcd_dso', '(1.0 / scale)'], {}), '(pcd_dso, 1.0 / scale)\n', (8501, 8523), True, 'import utils_o3d as utils\n'), ((8528, 8623), 'utils_o3d.display', 'utils.display', ([], {'pcds': '[pcd_lidar, pcd_dso_scaled]', 'colors': '[[1, 0.706, 0], [0, 0.651, 0.929]]'}), '(pcds=[pcd_lidar, pcd_dso_scaled], colors=[[1, 0.706, 0], [0, \n 0.651, 0.929]])\n', (8541, 8623), True, 'import utils_o3d as utils\n'), ((1134, 1179), 'utils_o3d.downsample_point_cloud', 'utils.downsample_point_cloud', (['pcd', 'voxel_size'], {}), '(pcd, voxel_size)\n', (1162, 1179), True, 'import 
utils_o3d as utils\n'), ((1337, 1355), 'copy.deepcopy', 'copy.deepcopy', (['pcd'], {}), '(pcd)\n', (1350, 1355), False, 'import copy\n'), ((1494, 1572), 'open3d.geometry.KDTreeSearchParamHybrid', 'o3d.geometry.KDTreeSearchParamHybrid', ([], {'radius': 'normals_radius', 'max_nn': 'normals_nn'}), '(radius=normals_radius, max_nn=normals_nn)\n', (1530, 1572), True, 'import open3d as o3d\n'), ((1754, 1839), 'open3d.geometry.KDTreeSearchParamHybrid', 'o3d.geometry.KDTreeSearchParamHybrid', ([], {'radius': 'features_radius', 'max_nn': 'features_nn'}), '(radius=features_radius, max_nn=features_nn\n )\n', (1790, 1839), True, 'import open3d as o3d\n'), ((1966, 1985), 'copy.deepcopy', 'copy.deepcopy', (['pcd0'], {}), '(pcd0)\n', (1979, 1985), False, 'import copy\n'), ((1987, 2006), 'copy.deepcopy', 'copy.deepcopy', (['pcd1'], {}), '(pcd1)\n', (2000, 2006), False, 'import copy\n'), ((2737, 2753), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (2745, 2753), True, 'import numpy as np\n'), ((2755, 2772), 'numpy.array', 'np.array', (['indices'], {}), '(indices)\n', (2763, 2772), True, 'import numpy as np\n'), ((2861, 2887), 'numpy.where', 'np.where', (['(scores <= thresh)'], {}), '(scores <= thresh)\n', (2869, 2887), True, 'import numpy as np\n'), ((3265, 3292), 'utils_o3d.display', 'utils.display', (['[pcd0, pcd1]'], {}), '([pcd0, pcd1])\n', (3278, 3292), True, 'import utils_o3d as utils\n'), ((3448, 3471), 'numpy.asarray', 'np.asarray', (['pcd0.points'], {}), '(pcd0.points)\n', (3458, 3471), True, 'import numpy as np\n'), ((3496, 3519), 'numpy.asarray', 'np.asarray', (['pcd1.points'], {}), '(pcd1.points)\n', (3506, 3519), True, 'import numpy as np\n'), ((3823, 3878), 'numpy.random.choice', 'np.random.choice', (['top_count', 'sample_size'], {'replace': '(False)'}), '(top_count, sample_size, replace=False)\n', (3839, 3878), True, 'import numpy as np\n'), ((3965, 4005), 'numpy.sum', 'np.sum', (['((points0_r - mean0) ** 2)'], {'axis': '(1)'}), '((points0_r - mean0) ** 2, axis=1)\n', (3971, 4005), True, 'import numpy as np\n'), ((4023, 4063), 'numpy.sum', 'np.sum', (['((points1_r - mean1) ** 2)'], {'axis': '(1)'}), '((points1_r - mean1) ** 2, axis=1)\n', (4029, 4063), True, 'import numpy as np\n'), ((4647, 4707), 'open3d.registration.TransformationEstimationPointToPoint', 'o3d.registration.TransformationEstimationPointToPoint', (['(False)'], {}), '(False)\n', (4700, 4707), True, 'import open3d as o3d\n'), ((4902, 4970), 'open3d.registration.RANSACConvergenceCriteria', 'o3d.registration.RANSACConvergenceCriteria', (['num_iters', 'num_val_iters'], {}), '(num_iters, num_val_iters)\n', (4944, 4970), True, 'import open3d as o3d\n'), ((5263, 5365), 'open3d.registration.FastGlobalRegistrationOption', 'o3d.registration.FastGlobalRegistrationOption', ([], {'maximum_correspondence_distance': 'distance_threshold'}), '(maximum_correspondence_distance\n =distance_threshold)\n', (5308, 5365), True, 'import open3d as o3d\n'), ((5713, 5768), 'open3d.registration.TransformationEstimationPointToPlane', 'o3d.registration.TransformationEstimationPointToPlane', ([], {}), '()\n', (5766, 5768), True, 'import open3d as o3d\n'), ((2037, 2058), 'numpy.array', 'np.array', (['pcd0.points'], {}), '(pcd0.points)\n', (2045, 2058), True, 'import numpy as np\n'), ((2096, 2117), 'numpy.array', 'np.array', (['pcd1.points'], {}), '(pcd1.points)\n', (2104, 2117), True, 'import numpy as np\n'), ((2158, 2181), 'numpy.array', 'np.array', (['feature0.data'], {}), '(feature0.data)\n', (2166, 2181), True, 'import numpy as np\n'), ((2222, 
2245), 'numpy.array', 'np.array', (['feature1.data'], {}), '(feature1.data)\n', (2230, 2245), True, 'import numpy as np\n'), ((2624, 2676), 'numpy.linalg.norm', 'np.linalg.norm', (['(pcd0.points[i] - pcd1.points[idx[0]])'], {}), '(pcd0.points[i] - pcd1.points[idx[0]])\n', (2638, 2676), True, 'import numpy as np\n'), ((4170, 4188), 'scipy.stats.mode', 'stats.mode', (['scales'], {}), '(scales)\n', (4180, 4188), False, 'from scipy import stats\n'), ((4726, 4786), 'open3d.registration.CorrespondenceCheckerBasedOnEdgeLength', 'o3d.registration.CorrespondenceCheckerBasedOnEdgeLength', (['(0.9)'], {}), '(0.9)\n', (4781, 4786), True, 'import open3d as o3d\n'), ((4800, 4873), 'open3d.registration.CorrespondenceCheckerBasedOnDistance', 'o3d.registration.CorrespondenceCheckerBasedOnDistance', (['distance_threshold'], {}), '(distance_threshold)\n', (4853, 4873), True, 'import open3d as o3d\n'), ((1087, 1107), 'numpy.array', 'np.array', (['pcd.points'], {}), '(pcd.points)\n', (1095, 1107), True, 'import numpy as np\n'), ((1283, 1308), 'numpy.array', 'np.array', (['pcd_down.points'], {}), '(pcd_down.points)\n', (1291, 1308), True, 'import numpy as np\n'), ((3058, 3072), 'numpy.min', 'np.min', (['scores'], {}), '(scores)\n', (3064, 3072), True, 'import numpy as np\n'), ((3074, 3088), 'numpy.max', 'np.max', (['scores'], {}), '(scores)\n', (3080, 3088), True, 'import numpy as np\n'), ((4088, 4103), 'numpy.mean', 'np.mean', (['score1'], {}), '(score1)\n', (4095, 4103), True, 'import numpy as np\n'), ((4106, 4121), 'numpy.mean', 'np.mean', (['score0'], {}), '(score0)\n', (4113, 4121), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
import os
import numpy as np
import pandas as pd
os.getcwd()
# Request the filename from the user
# The current version of this script works only with whitespace-delimited (TSV-style) files
mainFilename = input('Input your file name (diabetes.tab.txt or housing.data.txt): ')
print()
# To create a proper dataframe, first read the file into a numpy array,
# then convert it to a pandas DataFrame
filenameData = np.genfromtxt(mainFilename, dtype='str')
filenameData = pd.DataFrame(filenameData)
# Obtain the first row to check whether the header is string (column names) or numeric (data)
headers = filenameData.iloc[0]
try:
    pd.to_numeric(headers)
except (ValueError, TypeError):
    # The first row is not numeric, so treat it as the header row and drop it from the data
    filenameData = pd.DataFrame(filenameData.values[1:], columns=headers)
# Convert strings to numbers (float vs. integer is inferred automatically)
filenameData = filenameData.apply(pd.to_numeric)
# Obtains the mean and standard deviation of the columns
listMean = filenameData.mean()
listStd = filenameData.std()
print(filenameData)
# Prints out the results
print('Mean for each column:')
for idx in filenameData.columns:
print(idx,':',listMean[idx])
print()
print('Standard deviation for each column:')
for idx in filenameData.columns:
print(idx,':',listStd[idx])
|
[
"pandas.DataFrame",
"numpy.genfromtxt",
"pandas.to_numeric",
"os.getcwd"
] |
[((73, 84), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (82, 84), False, 'import os\n'), ((375, 415), 'numpy.genfromtxt', 'np.genfromtxt', (['mainFilename'], {'dtype': '"""str"""'}), "(mainFilename, dtype='str')\n", (388, 415), True, 'import numpy as np\n'), ((432, 458), 'pandas.DataFrame', 'pd.DataFrame', (['filenameData'], {}), '(filenameData)\n', (444, 458), True, 'import pandas as pd\n'), ((561, 583), 'pandas.to_numeric', 'pd.to_numeric', (['headers'], {}), '(headers)\n', (574, 583), True, 'import pandas as pd\n'), ((611, 665), 'pandas.DataFrame', 'pd.DataFrame', (['filenameData.values[1:]'], {'columns': 'headers'}), '(filenameData.values[1:], columns=headers)\n', (623, 665), True, 'import pandas as pd\n')]
|
import numpy as np
from sklearn.utils.multiclass import type_of_target
from mindware.base_estimator import BaseEstimator
from mindware.components.utils.constants import type_dict, MULTILABEL_CLS, IMG_CLS, TEXT_CLS, OBJECT_DET
from mindware.components.feature_engineering.transformation_graph import DataNode
class Classifier(BaseEstimator):
"""This class implements the classification task. """
def initialize(self, data: DataNode, **kwargs):
if self.metric is None:
self.metric = 'acc'
# Check the task type: {binary, multiclass}
task_type = type_of_target(data.data[1])
if task_type in type_dict:
task_type = type_dict[task_type]
else:
raise ValueError("Invalid Task Type: %s!" % task_type)
self.task_type = task_type
super().initialize(data=data, **kwargs)
def fit(self, data: DataNode, **kwargs):
"""
Fit the classifier to given training data.
:param data: instance of DataNode
:return: self
"""
if self._ml_engine is None:
self.initialize(data=data, **kwargs)
super().fit(data, **kwargs)
return self
def predict(self, X, batch_size=None, n_jobs=1):
"""
Predict classes for X.
:param X: Datanode
:param batch_size: int
:param n_jobs: int
:return: y : array of shape = [n_samples]
The predicted classes.
"""
if not isinstance(X, DataNode):
raise ValueError("X is supposed to be a Data Node, but get %s" % type(X))
return super().predict(X, batch_size=batch_size, n_jobs=n_jobs)
def refit(self):
return super().refit()
def predict_proba(self, X, batch_size=None, n_jobs=1):
"""
Predict probabilities of classes for all samples X.
:param X: Datanode
:param batch_size: int
:param n_jobs: int
:return: y : array of shape = [n_samples, n_classes]
The predicted class probabilities.
"""
if not isinstance(X, DataNode):
raise ValueError("X is supposed to be a Data Node, but get %s" % type(X))
pred_proba = super().predict_proba(X, batch_size=batch_size, n_jobs=n_jobs)
if self.task_type != MULTILABEL_CLS:
assert (
np.allclose(
np.sum(pred_proba, axis=1),
np.ones_like(pred_proba[:, 0]))
), "Prediction probability does not sum up to 1!"
# Check that all probability values lie between 0 and 1.
assert (
(pred_proba >= 0).all() and (pred_proba <= 1).all()
), "Found prediction probability value outside of [0, 1]!"
return pred_proba
def get_tree_importance(self, data: DataNode):
from lightgbm import LGBMClassifier
import pandas as pd
X, y = self.data_transformer(data).data
lgb = LGBMClassifier(random_state=1)
lgb.fit(X, y)
_importance = lgb.feature_importances_
h = {}
h['feature_id'] = np.array(range(len(_importance)))
h['feature_importance'] = _importance
return pd.DataFrame(h)
def get_linear_importance(self, data: DataNode):
from sklearn.linear_model import LogisticRegression
import pandas as pd
X, y = self.data_transformer(data).data
clf = LogisticRegression(random_state=1)
clf.fit(X, y)
_ef = clf.coef_
std_array = np.std(_ef, ddof=1, axis=0)
abs_array = abs(_ef)
mean_array = np.mean(abs_array, axis=0)
_importance = std_array / mean_array
h = {}
h['feature_id'] = np.array(range(len(_importance)))
h['feature_importance'] = _importance
return pd.DataFrame(h)
def get_linear_impact(self, data: DataNode):
from sklearn.linear_model import LogisticRegression
import pandas as pd
if (len(set(data.data[1]))) > 2:
print('ERROR! Only binary classification is supported!')
return 0
X, y = self.data_transformer(data).data
clf = LogisticRegression(random_state=1)
clf.fit(X, y)
_ef = clf.coef_
_impact = _ef[0]
h = {}
h['feature_id'] = np.array(range(len(_impact)))
h['feature_impact'] = _impact
return pd.DataFrame(h)
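# Illustrative usage of Classifier (a minimal sketch; the constructor call below is
# hypothetical -- the exact arguments accepted by BaseEstimator are not shown in this
# file -- while fit/predict/predict_proba take DataNode objects, as used above):
#
#   clf = Classifier(metric='acc')
#   clf.fit(train_node)                  # train_node: DataNode wrapping (X, y)
#   proba = clf.predict_proba(test_node)
#   labels = clf.predict(test_node)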
class Regressor(BaseEstimator):
"""This class implements the regression task. """
def initialize(self, data: DataNode, **kwargs):
self.metric = 'mse' if self.metric is None else self.metric
# Check the task type: {continuous}
task_type = type_dict['continuous']
self.task_type = task_type
super().initialize(data=data, **kwargs)
def fit(self, data, **kwargs):
"""
Fit the regressor to given training data.
:param data: DataNode
:return: self
"""
if self._ml_engine is None:
self.initialize(data=data, **kwargs)
super().fit(data, **kwargs)
return self
def predict(self, X, batch_size=None, n_jobs=1):
"""
Make predictions for X.
:param X: DataNode
:param batch_size: int
:param n_jobs: int
:return: y : array of shape = [n_samples] or [n_samples, n_labels]
The predicted classes.
"""
if not isinstance(X, DataNode):
raise ValueError("X is supposed to be a Data Node, but get %s" % type(X))
return super().predict(X, batch_size=batch_size, n_jobs=n_jobs)
def get_tree_importance(self, data: DataNode):
from lightgbm import LGBMRegressor
import pandas as pd
X, y = self.data_transformer(data).data
lgb = LGBMRegressor(random_state=1)
lgb.fit(X, y)
_importance = lgb.feature_importances_
h = {}
h['feature_id'] = np.array(range(len(_importance)))
h['feature_importance'] = _importance
return pd.DataFrame(h)
def get_linear_impact(self, data: DataNode):
from sklearn.linear_model import LinearRegression
import pandas as pd
X, y = self.data_transformer(data).data
reg = LinearRegression()
reg.fit(X, y)
_impact = reg.coef_
h = {}
h['feature_id'] = np.array(range(len(_impact)))
h['feature_impact'] = _impact
return pd.DataFrame(h)
|
[
"numpy.mean",
"numpy.ones_like",
"lightgbm.LGBMClassifier",
"lightgbm.LGBMRegressor",
"sklearn.linear_model.LogisticRegression",
"numpy.sum",
"sklearn.utils.multiclass.type_of_target",
"numpy.std",
"pandas.DataFrame",
"sklearn.linear_model.LinearRegression"
] |
[((591, 619), 'sklearn.utils.multiclass.type_of_target', 'type_of_target', (['data.data[1]'], {}), '(data.data[1])\n', (605, 619), False, 'from sklearn.utils.multiclass import type_of_target\n'), ((2957, 2987), 'lightgbm.LGBMClassifier', 'LGBMClassifier', ([], {'random_state': '(1)'}), '(random_state=1)\n', (2971, 2987), False, 'from lightgbm import LGBMClassifier\n'), ((3193, 3208), 'pandas.DataFrame', 'pd.DataFrame', (['h'], {}), '(h)\n', (3205, 3208), True, 'import pandas as pd\n'), ((3413, 3447), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(1)'}), '(random_state=1)\n', (3431, 3447), False, 'from sklearn.linear_model import LogisticRegression\n'), ((3514, 3541), 'numpy.std', 'np.std', (['_ef'], {'ddof': '(1)', 'axis': '(0)'}), '(_ef, ddof=1, axis=0)\n', (3520, 3541), True, 'import numpy as np\n'), ((3592, 3618), 'numpy.mean', 'np.mean', (['abs_array'], {'axis': '(0)'}), '(abs_array, axis=0)\n', (3599, 3618), True, 'import numpy as np\n'), ((3800, 3815), 'pandas.DataFrame', 'pd.DataFrame', (['h'], {}), '(h)\n', (3812, 3815), True, 'import pandas as pd\n'), ((4147, 4181), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(1)'}), '(random_state=1)\n', (4165, 4181), False, 'from sklearn.linear_model import LogisticRegression\n'), ((4377, 4392), 'pandas.DataFrame', 'pd.DataFrame', (['h'], {}), '(h)\n', (4389, 4392), True, 'import pandas as pd\n'), ((5766, 5795), 'lightgbm.LGBMRegressor', 'LGBMRegressor', ([], {'random_state': '(1)'}), '(random_state=1)\n', (5779, 5795), False, 'from lightgbm import LGBMRegressor\n'), ((6001, 6016), 'pandas.DataFrame', 'pd.DataFrame', (['h'], {}), '(h)\n', (6013, 6016), True, 'import pandas as pd\n'), ((6215, 6233), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (6231, 6233), False, 'from sklearn.linear_model import LinearRegression\n'), ((6408, 6423), 'pandas.DataFrame', 'pd.DataFrame', (['h'], {}), '(h)\n', (6420, 6423), True, 'import pandas as pd\n'), ((2384, 2410), 'numpy.sum', 'np.sum', (['pred_proba'], {'axis': '(1)'}), '(pred_proba, axis=1)\n', (2390, 2410), True, 'import numpy as np\n'), ((2432, 2462), 'numpy.ones_like', 'np.ones_like', (['pred_proba[:, 0]'], {}), '(pred_proba[:, 0])\n', (2444, 2462), True, 'import numpy as np\n')]
|
import time
import warnings
import matplotlib.pyplot as plt
import numpy as np
import sympy as sp
from .global_qbx import global_qbx_self
from .mesh import apply_interp_mat, gauss_rule, panelize_symbolic_surface, upsample
def find_dcutoff_refine(kernel, src, tol, plot=False):
# prep step 1: find d_cutoff and d_refine
# The goal is to estimate the error due to the QBX local patch
# The local surface will have singularities at the tips where it is cut off
# These singularities will cause error in the QBX expansion. We want to make
# the local patch large enough that these singularities are irrelevant.
# To isolate the QBX patch cutoff error, we will use a very high upsampling.
# We'll also choose p to be the minimum allowed value since that will result in
    # the largest cutoff error. Increasing p will reduce the cutoff error, guaranteeing that
# we never need to worry about cutoff error.
density = np.ones_like(src.pts[:, 0]) # np.cos(src.pts[:,0] * src.pts[:,1])
if plot:
plt.figure(figsize=(9, 13))
params = []
d_cutoffs = [1.1, 1.3, 1.6, 2.0]
ps = np.arange(1, 55, 3)
for di, direction in enumerate([-1.0, 1.0]):
baseline = global_qbx_self(kernel, src, p=30, kappa=8, direction=direction)
baseline_v = baseline.dot(density)
# Check that the local qbx method matches the simple global qbx approach when d_cutoff is very large
d_refine_high = 8.0
with warnings.catch_warnings():
warnings.simplefilter("ignore")
local_baseline = kernel.integrate(
src.pts,
src,
d_cutoff=3.0,
tol=1e-20,
max_p=50,
d_refine=d_refine_high,
on_src_direction=direction,
)
local_baseline_v = local_baseline.dot(density)
err = np.max(np.abs(baseline_v - local_baseline_v))
print(err)
assert err < tol / 2
n_qbx_panels = []
drefine_optimal = []
p_for_full_accuracy = []
if plot:
plt.subplot(3, 2, 1 + di)
for i_d, d_cutoff in enumerate(d_cutoffs):
errs = []
for i_p, p in enumerate(ps):
# print(p, d_cutoff)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
test, report = kernel.integrate(
src.pts,
src,
d_cutoff=d_cutoff,
tol=1e-15,
max_p=p,
on_src_direction=direction,
d_refine=d_refine_high,
return_report=True,
)
testv = test.dot(density)
err = np.max(np.abs(baseline_v - testv))
errs.append(err)
# print(p, err)
if err < tol:
for d_refine_decrease in np.arange(1.0, d_refine_high, 0.25):
refine_test, refine_report = kernel.integrate(
src.pts,
src,
d_cutoff=d_cutoff,
tol=1e-15,
max_p=p
+ 10, # Increase p here to have a refinement safety margin
on_src_direction=direction,
d_refine=d_refine_decrease,
return_report=True,
)
refine_testv = refine_test.dot(density)
refine_err = np.max(np.abs(baseline_v - refine_testv))
if refine_err < tol:
drefine_optimal.append(d_refine_decrease)
n_qbx_panels.append(refine_report["n_qbx_panels"])
p_for_full_accuracy.append(p)
break
if len(n_qbx_panels) <= i_d:
print(f"Failed to find parameters for {d_cutoff}")
drefine_optimal.append(1000)
n_qbx_panels.append(1e6)
p_for_full_accuracy.append(1e3)
break
if plot:
print(d_cutoff, errs)
plt.plot(ps[: i_p + 1], np.log10(errs), label=str(d_cutoff))
params.append((direction, n_qbx_panels, drefine_optimal, p_for_full_accuracy))
if plot:
plt.legend()
plt.title("interior" if direction > 0 else "exterior")
plt.xlabel(r"$p_{\textrm{max}}$")
if di == 0:
plt.ylabel(r"$\log_{10}(\textrm{error})$")
plt.yticks(-np.arange(0, 16, 3))
plt.xticks(np.arange(0, 61, 10))
plt.ylim([-15, 0])
plt.subplot(3, 2, 3 + di)
plt.plot(d_cutoffs, np.array(n_qbx_panels) / src.n_pts, "k-*")
plt.xlabel(r"$d_{\textrm{cutoff}}$")
plt.ylim([0, 8])
if di == 0:
plt.ylabel("QBX panels per point")
plt.subplot(3, 2, 5 + di)
plt.plot(d_cutoffs, np.array(drefine_optimal), "k-*")
plt.xlabel(r"$d_{\textrm{cutoff}}$")
plt.ylim([0, 6])
if di == 0:
plt.ylabel(r"$d_{\textrm{refine}}$")
if plot:
plt.tight_layout()
plt.show()
total_cost = 0
for i in [0, 1]:
direction, n_qbx_panels, drefine_optimal, p_for_full_accuracy = params[i]
appx_cost = (
np.array(p_for_full_accuracy)
* np.array(n_qbx_panels)
* np.array(drefine_optimal)
)
if plot:
print(direction, appx_cost)
total_cost += appx_cost
if plot:
plt.plot(d_cutoffs, total_cost, "k-o")
plt.show()
best_idx = np.argmin(total_cost)
d_cutoff = d_cutoffs[best_idx]
d_refine = drefine_optimal[best_idx]
return d_cutoff, d_refine
# prep step 2: find the minimum distance at which integrals are computed
# to the required tolerance
def _find_d_up_helper(kernel, nq, max_curvature, start_d, tol, kappa):
t = sp.var("t")
n_panels = 2
while True:
panel_edges = np.linspace(-1, 1, n_panels + 1)
panel_bounds = np.stack((panel_edges[:-1], panel_edges[1:]), axis=1)
circle = panelize_symbolic_surface(
t, sp.cos(sp.pi * t), sp.sin(sp.pi * t), panel_bounds, *gauss_rule(nq)
)
n_panels_new = np.max(circle.panel_length / max_curvature * circle.panel_radius)
if n_panels_new <= n_panels:
break
n_panels = np.ceil(n_panels_new).astype(int)
# print(f"\nusing {n_panels} panels with max_curvature={max_curvature}")
circle_kappa, _ = upsample(circle, kappa)
circle_upsample, interp_mat_upsample = upsample(circle_kappa, 2)
# TODO: Write more about the underlying regularity assumptions!!
# Why is it acceptable to use this test_density here? Empirically, any
# well-resolved density has approximately the same error as integrating sin(x).
# For example, integrating: 1, cos(x)^2.
# If we integrate a poorly resolved density, we do see higher errors.
#
# How poorly resolved does the density need to be in order to see higher error?
# It seems like an interpolation Linfinity error of around 1e-5 causes the d_up value to start to drift upwards.
#
# As a simple heuristic that seems to perform very well, we compute the
# error when integrating a constant and then double the required distance
# in order to account for integrands that are not quite so perfectly
# resolved.
# if assume_regularity:
# omega = 1.0
# else:
# omega = 999.0# / max_curvature
# f = lambda x: np.sin(omega * x)
# test_density = interp_mat_upsample.dot(f(circle.pts[:,0]))
# test_density_upsampled = f(circle_upsample.pts[:,0])
# print('l2 err', np.linalg.norm(test_density - test_density_upsampled) / np.linalg.norm(test_density_upsampled))
# print('linf err', np.max(np.abs(test_density - test_density_upsampled)))
# test_density = f(circle.pts[:,0])
# test_density = np.sin(999 * circle.pts[:,0])
test_density = np.ones(circle_kappa.n_pts)
d_up = 0
for direction in [-1.0, 1.0]:
d = start_d
for i in range(50):
# In actuality, we only need to test interior points because the curvature
# of the surface ensures that more source panels are near the observation
# points and, as a result, the error will be higher for any given value of d.
L = np.repeat(circle_kappa.panel_length, circle_kappa.panel_order)
dist = L * d
test_pts = (
circle_kappa.pts + direction * circle_kappa.normals * dist[:, None]
)
# Check to make sure that the closest distance to a source point is
# truly `dist`. This check might fail if the interior test_pts are
# crossing over into the other half of the circle.
min_src_dist = np.min(
np.linalg.norm((test_pts[:, None] - circle_kappa.pts[None, :]), axis=2),
axis=1,
)
if not np.allclose(min_src_dist, dist):
return False, d
upsample_mat = np.transpose(
apply_interp_mat(
kernel._direct(test_pts, circle_upsample), interp_mat_upsample
),
(0, 2, 1),
)
est_mat = np.transpose(kernel._direct(test_pts, circle_kappa), (0, 2, 1))
# err = np.max(np.abs(upsample_mat - est_mat).sum(axis=2))
err = np.max(
np.abs(upsample_mat.dot(test_density) - est_mat.dot(test_density))
)
# print(d, err)
if err < tol:
d_up = max(d, d_up)
break
d *= 1.2
return True, d_up
def find_d_up(kernel, nq, max_curvature, start_d, tol, kappa):
d = start_d
for i in range(10):
d_up = _find_d_up_helper(kernel, nq, max_curvature * (0.8) ** i, d, tol, kappa)
if d_up[0]:
return d_up[1]
d = d_up[1]
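# Illustrative call (a sketch with hypothetical parameter values; the real ones depend
# on the kernel and the surface discretisation in use):
#
#   d_up = find_d_up(kernel, nq=12, max_curvature=0.5, start_d=0.05, tol=1e-13, kappa=4)
#
# The returned value is the observation distance, in multiples of the source panel
# length, beyond which the kappa-refined direct quadrature already agrees with a
# further-upsampled reference (tested on a constant density) to the requested tolerance.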
def final_check(kernel, src):
density = np.ones_like(src.pts[:, 0]) # np.cos(source.pts[:,0] * src.pts[:,1])
baseline = global_qbx_self(kernel, src, p=50, kappa=10, direction=1.0)
baseline_v = baseline.dot(density)
tols = 10.0 ** np.arange(0, -15, -1)
errs = []
runtimes = []
for tol in tols:
runs = []
for i in range(10):
start = time.time()
local_baseline, report = kernel.integrate(
src.pts,
src,
tol=tol,
on_src_direction=1.0,
return_report=True,
)
runs.append(time.time() - start)
runtimes.append(np.min(runs))
local_baseline_v = local_baseline.dot(density)
errs.append(np.max(np.abs(baseline_v - local_baseline_v)))
# print(tol, errs[-1], runtime)
# assert(np.max(np.abs(baseline_v-local_baseline_v)) < 5e-14)
plt.figure(figsize=(9, 5))
plt.subplot(1, 2, 1)
plt.plot(-np.log10(tols), np.log10(errs))
plt.subplot(1, 2, 2)
plt.plot(-np.log10(tols), runtimes)
plt.tight_layout()
plt.show()
|
[
"sympy.cos",
"numpy.log10",
"matplotlib.pyplot.ylabel",
"numpy.array",
"sympy.var",
"numpy.linalg.norm",
"numpy.arange",
"numpy.repeat",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.stack",
"numpy.linspace",
"numpy.min",
"warnings.simplefilter",
"numpy.argmin",
"matplotlib.pyplot.ylim",
"numpy.abs",
"sympy.sin",
"numpy.ceil",
"numpy.allclose",
"numpy.ones",
"matplotlib.pyplot.title",
"time.time",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"numpy.ones_like",
"warnings.catch_warnings",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplot"
] |
[((951, 978), 'numpy.ones_like', 'np.ones_like', (['src.pts[:, 0]'], {}), '(src.pts[:, 0])\n', (963, 978), True, 'import numpy as np\n'), ((1130, 1149), 'numpy.arange', 'np.arange', (['(1)', '(55)', '(3)'], {}), '(1, 55, 3)\n', (1139, 1149), True, 'import numpy as np\n'), ((6070, 6091), 'numpy.argmin', 'np.argmin', (['total_cost'], {}), '(total_cost)\n', (6079, 6091), True, 'import numpy as np\n'), ((6380, 6391), 'sympy.var', 'sp.var', (['"""t"""'], {}), "('t')\n", (6386, 6391), True, 'import sympy as sp\n'), ((8460, 8487), 'numpy.ones', 'np.ones', (['circle_kappa.n_pts'], {}), '(circle_kappa.n_pts)\n', (8467, 8487), True, 'import numpy as np\n'), ((10506, 10533), 'numpy.ones_like', 'np.ones_like', (['src.pts[:, 0]'], {}), '(src.pts[:, 0])\n', (10518, 10533), True, 'import numpy as np\n'), ((11396, 11422), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 5)'}), '(figsize=(9, 5))\n', (11406, 11422), True, 'import matplotlib.pyplot as plt\n'), ((11427, 11447), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (11438, 11447), True, 'import matplotlib.pyplot as plt\n'), ((11498, 11518), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (11509, 11518), True, 'import matplotlib.pyplot as plt\n'), ((11563, 11581), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (11579, 11581), True, 'import matplotlib.pyplot as plt\n'), ((11586, 11596), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11594, 11596), True, 'import matplotlib.pyplot as plt\n'), ((1039, 1066), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 13)'}), '(figsize=(9, 13))\n', (1049, 1066), True, 'import matplotlib.pyplot as plt\n'), ((5574, 5592), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5590, 5592), True, 'import matplotlib.pyplot as plt\n'), ((5601, 5611), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5609, 5611), True, 'import matplotlib.pyplot as plt\n'), ((5996, 6034), 'matplotlib.pyplot.plot', 'plt.plot', (['d_cutoffs', 'total_cost', '"""k-o"""'], {}), "(d_cutoffs, total_cost, 'k-o')\n", (6004, 6034), True, 'import matplotlib.pyplot as plt\n'), ((6043, 6053), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6051, 6053), True, 'import matplotlib.pyplot as plt\n'), ((6448, 6480), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(n_panels + 1)'], {}), '(-1, 1, n_panels + 1)\n', (6459, 6480), True, 'import numpy as np\n'), ((6504, 6557), 'numpy.stack', 'np.stack', (['(panel_edges[:-1], panel_edges[1:])'], {'axis': '(1)'}), '((panel_edges[:-1], panel_edges[1:]), axis=1)\n', (6512, 6557), True, 'import numpy as np\n'), ((6718, 6783), 'numpy.max', 'np.max', (['(circle.panel_length / max_curvature * circle.panel_radius)'], {}), '(circle.panel_length / max_curvature * circle.panel_radius)\n', (6724, 6783), True, 'import numpy as np\n'), ((10709, 10730), 'numpy.arange', 'np.arange', (['(0)', '(-15)', '(-1)'], {}), '(0, -15, -1)\n', (10718, 10730), True, 'import numpy as np\n'), ((11478, 11492), 'numpy.log10', 'np.log10', (['errs'], {}), '(errs)\n', (11486, 11492), True, 'import numpy as np\n'), ((1477, 1502), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (1500, 1502), False, 'import warnings\n'), ((1516, 1547), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (1537, 1547), False, 'import warnings\n'), ((1898, 1935), 'numpy.abs', 'np.abs', (['(baseline_v - local_baseline_v)'], {}), 
'(baseline_v - local_baseline_v)\n', (1904, 1935), True, 'import numpy as np\n'), ((2103, 2128), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(1 + di)'], {}), '(3, 2, 1 + di)\n', (2114, 2128), True, 'import matplotlib.pyplot as plt\n'), ((4696, 4708), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4706, 4708), True, 'import matplotlib.pyplot as plt\n'), ((4721, 4775), 'matplotlib.pyplot.title', 'plt.title', (["('interior' if direction > 0 else 'exterior')"], {}), "('interior' if direction > 0 else 'exterior')\n", (4730, 4775), True, 'import matplotlib.pyplot as plt\n'), ((4788, 4821), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$p_{\\\\textrm{max}}$"""'], {}), "('$p_{\\\\textrm{max}}$')\n", (4798, 4821), True, 'import matplotlib.pyplot as plt\n'), ((5007, 5025), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-15, 0]'], {}), '([-15, 0])\n', (5015, 5025), True, 'import matplotlib.pyplot as plt\n'), ((5039, 5064), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(3 + di)'], {}), '(3, 2, 3 + di)\n', (5050, 5064), True, 'import matplotlib.pyplot as plt\n'), ((5152, 5188), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$d_{\\\\textrm{cutoff}}$"""'], {}), "('$d_{\\\\textrm{cutoff}}$')\n", (5162, 5188), True, 'import matplotlib.pyplot as plt\n'), ((5201, 5217), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 8]'], {}), '([0, 8])\n', (5209, 5217), True, 'import matplotlib.pyplot as plt\n'), ((5306, 5331), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(5 + di)'], {}), '(3, 2, 5 + di)\n', (5317, 5331), True, 'import matplotlib.pyplot as plt\n'), ((5410, 5446), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$d_{\\\\textrm{cutoff}}$"""'], {}), "('$d_{\\\\textrm{cutoff}}$')\n", (5420, 5446), True, 'import matplotlib.pyplot as plt\n'), ((5459, 5475), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 6]'], {}), '([0, 6])\n', (5467, 5475), True, 'import matplotlib.pyplot as plt\n'), ((5850, 5875), 'numpy.array', 'np.array', (['drefine_optimal'], {}), '(drefine_optimal)\n', (5858, 5875), True, 'import numpy as np\n'), ((6617, 6634), 'sympy.cos', 'sp.cos', (['(sp.pi * t)'], {}), '(sp.pi * t)\n', (6623, 6634), True, 'import sympy as sp\n'), ((6636, 6653), 'sympy.sin', 'sp.sin', (['(sp.pi * t)'], {}), '(sp.pi * t)\n', (6642, 6653), True, 'import sympy as sp\n'), ((8863, 8925), 'numpy.repeat', 'np.repeat', (['circle_kappa.panel_length', 'circle_kappa.panel_order'], {}), '(circle_kappa.panel_length, circle_kappa.panel_order)\n', (8872, 8925), True, 'import numpy as np\n'), ((10850, 10861), 'time.time', 'time.time', ([], {}), '()\n', (10859, 10861), False, 'import time\n'), ((11145, 11157), 'numpy.min', 'np.min', (['runs'], {}), '(runs)\n', (11151, 11157), True, 'import numpy as np\n'), ((11462, 11476), 'numpy.log10', 'np.log10', (['tols'], {}), '(tols)\n', (11470, 11476), True, 'import numpy as np\n'), ((11533, 11547), 'numpy.log10', 'np.log10', (['tols'], {}), '(tols)\n', (11541, 11547), True, 'import numpy as np\n'), ((4862, 4905), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\log_{10}(\\\\textrm{error})$"""'], {}), "('$\\\\log_{10}(\\\\textrm{error})$')\n", (4872, 4905), True, 'import matplotlib.pyplot as plt\n'), ((4973, 4993), 'numpy.arange', 'np.arange', (['(0)', '(61)', '(10)'], {}), '(0, 61, 10)\n', (4982, 4993), True, 'import numpy as np\n'), ((5258, 5292), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""QBX panels per point"""'], {}), "('QBX panels per point')\n", (5268, 5292), True, 'import matplotlib.pyplot as plt\n'), ((5364, 5389), 
'numpy.array', 'np.array', (['drefine_optimal'], {}), '(drefine_optimal)\n', (5372, 5389), True, 'import numpy as np\n'), ((5516, 5552), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$d_{\\\\textrm{refine}}$"""'], {}), "('$d_{\\\\textrm{refine}}$')\n", (5526, 5552), True, 'import matplotlib.pyplot as plt\n'), ((5769, 5798), 'numpy.array', 'np.array', (['p_for_full_accuracy'], {}), '(p_for_full_accuracy)\n', (5777, 5798), True, 'import numpy as np\n'), ((5813, 5835), 'numpy.array', 'np.array', (['n_qbx_panels'], {}), '(n_qbx_panels)\n', (5821, 5835), True, 'import numpy as np\n'), ((6858, 6879), 'numpy.ceil', 'np.ceil', (['n_panels_new'], {}), '(n_panels_new)\n', (6865, 6879), True, 'import numpy as np\n'), ((9349, 9418), 'numpy.linalg.norm', 'np.linalg.norm', (['(test_pts[:, None] - circle_kappa.pts[None, :])'], {'axis': '(2)'}), '(test_pts[:, None] - circle_kappa.pts[None, :], axis=2)\n', (9363, 9418), True, 'import numpy as np\n'), ((9479, 9510), 'numpy.allclose', 'np.allclose', (['min_src_dist', 'dist'], {}), '(min_src_dist, dist)\n', (9490, 9510), True, 'import numpy as np\n'), ((11241, 11278), 'numpy.abs', 'np.abs', (['(baseline_v - local_baseline_v)'], {}), '(baseline_v - local_baseline_v)\n', (11247, 11278), True, 'import numpy as np\n'), ((2301, 2326), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (2324, 2326), False, 'import warnings\n'), ((2348, 2379), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (2369, 2379), False, 'import warnings\n'), ((4541, 4555), 'numpy.log10', 'np.log10', (['errs'], {}), '(errs)\n', (4549, 4555), True, 'import numpy as np\n'), ((4929, 4948), 'numpy.arange', 'np.arange', (['(0)', '(16)', '(3)'], {}), '(0, 16, 3)\n', (4938, 4948), True, 'import numpy as np\n'), ((5097, 5119), 'numpy.array', 'np.array', (['n_qbx_panels'], {}), '(n_qbx_panels)\n', (5105, 5119), True, 'import numpy as np\n'), ((11100, 11111), 'time.time', 'time.time', ([], {}), '()\n', (11109, 11111), False, 'import time\n'), ((2851, 2877), 'numpy.abs', 'np.abs', (['(baseline_v - testv)'], {}), '(baseline_v - testv)\n', (2857, 2877), True, 'import numpy as np\n'), ((3035, 3070), 'numpy.arange', 'np.arange', (['(1.0)', 'd_refine_high', '(0.25)'], {}), '(1.0, d_refine_high, 0.25)\n', (3044, 3070), True, 'import numpy as np\n'), ((3769, 3802), 'numpy.abs', 'np.abs', (['(baseline_v - refine_testv)'], {}), '(baseline_v - refine_testv)\n', (3775, 3802), True, 'import numpy as np\n')]
|
from setuptools import setup, Extension, find_packages
import subprocess
import errno
import re
import os
import shutil
import sys
import zipfile
from urllib.request import urlretrieve
import numpy
from Cython.Build import cythonize
isWindows = os.name == 'nt'
isMac = sys.platform == 'darwin'
is64Bit = sys.maxsize > 2**32
# adapted from cffi's setup.py
# the following may be overridden if pkg-config exists
libraries = ['lensfun']
include_dirs = []
library_dirs = []
extra_compile_args = []
extra_link_args = []
def _ask_pkg_config(resultlist, option, result_prefix='', sysroot=False):
pkg_config = os.environ.get('PKG_CONFIG','pkg-config')
try:
p = subprocess.Popen([pkg_config, option, 'lensfun'],
stdout=subprocess.PIPE)
except OSError as e:
if e.errno != errno.ENOENT:
raise
else:
t = p.stdout.read().decode().strip()
if p.wait() == 0:
res = t.split()
# '-I/usr/...' -> '/usr/...'
for x in res:
assert x.startswith(result_prefix)
res = [x[len(result_prefix):] for x in res]
sysroot = sysroot and os.environ.get('PKG_CONFIG_SYSROOT_DIR', '')
if sysroot:
# old versions of pkg-config don't support this env var,
# so here we emulate its effect if needed
res = [path if path.startswith(sysroot)
else sysroot + path
for path in res]
resultlist[:] = res
def use_pkg_config():
_ask_pkg_config(include_dirs, '--cflags-only-I', '-I', sysroot=True)
_ask_pkg_config(extra_compile_args, '--cflags-only-other')
_ask_pkg_config(library_dirs, '--libs-only-L', '-L', sysroot=True)
_ask_pkg_config(extra_link_args, '--libs-only-other')
_ask_pkg_config(libraries, '--libs-only-l', '-l')
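# Hedged illustration (not part of the original build script): the pkg-config
# helpers above strip the option prefix from each returned token, e.g.
# '-I/usr/include/lensfun' becomes '/usr/include/lensfun'. The sample flags
# below are assumptions for demonstration only.
def _demo_strip_prefix(tokens=('-I/usr/include/lensfun',), prefix='-I'):
    return [t[len(prefix):] for t in tokens if t.startswith(prefix)]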
if isWindows or isMac:
cmake_build = os.path.abspath('external/lensfun/build')
install_dir = os.path.join(cmake_build, 'install')
include_dirs += [os.path.join(install_dir, 'include', 'lensfun')]
library_dirs += [os.path.join(install_dir, 'lib')]
else:
use_pkg_config()
# this must be after use_pkg_config()!
include_dirs += [numpy.get_include()]
# for version_helper.h
include_dirs += [os.path.abspath('lensfunpy')]
def clone_submodules():
if not os.path.exists('external/lensfun/README.md'):
print('lensfun git submodule not cloned yet, will invoke "git submodule update --init" now')
if os.system('git submodule update --init') != 0:
raise Exception('git failed')
def windows_lensfun_compile():
clone_submodules()
cwd = os.getcwd()
# Download cmake to build lensfun
cmake_version = '3.13.4'
cmake_url = 'https://github.com/Kitware/CMake/releases/download/v{v}/cmake-{v}-win32-x86.zip'.format(v=cmake_version)
cmake = os.path.abspath('external/cmake-{}-win32-x86/bin/cmake.exe'.format(cmake_version))
# Download vcpkg to build dependencies of lensfun
vcpkg_commit = '2021.05.12'
vcpkg_url = 'https://github.com/Microsoft/vcpkg/archive/{}.zip'.format(vcpkg_commit)
vcpkg_dir = os.path.abspath('external/vcpkg-{}'.format(vcpkg_commit))
vcpkg_bootstrap = os.path.join(vcpkg_dir, 'bootstrap-vcpkg.bat')
vcpkg = os.path.join(vcpkg_dir, 'vcpkg.exe')
files = [(cmake_url, 'external', cmake),
(vcpkg_url, 'external', vcpkg_bootstrap)]
for url, extractdir, extractcheck in files:
if not os.path.exists(extractcheck):
path = 'external/' + os.path.basename(url)
if not os.path.exists(path):
print('Downloading', url)
try:
urlretrieve(url, path)
                except Exception:
# repeat once in case of network issues
urlretrieve(url, path)
with zipfile.ZipFile(path) as z:
print('Extracting', path, 'into', extractdir)
z.extractall(extractdir)
if not os.path.exists(path):
raise RuntimeError(path + ' not found!')
# Bootstrap vcpkg
os.chdir(vcpkg_dir)
if not os.path.exists(vcpkg):
code = os.system(vcpkg_bootstrap)
if code != 0:
sys.exit(code)
# lensfun depends on glib2, so let's build it with vcpkg
vcpkg_arch = 'x64' if is64Bit else 'x86'
vcpkg_triplet = '{}-windows'.format(vcpkg_arch)
code = os.system(vcpkg + ' install glib:' + vcpkg_triplet)
if code != 0:
sys.exit(code)
vcpkg_install_dir = os.path.join(vcpkg_dir, 'installed', vcpkg_triplet)
# bundle runtime dlls
vcpkg_bin_dir = os.path.join(vcpkg_install_dir, 'bin')
glib2_dll = os.path.join(vcpkg_bin_dir, 'glib-2.0-0.dll')
# configure and compile lensfun
if not os.path.exists(cmake_build):
os.mkdir(cmake_build)
os.chdir(cmake_build)
# temporary hack to avoid https://stackoverflow.com/a/53547931
# (python module not needed here anyway)
patch_path = '../apps/CMakeLists.txt'
with open(patch_path) as f:
content = f.read()
content = content.replace('IF(PYTHON)', 'IF(FALSE)')
with open(patch_path, 'w') as f:
f.write(content)
cmds = [cmake + ' .. -G "NMake Makefiles" -DCMAKE_BUILD_TYPE=Release ' +\
'-DBUILD_TESTS=off -DINSTALL_HELPER_SCRIPTS=off ' +\
'-DCMAKE_TOOLCHAIN_FILE={}/scripts/buildsystems/vcpkg.cmake '.format(vcpkg_dir) +\
'-DGLIB2_BASE_DIR={} -DGLIB2_DLL={} -DCMAKE_INSTALL_PREFIX=install'.format(vcpkg_install_dir, glib2_dll),
cmake + ' --build .',
cmake + ' --build . --target install',
]
for cmd in cmds:
print(cmd)
code = os.system(cmd)
if code != 0:
sys.exit(code)
os.chdir(cwd)
dll_runtime_libs = [('lensfun.dll', os.path.join(install_dir, 'bin')),
('glib-2.0-0.dll', vcpkg_bin_dir),
# dependencies of glib
('pcre.dll', vcpkg_bin_dir),
('iconv-2.dll', vcpkg_bin_dir),
('charset-1.dll', vcpkg_bin_dir),
('intl-8.dll', vcpkg_bin_dir),
]
for filename, folder in dll_runtime_libs:
src = os.path.join(folder, filename)
dest = 'lensfunpy/' + filename
print('copying', src, '->', dest)
shutil.copyfile(src, dest)
def mac_lensfun_compile():
clone_submodules()
# configure and compile lensfun
cwd = os.getcwd()
if not os.path.exists(cmake_build):
os.mkdir(cmake_build)
os.chdir(cmake_build)
install_name_dir = os.path.join(install_dir, 'lib')
cmds = ['cmake .. -DCMAKE_BUILD_TYPE=Release ' +\
'-DBUILD_TESTS=off -DINSTALL_HELPER_SCRIPTS=off ' +\
'-DCMAKE_INSTALL_PREFIX=install ' +\
'-DCMAKE_INSTALL_NAME_DIR=' + install_name_dir,
'cmake --build .',
'cmake --build . --target install',
]
for cmd in cmds:
print(cmd)
code = os.system(cmd)
if code != 0:
sys.exit(code)
os.chdir(cwd)
def bundle_db_files():
import glob
db_files = 'lensfunpy/db_files'
if not os.path.exists(db_files):
os.makedirs(db_files)
for path in glob.glob('external/lensfun/data/db/*.xml'):
dest = os.path.join(db_files, os.path.basename(path))
print('copying', path, '->', dest)
shutil.copyfile(path, dest)
package_data = {'lensfunpy': []}
# evil hack, check cmd line for relevant commands
# custom cmdclasses didn't work out in this case
cmdline = ''.join(sys.argv[1:])
needsCompile = any(s in cmdline for s in ['install', 'bdist', 'build_ext', 'wheel', 'nosetests'])
if isWindows and needsCompile:
windows_lensfun_compile()
package_data['lensfunpy'].append('*.dll')
elif isMac and needsCompile:
mac_lensfun_compile()
if any(s in cmdline for s in ['clean', 'sdist']):
# When running sdist after a previous run of bdist or build_ext
# then even with the 'clean' command the .egg-info folder stays.
# This folder contains SOURCES.txt which in turn is used by sdist
# to include package data files, but we don't want .dll's and .xml
# files in our source distribution. Therefore, to prevent accidents,
# we help a little...
egg_info = 'lensfunpy.egg-info'
print('removing', egg_info)
shutil.rmtree(egg_info, ignore_errors=True)
if 'sdist' not in cmdline:
# This assumes that the lensfun version from external/lensfun was used.
# If that's not the case, the bundled files may fail to load, for example,
# if lensfunpy was linked against an older lensfun version already on
# the system (Linux mostly) and the database format changed in an incompatible way.
# In that case, loading of bundled files can still be disabled
# with Database(load_bundled=False).
package_data['lensfunpy'].append('db_files/*.xml')
bundle_db_files()
# Support for optional Cython line tracing
# run the following to generate a test coverage report:
# $ export LINETRACE=1
# $ python setup.py build_ext --inplace
# $ nosetests --with-coverage --cover-html --cover-package=lensfunpy
compdirectives = {}
macros = []
if (os.environ.get('LINETRACE', False)):
compdirectives['linetrace'] = True
macros.append(('CYTHON_TRACE', '1'))
extensions = cythonize([Extension("lensfunpy._lensfun",
include_dirs=include_dirs,
sources=[os.path.join('lensfunpy', '_lensfun.pyx')],
libraries=libraries,
library_dirs=library_dirs,
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
define_macros=macros
)],
compiler_directives=compdirectives)
# make __version__ available (https://stackoverflow.com/a/16084844)
exec(open('lensfunpy/_version.py').read())
setup(
name = 'lensfunpy',
version = __version__,
description = 'Lens distortion correction for Python, a wrapper for lensfun',
long_description = open('README.rst').read(),
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/letmaik/lensfunpy',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Cython',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Topic :: Multimedia :: Graphics',
'Topic :: Software Development :: Libraries',
],
packages = find_packages(),
ext_modules = extensions,
package_data = package_data,
install_requires=['numpy']
)
|
[
"zipfile.ZipFile",
"sys.exit",
"os.path.exists",
"urllib.request.urlretrieve",
"subprocess.Popen",
"setuptools.find_packages",
"os.mkdir",
"numpy.get_include",
"glob.glob",
"shutil.copyfile",
"os.makedirs",
"os.environ.get",
"os.path.join",
"os.getcwd",
"os.chdir",
"os.path.basename",
"shutil.rmtree",
"os.path.abspath",
"os.system"
] |
[((9442, 9476), 'os.environ.get', 'os.environ.get', (['"""LINETRACE"""', '(False)'], {}), "('LINETRACE', False)\n", (9456, 9476), False, 'import os\n'), ((613, 655), 'os.environ.get', 'os.environ.get', (['"""PKG_CONFIG"""', '"""pkg-config"""'], {}), "('PKG_CONFIG', 'pkg-config')\n", (627, 655), False, 'import os\n'), ((1962, 2003), 'os.path.abspath', 'os.path.abspath', (['"""external/lensfun/build"""'], {}), "('external/lensfun/build')\n", (1977, 2003), False, 'import os\n'), ((2022, 2058), 'os.path.join', 'os.path.join', (['cmake_build', '"""install"""'], {}), "(cmake_build, 'install')\n", (2034, 2058), False, 'import os\n'), ((2277, 2296), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (2294, 2296), False, 'import numpy\n'), ((2339, 2367), 'os.path.abspath', 'os.path.abspath', (['"""lensfunpy"""'], {}), "('lensfunpy')\n", (2354, 2367), False, 'import os\n'), ((2718, 2729), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2727, 2729), False, 'import os\n'), ((3291, 3337), 'os.path.join', 'os.path.join', (['vcpkg_dir', '"""bootstrap-vcpkg.bat"""'], {}), "(vcpkg_dir, 'bootstrap-vcpkg.bat')\n", (3303, 3337), False, 'import os\n'), ((3350, 3386), 'os.path.join', 'os.path.join', (['vcpkg_dir', '"""vcpkg.exe"""'], {}), "(vcpkg_dir, 'vcpkg.exe')\n", (3362, 3386), False, 'import os\n'), ((4214, 4233), 'os.chdir', 'os.chdir', (['vcpkg_dir'], {}), '(vcpkg_dir)\n', (4222, 4233), False, 'import os\n'), ((4530, 4581), 'os.system', 'os.system', (["(vcpkg + ' install glib:' + vcpkg_triplet)"], {}), "(vcpkg + ' install glib:' + vcpkg_triplet)\n", (4539, 4581), False, 'import os\n'), ((4647, 4698), 'os.path.join', 'os.path.join', (['vcpkg_dir', '"""installed"""', 'vcpkg_triplet'], {}), "(vcpkg_dir, 'installed', vcpkg_triplet)\n", (4659, 4698), False, 'import os\n'), ((4750, 4788), 'os.path.join', 'os.path.join', (['vcpkg_install_dir', '"""bin"""'], {}), "(vcpkg_install_dir, 'bin')\n", (4762, 4788), False, 'import os\n'), ((4805, 4850), 'os.path.join', 'os.path.join', (['vcpkg_bin_dir', '"""glib-2.0-0.dll"""'], {}), "(vcpkg_bin_dir, 'glib-2.0-0.dll')\n", (4817, 4850), False, 'import os\n'), ((4962, 4983), 'os.chdir', 'os.chdir', (['cmake_build'], {}), '(cmake_build)\n', (4970, 4983), False, 'import os\n'), ((5919, 5932), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (5927, 5932), False, 'import os\n'), ((6682, 6693), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6691, 6693), False, 'import os\n'), ((6768, 6789), 'os.chdir', 'os.chdir', (['cmake_build'], {}), '(cmake_build)\n', (6776, 6789), False, 'import os\n'), ((6813, 6845), 'os.path.join', 'os.path.join', (['install_dir', '"""lib"""'], {}), "(install_dir, 'lib')\n", (6825, 6845), False, 'import os\n'), ((7314, 7327), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (7322, 7327), False, 'import os\n'), ((7487, 7530), 'glob.glob', 'glob.glob', (['"""external/lensfun/data/db/*.xml"""'], {}), "('external/lensfun/data/db/*.xml')\n", (7496, 7530), False, 'import glob\n'), ((8600, 8643), 'shutil.rmtree', 'shutil.rmtree', (['egg_info'], {'ignore_errors': '(True)'}), '(egg_info, ignore_errors=True)\n', (8613, 8643), False, 'import shutil\n'), ((676, 749), 'subprocess.Popen', 'subprocess.Popen', (["[pkg_config, option, 'lensfun']"], {'stdout': 'subprocess.PIPE'}), "([pkg_config, option, 'lensfun'], stdout=subprocess.PIPE)\n", (692, 749), False, 'import subprocess\n'), ((2085, 2132), 'os.path.join', 'os.path.join', (['install_dir', '"""include"""', '"""lensfun"""'], {}), "(install_dir, 'include', 'lensfun')\n", (2097, 2132), False, 'import os\n'), 
((2155, 2187), 'os.path.join', 'os.path.join', (['install_dir', '"""lib"""'], {}), "(install_dir, 'lib')\n", (2167, 2187), False, 'import os\n'), ((2405, 2449), 'os.path.exists', 'os.path.exists', (['"""external/lensfun/README.md"""'], {}), "('external/lensfun/README.md')\n", (2419, 2449), False, 'import os\n'), ((4245, 4266), 'os.path.exists', 'os.path.exists', (['vcpkg'], {}), '(vcpkg)\n', (4259, 4266), False, 'import os\n'), ((4283, 4309), 'os.system', 'os.system', (['vcpkg_bootstrap'], {}), '(vcpkg_bootstrap)\n', (4292, 4309), False, 'import os\n'), ((4608, 4622), 'sys.exit', 'sys.exit', (['code'], {}), '(code)\n', (4616, 4622), False, 'import sys\n'), ((4899, 4926), 'os.path.exists', 'os.path.exists', (['cmake_build'], {}), '(cmake_build)\n', (4913, 4926), False, 'import os\n'), ((4936, 4957), 'os.mkdir', 'os.mkdir', (['cmake_build'], {}), '(cmake_build)\n', (4944, 4957), False, 'import os\n'), ((5850, 5864), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (5859, 5864), False, 'import os\n'), ((6428, 6458), 'os.path.join', 'os.path.join', (['folder', 'filename'], {}), '(folder, filename)\n', (6440, 6458), False, 'import os\n'), ((6548, 6574), 'shutil.copyfile', 'shutil.copyfile', (['src', 'dest'], {}), '(src, dest)\n', (6563, 6574), False, 'import shutil\n'), ((6705, 6732), 'os.path.exists', 'os.path.exists', (['cmake_build'], {}), '(cmake_build)\n', (6719, 6732), False, 'import os\n'), ((6742, 6763), 'os.mkdir', 'os.mkdir', (['cmake_build'], {}), '(cmake_build)\n', (6750, 6763), False, 'import os\n'), ((7246, 7260), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (7255, 7260), False, 'import os\n'), ((7415, 7439), 'os.path.exists', 'os.path.exists', (['db_files'], {}), '(db_files)\n', (7429, 7439), False, 'import os\n'), ((7449, 7470), 'os.makedirs', 'os.makedirs', (['db_files'], {}), '(db_files)\n', (7460, 7470), False, 'import os\n'), ((7645, 7672), 'shutil.copyfile', 'shutil.copyfile', (['path', 'dest'], {}), '(path, dest)\n', (7660, 7672), False, 'import shutil\n'), ((11227, 11242), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (11240, 11242), False, 'from setuptools import setup, Extension, find_packages\n'), ((2563, 2603), 'os.system', 'os.system', (['"""git submodule update --init"""'], {}), "('git submodule update --init')\n", (2572, 2603), False, 'import os\n'), ((3556, 3584), 'os.path.exists', 'os.path.exists', (['extractcheck'], {}), '(extractcheck)\n', (3570, 3584), False, 'import os\n'), ((4344, 4358), 'sys.exit', 'sys.exit', (['code'], {}), '(code)\n', (4352, 4358), False, 'import sys\n'), ((5899, 5913), 'sys.exit', 'sys.exit', (['code'], {}), '(code)\n', (5907, 5913), False, 'import sys\n'), ((5974, 6006), 'os.path.join', 'os.path.join', (['install_dir', '"""bin"""'], {}), "(install_dir, 'bin')\n", (5986, 6006), False, 'import os\n'), ((7295, 7309), 'sys.exit', 'sys.exit', (['code'], {}), '(code)\n', (7303, 7309), False, 'import sys\n'), ((7570, 7592), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (7586, 7592), False, 'import os\n'), ((1176, 1220), 'os.environ.get', 'os.environ.get', (['"""PKG_CONFIG_SYSROOT_DIR"""', '""""""'], {}), "('PKG_CONFIG_SYSROOT_DIR', '')\n", (1190, 1220), False, 'import os\n'), ((3619, 3640), 'os.path.basename', 'os.path.basename', (['url'], {}), '(url)\n', (3635, 3640), False, 'import os\n'), ((3660, 3680), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (3674, 3680), False, 'import os\n'), ((3941, 3962), 'zipfile.ZipFile', 'zipfile.ZipFile', (['path'], {}), '(path)\n', (3956, 
3962), False, 'import zipfile\n'), ((4108, 4128), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (4122, 4128), False, 'import os\n'), ((3765, 3787), 'urllib.request.urlretrieve', 'urlretrieve', (['url', 'path'], {}), '(url, path)\n', (3776, 3787), False, 'from urllib.request import urlretrieve\n'), ((9680, 9721), 'os.path.join', 'os.path.join', (['"""lensfunpy"""', '"""_lensfun.pyx"""'], {}), "('lensfunpy', '_lensfun.pyx')\n", (9692, 9721), False, 'import os\n'), ((3892, 3914), 'urllib.request.urlretrieve', 'urlretrieve', (['url', 'path'], {}), '(url, path)\n', (3903, 3914), False, 'from urllib.request import urlretrieve\n')]
|
# Copyright (c) 2008,2015,2016,2017,2018,2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Contains a collection of basic calculations.
These include:
* wind components
* heat index
* windchill
"""
import warnings
import numpy as np
from scipy.ndimage import gaussian_filter
from .. import constants as mpconsts
from ..package_tools import Exporter
from ..units import atleast_1d, check_units, masked_array, units
from ..xarray import preprocess_xarray
exporter = Exporter(globals())
# The following variables are constants for a standard atmosphere
t0 = 288. * units.kelvin
p0 = 1013.25 * units.hPa
@exporter.export
@preprocess_xarray
@check_units('[speed]', '[speed]')
def wind_speed(u, v):
r"""Compute the wind speed from u and v-components.
Parameters
----------
u : `pint.Quantity`
Wind component in the X (East-West) direction
v : `pint.Quantity`
Wind component in the Y (North-South) direction
Returns
-------
wind speed: `pint.Quantity`
The speed of the wind
See Also
--------
wind_components
"""
speed = np.sqrt(u * u + v * v)
return speed
@exporter.export
@preprocess_xarray
@check_units('[speed]', '[speed]')
def wind_direction(u, v, convention='from'):
r"""Compute the wind direction from u and v-components.
Parameters
----------
u : `pint.Quantity`
Wind component in the X (East-West) direction
v : `pint.Quantity`
Wind component in the Y (North-South) direction
convention : str
Convention to return direction. 'from' returns the direction the wind is coming from
(meteorological convention). 'to' returns the direction the wind is going towards
(oceanographic convention). Default is 'from'.
Returns
-------
direction: `pint.Quantity`
The direction of the wind in interval [0, 360] degrees, with 360 being North, with the
direction defined by the convention kwarg.
See Also
--------
wind_components
Notes
-----
In the case of calm winds (where `u` and `v` are zero), this function returns a direction
of 0.
"""
wdir = 90. * units.deg - np.arctan2(-v, -u)
origshape = wdir.shape
wdir = atleast_1d(wdir)
    # Handle oceanographic convention
if convention == 'to':
wdir -= 180 * units.deg
elif convention not in ('to', 'from'):
raise ValueError('Invalid kwarg for "convention". Valid options are "from" or "to".')
wdir[wdir <= 0] += 360. * units.deg
# avoid unintended modification of `pint.Quantity` by direct use of magnitude
calm_mask = (np.asarray(u.magnitude) == 0.) & (np.asarray(v.magnitude) == 0.)
# np.any check required for legacy numpy which treats 0-d False boolean index as zero
if np.any(calm_mask):
wdir[calm_mask] = 0. * units.deg
return wdir.reshape(origshape).to('degrees')
@exporter.export
@preprocess_xarray
@check_units('[speed]')
def wind_components(speed, wdir):
r"""Calculate the U, V wind vector components from the speed and direction.
Parameters
----------
speed : `pint.Quantity`
The wind speed (magnitude)
wdir : `pint.Quantity`
The wind direction, specified as the direction from which the wind is
blowing (0-2 pi radians or 0-360 degrees), with 360 degrees being North.
Returns
-------
u, v : tuple of `pint.Quantity`
The wind components in the X (East-West) and Y (North-South)
directions, respectively.
See Also
--------
wind_speed
wind_direction
Examples
--------
>>> from metpy.units import units
>>> metpy.calc.wind_components(10. * units('m/s'), 225. * units.deg)
(<Quantity(7.071067811865475, 'meter / second')>,
<Quantity(7.071067811865477, 'meter / second')>)
"""
wdir = _check_radians(wdir, max_radians=4 * np.pi)
u = -speed * np.sin(wdir)
v = -speed * np.cos(wdir)
return u, v
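# Hedged usage sketch (not part of the original module): a round trip through
# wind_components, wind_speed and wind_direction. The sample speeds and
# directions below are illustrative assumptions.
def _demo_wind_round_trip():
    speed = np.array([10., 20.]) * units('m/s')
    wdir = np.array([225., 90.]) * units.deg
    u, v = wind_components(speed, wdir)
    # The original speed and direction should be recovered (up to rounding).
    assert np.allclose(wind_speed(u, v).to('m/s').m, speed.to('m/s').m)
    assert np.allclose(wind_direction(u, v).to('deg').m, wdir.to('deg').m)
    return u, v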
@exporter.export
@preprocess_xarray
@check_units(temperature='[temperature]', speed='[speed]')
def windchill(temperature, speed, face_level_winds=False, mask_undefined=True):
r"""Calculate the Wind Chill Temperature Index (WCTI).
Calculates WCTI from the current temperature and wind speed using the formula
outlined by the FCM [FCMR192003]_.
Specifically, these formulas assume that wind speed is measured at
10m. If, instead, the speeds are measured at face level, the winds
need to be multiplied by a factor of 1.5 (this can be done by specifying
`face_level_winds` as `True`.)
Parameters
----------
temperature : `pint.Quantity`
The air temperature
speed : `pint.Quantity`
The wind speed at 10m. If instead the winds are at face level,
`face_level_winds` should be set to `True` and the 1.5 multiplicative
correction will be applied automatically.
face_level_winds : bool, optional
        A flag indicating whether the wind speeds were measured at face
level instead of 10m, thus requiring a correction. Defaults to
`False`.
mask_undefined : bool, optional
A flag indicating whether a masked array should be returned with
values where wind chill is undefined masked. These are values where
the temperature > 50F or wind speed <= 3 miles per hour. Defaults
to `True`.
Returns
-------
`pint.Quantity`
The corresponding Wind Chill Temperature Index value(s)
See Also
--------
heat_index
"""
# Correct for lower height measurement of winds if necessary
if face_level_winds:
# No in-place so that we copy
# noinspection PyAugmentAssignment
speed = speed * 1.5
temp_limit, speed_limit = 10. * units.degC, 3 * units.mph
speed_factor = speed.to('km/hr').magnitude ** 0.16
wcti = units.Quantity((0.6215 + 0.3965 * speed_factor) * temperature.to('degC').magnitude
- 11.37 * speed_factor + 13.12, units.degC).to(temperature.units)
# See if we need to mask any undefined values
if mask_undefined:
mask = np.array((temperature > temp_limit) | (speed <= speed_limit))
if mask.any():
wcti = masked_array(wcti, mask=mask)
return wcti
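# Hedged usage sketch (not part of the original module): a single wind chill
# value. The temperature and wind speed are illustrative assumptions; the FCM
# formula above gives roughly -12 degC for these inputs.
def _demo_windchill():
    return windchill(-5. * units.degC, 25. * units('km/hr')).to('degC')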
@exporter.export
@preprocess_xarray
@check_units('[temperature]')
def heat_index(temperature, rh, mask_undefined=True):
r"""Calculate the Heat Index from the current temperature and relative humidity.
The implementation uses the formula outlined in [Rothfusz1990]_, which is a
multi-variable least-squares regression of the values obtained in [Steadman1979]_.
Additional conditional corrections are applied to match what the National
Weather Service operationally uses. See Figure 3 of [Anderson2013]_ for a
depiction of this algorithm and further discussion.
Parameters
----------
temperature : `pint.Quantity`
Air temperature
rh : `pint.Quantity`
The relative humidity expressed as a unitless ratio in the range [0, 1].
Can also pass a percentage if proper units are attached.
Returns
-------
`pint.Quantity`
The corresponding Heat Index value(s)
Other Parameters
----------------
mask_undefined : bool, optional
A flag indicating whether a masked array should be returned with
values masked where the temperature < 80F. Defaults to `True`.
See Also
--------
windchill
"""
temperature = atleast_1d(temperature)
rh = atleast_1d(rh)
# assign units to rh if they currently are not present
if not hasattr(rh, 'units'):
rh = rh * units.dimensionless
delta = temperature.to(units.degF) - 0. * units.degF
rh2 = rh * rh
delta2 = delta * delta
    # Simplified Heat Index -- constants converted for RH in [0, 1]
a = -10.3 * units.degF + 1.1 * delta + 4.7 * units.delta_degF * rh
# More refined Heat Index -- constants converted for RH in [0, 1]
b = (-42.379 * units.degF
+ 2.04901523 * delta
+ 1014.333127 * units.delta_degF * rh
- 22.475541 * delta * rh
- 6.83783e-3 / units.delta_degF * delta2
- 5.481717e2 * units.delta_degF * rh2
+ 1.22874e-1 / units.delta_degF * delta2 * rh
+ 8.5282 * delta * rh2
- 1.99e-2 / units.delta_degF * delta2 * rh2)
# Create return heat index
hi = np.full(np.shape(temperature), np.nan) * units.degF
# Retain masked status of temperature with resulting heat index
if hasattr(temperature, 'mask'):
hi = masked_array(hi)
# If T <= 40F, Heat Index is T
sel = (temperature <= 40. * units.degF)
if np.any(sel):
hi[sel] = temperature[sel].to(units.degF)
# If a < 79F and hi is unset, Heat Index is a
sel = (a < 79. * units.degF) & np.isnan(hi)
if np.any(sel):
hi[sel] = a[sel]
# Use b now for anywhere hi has yet to be set
sel = np.isnan(hi)
if np.any(sel):
hi[sel] = b[sel]
# Adjustment for RH <= 13% and 80F <= T <= 112F
sel = ((rh <= 13. * units.percent) & (temperature >= 80. * units.degF)
& (temperature <= 112. * units.degF))
if np.any(sel):
rh15adj = ((13. - rh * 100.) / 4.
* ((17. * units.delta_degF - np.abs(delta - 95. * units.delta_degF))
/ 17. * units.delta_degF) ** 0.5)
hi[sel] = hi[sel] - rh15adj[sel]
# Adjustment for RH > 85% and 80F <= T <= 87F
sel = ((rh > 85. * units.percent) & (temperature >= 80. * units.degF)
& (temperature <= 87. * units.degF))
if np.any(sel):
rh85adj = 0.02 * (rh * 100. - 85.) * (87. * units.delta_degF - delta)
hi[sel] = hi[sel] + rh85adj[sel]
# See if we need to mask any undefined values
if mask_undefined:
mask = np.array(temperature < 80. * units.degF)
if mask.any():
hi = masked_array(hi, mask=mask)
return hi
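# Hedged usage sketch (not part of the original module): one heat index value.
# The inputs are illustrative assumptions; the Rothfusz regression above gives
# a value close to 100 degF for 90 degF and 60 percent relative humidity.
def _demo_heat_index():
    return heat_index(90. * units.degF, 60. * units.percent).to('degF')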
@exporter.export
@preprocess_xarray
@check_units(temperature='[temperature]', speed='[speed]')
def apparent_temperature(temperature, rh, speed, face_level_winds=False, mask_undefined=True):
r"""Calculate the current apparent temperature.
Calculates the current apparent temperature based on the wind chill or heat index
as appropriate for the current conditions. Follows [NWS10201]_.
Parameters
----------
temperature : `pint.Quantity`
The air temperature
rh : `pint.Quantity`
The relative humidity expressed as a unitless ratio in the range [0, 1].
Can also pass a percentage if proper units are attached.
speed : `pint.Quantity`
The wind speed at 10m. If instead the winds are at face level,
`face_level_winds` should be set to `True` and the 1.5 multiplicative
correction will be applied automatically.
face_level_winds : bool, optional
        A flag indicating whether the wind speeds were measured at face
level instead of 10m, thus requiring a correction. Defaults to
`False`.
mask_undefined : bool, optional
A flag indicating whether a masked array should be returned with
values where wind chill or heat_index is undefined masked. For wind
chill, these are values where the temperature > 50F or
wind speed <= 3 miles per hour. For heat index, these are values
where the temperature < 80F.
Defaults to `True`.
Returns
-------
`pint.Quantity`
The corresponding apparent temperature value(s)
See Also
--------
heat_index, windchill
"""
is_not_scalar = isinstance(temperature.m, (list, tuple, np.ndarray))
temperature = atleast_1d(temperature)
rh = atleast_1d(rh)
speed = atleast_1d(speed)
# NB: mask_defined=True is needed to know where computed values exist
wind_chill_temperature = windchill(temperature, speed, face_level_winds=face_level_winds,
mask_undefined=True).to(temperature.units)
heat_index_temperature = heat_index(temperature, rh,
mask_undefined=True).to(temperature.units)
# Combine the heat index and wind chill arrays (no point has a value in both)
# NB: older numpy.ma.where does not return a masked array
app_temperature = masked_array(
np.ma.where(masked_array(wind_chill_temperature).mask,
heat_index_temperature.to(temperature.units),
wind_chill_temperature.to(temperature.units)
), temperature.units)
# If mask_undefined is False, then set any masked values to the temperature
if not mask_undefined:
app_temperature[app_temperature.mask] = temperature[app_temperature.mask]
# If no values are masked and provided temperature does not have a mask
# we should return a non-masked array
if not np.any(app_temperature.mask) and not hasattr(temperature, 'mask'):
app_temperature = np.array(app_temperature.m) * temperature.units
if is_not_scalar:
return app_temperature
else:
return atleast_1d(app_temperature)[0]
@exporter.export
@preprocess_xarray
@check_units('[pressure]')
def pressure_to_height_std(pressure):
r"""Convert pressure data to heights using the U.S. standard atmosphere [NOAA1976]_.
The implementation uses the formula outlined in [Hobbs1977]_ pg.60-61.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure
Returns
-------
`pint.Quantity`
The corresponding height value(s)
Notes
-----
.. math:: Z = \frac{T_0}{\Gamma}[1-\frac{p}{p_0}^\frac{R\Gamma}{g}]
"""
gamma = 6.5 * units('K/km')
return (t0 / gamma) * (1 - (pressure / p0).to('dimensionless')**(
mpconsts.Rd * gamma / mpconsts.g))
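# Hedged usage sketch (not part of the original module): standard-atmosphere
# heights for a few pressure levels. The levels are illustrative; 850 hPa sits
# near 1.46 km and 500 hPa near 5.6 km in the standard atmosphere.
def _demo_pressure_to_height_std():
    return pressure_to_height_std(np.array([1013.25, 850., 500.]) * units.hPa)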
@exporter.export
@preprocess_xarray
@check_units('[length]')
def height_to_geopotential(height):
r"""Compute geopotential for a given height.
Calculates the geopotential from height using the following formula, which is derived from
the definition of geopotential as given in [Hobbs2006]_ Pg. 69 Eq 3.21:
.. math:: \Phi = G m_e \left( \frac{1}{R_e} - \frac{1}{R_e + z}\right)
(where :math:`\Phi` is geopotential, :math:`z` is height, :math:`R_e` is average Earth
radius, :math:`G` is the (universal) gravitational constant, and :math:`m_e` is the
approximate mass of Earth.)
Parameters
----------
height : `pint.Quantity`
Height above sea level
Returns
-------
`pint.Quantity`
The corresponding geopotential value(s)
Examples
--------
>>> import metpy.calc
>>> from metpy.units import units
>>> height = np.linspace(0, 10000, num=11) * units.m
>>> geopot = metpy.calc.height_to_geopotential(height)
>>> geopot
<Quantity([ 0. 9817.46806283 19631.85526579 29443.16305887
39251.39289118 49056.54621087 58858.62446524 68657.62910064
78453.56156252 88246.42329544 98036.21574305], 'meter ** 2 / second ** 2')>
"""
# Direct implementation of formula from Hobbs yields poor numerical results (see
# gh-1075), so was replaced with algebraic equivalent.
return (mpconsts.G * mpconsts.me / mpconsts.Re) * (height / (mpconsts.Re + height))
@exporter.export
@preprocess_xarray
def geopotential_to_height(geopot):
r"""Compute height from a given geopotential.
Calculates the height from geopotential using the following formula, which is derived from
the definition of geopotential as given in [Hobbs2006]_ Pg. 69 Eq 3.21:
.. math:: z = \frac{1}{\frac{1}{R_e} - \frac{\Phi}{G m_e}} - R_e
(where :math:`\Phi` is geopotential, :math:`z` is height, :math:`R_e` is average Earth
radius, :math:`G` is the (universal) gravitational constant, and :math:`m_e` is the
approximate mass of Earth.)
Parameters
----------
    geopot : `pint.Quantity`
Geopotential
Returns
-------
`pint.Quantity`
The corresponding height value(s)
Examples
--------
>>> import metpy.calc
>>> from metpy.units import units
>>> height = np.linspace(0, 10000, num=11) * units.m
>>> geopot = metpy.calc.height_to_geopotential(height)
>>> geopot
<Quantity([ 0. 9817.46806283 19631.85526579 29443.16305887
39251.39289118 49056.54621087 58858.62446524 68657.62910064
78453.56156252 88246.42329544 98036.21574305], 'meter ** 2 / second ** 2')>
>>> height = metpy.calc.geopotential_to_height(geopot)
>>> height
<Quantity([ 0. 1000. 2000. 3000. 4000. 5000. 6000. 7000. 8000.
9000. 10000.], 'meter')>
"""
# Direct implementation of formula from Hobbs yields poor numerical results (see
# gh-1075), so was replaced with algebraic equivalent.
scaled = geopot * mpconsts.Re
return scaled * mpconsts.Re / (mpconsts.G * mpconsts.me - scaled)
@exporter.export
@preprocess_xarray
@check_units('[length]')
def height_to_pressure_std(height):
r"""Convert height data to pressures using the U.S. standard atmosphere [NOAA1976]_.
The implementation inverts the formula outlined in [Hobbs1977]_ pg.60-61.
Parameters
----------
height : `pint.Quantity`
Atmospheric height
Returns
-------
`pint.Quantity`
The corresponding pressure value(s)
Notes
-----
.. math:: p = p_0 e^{\frac{g}{R \Gamma} \text{ln}(1-\frac{Z \Gamma}{T_0})}
"""
gamma = 6.5 * units('K/km')
return p0 * (1 - (gamma / t0) * height) ** (mpconsts.g / (mpconsts.Rd * gamma))
@exporter.export
@preprocess_xarray
def coriolis_parameter(latitude):
r"""Calculate the coriolis parameter at each point.
The implementation uses the formula outlined in [Hobbs1977]_ pg.370-371.
Parameters
----------
latitude : array_like
Latitude at each point
Returns
-------
`pint.Quantity`
The corresponding coriolis force at each point
"""
latitude = _check_radians(latitude, max_radians=np.pi / 2)
return (2. * mpconsts.omega * np.sin(latitude)).to('1/s')
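# Hedged usage sketch (not part of the original module): the Coriolis parameter
# at 45 degrees latitude, roughly 1.03e-4 per second. The latitude value is an
# illustrative assumption.
def _demo_coriolis_parameter():
    return coriolis_parameter(45. * units.deg)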
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[length]')
def add_height_to_pressure(pressure, height):
r"""Calculate the pressure at a certain height above another pressure level.
This assumes a standard atmosphere [NOAA1976]_.
Parameters
----------
pressure : `pint.Quantity`
Pressure level
height : `pint.Quantity`
Height above a pressure level
Returns
-------
`pint.Quantity`
The corresponding pressure value for the height above the pressure level
See Also
--------
pressure_to_height_std, height_to_pressure_std, add_pressure_to_height
"""
pressure_level_height = pressure_to_height_std(pressure)
return height_to_pressure_std(pressure_level_height + height)
@exporter.export
@preprocess_xarray
@check_units('[length]', '[pressure]')
def add_pressure_to_height(height, pressure):
r"""Calculate the height at a certain pressure above another height.
This assumes a standard atmosphere [NOAA1976]_.
Parameters
----------
height : `pint.Quantity`
Height level
pressure : `pint.Quantity`
Pressure above height level
Returns
-------
`pint.Quantity`
The corresponding height value for the pressure above the height level
See Also
--------
pressure_to_height_std, height_to_pressure_std, add_height_to_pressure
"""
pressure_at_height = height_to_pressure_std(height)
return pressure_to_height_std(pressure_at_height - pressure)
@exporter.export
@preprocess_xarray
@check_units('[dimensionless]', '[pressure]', '[pressure]')
def sigma_to_pressure(sigma, psfc, ptop):
r"""Calculate pressure from sigma values.
Parameters
----------
sigma : ndarray
The sigma levels to be converted to pressure levels.
psfc : `pint.Quantity`
The surface pressure value.
ptop : `pint.Quantity`
The pressure value at the top of the model domain.
Returns
-------
`pint.Quantity`
The pressure values at the given sigma levels.
Notes
-----
Sigma definition adapted from [Philips1957]_.
.. math:: p = \sigma * (p_{sfc} - p_{top}) + p_{top}
* :math:`p` is pressure at a given `\sigma` level
* :math:`\sigma` is non-dimensional, scaled pressure
* :math:`p_{sfc}` is pressure at the surface or model floor
* :math:`p_{top}` is pressure at the top of the model domain
"""
if np.any(sigma < 0) or np.any(sigma > 1):
raise ValueError('Sigma values should be bounded by 0 and 1')
if psfc.magnitude < 0 or ptop.magnitude < 0:
raise ValueError('Pressure input should be non-negative')
return sigma * (psfc - ptop) + ptop
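# Hedged usage sketch (not part of the original module): sigma levels of 0,
# 0.5 and 1 map to the model top, the mid layer and the surface. The surface
# and top pressures below are illustrative assumptions.
def _demo_sigma_to_pressure():
    sigma = np.array([0., 0.5, 1.]) * units.dimensionless
    return sigma_to_pressure(sigma, 1000. * units.hPa, 100. * units.hPa)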
@exporter.export
@preprocess_xarray
def smooth_gaussian(scalar_grid, n):
"""Filter with normal distribution of weights.
Parameters
----------
scalar_grid : `pint.Quantity`
Some n-dimensional scalar grid. If more than two axes, smoothing
is only done across the last two.
n : int
Degree of filtering
Returns
-------
`pint.Quantity`
The filtered 2D scalar grid
Notes
-----
This function is a close replication of the GEMPAK function GWFS,
but is not identical. The following notes are incorporated from
the GEMPAK source code:
This function smoothes a scalar grid using a moving average
low-pass filter whose weights are determined by the normal
(Gaussian) probability distribution function for two dimensions.
The weight given to any grid point within the area covered by the
moving average for a target grid point is proportional to
EXP [ -( D ** 2 ) ],
where D is the distance from that point to the target point divided
by the standard deviation of the normal distribution. The value of
the standard deviation is determined by the degree of filtering
requested. The degree of filtering is specified by an integer.
This integer is the number of grid increments from crest to crest
of the wave for which the theoretical response is 1/e = .3679. If
the grid increment is called delta_x, and the value of this integer
is represented by N, then the theoretical filter response function
value for the N * delta_x wave will be 1/e. The actual response
function will be greater than the theoretical value.
The larger N is, the more severe the filtering will be, because the
response function for all wavelengths shorter than N * delta_x
will be less than 1/e. Furthermore, as N is increased, the slope
of the filter response function becomes more shallow; so, the
response at all wavelengths decreases, but the amount of decrease
lessens with increasing wavelength. (The theoretical response
function can be obtained easily--it is the Fourier transform of the
weight function described above.)
The area of the patch covered by the moving average varies with N.
As N gets bigger, the smoothing gets stronger, and weight values
farther from the target grid point are larger because the standard
deviation of the normal distribution is bigger. Thus, increasing
N has the effect of expanding the moving average window as well as
changing the values of weights. The patch is a square covering all
points whose weight values are within two standard deviations of the
mean of the two dimensional normal distribution.
The key difference between GEMPAK's GWFS and this function is that,
in GEMPAK, the leftover weight values representing the fringe of the
distribution are applied to the target grid point. In this
function, the leftover weights are not used.
When this function is invoked, the first argument is the grid to be
smoothed, the second is the value of N as described above:
GWFS ( S, N )
where N > 1. If N <= 1, N = 2 is assumed. For example, if N = 4,
then the 4 delta x wave length is passed with approximate response
1/e.
"""
# Compute standard deviation in a manner consistent with GEMPAK
n = int(round(n))
if n < 2:
n = 2
sgma = n / (2 * np.pi)
# Construct sigma sequence so smoothing occurs only in horizontal direction
nax = len(scalar_grid.shape)
# Assume the last two axes represent the horizontal directions
sgma_seq = [sgma if i > nax - 3 else 0 for i in range(nax)]
# Compute smoothed field and reattach units
res = gaussian_filter(scalar_grid, sgma_seq, truncate=2 * np.sqrt(2))
if hasattr(scalar_grid, 'units'):
res = res * scalar_grid.units
return res
@exporter.export
@preprocess_xarray
def smooth_n_point(scalar_grid, n=5, passes=1):
"""Filter with normal distribution of weights.
Parameters
----------
scalar_grid : array-like or `pint.Quantity`
Some 2D scalar grid to be smoothed.
n: int
The number of points to use in smoothing, only valid inputs
are 5 and 9. Defaults to 5.
passes : int
The number of times to apply the filter to the grid. Defaults
to 1.
Returns
-------
array-like or `pint.Quantity`
The filtered 2D scalar grid.
Notes
-----
This function is a close replication of the GEMPAK function SM5S
and SM9S depending on the choice of the number of points to use
for smoothing. This function can be applied multiple times to
create a more smoothed field and will only smooth the interior
points, leaving the end points with their original values. If a
    masked value or NaN value exists in the array, it will propagate
to any point that uses that particular grid point in the smoothing
calculation. Applying the smoothing function multiple times will
    propagate NaNs further throughout the domain.
"""
if n == 9:
p = 0.25
q = 0.125
r = 0.0625
elif n == 5:
p = 0.5
q = 0.125
r = 0.0
else:
raise ValueError('The number of points to use in the smoothing '
'calculation must be either 5 or 9.')
smooth_grid = scalar_grid[:].copy()
for _i in range(passes):
smooth_grid[1:-1, 1:-1] = (p * smooth_grid[1:-1, 1:-1]
+ q * (smooth_grid[2:, 1:-1] + smooth_grid[1:-1, 2:]
+ smooth_grid[:-2, 1:-1] + smooth_grid[1:-1, :-2])
                                   + r * (smooth_grid[2:, 2:] + smooth_grid[2:, :-2]
+ smooth_grid[:-2, 2:] + smooth_grid[:-2, :-2]))
return smooth_grid
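# Hedged usage sketch (not part of the original module): with the 5-point
# weights (p=0.5, q=0.125, r=0), a lone interior spike of 8 is halved to 4 in
# one pass while the boundary values are untouched. The grid is an
# illustrative assumption.
def _demo_smooth_n_point():
    grid = np.zeros((3, 3))
    grid[1, 1] = 8.
    smoothed = smooth_n_point(grid, n=5, passes=1)
    assert smoothed[1, 1] == 4.  # q-terms vanish because all neighbours are zero
    return smoothed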
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[length]')
def altimeter_to_station_pressure(altimeter_value, height):
r"""Convert the altimeter measurement to station pressure.
    This function is useful for working with METARs, since they report an
    altimeter setting but not sea-level pressure or station pressure.
The following definitions of altimeter setting and station pressure
are taken from [Smithsonian1951]_ Altimeter setting is the
pressure value to which an aircraft altimeter scale is set so that it will
indicate the altitude above mean sea-level of an aircraft on the ground at the
location for which the value is determined. It assumes a standard atmosphere [NOAA1976]_.
Station pressure is the atmospheric pressure at the designated station elevation.
Finding the station pressure can be helpful for calculating sea-level pressure
or other parameters.
Parameters
----------
altimeter_value : `pint.Quantity`
The altimeter setting value as defined by the METAR or other observation,
which can be measured in either inches of mercury (in. Hg) or millibars (mb)
height: `pint.Quantity`
Elevation of the station measuring pressure.
Returns
-------
`pint.Quantity`
The station pressure in hPa or in. Hg, which can be used to calculate sea-level
pressure
See Also
--------
altimeter_to_sea_level_pressure
Notes
-----
This function is implemented using the following equations from the
Smithsonian Handbook (1951) p. 269
Equation 1:
.. math:: A_{mb} = (p_{mb} - 0.3)F
Equation 3:
.. math:: F = \left [1 + \left(\frac{p_{0}^n a}{T_{0}} \right)
\frac{H_{b}}{p_{1}^n} \right ] ^ \frac{1}{n}
Where
:math:`p_{0}` = standard sea-level pressure = 1013.25 mb
:math:`p_{1} = p_{mb} - 0.3` when :math:`p_{0} = 1013.25 mb`
gamma = lapse rate in [NOAA1976]_ standard atmosphere below the isothermal layer
:math:`6.5^{\circ}C. km.^{-1}`
:math:`t_{0}` = standard sea-level temperature 288 K
:math:`H_{b} =` station elevation in meters (elevation for which station
pressure is given)
:math:`n = \frac{a R_{d}}{g} = 0.190284` where :math:`R_{d}` is the gas
constant for dry air
And solving for :math:`p_{mb}` results in the equation below, which is used to
calculate station pressure :math:`(p_{mb})`
.. math:: p_{mb} = \left [A_{mb} ^ n - \left (\frac{p_{0} a H_{b}}{T_0}
\right) \right] ^ \frac{1}{n} + 0.3
"""
# Gamma Value for this case
gamma = 0.0065 * units('K/m')
# N-Value
n = (mpconsts.Rd * gamma / mpconsts.g).to_base_units()
return ((altimeter_value ** n
- ((p0.to(altimeter_value.units) ** n * gamma * height) / t0)) ** (1 / n)
+ 0.3 * units.hPa)
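# Hedged usage sketch (not part of the original module): an altimeter setting
# of 1013 hPa at a station 1000 m above sea level gives a station pressure of
# roughly 900 hPa, consistent with the standard-atmosphere pressure at that
# height. The inputs are illustrative assumptions.
def _demo_altimeter_to_station_pressure():
    return altimeter_to_station_pressure(1013. * units.hPa, 1000. * units.m)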
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[length]', '[temperature]')
def altimeter_to_sea_level_pressure(altimeter_value, height, temperature):
r"""Convert the altimeter setting to sea-level pressure.
This function is useful for working with METARs since most provide
altimeter values, but not sea-level pressure, which is often plotted
on surface maps. The following definitions of altimeter setting, station pressure, and
sea-level pressure are taken from [Smithsonian1951]_
Altimeter setting is the pressure value to which an aircraft altimeter scale
is set so that it will indicate the altitude above mean sea-level of an aircraft
on the ground at the location for which the value is determined. It assumes a standard
atmosphere. Station pressure is the atmospheric pressure at the designated station
elevation. Sea-level pressure is a pressure value obtained by the theoretical reduction
of barometric pressure to sea level. It is assumed that atmosphere extends to sea level
below the station and that the properties of the atmosphere are related to conditions
observed at the station. This value is recorded by some surface observation stations,
but not all. If the value is recorded, it can be found in the remarks section. Finding
the sea-level pressure is helpful for plotting purposes and different calculations.
Parameters
----------
altimeter_value : 'pint.Quantity'
The altimeter setting value is defined by the METAR or other observation,
with units of inches of mercury (in Hg) or millibars (hPa)
height : 'pint.Quantity'
Elevation of the station measuring pressure. Often times measured in meters
temperature : 'pint.Quantity'
Temperature at the station
Returns
-------
'pint.Quantity'
The sea-level pressure in hPa and makes pressure values easier to compare
between different stations
See Also
--------
altimeter_to_station_pressure
Notes
-----
This function is implemented using the following equations from Wallace and Hobbs (1977)
Equation 2.29:
.. math::
\Delta z = Z_{2} - Z_{1}
= \frac{R_{d} \bar T_{v}}{g_0}ln\left(\frac{p_{1}}{p_{2}}\right)
= \bar H ln \left (\frac {p_{1}}{p_{2}} \right)
Equation 2.31:
.. math::
p_{0} = p_{g}exp \left(\frac{Z_{g}}{\bar H} \right) \\
= p_{g}exp \left(\frac{g_{0}Z_{g}}{R_{d}\bar T_{v}} \right)
    Then by substituting :math:`\Delta z` for :math:`Z_{g}` in Equation 2.31:
    .. math:: p_{\text{sea level}} = p_{\text{station}}\exp\left(\frac{\Delta z}{H}\right)
    where :math:`\Delta z` is the elevation in meters and :math:`H = \frac{R_{d}T}{g}`
"""
# Calculate the station pressure using function altimeter_to_station_pressure()
psfc = altimeter_to_station_pressure(altimeter_value, height)
# Calculate the scale height
h = mpconsts.Rd * temperature / mpconsts.g
return psfc * np.exp(height / h)
def _check_radians(value, max_radians=2 * np.pi):
"""Input validation of values that could be in degrees instead of radians.
Parameters
----------
value : `pint.Quantity`
The input value to check.
max_radians : float
Maximum absolute value of radians before warning.
Returns
-------
`pint.Quantity`
The input value
"""
try:
value = value.to('radians').m
except AttributeError:
pass
if np.greater(np.nanmax(np.abs(value)), max_radians):
warnings.warn('Input over {} radians. '
'Ensure proper units are given.'.format(max_radians))
return value
|
[
"numpy.abs",
"numpy.sqrt",
"numpy.asarray",
"numpy.any",
"numpy.exp",
"numpy.array",
"numpy.arctan2",
"numpy.isnan",
"numpy.cos",
"numpy.sin",
"numpy.shape"
] |
[((1176, 1198), 'numpy.sqrt', 'np.sqrt', (['(u * u + v * v)'], {}), '(u * u + v * v)\n', (1183, 1198), True, 'import numpy as np\n'), ((2865, 2882), 'numpy.any', 'np.any', (['calm_mask'], {}), '(calm_mask)\n', (2871, 2882), True, 'import numpy as np\n'), ((8772, 8783), 'numpy.any', 'np.any', (['sel'], {}), '(sel)\n', (8778, 8783), True, 'import numpy as np\n'), ((8941, 8952), 'numpy.any', 'np.any', (['sel'], {}), '(sel)\n', (8947, 8952), True, 'import numpy as np\n'), ((9040, 9052), 'numpy.isnan', 'np.isnan', (['hi'], {}), '(hi)\n', (9048, 9052), True, 'import numpy as np\n'), ((9060, 9071), 'numpy.any', 'np.any', (['sel'], {}), '(sel)\n', (9066, 9071), True, 'import numpy as np\n'), ((9282, 9293), 'numpy.any', 'np.any', (['sel'], {}), '(sel)\n', (9288, 9293), True, 'import numpy as np\n'), ((9702, 9713), 'numpy.any', 'np.any', (['sel'], {}), '(sel)\n', (9708, 9713), True, 'import numpy as np\n'), ((2254, 2272), 'numpy.arctan2', 'np.arctan2', (['(-v)', '(-u)'], {}), '(-v, -u)\n', (2264, 2272), True, 'import numpy as np\n'), ((3982, 3994), 'numpy.sin', 'np.sin', (['wdir'], {}), '(wdir)\n', (3988, 3994), True, 'import numpy as np\n'), ((4012, 4024), 'numpy.cos', 'np.cos', (['wdir'], {}), '(wdir)\n', (4018, 4024), True, 'import numpy as np\n'), ((6208, 6269), 'numpy.array', 'np.array', (['((temperature > temp_limit) | (speed <= speed_limit))'], {}), '((temperature > temp_limit) | (speed <= speed_limit))\n', (6216, 6269), True, 'import numpy as np\n'), ((8921, 8933), 'numpy.isnan', 'np.isnan', (['hi'], {}), '(hi)\n', (8929, 8933), True, 'import numpy as np\n'), ((9923, 9964), 'numpy.array', 'np.array', (['(temperature < 80.0 * units.degF)'], {}), '(temperature < 80.0 * units.degF)\n', (9931, 9964), True, 'import numpy as np\n'), ((20720, 20737), 'numpy.any', 'np.any', (['(sigma < 0)'], {}), '(sigma < 0)\n', (20726, 20737), True, 'import numpy as np\n'), ((20741, 20758), 'numpy.any', 'np.any', (['(sigma > 1)'], {}), '(sigma > 1)\n', (20747, 20758), True, 'import numpy as np\n'), ((32848, 32866), 'numpy.exp', 'np.exp', (['(height / h)'], {}), '(height / h)\n', (32854, 32866), True, 'import numpy as np\n'), ((2703, 2726), 'numpy.asarray', 'np.asarray', (['u.magnitude'], {}), '(u.magnitude)\n', (2713, 2726), True, 'import numpy as np\n'), ((2737, 2760), 'numpy.asarray', 'np.asarray', (['v.magnitude'], {}), '(v.magnitude)\n', (2747, 2760), True, 'import numpy as np\n'), ((8506, 8527), 'numpy.shape', 'np.shape', (['temperature'], {}), '(temperature)\n', (8514, 8527), True, 'import numpy as np\n'), ((12986, 13014), 'numpy.any', 'np.any', (['app_temperature.mask'], {}), '(app_temperature.mask)\n', (12992, 13014), True, 'import numpy as np\n'), ((13079, 13106), 'numpy.array', 'np.array', (['app_temperature.m'], {}), '(app_temperature.m)\n', (13087, 13106), True, 'import numpy as np\n'), ((33367, 33380), 'numpy.abs', 'np.abs', (['value'], {}), '(value)\n', (33373, 33380), True, 'import numpy as np\n'), ((18231, 18247), 'numpy.sin', 'np.sin', (['latitude'], {}), '(latitude)\n', (18237, 18247), True, 'import numpy as np\n'), ((24835, 24845), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (24842, 24845), True, 'import numpy as np\n'), ((9385, 9424), 'numpy.abs', 'np.abs', (['(delta - 95.0 * units.delta_degF)'], {}), '(delta - 95.0 * units.delta_degF)\n', (9391, 9424), True, 'import numpy as np\n')]
|
"""Objects representing regions in space."""
import math
import random
import itertools
import numpy
import scipy.spatial
import shapely.geometry
import shapely.ops
from scenic.core.distributions import Samplable, RejectionException, needsSampling
from scenic.core.lazy_eval import valueInContext
from scenic.core.vectors import Vector, OrientedVector, VectorDistribution
from scenic.core.geometry import RotatedRectangle
from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors
from scenic.core.geometry import headingOfSegment, triangulatePolygon, plotPolygon, polygonUnion
from scenic.core.type_support import toVector
from scenic.core.utils import cached, areEquivalent
def toPolygon(thing):
if needsSampling(thing):
return None
if hasattr(thing, 'polygon'):
return thing.polygon
if hasattr(thing, 'polygons'):
return thing.polygons
if hasattr(thing, 'lineString'):
return thing.lineString
return None
def regionFromShapelyObject(obj, orientation=None):
"""Build a 'Region' from Shapely geometry."""
assert obj.is_valid, obj
if obj.is_empty:
return nowhere
elif isinstance(obj, (shapely.geometry.Polygon, shapely.geometry.MultiPolygon)):
return PolygonalRegion(polygon=obj, orientation=orientation)
elif isinstance(obj, (shapely.geometry.LineString, shapely.geometry.MultiLineString)):
return PolylineRegion(polyline=obj, orientation=orientation)
else:
raise RuntimeError(f'unhandled type of Shapely geometry: {obj}')
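# Hedged usage sketch (illustrative only): wrapping raw Shapely geometry in a Scenic
# Region via regionFromShapelyObject.
#     square = shapely.geometry.Polygon([(0, 0), (4, 0), (4, 4), (0, 4)])
#     regionFromShapelyObject(square)    # -> PolygonalRegion
#     path = shapely.geometry.LineString([(0, 0), (4, 4)])
#     regionFromShapelyObject(path)      # -> PolylineRegion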
class PointInRegionDistribution(VectorDistribution):
"""Uniform distribution over points in a Region"""
def __init__(self, region):
super().__init__(region)
self.region = region
def sampleGiven(self, value):
return value[self.region].uniformPointInner()
def __str__(self):
return f'PointIn({self.region})'
class Region(Samplable):
"""Abstract class for regions."""
def __init__(self, name, *dependencies, orientation=None):
super().__init__(dependencies)
self.name = name
self.orientation = orientation
def sampleGiven(self, value):
return self
def intersect(self, other, triedReversed=False):
"""Get a `Region` representing the intersection of this one with another."""
if triedReversed:
return IntersectionRegion(self, other)
else:
return other.intersect(self, triedReversed=True)
@staticmethod
def uniformPointIn(region):
"""Get a uniform `Distribution` over points in a `Region`."""
return PointInRegionDistribution(region)
def uniformPoint(self):
"""Sample a uniformly-random point in this `Region`.
Can only be called on fixed Regions with no random parameters.
"""
assert not needsSampling(self)
return self.uniformPointInner()
def uniformPointInner(self):
"""Do the actual random sampling. Implemented by subclasses."""
raise NotImplementedError()
def containsPoint(self, point):
"""Check if the `Region` contains a point. Implemented by subclasses."""
raise NotImplementedError()
def containsObject(self, obj):
"""Check if the `Region` contains an :obj:`~scenic.core.object_types.Object`.
The default implementation assumes the `Region` is convex; subclasses must
override the method if this is not the case.
"""
for corner in obj.corners:
if not self.containsPoint(corner):
return False
return True
def __contains__(self, thing):
"""Check if this `Region` contains an object or vector."""
from scenic.core.object_types import Object
if isinstance(thing, Object):
return self.containsObject(thing)
vec = toVector(thing, '"X in Y" with X not an Object or a vector')
return self.containsPoint(vec)
def getAABB(self):
"""Axis-aligned bounding box for this `Region`. Implemented by some subclasses."""
raise NotImplementedError()
def orient(self, vec):
"""Orient the given vector along the region's orientation, if any."""
if self.orientation is None:
return vec
else:
return OrientedVector(vec.x, vec.y, self.orientation[vec])
def __str__(self):
return f'<Region {self.name}>'
class AllRegion(Region):
"""Region consisting of all space."""
def intersect(self, other, triedReversed=False):
return other
def containsPoint(self, point):
return True
def containsObject(self, obj):
return True
def __eq__(self, other):
return type(other) is AllRegion
def __hash__(self):
return hash(AllRegion)
class EmptyRegion(Region):
"""Region containing no points."""
def intersect(self, other, triedReversed=False):
return self
def uniformPointInner(self):
raise RejectionException(f'sampling empty Region')
def containsPoint(self, point):
return False
def containsObject(self, obj):
return False
def show(self, plt, style=None):
pass
def __eq__(self, other):
return type(other) is EmptyRegion
def __hash__(self):
return hash(EmptyRegion)
everywhere = AllRegion('everywhere')
nowhere = EmptyRegion('nowhere')
class CircularRegion(Region):
def __init__(self, center, radius, resolution=32):
super().__init__('Circle', center, radius)
self.center = center.toVector()
self.radius = radius
self.circumcircle = (self.center, self.radius)
if not (needsSampling(self.center) or needsSampling(self.radius)):
ctr = shapely.geometry.Point(self.center)
self.polygon = ctr.buffer(self.radius, resolution=resolution)
def sampleGiven(self, value):
return CircularRegion(value[self.center], value[self.radius])
def evaluateInner(self, context):
center = valueInContext(self.center, context)
radius = valueInContext(self.radius, context)
return CircularRegion(center, radius)
def containsPoint(self, point):
point = point.toVector()
return point.distanceTo(self.center) <= self.radius
def uniformPointInner(self):
x, y = self.center
r = random.triangular(0, self.radius, self.radius)
t = random.uniform(-math.pi, math.pi)
pt = Vector(x + (r * cos(t)), y + (r * sin(t)))
return self.orient(pt)
def getAABB(self):
x, y = self.center
r = self.radius
return ((x - r, y - r), (x + r, y + r))
def isEquivalentTo(self, other):
if type(other) is not CircularRegion:
return False
return (areEquivalent(other.center, self.center)
and areEquivalent(other.radius, self.radius))
def __str__(self):
return f'CircularRegion({self.center}, {self.radius})'
class SectorRegion(Region):
def __init__(self, center, radius, heading, angle, resolution=32):
super().__init__('Sector', center, radius, heading, angle)
self.center = center.toVector()
self.radius = radius
self.heading = heading
self.angle = angle
r = (radius / 2) * cos(angle / 2)
self.circumcircle = (self.center.offsetRadially(r, heading), r)
if not any(needsSampling(x) for x in (self.center, radius, heading, angle)):
ctr = shapely.geometry.Point(self.center)
circle = ctr.buffer(self.radius, resolution=resolution)
if angle >= math.tau - 0.001:
self.polygon = circle
else:
mask = shapely.geometry.Polygon([
self.center,
self.center.offsetRadially(radius, heading + angle/2),
self.center.offsetRadially(2*radius, heading),
self.center.offsetRadially(radius, heading - angle/2)
])
self.polygon = circle & mask
def sampleGiven(self, value):
return SectorRegion(value[self.center], value[self.radius],
value[self.heading], value[self.angle])
def evaluateInner(self, context):
center = valueInContext(self.center, context)
radius = valueInContext(self.radius, context)
heading = valueInContext(self.heading, context)
angle = valueInContext(self.angle, context)
return SectorRegion(center, radius, heading, angle)
def containsPoint(self, point):
point = point.toVector()
if not pointIsInCone(tuple(point), tuple(self.center), self.heading, self.angle):
return False
return point.distanceTo(self.center) <= self.radius
def uniformPointInner(self):
x, y = self.center
heading, angle, maxDist = self.heading, self.angle, self.radius
r = random.triangular(0, maxDist, maxDist)
ha = angle / 2.0
t = random.uniform(-ha, ha) + (heading + (math.pi / 2))
pt = Vector(x + (r * cos(t)), y + (r * sin(t)))
return self.orient(pt)
def isEquivalentTo(self, other):
if type(other) is not SectorRegion:
return False
return (areEquivalent(other.center, self.center)
and areEquivalent(other.radius, self.radius)
and areEquivalent(other.heading, self.heading)
and areEquivalent(other.angle, self.angle))
def __str__(self):
return f'SectorRegion({self.center},{self.radius},{self.heading},{self.angle})'
class RectangularRegion(RotatedRectangle, Region):
def __init__(self, position, heading, width, height):
super().__init__('Rectangle', position, heading, width, height)
self.position = position.toVector()
self.heading = heading
self.width = width
self.height = height
self.hw = hw = width / 2
self.hh = hh = height / 2
self.radius = hypot(hw, hh) # circumcircle; for collision detection
self.corners = tuple(position.offsetRotated(heading, Vector(*offset))
for offset in ((hw, hh), (-hw, hh), (-hw, -hh), (hw, -hh)))
self.circumcircle = (self.position, self.radius)
def sampleGiven(self, value):
return RectangularRegion(value[self.position], value[self.heading],
value[self.width], value[self.height])
def evaluateInner(self, context):
position = valueInContext(self.position, context)
heading = valueInContext(self.heading, context)
width = valueInContext(self.width, context)
height = valueInContext(self.height, context)
return RectangularRegion(position, heading, width, height)
def uniformPointInner(self):
hw, hh = self.hw, self.hh
rx = random.uniform(-hw, hw)
ry = random.uniform(-hh, hh)
pt = self.position.offsetRotated(self.heading, Vector(rx, ry))
return self.orient(pt)
def getAABB(self):
x, y = zip(*self.corners)
minx, maxx = findMinMax(x)
miny, maxy = findMinMax(y)
return ((minx, miny), (maxx, maxy))
def isEquivalentTo(self, other):
if type(other) is not RectangularRegion:
return False
return (areEquivalent(other.position, self.position)
and areEquivalent(other.heading, self.heading)
and areEquivalent(other.width, self.width)
and areEquivalent(other.height, self.height))
def __str__(self):
return f'RectangularRegion({self.position},{self.heading},{self.width},{self.height})'
class PolylineRegion(Region):
"""Region given by one or more polylines (chain of line segments)"""
def __init__(self, points=None, polyline=None, orientation=True):
super().__init__('Polyline', orientation=orientation)
if points is not None:
points = tuple(points)
if len(points) < 2:
raise RuntimeError('tried to create PolylineRegion with < 2 points')
self.points = points
self.lineString = shapely.geometry.LineString(points)
elif polyline is not None:
if isinstance(polyline, shapely.geometry.LineString):
if len(polyline.coords) < 2:
raise RuntimeError('tried to create PolylineRegion with <2-point LineString')
elif isinstance(polyline, shapely.geometry.MultiLineString):
if len(polyline) == 0:
raise RuntimeError('tried to create PolylineRegion from empty MultiLineString')
for line in polyline:
assert len(line.coords) >= 2
else:
raise RuntimeError('tried to create PolylineRegion from non-LineString')
self.lineString = polyline
else:
raise RuntimeError('must specify points or polyline for PolylineRegion')
if not self.lineString.is_valid:
raise RuntimeError('tried to create PolylineRegion with '
f'invalid LineString {self.lineString}')
self.segments = self.segmentsOf(self.lineString)
cumulativeLengths = []
total = 0
for p, q in self.segments:
dx, dy = p[0] - q[0], p[1] - q[1]
total += math.hypot(dx, dy)
cumulativeLengths.append(total)
self.cumulativeLengths = cumulativeLengths
@classmethod
def segmentsOf(cls, lineString):
if isinstance(lineString, shapely.geometry.LineString):
segments = []
points = list(lineString.coords)
if len(points) < 2:
raise RuntimeError('LineString has fewer than 2 points')
last = points[0]
for point in points[1:]:
segments.append((last, point))
last = point
return segments
elif isinstance(lineString, shapely.geometry.MultiLineString):
allSegments = []
for line in lineString:
allSegments.extend(cls.segmentsOf(line))
return allSegments
else:
raise RuntimeError('called segmentsOf on non-linestring')
def uniformPointInner(self):
pointA, pointB = random.choices(self.segments,
cum_weights=self.cumulativeLengths)[0]
interpolation = random.random()
x, y = averageVectors(pointA, pointB, weight=interpolation)
if self.orientation is True:
return OrientedVector(x, y, headingOfSegment(pointA, pointB))
else:
return self.orient(Vector(x, y))
def intersect(self, other, triedReversed=False):
poly = toPolygon(other)
if poly is not None:
intersection = self.lineString & poly
if (intersection.is_empty or
not isinstance(intersection, (shapely.geometry.LineString,
shapely.geometry.MultiLineString))):
# TODO handle points!
return nowhere
return PolylineRegion(polyline=intersection)
return super().intersect(other, triedReversed)
def containsPoint(self, point):
return self.lineString.intersects(shapely.geometry.Point(point))
def containsObject(self, obj):
return False
def getAABB(self):
xmin, ymin, xmax, ymax = self.lineString.bounds
return ((xmin, ymin), (xmax, ymax))
def show(self, plt, style='r-'):
for pointA, pointB in self.segments:
plt.plot([pointA[0], pointB[0]], [pointA[1], pointB[1]], style)
def __str__(self):
return f'PolylineRegion({self.lineString})'
def __eq__(self, other):
if type(other) is not PolylineRegion:
return NotImplemented
return (other.lineString == self.lineString)
@cached
def __hash__(self):
return hash(str(self.lineString))
class PolygonalRegion(Region):
"""Region given by one or more polygons (possibly with holes)"""
def __init__(self, points=None, polygon=None, orientation=None):
super().__init__('Polygon', orientation=orientation)
if polygon is None and points is None:
raise RuntimeError('must specify points or polygon for PolygonalRegion')
if polygon is None:
points = tuple(points)
if len(points) == 0:
raise RuntimeError('tried to create PolygonalRegion from empty point list!')
for point in points:
if needsSampling(point):
raise RuntimeError('only fixed PolygonalRegions are supported')
self.points = points
polygon = shapely.geometry.Polygon(points)
if isinstance(polygon, shapely.geometry.Polygon):
self.polygons = shapely.geometry.MultiPolygon([polygon])
elif isinstance(polygon, shapely.geometry.MultiPolygon):
self.polygons = polygon
else:
raise RuntimeError(f'tried to create PolygonalRegion from non-polygon {polygon}')
if not self.polygons.is_valid:
raise RuntimeError('tried to create PolygonalRegion with '
f'invalid polygon {self.polygons}')
if points is None and len(self.polygons) == 1 and len(self.polygons[0].interiors) == 0:
self.points = tuple(self.polygons[0].exterior.coords[:-1])
if self.polygons.is_empty:
raise RuntimeError('tried to create empty PolygonalRegion')
triangles = []
for polygon in self.polygons:
triangles.extend(triangulatePolygon(polygon))
assert len(triangles) > 0, self.polygons
self.trianglesAndBounds = tuple((tri, tri.bounds) for tri in triangles)
areas = (triangle.area for triangle in triangles)
self.cumulativeTriangleAreas = tuple(itertools.accumulate(areas))
def uniformPointInner(self):
triangle, bounds = random.choices(
self.trianglesAndBounds,
cum_weights=self.cumulativeTriangleAreas)[0]
minx, miny, maxx, maxy = bounds
# TODO improve?
while True:
x, y = random.uniform(minx, maxx), random.uniform(miny, maxy)
if triangle.intersects(shapely.geometry.Point(x, y)):
return self.orient(Vector(x, y))
def intersect(self, other, triedReversed=False):
poly = toPolygon(other)
orientation = other.orientation if self.orientation is None else self.orientation
if poly is not None:
intersection = self.polygons & poly
if intersection.is_empty:
return nowhere
elif isinstance(intersection, (shapely.geometry.Polygon,
shapely.geometry.MultiPolygon)):
return PolygonalRegion(polygon=intersection, orientation=orientation)
elif isinstance(intersection, shapely.geometry.GeometryCollection):
polys = []
for geom in intersection:
if isinstance(geom, shapely.geometry.Polygon):
polys.append(geom)
if len(polys) == 0:
# TODO handle points, lines
raise RuntimeError('unhandled type of polygon intersection')
intersection = shapely.geometry.MultiPolygon(polys)
return PolygonalRegion(polygon=intersection, orientation=orientation)
else:
# TODO handle points, lines
raise RuntimeError('unhandled type of polygon intersection')
return super().intersect(other, triedReversed)
def union(self, other):
poly = toPolygon(other)
if not poly:
raise RuntimeError(f'cannot take union of PolygonalRegion with {other}')
union = polygonUnion((self.polygons, poly))
return PolygonalRegion(polygon=union)
def containsPoint(self, point):
return self.polygons.intersects(shapely.geometry.Point(point))
def containsObject(self, obj):
objPoly = obj.polygon
if objPoly is None:
raise RuntimeError('tried to test containment of symbolic Object!')
# TODO improve boundary handling?
return self.polygons.contains(objPoly)
def getAABB(self):
xmin, ymin, xmax, ymax = self.polygons.bounds  # Shapely bounds are (minx, miny, maxx, maxy)
return ((xmin, ymin), (xmax, ymax))
def show(self, plt, style='r-'):
plotPolygon(self.polygons, plt, style=style)
def __str__(self):
return '<PolygonalRegion>'
def __eq__(self, other):
if type(other) is not PolygonalRegion:
return NotImplemented
return (other.polygons == self.polygons
and other.orientation == self.orientation)
@cached
def __hash__(self):
# TODO better way to hash mutable Shapely geometries? (also for PolylineRegion)
return hash((str(self.polygons), self.orientation))
class PointSetRegion(Region):
"""Region consisting of a set of discrete points.
No :obj:`~scenic.core.object_types.Object` can be contained in a `PointSetRegion`,
since the latter is discrete. (This may not be true for subclasses, e.g.
`GridRegion`.)
Args:
name (str): name for debugging
points (iterable): set of points comprising the region
kdtree (:obj:`scipy.spatial.KDTree`, optional): k-D tree for the points (one will
be computed if none is provided)
orientation (:obj:`~scenic.core.vectors.VectorField`, optional): orientation for
the region
tolerance (float, optional): distance tolerance for checking whether a point lies
in the region
"""
def __init__(self, name, points, kdTree=None, orientation=None, tolerance=1e-6):
super().__init__(name, orientation=orientation)
self.points = tuple(points)
for point in self.points:
if needsSampling(point):
raise RuntimeError('only fixed PointSetRegions are supported')
self.kdTree = scipy.spatial.cKDTree(self.points) if kdTree is None else kdTree
self.orientation = orientation
self.tolerance = tolerance
def uniformPointInner(self):
return self.orient(Vector(*random.choice(self.points)))
def intersect(self, other, triedReversed=False):
def sampler(intRegion):
o = intRegion.regions[1]
center, radius = o.circumcircle
possibles = (Vector(*self.kdTree.data[i])
for i in self.kdTree.query_ball_point(center, radius))
intersection = [p for p in possibles if o.containsPoint(p)]
if len(intersection) == 0:
raise RejectionException(f'empty intersection of Regions {self} and {o}')
return self.orient(random.choice(intersection))
return IntersectionRegion(self, other, sampler=sampler, orientation=self.orientation)
def containsPoint(self, point):
distance, location = self.kdTree.query(point)
return (distance <= self.tolerance)
def containsObject(self, obj):
raise NotImplementedError()
def __eq__(self, other):
if type(other) is not PointSetRegion:
return NotImplemented
return (other.name == self.name
and other.points == self.points
and other.orientation == self.orientation)
def __hash__(self):
return hash((self.name, self.points, self.orientation))
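# Hedged usage sketch (illustrative only): a small discrete PointSetRegion and
# membership tests with the default tolerance of 1e-6.
#     reg = PointSetRegion('corners', [(0, 0), (1, 0), (0, 1)])
#     reg.containsPoint((1, 0))        # True: exact grid point
#     reg.containsPoint((0.5, 0.5))    # False: nearest point is ~0.7 away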
class GridRegion(PointSetRegion):
"""A Region given by an obstacle grid.
A point is considered to be in a `GridRegion` if the nearest grid point is
not an obstacle.
Args:
name (str): name for debugging
grid: 2D list, tuple, or NumPy array of 0s and 1s, where 1 indicates an obstacle
and 0 indicates free space
Ax (float): spacing between grid points along X axis
Ay (float): spacing between grid points along Y axis
Bx (float): X coordinate of leftmost grid column
By (float): Y coordinate of lowest grid row
orientation (:obj:`~scenic.core.vectors.VectorField`, optional): orientation of region
"""
def __init__(self, name, grid, Ax, Ay, Bx, By, orientation=None):
self.grid = numpy.array(grid)
self.sizeY, self.sizeX = self.grid.shape
self.Ax, self.Ay = Ax, Ay
self.Bx, self.By = Bx, By
y, x = numpy.where(self.grid == 0)
points = [self.gridToPoint(point) for point in zip(x, y)]
super().__init__(name, points, orientation=orientation)
def gridToPoint(self, gp):
x, y = gp
return ((self.Ax * x) + self.Bx, (self.Ay * y) + self.By)
def pointToGrid(self, point):
x, y = point
x = (x - self.Bx) / self.Ax
y = (y - self.By) / self.Ay
nx = int(round(x))
if nx < 0 or nx >= self.sizeX:
return None
ny = int(round(y))
if ny < 0 or ny >= self.sizeY:
return None
return (nx, ny)
def containsPoint(self, point):
gp = self.pointToGrid(point)
if gp is None:
return False
x, y = gp
return (self.grid[y, x] == 0)
def containsObject(self, obj):
# TODO improve this procedure!
# Fast check
for c in obj.corners:
if not self.containsPoint(c):
return False
# Slow check
gps = [self.pointToGrid(corner) for corner in obj.corners]
x, y = zip(*gps)
minx, maxx = findMinMax(x)
miny, maxy = findMinMax(y)
for x in range(minx, maxx+1):
for y in range(miny, maxy+1):
p = self.gridToPoint((x, y))
if self.grid[y, x] == 1 and obj.containsPoint(p):
return False
return True
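# Hedged usage sketch (illustrative only): a 3x3 GridRegion with one obstacle in the
# center cell, unit spacing, and origin at (0, 0).
#     grid = [[0, 0, 0],
#             [0, 1, 0],
#             [0, 0, 0]]
#     gr = GridRegion('demo', grid, 1, 1, 0, 0)
#     gr.containsPoint((1, 1))   # False: nearest grid point is an obstacle
#     gr.containsPoint((2, 0))   # True: free cell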
class IntersectionRegion(Region):
def __init__(self, *regions, orientation=None, sampler=None):
self.regions = tuple(regions)
if len(self.regions) < 2:
raise RuntimeError('tried to take intersection of fewer than 2 regions')
super().__init__('Intersection', *self.regions, orientation=orientation)
if sampler is None:
sampler = self.genericSampler
self.sampler = sampler
def sampleGiven(self, value):
regs = [value[reg] for reg in self.regions]
# Now that regions have been sampled, attempt intersection again in the hopes
# there is a specialized sampler to handle it (unless we already have one)
if self.sampler is self.genericSampler:
failed = False
intersection = regs[0]
for region in regs[1:]:
intersection = intersection.intersect(region)
if isinstance(intersection, IntersectionRegion):
failed = True
break
if not failed:
intersection.orientation = value[self.orientation]
return intersection
return IntersectionRegion(*regs, orientation=value[self.orientation],
sampler=self.sampler)
def evaluateInner(self, context):
regs = (valueInContext(reg, context) for reg in self.regions)
orientation = valueInContext(self.orientation, context)
return IntersectionRegion(*regs, orientation=orientation, sampler=self.sampler)
def containsPoint(self, point):
return all(region.containsPoint(point) for region in self.regions)
def uniformPointInner(self):
return self.orient(self.sampler(self))
@staticmethod
def genericSampler(intersection):
regs = intersection.regions
point = regs[0].uniformPointInner()
for region in regs[1:]:
if not region.containsPoint(point):
raise RejectionException(
f'sampling intersection of Regions {regs[0]} and {region}')
return point
def isEquivalentTo(self, other):
if type(other) is not IntersectionRegion:
return False
return (areEquivalent(set(other.regions), set(self.regions))
and other.orientation == self.orientation)
def __str__(self):
return f'IntersectionRegion({self.regions})'
|
[
"random.triangular",
"scenic.core.vectors.OrientedVector",
"numpy.array",
"random.choices",
"scenic.core.lazy_eval.valueInContext",
"math.hypot",
"scenic.core.geometry.findMinMax",
"scenic.core.geometry.triangulatePolygon",
"scenic.core.type_support.toVector",
"scenic.core.geometry.hypot",
"scenic.core.vectors.Vector",
"numpy.where",
"scenic.core.geometry.polygonUnion",
"scenic.core.geometry.sin",
"scenic.core.utils.areEquivalent",
"scenic.core.geometry.headingOfSegment",
"random.uniform",
"random.choice",
"scenic.core.geometry.plotPolygon",
"scenic.core.distributions.RejectionException",
"scenic.core.geometry.cos",
"itertools.accumulate",
"scenic.core.geometry.averageVectors",
"random.random",
"scenic.core.distributions.needsSampling"
] |
[((739, 759), 'scenic.core.distributions.needsSampling', 'needsSampling', (['thing'], {}), '(thing)\n', (752, 759), False, 'from scenic.core.distributions import Samplable, RejectionException, needsSampling\n'), ((3511, 3571), 'scenic.core.type_support.toVector', 'toVector', (['thing', '""""X in Y" with X not an Object or a vector"""'], {}), '(thing, \'"X in Y" with X not an Object or a vector\')\n', (3519, 3571), False, 'from scenic.core.type_support import toVector\n'), ((4507, 4551), 'scenic.core.distributions.RejectionException', 'RejectionException', (['f"""sampling empty Region"""'], {}), "(f'sampling empty Region')\n", (4525, 4551), False, 'from scenic.core.distributions import Samplable, RejectionException, needsSampling\n'), ((5431, 5467), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', (['self.center', 'context'], {}), '(self.center, context)\n', (5445, 5467), False, 'from scenic.core.lazy_eval import valueInContext\n'), ((5479, 5515), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', (['self.radius', 'context'], {}), '(self.radius, context)\n', (5493, 5515), False, 'from scenic.core.lazy_eval import valueInContext\n'), ((5729, 5775), 'random.triangular', 'random.triangular', (['(0)', 'self.radius', 'self.radius'], {}), '(0, self.radius, self.radius)\n', (5746, 5775), False, 'import random\n'), ((5782, 5815), 'random.uniform', 'random.uniform', (['(-math.pi)', 'math.pi'], {}), '(-math.pi, math.pi)\n', (5796, 5815), False, 'import random\n'), ((7347, 7383), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', (['self.center', 'context'], {}), '(self.center, context)\n', (7361, 7383), False, 'from scenic.core.lazy_eval import valueInContext\n'), ((7395, 7431), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', (['self.radius', 'context'], {}), '(self.radius, context)\n', (7409, 7431), False, 'from scenic.core.lazy_eval import valueInContext\n'), ((7444, 7481), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', (['self.heading', 'context'], {}), '(self.heading, context)\n', (7458, 7481), False, 'from scenic.core.lazy_eval import valueInContext\n'), ((7492, 7527), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', (['self.angle', 'context'], {}), '(self.angle, context)\n', (7506, 7527), False, 'from scenic.core.lazy_eval import valueInContext\n'), ((7921, 7959), 'random.triangular', 'random.triangular', (['(0)', 'maxDist', 'maxDist'], {}), '(0, maxDist, maxDist)\n', (7938, 7959), False, 'import random\n'), ((8872, 8885), 'scenic.core.geometry.hypot', 'hypot', (['hw', 'hh'], {}), '(hw, hh)\n', (8877, 8885), False, 'from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors\n'), ((9306, 9344), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', (['self.position', 'context'], {}), '(self.position, context)\n', (9320, 9344), False, 'from scenic.core.lazy_eval import valueInContext\n'), ((9357, 9394), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', (['self.heading', 'context'], {}), '(self.heading, context)\n', (9371, 9394), False, 'from scenic.core.lazy_eval import valueInContext\n'), ((9405, 9440), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', (['self.width', 'context'], {}), '(self.width, context)\n', (9419, 9440), False, 'from scenic.core.lazy_eval import valueInContext\n'), ((9452, 9488), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', (['self.height', 'context'], {}), '(self.height, context)\n', (9466, 9488), False, 'from scenic.core.lazy_eval import valueInContext\n'), 
((9616, 9639), 'random.uniform', 'random.uniform', (['(-hw)', 'hw'], {}), '(-hw, hw)\n', (9630, 9639), False, 'import random\n'), ((9647, 9670), 'random.uniform', 'random.uniform', (['(-hh)', 'hh'], {}), '(-hh, hh)\n', (9661, 9670), False, 'import random\n'), ((9825, 9838), 'scenic.core.geometry.findMinMax', 'findMinMax', (['x'], {}), '(x)\n', (9835, 9838), False, 'from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors\n'), ((9854, 9867), 'scenic.core.geometry.findMinMax', 'findMinMax', (['y'], {}), '(y)\n', (9864, 9867), False, 'from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors\n'), ((12627, 12642), 'random.random', 'random.random', ([], {}), '()\n', (12640, 12642), False, 'import random\n'), ((12652, 12704), 'scenic.core.geometry.averageVectors', 'averageVectors', (['pointA', 'pointB'], {'weight': 'interpolation'}), '(pointA, pointB, weight=interpolation)\n', (12666, 12704), False, 'from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors\n'), ((17273, 17308), 'scenic.core.geometry.polygonUnion', 'polygonUnion', (['(self.polygons, poly)'], {}), '((self.polygons, poly))\n', (17285, 17308), False, 'from scenic.core.geometry import headingOfSegment, triangulatePolygon, plotPolygon, polygonUnion\n'), ((17819, 17863), 'scenic.core.geometry.plotPolygon', 'plotPolygon', (['self.polygons', 'plt'], {'style': 'style'}), '(self.polygons, plt, style=style)\n', (17830, 17863), False, 'from scenic.core.geometry import headingOfSegment, triangulatePolygon, plotPolygon, polygonUnion\n'), ((21225, 21242), 'numpy.array', 'numpy.array', (['grid'], {}), '(grid)\n', (21236, 21242), False, 'import numpy\n'), ((21351, 21378), 'numpy.where', 'numpy.where', (['(self.grid == 0)'], {}), '(self.grid == 0)\n', (21362, 21378), False, 'import numpy\n'), ((22268, 22281), 'scenic.core.geometry.findMinMax', 'findMinMax', (['x'], {}), '(x)\n', (22278, 22281), False, 'from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors\n'), ((22297, 22310), 'scenic.core.geometry.findMinMax', 'findMinMax', (['y'], {}), '(y)\n', (22307, 22310), False, 'from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors\n'), ((23700, 23741), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', (['self.orientation', 'context'], {}), '(self.orientation, context)\n', (23714, 23741), False, 'from scenic.core.lazy_eval import valueInContext\n'), ((2632, 2651), 'scenic.core.distributions.needsSampling', 'needsSampling', (['self'], {}), '(self)\n', (2645, 2651), False, 'from scenic.core.distributions import Samplable, RejectionException, needsSampling\n'), ((3901, 3952), 'scenic.core.vectors.OrientedVector', 'OrientedVector', (['vec.x', 'vec.y', 'self.orientation[vec]'], {}), '(vec.x, vec.y, self.orientation[vec])\n', (3915, 3952), False, 'from scenic.core.vectors import Vector, OrientedVector, VectorDistribution\n'), ((6094, 6134), 'scenic.core.utils.areEquivalent', 'areEquivalent', (['other.center', 'self.center'], {}), '(other.center, self.center)\n', (6107, 6134), False, 'from scenic.core.utils import cached, areEquivalent\n'), ((6149, 6189), 'scenic.core.utils.areEquivalent', 'areEquivalent', (['other.radius', 'self.radius'], {}), '(other.radius, self.radius)\n', (6162, 6189), False, 'from scenic.core.utils import cached, areEquivalent\n'), ((6551, 6565), 'scenic.core.geometry.cos', 'cos', (['(angle / 2)'], {}), '(angle / 2)\n', (6554, 6565), False, 'from 
scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors\n'), ((7985, 8008), 'random.uniform', 'random.uniform', (['(-ha)', 'ha'], {}), '(-ha, ha)\n', (7999, 8008), False, 'import random\n'), ((8211, 8251), 'scenic.core.utils.areEquivalent', 'areEquivalent', (['other.center', 'self.center'], {}), '(other.center, self.center)\n', (8224, 8251), False, 'from scenic.core.utils import cached, areEquivalent\n'), ((8266, 8306), 'scenic.core.utils.areEquivalent', 'areEquivalent', (['other.radius', 'self.radius'], {}), '(other.radius, self.radius)\n', (8279, 8306), False, 'from scenic.core.utils import cached, areEquivalent\n'), ((8321, 8363), 'scenic.core.utils.areEquivalent', 'areEquivalent', (['other.heading', 'self.heading'], {}), '(other.heading, self.heading)\n', (8334, 8363), False, 'from scenic.core.utils import cached, areEquivalent\n'), ((8378, 8416), 'scenic.core.utils.areEquivalent', 'areEquivalent', (['other.angle', 'self.angle'], {}), '(other.angle, self.angle)\n', (8391, 8416), False, 'from scenic.core.utils import cached, areEquivalent\n'), ((9720, 9734), 'scenic.core.vectors.Vector', 'Vector', (['rx', 'ry'], {}), '(rx, ry)\n', (9726, 9734), False, 'from scenic.core.vectors import Vector, OrientedVector, VectorDistribution\n'), ((10010, 10054), 'scenic.core.utils.areEquivalent', 'areEquivalent', (['other.position', 'self.position'], {}), '(other.position, self.position)\n', (10023, 10054), False, 'from scenic.core.utils import cached, areEquivalent\n'), ((10069, 10111), 'scenic.core.utils.areEquivalent', 'areEquivalent', (['other.heading', 'self.heading'], {}), '(other.heading, self.heading)\n', (10082, 10111), False, 'from scenic.core.utils import cached, areEquivalent\n'), ((10126, 10164), 'scenic.core.utils.areEquivalent', 'areEquivalent', (['other.width', 'self.width'], {}), '(other.width, self.width)\n', (10139, 10164), False, 'from scenic.core.utils import cached, areEquivalent\n'), ((10179, 10219), 'scenic.core.utils.areEquivalent', 'areEquivalent', (['other.height', 'self.height'], {}), '(other.height, self.height)\n', (10192, 10219), False, 'from scenic.core.utils import cached, areEquivalent\n'), ((11746, 11764), 'math.hypot', 'math.hypot', (['dx', 'dy'], {}), '(dx, dy)\n', (11756, 11764), False, 'import math\n'), ((12506, 12571), 'random.choices', 'random.choices', (['self.segments'], {'cum_weights': 'self.cumulativeLengths'}), '(self.segments, cum_weights=self.cumulativeLengths)\n', (12520, 12571), False, 'import random\n'), ((15651, 15678), 'itertools.accumulate', 'itertools.accumulate', (['areas'], {}), '(areas)\n', (15671, 15678), False, 'import itertools\n'), ((15732, 15818), 'random.choices', 'random.choices', (['self.trianglesAndBounds'], {'cum_weights': 'self.cumulativeTriangleAreas'}), '(self.trianglesAndBounds, cum_weights=self.\n cumulativeTriangleAreas)\n', (15746, 15818), False, 'import random\n'), ((19147, 19167), 'scenic.core.distributions.needsSampling', 'needsSampling', (['point'], {}), '(point)\n', (19160, 19167), False, 'from scenic.core.distributions import Samplable, RejectionException, needsSampling\n'), ((23630, 23658), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', (['reg', 'context'], {}), '(reg, context)\n', (23644, 23658), False, 'from scenic.core.lazy_eval import valueInContext\n'), ((5119, 5145), 'scenic.core.distributions.needsSampling', 'needsSampling', (['self.center'], {}), '(self.center)\n', (5132, 5145), False, 'from scenic.core.distributions import Samplable, RejectionException, 
needsSampling\n'), ((5149, 5175), 'scenic.core.distributions.needsSampling', 'needsSampling', (['self.radius'], {}), '(self.radius)\n', (5162, 5175), False, 'from scenic.core.distributions import Samplable, RejectionException, needsSampling\n'), ((12767, 12799), 'scenic.core.geometry.headingOfSegment', 'headingOfSegment', (['pointA', 'pointB'], {}), '(pointA, pointB)\n', (12783, 12799), False, 'from scenic.core.geometry import headingOfSegment, triangulatePolygon, plotPolygon, polygonUnion\n'), ((12831, 12843), 'scenic.core.vectors.Vector', 'Vector', (['x', 'y'], {}), '(x, y)\n', (12837, 12843), False, 'from scenic.core.vectors import Vector, OrientedVector, VectorDistribution\n'), ((14492, 14512), 'scenic.core.distributions.needsSampling', 'needsSampling', (['point'], {}), '(point)\n', (14505, 14512), False, 'from scenic.core.distributions import Samplable, RejectionException, needsSampling\n'), ((15414, 15441), 'scenic.core.geometry.triangulatePolygon', 'triangulatePolygon', (['polygon'], {}), '(polygon)\n', (15432, 15441), False, 'from scenic.core.geometry import headingOfSegment, triangulatePolygon, plotPolygon, polygonUnion\n'), ((15900, 15926), 'random.uniform', 'random.uniform', (['minx', 'maxx'], {}), '(minx, maxx)\n', (15914, 15926), False, 'import random\n'), ((15928, 15954), 'random.uniform', 'random.uniform', (['miny', 'maxy'], {}), '(miny, maxy)\n', (15942, 15954), False, 'import random\n'), ((19624, 19652), 'scenic.core.vectors.Vector', 'Vector', (['*self.kdTree.data[i]'], {}), '(*self.kdTree.data[i])\n', (19630, 19652), False, 'from scenic.core.vectors import Vector, OrientedVector, VectorDistribution\n'), ((19827, 19894), 'scenic.core.distributions.RejectionException', 'RejectionException', (['f"""empty intersection of Regions {self} and {o}"""'], {}), "(f'empty intersection of Regions {self} and {o}')\n", (19845, 19894), False, 'from scenic.core.distributions import Samplable, RejectionException, needsSampling\n'), ((19917, 19944), 'random.choice', 'random.choice', (['intersection'], {}), '(intersection)\n', (19930, 19944), False, 'import random\n'), ((24193, 24271), 'scenic.core.distributions.RejectionException', 'RejectionException', (['f"""sampling intersection of Regions {regs[0]} and {region}"""'], {}), "(f'sampling intersection of Regions {regs[0]} and {region}')\n", (24211, 24271), False, 'from scenic.core.distributions import Samplable, RejectionException, needsSampling\n'), ((5839, 5845), 'scenic.core.geometry.cos', 'cos', (['t'], {}), '(t)\n', (5842, 5845), False, 'from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors\n'), ((5857, 5863), 'scenic.core.geometry.sin', 'sin', (['t'], {}), '(t)\n', (5860, 5863), False, 'from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors\n'), ((6646, 6662), 'scenic.core.distributions.needsSampling', 'needsSampling', (['x'], {}), '(x)\n', (6659, 6662), False, 'from scenic.core.distributions import Samplable, RejectionException, needsSampling\n'), ((8060, 8066), 'scenic.core.geometry.cos', 'cos', (['t'], {}), '(t)\n', (8063, 8066), False, 'from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors\n'), ((8078, 8084), 'scenic.core.geometry.sin', 'sin', (['t'], {}), '(t)\n', (8081, 8084), False, 'from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors\n'), ((8982, 8997), 'scenic.core.vectors.Vector', 'Vector', (['*offset'], {}), '(*offset)\n', (8988, 8997), False, 'from scenic.core.vectors 
import Vector, OrientedVector, VectorDistribution\n'), ((16035, 16047), 'scenic.core.vectors.Vector', 'Vector', (['x', 'y'], {}), '(x, y)\n', (16041, 16047), False, 'from scenic.core.vectors import Vector, OrientedVector, VectorDistribution\n'), ((19439, 19465), 'random.choice', 'random.choice', (['self.points'], {}), '(self.points)\n', (19452, 19465), False, 'import random\n')]
|
#!/usr/bin/env python
"""Distribution functions
This module provides functions for dealing with normal distributions
and generating error maps.
When called directly as main, it allows for converting a threshold map
into an error map.
```
$ python -m mlcsim.dist --help
usage: dist.py [-h] [-b {1,2,3,4}] -f F [-o O]
options:
-h, --help show this help message and exit
-b {1,2,3,4} bits per cell
-f F Threshold map json to convert
-o O output to file
```
"""
import argparse
import json
from pprint import pprint
from typing import Dict, List
import numpy as np
from scipy import stats as ss # type: ignore
# https://stackoverflow.com/a/32574638/9047818
# https://stackoverflow.com/a/13072714/9047818
def normalMidpoint(mean_a: float, mean_b: float, std_a: float, std_b: float) -> float:
"""Find the midpoint between two normal distributions
Args:
mean_a (float): Mean of first distribution
mean_b (float): Mean of second distribution
std_a (float): Std dev of first distribution
std_b (float): Std dev of second distribution
Returns:
float: Midpoint between distributions
"""
a = 1 / (2 * std_a**2) - 1 / (2 * std_b**2)
b = mean_b / (std_b**2) - mean_a / (std_a**2)
c = (
mean_a**2 / (2 * std_a**2)
- mean_b**2 / (2 * std_b**2)
- np.log(std_b / std_a)
)
roots = np.roots([a, b, c])
masked = np.ma.masked_outside(roots, mean_a, mean_b)
return float(masked[~masked.mask][0][0])
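# Hedged worked example (illustrative only): with equal standard deviations the two
# densities cross halfway between the means, so the midpoint is 1.0:
#     normalMidpoint(0.0, 2.0, 1.0, 1.0)  # -> 1.0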
# https://www.askpython.com/python/normal-distribution
def normalChance(mean: float, stdev: float, thr: float) -> float:
"""Find the chance of a normal distribution above/below a given value
Args:
mean (float): Mean of the distribution
stdev (float): Std dev of the distribution
thr (float): Threshold to check above/below
Returns:
float: Probability mass of the distribution on the far side of thr from the mean (below thr if the mean is above it, above thr otherwise)
"""
chance = ss.norm(loc=mean, scale=stdev).cdf(thr)
return float(chance if mean > thr else 1 - chance)
def genErrorMap(thr_maps: Dict[str, List[List[float]]], bpc: int) -> List[List[float]]:
"""Generate an error map from a threshold map
Args:
thr_maps (dict): Threshold map
bpc (int): Bits per cell
Raises:
ValueError: if the given bpc is not in the threshold map
Returns:
list: Error map from the threshold map
"""
if str(bpc) not in thr_maps.keys():
raise ValueError(f"Threshold map does not have values for {bpc} levels")
thr_map: List[List[float]] = thr_maps[str(bpc)]
err_map = [[0.0]]
for i in range(len(thr_map) - 1):
mid = normalMidpoint(
thr_map[i][0], thr_map[i + 1][0], thr_map[i][1], thr_map[i + 1][1]
)
up = normalChance(thr_map[i][0], thr_map[i][1], mid)
dn = normalChance(thr_map[i + 1][0], thr_map[i + 1][1], mid)
err_map[i].append(up)
err_map.append([dn])
err_map[-1].append(0.0)
return err_map
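# Hedged usage sketch (illustrative only): a 1-bit-per-cell threshold map with two
# levels, each entry given as [mean, std dev].
#     thr_maps = {"1": [[0.0, 0.1], [1.0, 0.1]]}
#     genErrorMap(thr_maps, 1)
#     # -> [[0.0, p_up], [p_dn, 0.0]], the chances of each level being read as its
#     #    neighbour across the midpoint threshold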
def _main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-b", type=int, default=2, choices=range(1, 5), help="bits per cell"
)
parser.add_argument("-f", required=True, help="Threshold map json to convert")
parser.add_argument("-o", type=str, help="output to file")
args = parser.parse_args()
with open(args.f) as f:
thr_map = json.load(f)
err_map = genErrorMap(thr_map, args.b)
if args.o:
with open(args.o, "w") as f:
json.dump(err_map, f)
else:
pprint(err_map)
if __name__ == "__main__":
_main()
|
[
"numpy.ma.masked_outside",
"argparse.ArgumentParser",
"scipy.stats.norm",
"numpy.log",
"numpy.roots",
"json.load",
"pprint.pprint",
"json.dump"
] |
[((1404, 1423), 'numpy.roots', 'np.roots', (['[a, b, c]'], {}), '([a, b, c])\n', (1412, 1423), True, 'import numpy as np\n'), ((1437, 1480), 'numpy.ma.masked_outside', 'np.ma.masked_outside', (['roots', 'mean_a', 'mean_b'], {}), '(roots, mean_a, mean_b)\n', (1457, 1480), True, 'import numpy as np\n'), ((3098, 3123), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3121, 3123), False, 'import argparse\n'), ((1364, 1385), 'numpy.log', 'np.log', (['(std_b / std_a)'], {}), '(std_b / std_a)\n', (1370, 1385), True, 'import numpy as np\n'), ((3458, 3470), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3467, 3470), False, 'import json\n'), ((3620, 3635), 'pprint.pprint', 'pprint', (['err_map'], {}), '(err_map)\n', (3626, 3635), False, 'from pprint import pprint\n'), ((2013, 2043), 'scipy.stats.norm', 'ss.norm', ([], {'loc': 'mean', 'scale': 'stdev'}), '(loc=mean, scale=stdev)\n', (2020, 2043), True, 'from scipy import stats as ss\n'), ((3580, 3601), 'json.dump', 'json.dump', (['err_map', 'f'], {}), '(err_map, f)\n', (3589, 3601), False, 'import json\n')]
|
import random
import argparse
import numpy as np
import pandas as pd
import os
import time
import string
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
from model import WideResnet
from cifar import get_train_loader, get_val_loader
from label_guessor import LabelGuessor
from lr_scheduler import WarmupCosineLrScheduler
from ema import EMA
import utils
## args
parser = argparse.ArgumentParser(description=' FixMatch Training')
parser.add_argument('--wresnet-k', default=2, type=int, help='width factor of wide resnet')
parser.add_argument('--wresnet-n', default=28, type=int, help='depth of wide resnet')
parser.add_argument('--n-classes', type=int, default=10, help='number of classes in dataset')
parser.add_argument('--n-labeled', type=int, default=10, help='number of labeled samples for training')
parser.add_argument('--n-epochs', type=int, default=256, help='number of training epochs')
parser.add_argument('--batchsize', type=int, default=64, help='train batch size of labeled samples')
parser.add_argument('--mu', type=int, default=7, help='factor of train batch size of unlabeled samples')
parser.add_argument('--mu-c', type=int, default=1, help='factor of train batch size of contrastive learning samples')
parser.add_argument('--thr', type=float, default=0.95, help='pseudo label threshold')
parser.add_argument('--n-imgs-per-epoch', type=int, default=50000, help='number of training images for each epoch')
parser.add_argument('--lam-x', type=float, default=1., help='coefficient of labeled loss')
parser.add_argument('--lam-u', type=float, default=1., help='coefficient of unlabeled loss')
parser.add_argument('--lam-clr', type=float, default=1., help='coefficient of contrastive loss')
parser.add_argument('--ema-alpha', type=float, default=0.999, help='decay rate for ema module')
parser.add_argument('--lr', type=float, default=0.03, help='learning rate for training')
parser.add_argument('--weight-decay', type=float, default=5e-4, help='weight decay')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum for optimizer')
parser.add_argument('--seed', type=int, default=-1, help='seed for random behaviors, no seed if negative')
parser.add_argument('--feature_dim', default=128, type=int, help='Feature dim for latent vector')
parser.add_argument('--temperature', default=0.5, type=float, help='Temperature used in softmax')
parser.add_argument('--k', default=200, type=int, help='Top k most similar images used to predict the label')
parser.add_argument('--test', default=0, type=int, help='0 is softmax test function, 1 is similarity test function')
parser.add_argument('--bootstrap', type=int, default=16, help='Bootstrapping factor (default=16)')
parser.add_argument('--boot-schedule', type=int, default=1, help='Bootstrapping schedule (default=1)')
parser.add_argument('--balance', type=int, default=0, help='Balance class methods to use (default=0 None)')
parser.add_argument('--delT', type=float, default=0.2, help='Class balance threshold delta (default=0.2)')
args = parser.parse_args()
print(args)
# save results
save_name_pre = '{}_E{}_B{}_LX{}_LU{}_LCLR{}_THR{}_LR{}_WD{}'.format(args.n_labeled, args.n_epochs, args.batchsize,
args.lam_x, args.lam_u, args.lam_clr, args.thr, args.lr, args.weight_decay)
ticks = time.time()
result_dir = 'results/' + save_name_pre + '.' + str(ticks)
if not os.path.exists(result_dir):
os.mkdir(result_dir)
def set_model():
model = WideResnet(args.n_classes, k=args.wresnet_k, n=args.wresnet_n, feature_dim=args.feature_dim) # wresnet-28-2
model.train()
model.cuda()
criteria_x = nn.CrossEntropyLoss().cuda()
criteria_u = nn.CrossEntropyLoss().cuda()
return model, criteria_x, criteria_u
def train_one_epoch(
model,
criteria_x,
criteria_u,
optim,
lr_schdlr,
ema,
dltrain_x,
dltrain_u,
dltrain_all,
lb_guessor,
):
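# One epoch of FixMatch-style training: supervised cross-entropy on weakly augmented
# labeled images, pseudo-label cross-entropy on confidently predicted unlabeled images
# (strong augmentation), plus an optional SimCLR-style contrastive loss on two
# augmented views of every image; EMA parameters are updated at each step.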
loss_avg, loss_x_avg, loss_u_avg, loss_clr_avg = [], [], [], []
epsilon = 0.000001
dl_u, dl_all = iter(dltrain_u), iter(dltrain_all)
for _, _, ims_all_1, ims_all_2, _ in tqdm(dl_all, desc='Training ...'):
ims_u_weak, ims_u_strong, _, _, lbs_u = next(dl_u)
loss_x, loss_u, loss_clr = torch.tensor(0).cuda(), torch.tensor(0).cuda(), torch.tensor(0).cuda()
fv_1, fv_2 = torch.tensor(0).cuda(), torch.tensor(0).cuda()
ims_u_weak = ims_u_weak.cuda()
ims_u_strong = ims_u_strong.cuda()
ims_all_1 = ims_all_1.cuda(non_blocking=True)
ims_all_2 = ims_all_2.cuda(non_blocking=True)
dl_x = iter(dltrain_x)
ims_x_weak, _, _, _, lbs_x = next(dl_x)
ims_x_weak = ims_x_weak.cuda()
lbs_x = lbs_x.cuda()
n_x, n_u, n_all = 0, 0, 0
if args.lam_u >= epsilon and args.lam_clr >= epsilon: # pseudo-labeling and contrastive learning
lbs_u, valid_u, mask_u = lb_guessor(model, ims_u_weak, args.balance, args.delT)
ims_u_strong = ims_u_strong[valid_u]
n_x, n_u, n_all = ims_x_weak.size(0), ims_u_strong.size(0), ims_all_1.size(0)
if n_u != 0:
ims_x_u_all_1 = torch.cat([ims_x_weak, ims_u_strong, ims_all_1], dim=0).detach()
ims_x_u_all_2 = torch.cat([ims_x_weak, ims_u_strong, ims_all_2], dim=0).detach()
logits_x_u_all_1, fv_1, z_1 = model(ims_x_u_all_1)
logits_x_u_all_2, fv_2, z_2 = model(ims_x_u_all_2)
logits_x_u_all = (logits_x_u_all_1 + logits_x_u_all_2) / 2
logits_x, logits_u = logits_x_u_all[:n_x], logits_x_u_all[n_x:(n_x + n_u)]
loss_x = criteria_x(logits_x, lbs_x)
if args.balance == 2 or args.balance == 3:
loss_u = (F.cross_entropy(logits_u, lbs_u, reduction='none') * mask_u).mean()
else:
loss_u = criteria_u(logits_u, lbs_u)
else: # n_u == 0
ims_x_all_1 = torch.cat([ims_x_weak, ims_all_1], dim=0).detach()
ims_x_all_2 = torch.cat([ims_x_weak, ims_all_2], dim=0).detach()
logits_x_all_1, fv_1, z_1 = model(ims_x_all_1)
logits_x_all_2, fv_2, z_2 = model(ims_x_all_2)
logits_x_all = (logits_x_all_1 + logits_x_all_2) / 2
logits_x = logits_x_all[:n_x]
loss_x = criteria_x(logits_x, lbs_x)
loss_u = torch.tensor(0)
elif args.lam_u >= epsilon: #lam_clr == 0: pseudo-labeling only
lbs_u, valid_u, mask_u = lb_guessor(model, ims_u_weak, args.balance, args.delT)
ims_u_strong = ims_u_strong[valid_u]
n_x, n_u = ims_x_weak.size(0), ims_u_strong.size(0)
if n_u != 0:
ims_x_u = torch.cat([ims_x_weak, ims_u_strong], dim=0).detach()
logits_x_u, _, _ = model(ims_x_u)
logits_x, logits_u = logits_x_u[:n_x], logits_x_u[n_x:]
loss_x = criteria_x(logits_x, lbs_x)
if args.balance == 2 or args.balance == 3:
loss_u = (F.cross_entropy(logits_u, lbs_u, reduction='none') * mask_u).mean()
else:
loss_u = criteria_u(logits_u, lbs_u)
else: # n_u == 0
logits_x, _, _ = model(ims_x_weak)
loss_x = criteria_x(logits_x, lbs_x)
loss_u = torch.tensor(0)
else: #lam_u == 0: contrastive learning only
n_x, n_all = ims_x_weak.size(0), ims_all_1.size(0)
ims_x_all_1 = torch.cat([ims_x_weak, ims_all_1], dim=0).detach()
ims_x_all_2 = torch.cat([ims_x_weak, ims_all_2], dim=0).detach()
logits_x_all_1, fv_1, z_1 = model(ims_x_all_1)
logits_x_all_2, fv_2, z_2 = model(ims_x_all_2)
logits_x_all = (logits_x_all_1 + logits_x_all_2) / 2
logits_x = logits_x_all[:n_x]
loss_x = criteria_x(logits_x, lbs_x)
loss_u = torch.tensor(0)
if args.lam_clr >= epsilon:
#compute l_clr
fv_1 = fv_1[(n_x + n_u):]
fv_2 = fv_2[(n_x + n_u):]
z_1 = z_1[(n_x + n_u):]
z_2 = z_2[(n_x + n_u):]
#[2*muc*B, D]
z = torch.cat([z_1, z_2], dim=0)
#[2*muc*B, 2*muc*B]
sim_matrix = torch.exp(torch.mm(z, z.t().contiguous()) / args.temperature) #denominator
#[2*muc*B, 2*muc*B]
# mask = (torch.ones_like(sim_matrix) - torch.eye(2 * args.mu_c * args.batchsize, device=sim_matrix.device)).bool()
mask = (torch.ones_like(sim_matrix) - torch.eye(2 * args.mu_c * args.batchsize, device=sim_matrix.device))
mask = mask > 0
#[2*muc*B, 2*muc*B - 1]
sim_matrix = sim_matrix.masked_select(mask).view(2 * args.mu_c * args.batchsize, -1)
#[muc*B]
pos_sim = torch.exp(torch.sum(z_1 * z_2, dim=-1) / args.temperature) #numerator
#[2*muc*B]
pos_sim = torch.cat([pos_sim, pos_sim], dim=0)
loss_clr = (- torch.log(pos_sim / sim_matrix.sum(dim=-1))).mean()
#compute loss
loss = args.lam_x * loss_x + args.lam_u * loss_u + args.lam_clr * loss_clr
optim.zero_grad()
loss.backward()
optim.step()
ema.update_params()
lr_schdlr.step()
loss_x_avg.append(loss_x.item())
loss_u_avg.append(loss_u.item())
loss_clr_avg.append(loss_clr.item())
loss_avg.append(loss.item())
ema.update_buffer()
def evaluate(ema):
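# Evaluate the EMA (shadow) weights on the validation set, return top-1 accuracy,
# then restore the live model parameters.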
ema.apply_shadow()
ema.model.eval()
ema.model.cuda()
dlval = get_val_loader(batch_size=128, num_workers=0)
matches = []
for ims, lbs in dlval:
ims = ims.cuda()
lbs = lbs.cuda()
with torch.no_grad():
logits, _, _ = ema.model(ims)
scores = torch.softmax(logits, dim=1)
_, preds = torch.max(scores, dim=1)
match = lbs == preds
matches.append(match)
matches = torch.cat(matches, dim=0).float()
acc = torch.mean(matches)
ema.restore()
return acc
def test(model, memory_data_loader, test_data_loader, c, epoch):
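# Weighted kNN evaluation: build a feature bank over the memory set, then label each
# test image by a temperature-scaled similarity vote over its k nearest neighbors in
# feature space, using the model's own argmax predictions as the bank labels.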
model.eval()
total_top1, total_top5, total_num, feature_bank, feature_labels = 0.0, 0.0, 0, [], []
with torch.no_grad():
# generate feature bank
for data, _, _ in tqdm(memory_data_loader, desc='Feature extracting'):
logits, feature, _ = model(data.cuda(non_blocking=True))
feature_bank.append(feature)
feature_labels.append(torch.tensor(torch.argmax(logits,dim=1),dtype=torch.int64))
# [D, N]
feature_bank = torch.cat(feature_bank, dim=0).t().contiguous()
# [N]
feature_labels = torch.cat(feature_labels, dim=0).contiguous().cpu()
# loop test data to predict the label by weighted knn search
test_bar = tqdm(test_data_loader)
for data, _, target in test_bar:
# data, target = data.cuda(non_blocking=True), target.cuda(non_blocking=True)
data = data.cuda(non_blocking=True)
_, feature, _ = model(data)
total_num += data.size(0)
# compute cos similarity between each feature vector and feature bank ---> [B, N]
sim_matrix = torch.mm(feature, feature_bank)
# [B, K]
sim_weight, sim_indices = sim_matrix.topk(k=args.k, dim=-1)
# [B, K]
# sim_labels = torch.gather(feature_labels.expand(data.size(0), -1), dim=-1, index=sim_indices)
sim_labels = torch.gather(feature_labels.expand(data.size(0), -1), dim=-1, index=sim_indices.cpu())
sim_weight = (sim_weight / args.temperature).exp()
# counts for each class
one_hot_label = torch.zeros(data.size(0) * args.k, c, device=sim_labels.device)
# [B*K, C]
one_hot_label = one_hot_label.scatter(-1, sim_labels.view(-1, 1), 1.0)
# weighted score ---> [B, C]
pred_scores = torch.sum(one_hot_label.view(data.size(0), -1, c) * sim_weight.cpu().unsqueeze(dim=-1), dim=1)
pred_labels = pred_scores.argsort(dim=-1, descending=True)
total_top1 += torch.sum((pred_labels[:, :1] == target.unsqueeze(dim=-1)).any(dim=-1).float()).item()
test_bar.set_description('Test Epoch: [{}/{}] Acc@1:{:.2f}%'
.format(epoch, args.n_epochs, total_top1 / total_num * 100))
return total_top1 / total_num * 100
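# Hedged sketch (added; not part of the original script): the weighted-kNN vote used in
# test(), on tiny made-up tensors (B=4 queries, N=20 bank items, C=3 classes, k=5).
def _weighted_knn_demo(temperature=0.5):
    feature = torch.nn.functional.normalize(torch.randn(4, 8), dim=1)        # [B, D]
    feature_bank = torch.nn.functional.normalize(torch.randn(8, 20), dim=0)   # [D, N]
    feature_labels = torch.randint(0, 3, (20,))                               # [N]
    sim_weight, sim_indices = torch.mm(feature, feature_bank).topk(k=5, dim=-1)    # [B, K]
    sim_labels = torch.gather(feature_labels.expand(4, -1), dim=-1, index=sim_indices)
    sim_weight = (sim_weight / temperature).exp()
    one_hot = torch.zeros(4 * 5, 3).scatter(-1, sim_labels.view(-1, 1), 1.0)      # [B*K, C]
    scores = torch.sum(one_hot.view(4, -1, 3) * sim_weight.unsqueeze(dim=-1), dim=1)
    return scores.argsort(dim=-1, descending=True)[:, 0]            # top-1 label per query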
def get_random_string(length):
letters = string.ascii_lowercase
result_str = ''.join(random.choice(letters) for i in range(length))
return result_str
def sort_unlabeled(ema,numPerClass):
ema.apply_shadow()
ema.model.eval()
ema.model.cuda()
n_iters_per_epoch = args.n_imgs_per_epoch // args.batchsize
_, _, dltrain_all = get_train_loader(args.batchsize, 1, 1, n_iters_per_epoch, L=args.n_classes*numPerClass, seed=args.seed)
predicted = []
labels = []
for ims_w, _, _, _, lbs in dltrain_all:
ims = ims_w.cuda()
labels.append(lbs)
with torch.no_grad():
logits, _, _ = ema.model(ims)
scores = torch.softmax(logits, dim=1)
predicted.append(scores.cpu())
print( "labels ",len(labels))
labels = np.concatenate(labels, axis=0)
print( "labels ",len(labels))
predicted = np.concatenate( predicted, axis=0)
preds = predicted.argmax(1)
probs = predicted.max(1)
top = np.argsort(-probs,axis=0)
del dltrain_all, logits
labeledSize =args.n_classes * numPerClass
unique_train_pseudo_labels, unique_train_counts = np.unique(preds, return_counts=True)
print("Number of training pseudo-labels in each class: ", unique_train_counts," for classes: ", unique_train_pseudo_labels)
sortByClass = np.random.randint(0,high=len(top), size=(args.n_classes, numPerClass), dtype=int)
indx = np.zeros([args.n_classes], dtype=int)
matches = np.zeros([args.n_classes, numPerClass], dtype=int)
labls = preds[top]
samples = top
for i in range(len(top)):
if indx[labls[i]] < numPerClass:
sortByClass[labls[i], indx[labls[i]]] = samples[i]
if labls[i] == labels[top[i]]:
matches[labls[i], indx[labls[i]]] = 1
indx[labls[i]] += 1
if min(indx) < numPerClass:
print("Counts of at least one class ", indx, " is lower than ", numPerClass)
name = "dataset/seeds/size"+str(labeledSize)+"." + get_random_string(8) + ".npy"
np.save(name, sortByClass[0:args.n_classes, :numPerClass])
classAcc = 100*np.sum(matches, axis=1)/numPerClass
print("Accuracy of the predicted pseudo-labels: top ", labeledSize, ", ", np.mean(classAcc), classAcc )
ema.restore()
return name
def train():
n_iters_per_epoch = args.n_imgs_per_epoch // args.batchsize
n_iters_all = n_iters_per_epoch * args.n_epochs #/ args.mu_c
epsilon = 0.000001
model, criteria_x, criteria_u = set_model()
lb_guessor = LabelGuessor(thresh=args.thr)
ema = EMA(model, args.ema_alpha)
wd_params, non_wd_params = [], []
for param in model.parameters():
if len(param.size()) == 1:
non_wd_params.append(param)
else:
wd_params.append(param)
param_list = [{'params': wd_params}, {'params': non_wd_params, 'weight_decay': 0}]
optim = torch.optim.SGD(param_list, lr=args.lr, weight_decay=args.weight_decay, momentum=args.momentum, nesterov=True)
lr_schdlr = WarmupCosineLrScheduler(optim, max_iter=n_iters_all, warmup_iter=0)
dltrain_x, dltrain_u, dltrain_all = get_train_loader(args.batchsize, args.mu, args.mu_c, n_iters_per_epoch,
L=args.n_labeled, seed=args.seed)
train_args = dict(
model=model,
criteria_x=criteria_x,
criteria_u=criteria_u,
optim=optim,
lr_schdlr=lr_schdlr,
ema=ema,
dltrain_x=dltrain_x,
dltrain_u=dltrain_u,
dltrain_all=dltrain_all,
lb_guessor=lb_guessor,
)
n_labeled = int(args.n_labeled / args.n_classes)
best_acc, top1 = -1, -1
results = {'top 1 acc': [], 'best_acc': []}
b_schedule = [args.n_epochs/2, 3*args.n_epochs/4]
if args.boot_schedule == 1:
step = int(args.n_epochs/3)
b_schedule = [step, 2*step]
elif args.boot_schedule == 2:
step = int(args.n_epochs/4)
b_schedule = [step, 2*step, 3*step]
for e in range(args.n_epochs):
if args.bootstrap > 1 and (e in b_schedule):
seed = 99
n_labeled *= args.bootstrap
name = sort_unlabeled(ema, n_labeled)
print("Bootstrap at epoch ", e," Name = ",name)
dltrain_x, dltrain_u, dltrain_all = get_train_loader(args.batchsize, args.mu, args.mu_c, n_iters_per_epoch,
L=10*n_labeled, seed=seed, name=name)
train_args = dict(
model=model,
criteria_x=criteria_x,
criteria_u=criteria_u,
optim=optim,
lr_schdlr=lr_schdlr,
ema=ema,
dltrain_x=dltrain_x,
dltrain_u=dltrain_u,
dltrain_all=dltrain_all,
lb_guessor=lb_guessor,
)
model.train()
train_one_epoch(**train_args)
torch.cuda.empty_cache()
if args.test == 0 or args.lam_clr < epsilon:
top1 = evaluate(ema) * 100
elif args.test == 1:
memory_data = utils.CIFAR10Pair(root='dataset', train=True, transform=utils.test_transform, download=False)
memory_data_loader = DataLoader(memory_data, batch_size=args.batchsize, shuffle=False, num_workers=16, pin_memory=True)
test_data = utils.CIFAR10Pair(root='dataset', train=False, transform=utils.test_transform, download=False)
test_data_loader = DataLoader(test_data, batch_size=args.batchsize, shuffle=False, num_workers=16, pin_memory=True)
c = len(memory_data.classes) #10
top1 = test(model, memory_data_loader, test_data_loader, c, e)
best_acc = top1 if best_acc < top1 else best_acc
results['top 1 acc'].append('{:.4f}'.format(top1))
results['best_acc'].append('{:.4f}'.format(best_acc))
data_frame = pd.DataFrame(data=results)
data_frame.to_csv(result_dir + '/' + save_name_pre + '.accuracy.csv', index_label='epoch')
log_msg = [
'epoch: {}'.format(e + 1),
'top 1 acc: {:.4f}'.format(top1),
'best_acc: {:.4f}'.format(best_acc)]
print(', '.join(log_msg))
if __name__ == '__main__':
train()
|
[
"torch.nn.CrossEntropyLoss",
"torch.max",
"numpy.argsort",
"lr_scheduler.WarmupCosineLrScheduler",
"torch.softmax",
"torch.sum",
"utils.CIFAR10Pair",
"numpy.save",
"os.path.exists",
"numpy.mean",
"argparse.ArgumentParser",
"torch.mean",
"torch.eye",
"os.mkdir",
"numpy.concatenate",
"pandas.DataFrame",
"ema.EMA",
"torch.argmax",
"torch.optim.SGD",
"random.choice",
"torch.ones_like",
"label_guessor.LabelGuessor",
"cifar.get_val_loader",
"time.time",
"torch.cuda.empty_cache",
"torch.cat",
"numpy.unique",
"model.WideResnet",
"tqdm.tqdm",
"torch.mm",
"numpy.sum",
"numpy.zeros",
"torch.tensor",
"torch.nn.functional.cross_entropy",
"torch.utils.data.DataLoader",
"torch.no_grad",
"cifar.get_train_loader"
] |
[((454, 511), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""" FixMatch Training"""'}), "(description=' FixMatch Training')\n", (477, 511), False, 'import argparse\n'), ((3352, 3363), 'time.time', 'time.time', ([], {}), '()\n', (3361, 3363), False, 'import time\n'), ((3430, 3456), 'os.path.exists', 'os.path.exists', (['result_dir'], {}), '(result_dir)\n', (3444, 3456), False, 'import os\n'), ((3462, 3482), 'os.mkdir', 'os.mkdir', (['result_dir'], {}), '(result_dir)\n', (3470, 3482), False, 'import os\n'), ((3513, 3610), 'model.WideResnet', 'WideResnet', (['args.n_classes'], {'k': 'args.wresnet_k', 'n': 'args.wresnet_n', 'feature_dim': 'args.feature_dim'}), '(args.n_classes, k=args.wresnet_k, n=args.wresnet_n, feature_dim=\n args.feature_dim)\n', (3523, 3610), False, 'from model import WideResnet\n'), ((4198, 4231), 'tqdm.tqdm', 'tqdm', (['dl_all'], {'desc': '"""Training ..."""'}), "(dl_all, desc='Training ...')\n", (4202, 4231), False, 'from tqdm import tqdm\n'), ((9834, 9879), 'cifar.get_val_loader', 'get_val_loader', ([], {'batch_size': '(128)', 'num_workers': '(0)'}), '(batch_size=128, num_workers=0)\n', (9848, 9879), False, 'from cifar import get_train_loader, get_val_loader\n'), ((10270, 10289), 'torch.mean', 'torch.mean', (['matches'], {}), '(matches)\n', (10280, 10289), False, 'import torch\n'), ((13106, 13215), 'cifar.get_train_loader', 'get_train_loader', (['args.batchsize', '(1)', '(1)', 'n_iters_per_epoch'], {'L': '(args.n_classes * numPerClass)', 'seed': 'args.seed'}), '(args.batchsize, 1, 1, n_iters_per_epoch, L=args.n_classes *\n numPerClass, seed=args.seed)\n', (13122, 13215), False, 'from cifar import get_train_loader, get_val_loader\n'), ((13557, 13587), 'numpy.concatenate', 'np.concatenate', (['labels'], {'axis': '(0)'}), '(labels, axis=0)\n', (13571, 13587), True, 'import numpy as np\n'), ((13638, 13671), 'numpy.concatenate', 'np.concatenate', (['predicted'], {'axis': '(0)'}), '(predicted, axis=0)\n', (13652, 13671), True, 'import numpy as np\n'), ((13744, 13770), 'numpy.argsort', 'np.argsort', (['(-probs)'], {'axis': '(0)'}), '(-probs, axis=0)\n', (13754, 13770), True, 'import numpy as np\n'), ((13924, 13960), 'numpy.unique', 'np.unique', (['preds'], {'return_counts': '(True)'}), '(preds, return_counts=True)\n', (13933, 13960), True, 'import numpy as np\n'), ((14200, 14237), 'numpy.zeros', 'np.zeros', (['[args.n_classes]'], {'dtype': 'int'}), '([args.n_classes], dtype=int)\n', (14208, 14237), True, 'import numpy as np\n'), ((14252, 14302), 'numpy.zeros', 'np.zeros', (['[args.n_classes, numPerClass]'], {'dtype': 'int'}), '([args.n_classes, numPerClass], dtype=int)\n', (14260, 14302), True, 'import numpy as np\n'), ((14816, 14874), 'numpy.save', 'np.save', (['name', 'sortByClass[0:args.n_classes, :numPerClass]'], {}), '(name, sortByClass[0:args.n_classes, :numPerClass])\n', (14823, 14874), True, 'import numpy as np\n'), ((15307, 15336), 'label_guessor.LabelGuessor', 'LabelGuessor', ([], {'thresh': 'args.thr'}), '(thresh=args.thr)\n', (15319, 15336), False, 'from label_guessor import LabelGuessor\n'), ((15347, 15373), 'ema.EMA', 'EMA', (['model', 'args.ema_alpha'], {}), '(model, args.ema_alpha)\n', (15350, 15373), False, 'from ema import EMA\n'), ((15674, 15788), 'torch.optim.SGD', 'torch.optim.SGD', (['param_list'], {'lr': 'args.lr', 'weight_decay': 'args.weight_decay', 'momentum': 'args.momentum', 'nesterov': '(True)'}), '(param_list, lr=args.lr, weight_decay=args.weight_decay,\n momentum=args.momentum, nesterov=True)\n', (15689, 15788), 
False, 'import torch\n'), ((15801, 15868), 'lr_scheduler.WarmupCosineLrScheduler', 'WarmupCosineLrScheduler', (['optim'], {'max_iter': 'n_iters_all', 'warmup_iter': '(0)'}), '(optim, max_iter=n_iters_all, warmup_iter=0)\n', (15824, 15868), False, 'from lr_scheduler import WarmupCosineLrScheduler\n'), ((15910, 16020), 'cifar.get_train_loader', 'get_train_loader', (['args.batchsize', 'args.mu', 'args.mu_c', 'n_iters_per_epoch'], {'L': 'args.n_labeled', 'seed': 'args.seed'}), '(args.batchsize, args.mu, args.mu_c, n_iters_per_epoch, L=\n args.n_labeled, seed=args.seed)\n', (15926, 16020), False, 'from cifar import get_train_loader, get_val_loader\n'), ((10505, 10520), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10518, 10520), False, 'import torch\n'), ((10580, 10631), 'tqdm.tqdm', 'tqdm', (['memory_data_loader'], {'desc': '"""Feature extracting"""'}), "(memory_data_loader, desc='Feature extracting')\n", (10584, 10631), False, 'from tqdm import tqdm\n'), ((11104, 11126), 'tqdm.tqdm', 'tqdm', (['test_data_loader'], {}), '(test_data_loader)\n', (11108, 11126), False, 'from tqdm import tqdm\n'), ((15010, 15027), 'numpy.mean', 'np.mean', (['classAcc'], {}), '(classAcc)\n', (15017, 15027), True, 'import numpy as np\n'), ((17740, 17764), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (17762, 17764), False, 'import torch\n'), ((18719, 18745), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'results'}), '(data=results)\n', (18731, 18745), True, 'import pandas as pd\n'), ((3673, 3694), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3692, 3694), True, 'import torch.nn as nn\n'), ((3719, 3740), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3738, 3740), True, 'import torch.nn as nn\n'), ((8427, 8455), 'torch.cat', 'torch.cat', (['[z_1, z_2]'], {'dim': '(0)'}), '([z_1, z_2], dim=0)\n', (8436, 8455), False, 'import torch\n'), ((9185, 9221), 'torch.cat', 'torch.cat', (['[pos_sim, pos_sim]'], {'dim': '(0)'}), '([pos_sim, pos_sim], dim=0)\n', (9194, 9221), False, 'import torch\n'), ((9988, 10003), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10001, 10003), False, 'import torch\n'), ((10068, 10096), 'torch.softmax', 'torch.softmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (10081, 10096), False, 'import torch\n'), ((10120, 10144), 'torch.max', 'torch.max', (['scores'], {'dim': '(1)'}), '(scores, dim=1)\n', (10129, 10144), False, 'import torch\n'), ((10226, 10251), 'torch.cat', 'torch.cat', (['matches'], {'dim': '(0)'}), '(matches, dim=0)\n', (10235, 10251), False, 'import torch\n'), ((11515, 11546), 'torch.mm', 'torch.mm', (['feature', 'feature_bank'], {}), '(feature, feature_bank)\n', (11523, 11546), False, 'import torch\n'), ((12845, 12867), 'random.choice', 'random.choice', (['letters'], {}), '(letters)\n', (12858, 12867), False, 'import random\n'), ((13357, 13372), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13370, 13372), False, 'import torch\n'), ((13438, 13466), 'torch.softmax', 'torch.softmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (13451, 13466), False, 'import torch\n'), ((14895, 14918), 'numpy.sum', 'np.sum', (['matches'], {'axis': '(1)'}), '(matches, axis=1)\n', (14901, 14918), True, 'import numpy as np\n'), ((17098, 17214), 'cifar.get_train_loader', 'get_train_loader', (['args.batchsize', 'args.mu', 'args.mu_c', 'n_iters_per_epoch'], {'L': '(10 * n_labeled)', 'seed': 'seed', 'name': 'name'}), '(args.batchsize, args.mu, args.mu_c, n_iters_per_epoch, L=\n 10 * n_labeled, 
seed=seed, name=name)\n', (17114, 17214), False, 'from cifar import get_train_loader, get_val_loader\n'), ((6509, 6524), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (6521, 6524), False, 'import torch\n'), ((8140, 8155), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (8152, 8155), False, 'import torch\n'), ((8767, 8794), 'torch.ones_like', 'torch.ones_like', (['sim_matrix'], {}), '(sim_matrix)\n', (8782, 8794), False, 'import torch\n'), ((8797, 8864), 'torch.eye', 'torch.eye', (['(2 * args.mu_c * args.batchsize)'], {'device': 'sim_matrix.device'}), '(2 * args.mu_c * args.batchsize, device=sim_matrix.device)\n', (8806, 8864), False, 'import torch\n'), ((17913, 18011), 'utils.CIFAR10Pair', 'utils.CIFAR10Pair', ([], {'root': '"""dataset"""', 'train': '(True)', 'transform': 'utils.test_transform', 'download': '(False)'}), "(root='dataset', train=True, transform=utils.\n test_transform, download=False)\n", (17930, 18011), False, 'import utils\n'), ((18040, 18142), 'torch.utils.data.DataLoader', 'DataLoader', (['memory_data'], {'batch_size': 'args.batchsize', 'shuffle': '(False)', 'num_workers': '(16)', 'pin_memory': '(True)'}), '(memory_data, batch_size=args.batchsize, shuffle=False,\n num_workers=16, pin_memory=True)\n', (18050, 18142), False, 'from torch.utils.data import DataLoader\n'), ((18163, 18262), 'utils.CIFAR10Pair', 'utils.CIFAR10Pair', ([], {'root': '"""dataset"""', 'train': '(False)', 'transform': 'utils.test_transform', 'download': '(False)'}), "(root='dataset', train=False, transform=utils.\n test_transform, download=False)\n", (18180, 18262), False, 'import utils\n'), ((18289, 18390), 'torch.utils.data.DataLoader', 'DataLoader', (['test_data'], {'batch_size': 'args.batchsize', 'shuffle': '(False)', 'num_workers': '(16)', 'pin_memory': '(True)'}), '(test_data, batch_size=args.batchsize, shuffle=False, num_workers\n =16, pin_memory=True)\n', (18299, 18390), False, 'from torch.utils.data import DataLoader\n'), ((4328, 4343), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (4340, 4343), False, 'import torch\n'), ((4352, 4367), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (4364, 4367), False, 'import torch\n'), ((4376, 4391), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (4388, 4391), False, 'import torch\n'), ((4420, 4435), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (4432, 4435), False, 'import torch\n'), ((4444, 4459), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (4456, 4459), False, 'import torch\n'), ((7509, 7524), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (7521, 7524), False, 'import torch\n'), ((9080, 9108), 'torch.sum', 'torch.sum', (['(z_1 * z_2)'], {'dim': '(-1)'}), '(z_1 * z_2, dim=-1)\n', (9089, 9108), False, 'import torch\n'), ((10790, 10817), 'torch.argmax', 'torch.argmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (10802, 10817), False, 'import torch\n'), ((5243, 5298), 'torch.cat', 'torch.cat', (['[ims_x_weak, ims_u_strong, ims_all_1]'], {'dim': '(0)'}), '([ims_x_weak, ims_u_strong, ims_all_1], dim=0)\n', (5252, 5298), False, 'import torch\n'), ((5340, 5395), 'torch.cat', 'torch.cat', (['[ims_x_weak, ims_u_strong, ims_all_2]'], {'dim': '(0)'}), '([ims_x_weak, ims_u_strong, ims_all_2], dim=0)\n', (5349, 5395), False, 'import torch\n'), ((6058, 6099), 'torch.cat', 'torch.cat', (['[ims_x_weak, ims_all_1]'], {'dim': '(0)'}), '([ims_x_weak, ims_all_1], dim=0)\n', (6067, 6099), False, 'import torch\n'), ((6139, 6180), 'torch.cat', 'torch.cat', (['[ims_x_weak, ims_all_2]'], {'dim': 
'(0)'}), '([ims_x_weak, ims_all_2], dim=0)\n', (6148, 6180), False, 'import torch\n'), ((7717, 7758), 'torch.cat', 'torch.cat', (['[ims_x_weak, ims_all_1]'], {'dim': '(0)'}), '([ims_x_weak, ims_all_1], dim=0)\n', (7726, 7758), False, 'import torch\n'), ((7794, 7835), 'torch.cat', 'torch.cat', (['[ims_x_weak, ims_all_2]'], {'dim': '(0)'}), '([ims_x_weak, ims_all_2], dim=0)\n', (7803, 7835), False, 'import torch\n'), ((10877, 10907), 'torch.cat', 'torch.cat', (['feature_bank'], {'dim': '(0)'}), '(feature_bank, dim=0)\n', (10886, 10907), False, 'import torch\n'), ((10964, 10996), 'torch.cat', 'torch.cat', (['feature_labels'], {'dim': '(0)'}), '(feature_labels, dim=0)\n', (10973, 10996), False, 'import torch\n'), ((6882, 6926), 'torch.cat', 'torch.cat', (['[ims_x_weak, ims_u_strong]'], {'dim': '(0)'}), '([ims_x_weak, ims_u_strong], dim=0)\n', (6891, 6926), False, 'import torch\n'), ((5847, 5897), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits_u', 'lbs_u'], {'reduction': '"""none"""'}), "(logits_u, lbs_u, reduction='none')\n", (5862, 5897), True, 'import torch.nn.functional as F\n'), ((7200, 7250), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits_u', 'lbs_u'], {'reduction': '"""none"""'}), "(logits_u, lbs_u, reduction='none')\n", (7215, 7250), True, 'import torch.nn.functional as F\n')]
|
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
data = pd.read_csv("data.csv")
data.info()
"""
Data columns (total 33 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 id 569 non-null int64
.
.
.
32 Unnamed: 32 0 non-null float64
"""
data.drop(["Unnamed: 32", "id"], axis = 1, inplace = True)
# data.head(10)
data.diagnosis = [1 if each == "M" else 0 for each in data.diagnosis]
y = data.diagnosis.values
x_data = data.drop(["diagnosis"], axis = 1)
# %% Normalization
x_normalized = (x_data - np.min(x_data)) / (np.max(x_data) - np.min(x_data)).values
x_data.head()
"""
x_data.head()
Out[9]:
radius_mean texture_mean ... symmetry_worst fractal_dimension_worst
0 17.99 10.38 ... 0.4601 0.11890
1 20.57 17.77 ... 0.2750 0.08902
2 19.69 21.25 ... 0.3613 0.08758
3 11.42 20.38 ... 0.6638 0.17300
4 20.29 14.34 ... 0.2364 0.07678
"""
x_normalized.head()
"""
x_normalized.head()
Out[10]:
radius_mean texture_mean ... symmetry_worst fractal_dimension_worst
0 0.521037 0.022658 ... 0.598462 0.418864
1 0.643144 0.272574 ... 0.233590 0.222878
2 0.601496 0.390260 ... 0.403706 0.213433
3 0.210090 0.360839 ... 1.000000 0.773711
4 0.629893 0.156578 ... 0.157500 0.142595
"""
# %% train test split
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x_normalized,y,test_size = 0.25, random_state = 42)
# test size & random state can be changed; test size could also be chosen as 0.2 or 0.18
# sklearn splits randomly; with a fixed random state the data is split with the same random pattern every run.
# rows as features
x_train = x_train.T
x_test = x_test.T
y_train = y_train.T
y_test = y_test.T
# %% Parameter Initialize
"""
If all the weights were initialized to zero,
backpropagation would not work as expected, because the gradients of the intermediate
and starting neurons would die out (become zero) and the weights would never update.
"""
def initialize_weights_and_bias(dimension):
w = np.full((dimension,1), 0.01) # init 0.01
b = np.zeros(1)
return w,b
def sigmoid(n):
y_hat = 1 / (1 + np.exp(-n))
return y_hat
# %%
def forward_backward_propagation(w,b,x_train,y_train):
# forward propagation
z = np.dot(w.T,x_train) + b
#y_train = y_train.T.reshape(-1,1)
y_hat = sigmoid(z)
loss = -(y_train*np.log(y_hat)+(1-y_train)*np.log(1-y_hat))
cost = (np.sum(loss))/x_train.shape[1] # x_train.shape[1] is for scaling
# Once cost is calculated, forward prop. is completed.
# backward propagation
derivative_weight = (np.dot(x_train,((y_hat-y_train).T)))/x_train.shape[1] # x_train.shape[1] is for scaling
derivative_bias = np.sum(y_hat-y_train)/x_train.shape[1] # x_train.shape[1] is for scaling
# x_train.shape[1] = 426
gradients = {"derivative_weight": derivative_weight,"derivative_bias": derivative_bias}
return cost,gradients
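# Hedged sketch (added; not part of the original tutorial): a quick one-sided
# finite-difference check of the analytic gradient returned above, on tiny made-up data.
def _gradient_check(eps=1e-5):
    rng = np.random.RandomState(0)
    x = rng.rand(3, 5)                                   # 3 features, 5 samples
    y = np.array([0, 1, 1, 0, 1])
    w, b = initialize_weights_and_bias(3)
    cost, grads = forward_backward_propagation(w, b, x, y)
    w_plus = w.copy()
    w_plus[0, 0] += eps
    cost_plus, _ = forward_backward_propagation(w_plus, b, x, y)
    numeric = (cost_plus - cost) / eps
    return numeric, grads["derivative_weight"][0, 0]     # the two numbers should be close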
# Updating(learning) parameters
def update(w, b, x_train, y_train, learning_rate,number_of_iteration):
cost_list = []
cost_list2 = []
index = []
# updating(learning) parameters is number_of_iterarion times
for i in range(number_of_iteration):
# make forward and backward propagation and find cost and gradients
cost,gradients = forward_backward_propagation(w,b,x_train,y_train)
cost_list.append(cost)
# lets update
w = w - learning_rate * gradients["derivative_weight"]
b = b - learning_rate * gradients["derivative_bias"]
if i % 100 == 0: # that's arbitrary, you can set it differently
cost_list2.append(cost)
index.append(i)
print ("Cost after iteration %i: %f" %(i, cost))
# we update(learn) parameters weights and bias
parameters = {"weight": w,"bias": b}
plt.plot(index,cost_list2)
plt.xticks(index,rotation='vertical')
plt.xlabel("Number of Iteration")
plt.ylabel("Cost")
plt.legend()
plt.show()
return parameters, gradients, cost_list
# prediction
def predict(w,b,x_test):
# x_test is an input for forward propagation
z = sigmoid(np.dot(w.T,x_test)+b)
Y_prediction = np.zeros((1,x_test.shape[1]))
# if z is bigger than 0.5, our prediction is one - true (y_hat=1),
# if z is smaller than 0.5, our prediction is zero - false (y_hat=0),
for i in range(z.shape[1]):
if z[0,i]<= 0.5:
Y_prediction[0,i] = 0
else:
Y_prediction[0,i] = 1
return Y_prediction
#implementing logistic regression
def logistic_regression(x_train, y_train, x_test, y_test, learning_rate , num_iterations):
# initialize
dimension = x_train.shape[0]
w,b = initialize_weights_and_bias(dimension)
# do not change learning rate
parameters, gradients, cost_list = update(w, b, x_train, y_train, learning_rate,num_iterations)
y_prediction_test = predict(parameters["weight"],parameters["bias"],x_test)
y_pred_train = predict(parameters["weight"],parameters["bias"],x_train)
# Print accuracy
print("test accuracy: {} %".format(100 - np.mean(np.abs(y_prediction_test - y_test)) * 100))
print("train accuracy: {} %".format(100 - np.mean(np.abs(y_pred_train - y_train)) * 100))
# %% Hyperparameter tuning
logistic_regression(x_train, y_train, x_test, y_test,learning_rate = 3, num_iterations = 1500)
"""
Cost after iteration 0: 0.693035
Cost after iteration 100: 0.153169
Cost after iteration 200: 0.121662
Cost after iteration 300: 0.107146
Cost after iteration 400: 0.098404
Cost after iteration 500: 0.092401
Cost after iteration 600: 0.087937
Cost after iteration 700: 0.084435
Cost after iteration 800: 0.081582
Cost after iteration 900: 0.079191
Cost after iteration 1000: 0.077143
Cost after iteration 1100: 0.075359
Cost after iteration 1200: 0.073784
Cost after iteration 1300: 0.072378
Cost after iteration 1400: 0.071111
No handles with labels found to put in legend.
test accuracy: 98.6013986013986 %
train accuracy: 98.35680751173709 %
"""
logistic_regression(x_train, y_train, x_test, y_test,learning_rate = 1, num_iterations = 1500)
"""
Cost after iteration 0: 0.693035
Cost after iteration 100: 0.226383
Cost after iteration 200: 0.176670
Cost after iteration 300: 0.153585
Cost after iteration 400: 0.139306
Cost after iteration 500: 0.129319
Cost after iteration 600: 0.121835
Cost after iteration 700: 0.115963
Cost after iteration 800: 0.111204
Cost after iteration 900: 0.107248
No handles with labels found to put in legend.
Cost after iteration 1000: 0.103893
Cost after iteration 1100: 0.101001
Cost after iteration 1200: 0.098474
Cost after iteration 1300: 0.096240
Cost after iteration 1400: 0.094247
test accuracy: 97.9020979020979 %
train accuracy: 98.12206572769954 %
"""
logistic_regression(x_train, y_train, x_test, y_test,learning_rate = 0.3, num_iterations = 1500)
"""
Cost after iteration 0: 0.693035
Cost after iteration 100: 0.357455
Cost after iteration 200: 0.274917
Cost after iteration 300: 0.235865
Cost after iteration 400: 0.212165
Cost after iteration 500: 0.195780
Cost after iteration 600: 0.183524
Cost after iteration 700: 0.173868
Cost after iteration 800: 0.165980
Cost after iteration 900: 0.159363
Cost after iteration 1000: 0.153700
Cost after iteration 1100: 0.148775
Cost after iteration 1200: 0.144439
Cost after iteration 1300: 0.140581
Cost after iteration 1400: 0.137119
No handles with labels found to put in legend.
test accuracy: 97.9020979020979 %
train accuracy: 96.94835680751174 %
"""
# %% Sklearn
from sklearn.linear_model import LogisticRegression
x_train = x_train.T
x_test = x_test.T
y_train = y_train.T
y_test = y_test.T
logreg = LogisticRegression(random_state = 42,max_iter= 1500)
print("test accuracy: {} ".format(logreg.fit(x_train, y_train).score(x_test, y_test)))
print("train accuracy: {} ".format(logreg.fit(x_train, y_train).score(x_train, y_train)))
"""
test accuracy: 0.986013986013986
train accuracy: 0.9671361502347418
"""
# %%
|
[
"numpy.abs",
"pandas.read_csv",
"matplotlib.pyplot.xticks",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.log",
"sklearn.linear_model.LogisticRegression",
"numpy.max",
"numpy.sum",
"numpy.zeros",
"numpy.dot",
"numpy.exp",
"numpy.min",
"numpy.full",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((101, 124), 'pandas.read_csv', 'pd.read_csv', (['"""data.csv"""'], {}), "('data.csv')\n", (112, 124), True, 'import pandas as pd\n'), ((1836, 1902), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_normalized', 'y'], {'test_size': '(0.25)', 'random_state': '(42)'}), '(x_normalized, y, test_size=0.25, random_state=42)\n', (1852, 1902), False, 'from sklearn.model_selection import train_test_split\n'), ((8143, 8193), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(42)', 'max_iter': '(1500)'}), '(random_state=42, max_iter=1500)\n', (8161, 8193), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2473, 2502), 'numpy.full', 'np.full', (['(dimension, 1)', '(0.01)'], {}), '((dimension, 1), 0.01)\n', (2480, 2502), True, 'import numpy as np\n'), ((2522, 2533), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (2530, 2533), True, 'import numpy as np\n'), ((4282, 4309), 'matplotlib.pyplot.plot', 'plt.plot', (['index', 'cost_list2'], {}), '(index, cost_list2)\n', (4290, 4309), True, 'import matplotlib.pyplot as plt\n'), ((4313, 4351), 'matplotlib.pyplot.xticks', 'plt.xticks', (['index'], {'rotation': '"""vertical"""'}), "(index, rotation='vertical')\n", (4323, 4351), True, 'import matplotlib.pyplot as plt\n'), ((4355, 4388), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Iteration"""'], {}), "('Number of Iteration')\n", (4365, 4388), True, 'import matplotlib.pyplot as plt\n'), ((4393, 4411), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cost"""'], {}), "('Cost')\n", (4403, 4411), True, 'import matplotlib.pyplot as plt\n'), ((4416, 4428), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4426, 4428), True, 'import matplotlib.pyplot as plt\n'), ((4433, 4443), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4441, 4443), True, 'import matplotlib.pyplot as plt\n'), ((4632, 4662), 'numpy.zeros', 'np.zeros', (['(1, x_test.shape[1])'], {}), '((1, x_test.shape[1]))\n', (4640, 4662), True, 'import numpy as np\n'), ((662, 676), 'numpy.min', 'np.min', (['x_data'], {}), '(x_data)\n', (668, 676), True, 'import numpy as np\n'), ((2712, 2732), 'numpy.dot', 'np.dot', (['w.T', 'x_train'], {}), '(w.T, x_train)\n', (2718, 2732), True, 'import numpy as np\n'), ((2874, 2886), 'numpy.sum', 'np.sum', (['loss'], {}), '(loss)\n', (2880, 2886), True, 'import numpy as np\n'), ((3061, 3097), 'numpy.dot', 'np.dot', (['x_train', '(y_hat - y_train).T'], {}), '(x_train, (y_hat - y_train).T)\n', (3067, 3097), True, 'import numpy as np\n'), ((3172, 3195), 'numpy.sum', 'np.sum', (['(y_hat - y_train)'], {}), '(y_hat - y_train)\n', (3178, 3195), True, 'import numpy as np\n'), ((681, 695), 'numpy.max', 'np.max', (['x_data'], {}), '(x_data)\n', (687, 695), True, 'import numpy as np\n'), ((698, 712), 'numpy.min', 'np.min', (['x_data'], {}), '(x_data)\n', (704, 712), True, 'import numpy as np\n'), ((2587, 2597), 'numpy.exp', 'np.exp', (['(-n)'], {}), '(-n)\n', (2593, 2597), True, 'import numpy as np\n'), ((4591, 4610), 'numpy.dot', 'np.dot', (['w.T', 'x_test'], {}), '(w.T, x_test)\n', (4597, 4610), True, 'import numpy as np\n'), ((2819, 2832), 'numpy.log', 'np.log', (['y_hat'], {}), '(y_hat)\n', (2825, 2832), True, 'import numpy as np\n'), ((2845, 2862), 'numpy.log', 'np.log', (['(1 - y_hat)'], {}), '(1 - y_hat)\n', (2851, 2862), True, 'import numpy as np\n'), ((5571, 5605), 'numpy.abs', 'np.abs', (['(y_prediction_test - y_test)'], {}), '(y_prediction_test - y_test)\n', (5577, 5605), True, 'import numpy as np\n'), ((5669, 5699), 
'numpy.abs', 'np.abs', (['(y_pred_train - y_train)'], {}), '(y_pred_train - y_train)\n', (5675, 5699), True, 'import numpy as np\n')]
|
import os
import numpy as np
import torch
import argparse
from hparams import create_hparams
from model import lcm
from train import load_model
from torch.utils.data import DataLoader
from reader import TextMelIDLoader, TextMelIDCollate, id2sp
from inference_utils import plot_data
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--checkpoint_path', type=str,
help='directory to save checkpoints')
parser.add_argument('--hparams', type=str,
required=False, help='comma separated name=value pairs')
args = parser.parse_args()
checkpoint_path=args.checkpoint_path
hparams = create_hparams(args.hparams)
model = load_model(hparams)
model.load_state_dict(torch.load(checkpoint_path)['state_dict'], strict=False)
_ = model.eval()
def gen_embedding(speaker):
training_list = hparams.training_list
train_set_A = TextMelIDLoader(training_list, hparams.mel_mean_std, hparams.speaker_A,
hparams.speaker_B,
shuffle=False,pids=[speaker])
collate_fn = TextMelIDCollate(lcm(hparams.n_frames_per_step_encoder,
hparams.n_frames_per_step_decoder))
train_loader_A = DataLoader(train_set_A, num_workers=1, shuffle=False,
sampler=None,
batch_size=1, pin_memory=False,
drop_last=True, collate_fn=collate_fn)
with torch.no_grad():
speaker_embeddings = []
for i,batch in enumerate(train_loader_A):
#print i
x, y = model.parse_batch(batch)
text_input_padded, mel_padded, text_lengths, mel_lengths, speaker_id = x
speaker_id, speaker_embedding = model.speaker_encoder.inference(mel_padded)
speaker_embedding = speaker_embedding.data.cpu().numpy()
speaker_embeddings.append(speaker_embedding)
speaker_embeddings = np.vstack(speaker_embeddings)
print(speaker_embeddings.shape)
if not os.path.exists('outdir/embeddings'):
os.makedirs('outdir/embeddings')
np.save('outdir/embeddings/%s.npy'%speaker, speaker_embeddings)
plot_data([speaker_embeddings],
'outdir/embeddings/%s.pdf'%speaker)
print('Generating embedding of %s ...'%hparams.speaker_A)
gen_embedding(hparams.speaker_A)
print('Generating embedding of %s ...'%hparams.speaker_B)
gen_embedding(hparams.speaker_B)
|
[
"os.path.exists",
"argparse.ArgumentParser",
"os.makedirs",
"train.load_model",
"torch.load",
"model.lcm",
"hparams.create_hparams",
"numpy.vstack",
"torch.utils.data.DataLoader",
"torch.no_grad",
"reader.TextMelIDLoader",
"inference_utils.plot_data",
"numpy.save"
] |
[((293, 318), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (316, 318), False, 'import argparse\n'), ((638, 666), 'hparams.create_hparams', 'create_hparams', (['args.hparams'], {}), '(args.hparams)\n', (652, 666), False, 'from hparams import create_hparams\n'), ((676, 695), 'train.load_model', 'load_model', (['hparams'], {}), '(hparams)\n', (686, 695), False, 'from train import load_model\n'), ((884, 1009), 'reader.TextMelIDLoader', 'TextMelIDLoader', (['training_list', 'hparams.mel_mean_std', 'hparams.speaker_A', 'hparams.speaker_B'], {'shuffle': '(False)', 'pids': '[speaker]'}), '(training_list, hparams.mel_mean_std, hparams.speaker_A,\n hparams.speaker_B, shuffle=False, pids=[speaker])\n', (899, 1009), False, 'from reader import TextMelIDLoader, TextMelIDCollate, id2sp\n'), ((1202, 1344), 'torch.utils.data.DataLoader', 'DataLoader', (['train_set_A'], {'num_workers': '(1)', 'shuffle': '(False)', 'sampler': 'None', 'batch_size': '(1)', 'pin_memory': '(False)', 'drop_last': '(True)', 'collate_fn': 'collate_fn'}), '(train_set_A, num_workers=1, shuffle=False, sampler=None,\n batch_size=1, pin_memory=False, drop_last=True, collate_fn=collate_fn)\n', (1212, 1344), False, 'from torch.utils.data import DataLoader\n'), ((2116, 2181), 'numpy.save', 'np.save', (["('outdir/embeddings/%s.npy' % speaker)", 'speaker_embeddings'], {}), "('outdir/embeddings/%s.npy' % speaker, speaker_embeddings)\n", (2123, 2181), True, 'import numpy as np\n'), ((2184, 2253), 'inference_utils.plot_data', 'plot_data', (['[speaker_embeddings]', "('outdir/embeddings/%s.pdf' % speaker)"], {}), "([speaker_embeddings], 'outdir/embeddings/%s.pdf' % speaker)\n", (2193, 2253), False, 'from inference_utils import plot_data\n'), ((718, 745), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (728, 745), False, 'import torch\n'), ((1077, 1150), 'model.lcm', 'lcm', (['hparams.n_frames_per_step_encoder', 'hparams.n_frames_per_step_decoder'], {}), '(hparams.n_frames_per_step_encoder, hparams.n_frames_per_step_decoder)\n', (1080, 1150), False, 'from model import lcm\n'), ((1447, 1462), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1460, 1462), False, 'import torch\n'), ((1943, 1972), 'numpy.vstack', 'np.vstack', (['speaker_embeddings'], {}), '(speaker_embeddings)\n', (1952, 1972), True, 'import numpy as np\n'), ((2029, 2064), 'os.path.exists', 'os.path.exists', (['"""outdir/embeddings"""'], {}), "('outdir/embeddings')\n", (2043, 2064), False, 'import os\n'), ((2074, 2106), 'os.makedirs', 'os.makedirs', (['"""outdir/embeddings"""'], {}), "('outdir/embeddings')\n", (2085, 2106), False, 'import os\n')]
|
# Author: <NAME> <<EMAIL>>
import numpy as np
from bolero.representation import BlackBoxBehavior
from bolero.representation import DMPBehavior as DMPBehaviorImpl
class DMPBehavior(BlackBoxBehavior):
"""Dynamical Movement Primitive.
Parameters
----------
execution_time : float, optional (default: 1)
Execution time of the DMP in seconds.
dt : float, optional (default: 0.01)
Time between successive steps in seconds.
n_features : int, optional (default: 50)
Number of RBF features for each dimension of the DMP.
configuration_file : string, optional (default: None)
Name of a configuration file that should be used to initialize the DMP.
If it is set all other arguments will be ignored.
"""
def __init__(self, execution_time=1.0, dt=0.01, n_features=50,
configuration_file=None):
self.dmp = DMPBehaviorImpl(execution_time, dt, n_features,
configuration_file)
def init(self, n_inputs, n_outputs):
"""Initialize the behavior.
Parameters
----------
n_inputs : int
number of inputs
n_outputs : int
number of outputs
"""
self.dmp.init(3 * n_inputs, 3 * n_outputs)
self.n_joints = n_inputs
self.x = np.empty(3 * self.n_joints)
self.x[:] = np.nan
def reset(self):
self.dmp.reset()
self.x[:] = 0.0
def set_inputs(self, inputs):
self.x[:self.n_joints] = inputs[:]
def can_step(self):
return self.dmp.can_step()
def step(self):
self.dmp.set_inputs(self.x)
self.dmp.step()
self.dmp.get_outputs(self.x)
def get_outputs(self, outputs):
outputs[:] = self.x[:self.n_joints]
def get_n_params(self):
return self.dmp.get_n_params()
def get_params(self):
return self.dmp.get_params()
def set_params(self, params):
self.dmp.set_params(params)
def set_meta_parameters(self, keys, values):
self.dmp.set_meta_parameters(keys, values)
def trajectory(self):
return self.dmp.trajectory()
class DMPBehaviorWithGoalParams(DMPBehavior):
def __init__(self, goal, execution_time=1.0, dt=0.01, n_features=50,
configuration_file=None):
super(DMPBehaviorWithGoalParams, self).__init__(
execution_time, dt, n_features, configuration_file)
self.params = np.copy(goal)
def set_meta_parameters(self, keys, values):
self.dmp.set_meta_parameters(keys, values)
self.set_params(self.params)
def get_n_params(self):
return len(self.params)
def get_params(self):
return self.params
def set_params(self, params):
self.params[:] = params
self.dmp.set_meta_parameters(["g"], [self.params])
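# Hedged usage sketch (added; not part of the original module). Assumes a 3-DOF task
# space so that goals and joint vectors have length 3, and that the bolero backend is installed:
# behavior = DMPBehaviorWithGoalParams(goal=np.zeros(3))
# behavior.init(3, 3)
# behavior.set_params(np.ones(3))          # move the goal via the exposed parameters
# while behavior.can_step():
#     behavior.set_inputs(np.zeros(3))      # current joint positions (placeholder values)
#     behavior.step()
#     outputs = np.empty(3)
#     behavior.get_outputs(outputs)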
|
[
"numpy.copy",
"numpy.empty",
"bolero.representation.DMPBehavior"
] |
[((900, 967), 'bolero.representation.DMPBehavior', 'DMPBehaviorImpl', (['execution_time', 'dt', 'n_features', 'configuration_file'], {}), '(execution_time, dt, n_features, configuration_file)\n', (915, 967), True, 'from bolero.representation import DMPBehavior as DMPBehaviorImpl\n'), ((1340, 1367), 'numpy.empty', 'np.empty', (['(3 * self.n_joints)'], {}), '(3 * self.n_joints)\n', (1348, 1367), True, 'import numpy as np\n'), ((2478, 2491), 'numpy.copy', 'np.copy', (['goal'], {}), '(goal)\n', (2485, 2491), True, 'import numpy as np\n')]
|
# Script for data augmentation functions
import numpy as np
from collections import deque
from PIL import Image
import cv2
import torch  # needed by image_to_tensor (torch.from_numpy)
from data.config import *
def imread_cv2(image_path):
"""
Read image_path with cv2 format (H, W, C)
if the image is a '.gif', the output is a numpy array of {0,1}
"""
image_format = image_path[-3:]
if image_format == 'jpg':
image = cv2.imread(image_path)
else:
image = np.array(Image.open(image_path))
return image
def resize_cv2(image, heigh=1280, width=1918):
return cv2.resize(image, (width, heigh), cv2.INTER_LINEAR)
def image_to_tensor(image, mean=0, std=1.):
"""Transform image (input is numpy array, read in by cv2) """
if len(image.shape) == 2:
image = image.reshape(image.shape[0], image.shape[1], 1)
image = image.astype(np.float32)
image = (image-mean)/std
image = image.transpose((2,0,1))
tensor = torch.from_numpy(image)
return tensor
# --- Data Augmentation functions --- #
# A lot of functions can be found here:
# https://github.com/fchollet/keras/blob/master/keras/preprocessing/image.py#L223
# transform image and label
def randomHorizontalFlip(image, mask, p=0.5):
"""Do a random horizontal flip with probability p"""
if np.random.random() < p:
image = np.fliplr(image)
mask = np.fliplr(mask)
return image, mask
def randomVerticalFlip(image, mask, p=0.5):
"""Do a random vertical flip with probability p"""
if np.random.random() < p:
image = np.flipud(image)
mask = np.flipud(mask)
return image, mask
def randomHorizontalShift(image, mask, max_shift=0.05, p=0.5):
"""Do random horizontal shift with max proportion shift and with probability p
Elements that roll beyond the last position are re-introduced at the first."""
max_shift_pixels = int(max_shift*image.shape[1])
shift = np.random.choice(np.arange(-max_shift_pixels, max_shift_pixels+1))
if np.random.random() < p:
image = np.roll(image, shift, axis=1)
mask = np.roll(mask, shift, axis=1)
return image, mask
def randomVerticalShift(image, mask, max_shift=0.05, p=0.5):
"""Do random vertical shift with max proportion shift and probability p
Elements that roll beyond the last position are re-introduced at the first."""
max_shift_pixels = int(max_shift*image.shape[0])
shift = np.random.choice(np.arange(-max_shift_pixels, max_shift_pixels+1))
if np.random.random() < p:
image = np.roll(image, shift, axis=0)
mask = np.roll(mask, shift, axis=0)
return image, mask
def randomInvert(image, mask, p=0.5):
"""Randomly invert image with probability p"""
if np.random.random() < p:
image = 255 - image
mask = mask
return image, mask
def randomBrightness(image, mask, p=0.75):
"""With probability p, randomly increase or decrease brightness.
See https://stackoverflow.com/questions/37822375/python-opencv-increasing-image-brightness-without-overflowing-uint8-array"""
if np.random.random() < p:
max_value = np.percentile(255-image, q=25) # avoid burning out white cars, so take image-specific maximum
value = np.random.choice(np.arange(-max_value, max_value))
if value > 0:
image = np.where((255 - image) < value,255,image+value).astype(np.uint8)
else:
image = np.where(image < -value,0,image+value).astype(np.uint8)
return image, mask
def randomHue(image, mask, p=0.25, max_value=75):
"""With probability p, randomly increase or decrease hue.
See https://stackoverflow.com/questions/32609098/how-to-fast-change-image-brightness-with-python-opencv"""
if np.random.random() < p:
value = np.random.choice(np.arange(-max_value, max_value))
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
hsv[:,:,0] = hsv[:,:,0] + value
hsv = np.clip(hsv, a_min=0, a_max=255).astype(np.uint8)
image = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
return image, mask
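# Hedged demo (added; not part of the original augmentation module): chains a few of the
# helpers above on a synthetic image so the expected shapes and dtypes are visible.
def _augmentation_demo():
    image = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)
    mask = np.random.randint(0, 2, size=(64, 64)).astype(np.uint8)
    image, mask = randomHorizontalFlip(image, mask, p=1.0)
    image, mask = randomHorizontalShift(image, mask, max_shift=0.05, p=1.0)
    image, mask = randomBrightness(image, mask, p=1.0)
    tensor = image_to_tensor(np.ascontiguousarray(image), mean=127.5, std=127.5)
    return tensor.shape, mask.shape  # torch.Size([3, 64, 64]), (64, 64)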
def GaussianBlur(image, mask, kernel=(1, 1), sigma=1, p=0.5):
    """With probability p, apply Gaussian blur to the image (mask left unchanged).
    Note: fills in the original TODO; kernel sizes must be odd, as cv2.GaussianBlur requires."""
    if np.random.random() < p:
        image = cv2.GaussianBlur(image, kernel, sigma)
    return image, mask
def randomRotate(image, mask, max_angle, p=0.5):
    """Perform random rotation with an angle drawn from [-max_angle, max_angle] and probability p.
    Note: fills in the original TODO; image and mask are rotated with the same matrix,
    and the mask uses nearest-neighbor interpolation so it stays binary."""
    if np.random.random() < p:
        angle = np.random.uniform(-max_angle, max_angle)
        h, w = image.shape[:2]
        M = cv2.getRotationMatrix2D((w / 2, h / 2), angle, 1.0)
        image = cv2.warpAffine(image, M, (w, h))
        mask = cv2.warpAffine(mask, M, (w, h), flags=cv2.INTER_NEAREST)
    return (image, mask)
|
[
"numpy.clip",
"PIL.Image.open",
"numpy.roll",
"numpy.flipud",
"numpy.random.random",
"numpy.fliplr",
"numpy.where",
"cv2.cvtColor",
"numpy.percentile",
"cv2.resize",
"cv2.imread",
"numpy.arange"
] |
[((538, 589), 'cv2.resize', 'cv2.resize', (['image', '(width, heigh)', 'cv2.INTER_LINEAR'], {}), '(image, (width, heigh), cv2.INTER_LINEAR)\n', (548, 589), False, 'import cv2\n'), ((379, 401), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (389, 401), False, 'import cv2\n'), ((1257, 1275), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1273, 1275), True, 'import numpy as np\n'), ((1297, 1313), 'numpy.fliplr', 'np.fliplr', (['image'], {}), '(image)\n', (1306, 1313), True, 'import numpy as np\n'), ((1329, 1344), 'numpy.fliplr', 'np.fliplr', (['mask'], {}), '(mask)\n', (1338, 1344), True, 'import numpy as np\n'), ((1475, 1493), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1491, 1493), True, 'import numpy as np\n'), ((1515, 1531), 'numpy.flipud', 'np.flipud', (['image'], {}), '(image)\n', (1524, 1531), True, 'import numpy as np\n'), ((1547, 1562), 'numpy.flipud', 'np.flipud', (['mask'], {}), '(mask)\n', (1556, 1562), True, 'import numpy as np\n'), ((1898, 1948), 'numpy.arange', 'np.arange', (['(-max_shift_pixels)', '(max_shift_pixels + 1)'], {}), '(-max_shift_pixels, max_shift_pixels + 1)\n', (1907, 1948), True, 'import numpy as np\n'), ((1955, 1973), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1971, 1973), True, 'import numpy as np\n'), ((1995, 2024), 'numpy.roll', 'np.roll', (['image', 'shift'], {'axis': '(1)'}), '(image, shift, axis=1)\n', (2002, 2024), True, 'import numpy as np\n'), ((2040, 2068), 'numpy.roll', 'np.roll', (['mask', 'shift'], {'axis': '(1)'}), '(mask, shift, axis=1)\n', (2047, 2068), True, 'import numpy as np\n'), ((2395, 2445), 'numpy.arange', 'np.arange', (['(-max_shift_pixels)', '(max_shift_pixels + 1)'], {}), '(-max_shift_pixels, max_shift_pixels + 1)\n', (2404, 2445), True, 'import numpy as np\n'), ((2452, 2470), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2468, 2470), True, 'import numpy as np\n'), ((2496, 2525), 'numpy.roll', 'np.roll', (['image', 'shift'], {'axis': '(0)'}), '(image, shift, axis=0)\n', (2503, 2525), True, 'import numpy as np\n'), ((2545, 2573), 'numpy.roll', 'np.roll', (['mask', 'shift'], {'axis': '(0)'}), '(mask, shift, axis=0)\n', (2552, 2573), True, 'import numpy as np\n'), ((2694, 2712), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2710, 2712), True, 'import numpy as np\n'), ((3039, 3057), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3055, 3057), True, 'import numpy as np\n'), ((3083, 3115), 'numpy.percentile', 'np.percentile', (['(255 - image)'], {'q': '(25)'}), '(255 - image, q=25)\n', (3096, 3115), True, 'import numpy as np\n'), ((3696, 3714), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3712, 3714), True, 'import numpy as np\n'), ((3801, 3839), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2HSV'], {}), '(image, cv2.COLOR_BGR2HSV)\n', (3813, 3839), False, 'import cv2\n'), ((3960, 3996), 'cv2.cvtColor', 'cv2.cvtColor', (['hsv', 'cv2.COLOR_HSV2BGR'], {}), '(hsv, cv2.COLOR_HSV2BGR)\n', (3972, 3996), False, 'import cv2\n'), ((437, 459), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (447, 459), False, 'from PIL import Image\n'), ((3210, 3242), 'numpy.arange', 'np.arange', (['(-max_value)', 'max_value'], {}), '(-max_value, max_value)\n', (3219, 3242), True, 'import numpy as np\n'), ((3753, 3785), 'numpy.arange', 'np.arange', (['(-max_value)', 'max_value'], {}), '(-max_value, max_value)\n', (3762, 3785), True, 'import numpy as np\n'), ((3894, 3926), 'numpy.clip', 'np.clip', 
(['hsv'], {'a_min': '(0)', 'a_max': '(255)'}), '(hsv, a_min=0, a_max=255)\n', (3901, 3926), True, 'import numpy as np\n'), ((3286, 3335), 'numpy.where', 'np.where', (['(255 - image < value)', '(255)', '(image + value)'], {}), '(255 - image < value, 255, image + value)\n', (3294, 3335), True, 'import numpy as np\n'), ((3385, 3427), 'numpy.where', 'np.where', (['(image < -value)', '(0)', '(image + value)'], {}), '(image < -value, 0, image + value)\n', (3393, 3427), True, 'import numpy as np\n')]
|
### Load necessary libraries ###
import numpy as np
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
import tensorflow as tf
from tensorflow import keras
from sklearn.metrics import ConfusionMatrixDisplay
model = get_network()
model.summary()
### Train and evaluate via 10-Folds cross-validation ###
accuracies = []
folds = np.array(['fold1','fold2','fold3','fold4',
'fold5','fold6','fold7','fold8',
'fold9','fold10'])
load_dir = "UrbanSounds8K/processed/"
kf = KFold(n_splits=10)
for train_index, test_index in kf.split(folds):
x_train, y_train = [], []
for ind in train_index:
# read features or segments of an audio file
train_data = np.load("{0}/{1}.npz".format(load_dir,folds[ind]),
allow_pickle=True)
# for training stack all the segments so that they are treated as an example/instance
features = np.concatenate(train_data["features"], axis=0)
labels = np.concatenate(train_data["labels"], axis=0)
x_train.append(features)
y_train.append(labels)
# stack x,y pairs of all training folds
x_train = np.concatenate(x_train, axis = 0).astype(np.float32)
y_train = np.concatenate(y_train, axis = 0).astype(np.float32)
# for testing we will make predictions on each segment and average them to
# produce single label for an entire sound clip.
test_data = np.load("{0}/{1}.npz".format(load_dir,
folds[test_index][0]), allow_pickle=True)
x_test = test_data["features"]
y_test = test_data["labels"]
log_dir="logs/fit/" + folds[test_index][0]
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
model = get_network()
model.fit(x_train, y_train, epochs = 20, batch_size = 64, verbose = 1, validation_split=0.2,
use_multiprocessing=True, workers=8, callbacks=[tensorboard_callback])
# evaluate on test set/fold
y_true, y_pred = [], []
for x, y in zip(x_test, y_test):
# average predictions over segments of a sound clip
avg_p = np.argmax(np.mean(model.predict(x), axis = 0))
y_pred.append(avg_p)
# pick single label via np.unique for a sound clip
y_true.append(np.unique(y)[0])
accuracies.append(accuracy_score(y_true, y_pred))
print("Fold n accuracy: {0}".format(accuracy_score(y_true, y_pred)))
cm = ConfusionMatrixDisplay.from_predictions(y_true, y_pred)
cm.figure_.savefig('conf_mat_' + str(test_index) + '_acc_' + str(accuracy_score(y_true, y_pred)) + '.png',dpi=1000)
print("Average 10 Folds Accuracy: {0}".format(np.mean(accuracies)))
|
[
"numpy.mean",
"sklearn.metrics.ConfusionMatrixDisplay.from_predictions",
"tensorflow.keras.callbacks.TensorBoard",
"numpy.unique",
"numpy.array",
"numpy.concatenate",
"sklearn.model_selection.KFold",
"sklearn.metrics.accuracy_score"
] |
[((365, 470), 'numpy.array', 'np.array', (["['fold1', 'fold2', 'fold3', 'fold4', 'fold5', 'fold6', 'fold7', 'fold8',\n 'fold9', 'fold10']"], {}), "(['fold1', 'fold2', 'fold3', 'fold4', 'fold5', 'fold6', 'fold7',\n 'fold8', 'fold9', 'fold10'])\n", (373, 470), True, 'import numpy as np\n'), ((539, 557), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(10)'}), '(n_splits=10)\n', (544, 557), False, 'from sklearn.model_selection import KFold\n'), ((1696, 1761), 'tensorflow.keras.callbacks.TensorBoard', 'tf.keras.callbacks.TensorBoard', ([], {'log_dir': 'log_dir', 'histogram_freq': '(1)'}), '(log_dir=log_dir, histogram_freq=1)\n', (1726, 1761), True, 'import tensorflow as tf\n'), ((2468, 2523), 'sklearn.metrics.ConfusionMatrixDisplay.from_predictions', 'ConfusionMatrixDisplay.from_predictions', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (2507, 2523), False, 'from sklearn.metrics import ConfusionMatrixDisplay\n'), ((945, 991), 'numpy.concatenate', 'np.concatenate', (["train_data['features']"], {'axis': '(0)'}), "(train_data['features'], axis=0)\n", (959, 991), True, 'import numpy as np\n'), ((1010, 1054), 'numpy.concatenate', 'np.concatenate', (["train_data['labels']"], {'axis': '(0)'}), "(train_data['labels'], axis=0)\n", (1024, 1054), True, 'import numpy as np\n'), ((2349, 2379), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (2363, 2379), False, 'from sklearn.metrics import accuracy_score\n'), ((2695, 2714), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (2702, 2714), True, 'import numpy as np\n'), ((1179, 1210), 'numpy.concatenate', 'np.concatenate', (['x_train'], {'axis': '(0)'}), '(x_train, axis=0)\n', (1193, 1210), True, 'import numpy as np\n'), ((1246, 1277), 'numpy.concatenate', 'np.concatenate', (['y_train'], {'axis': '(0)'}), '(y_train, axis=0)\n', (1260, 1277), True, 'import numpy as np\n'), ((2425, 2455), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (2439, 2455), False, 'from sklearn.metrics import accuracy_score\n'), ((2309, 2321), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (2318, 2321), True, 'import numpy as np\n'), ((2593, 2623), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (2607, 2623), False, 'from sklearn.metrics import accuracy_score\n')]
|
import os
# Restrict the script to run on CPU
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ""
# Import Keras Tensoflow Backend
# from keras import backend as K
import tensorflow as tf
# Configure it to use only specific CPU Cores
config = tf.ConfigProto(intra_op_parallelism_threads=4,
inter_op_parallelism_threads=4,
device_count={"CPU": 1, "GPU": 0},
allow_soft_placement=True)
# import tensorflow as tf
import numpy as np
from IEOMAP_dataset_AC import dataset, IeomapSentenceIterator
from sklearn.metrics import confusion_matrix
from models_AC import SentenceModel
import json
import os
def emotion_recognition(n_run, epochs, batch_size, embedding_size, first_rnn_size, dropout, embedding, num_speakers):
########################################################################################################################
# Hyper-parameters
########################################################################################################################
split_size = 0.8 # Split proportion of train and test data
#log_dir = './logs_AC/RNN_without_ID/1'
log_dir = './logs_AC/RNN_' \
+ str(num_speakers) + '/' + str(n_run) + '/'
#log_dir = './logs_AC/RNN_' + embedding + 'Emb' + str(embedding_size) + '_1layer' + str(2*first_rnn_size) + '/' + str(n_run)
train_log_dir = log_dir + 'train'
val_log_dir = log_dir + 'val'
########################################################################################################################
# Initialize the Data set
########################################################################################################################
sentences, targets, data_info, speakers = dataset(mode='sentences', embedding=embedding, embedding_size=embedding_size)
train_data = IeomapSentenceIterator(sentences[0], targets[0], data_info['sentences_length'][0], speakers[0])
val_data = IeomapSentenceIterator(sentences[1], targets[1], data_info['sentences_length'][1], speakers[1])
test_data = IeomapSentenceIterator(sentences[2], targets[2], data_info['sentences_length'][2], speakers[2])
########################################################################################################################
# Initialize the model
########################################################################################################################
g = SentenceModel(vocab_size=(data_info['vocabulary_size'] + 1),
embedding_size=embedding_size,
first_rnn_size=first_rnn_size,
num_classes=data_info['num_classes'],
dropout=dropout,
embedding=embedding,
num_speakers=num_speakers)
# Store model setup
model_setup = {'vocab_size': (data_info['vocabulary_size'] + 1),
'embedding_size': embedding_size,
'first_rnn_size': first_rnn_size,
'num_classes': data_info['num_classes'],
'dropout': dropout,
'embedding': embedding,
'num_speakers': num_speakers}
dirname = os.path.dirname(log_dir)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(log_dir + 'model_setup.p', 'w') as file:
json.dump(model_setup, file, indent=4)
########################################################################################################################
# Initialize the parameters
########################################################################################################################
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
saver = tf.train.Saver()
epoch = 0
best_epoch = 0
train_conf_matrix = 0
val_conf_matrix = 0
test_conf_matrix = 0
best_acc = 0
########################################################################################################################
# Performance Indicators
########################################################################################################################
writer_train = tf.summary.FileWriter(train_log_dir, sess.graph)
writer_val = tf.summary.FileWriter(val_log_dir)
accuracy_tf = tf.placeholder(tf.float32, [])
precision_tf = tf.placeholder(tf.float32, [])
recall_tf = tf.placeholder(tf.float32, [])
summary_op = tf.summary.scalar('accuracy', accuracy_tf)
summary_op = tf.summary.scalar('precision', precision_tf)
summary_op = tf.summary.scalar('recall', recall_tf)
########################################################################################################################
# Model training procedure
########################################################################################################################
while train_data.epoch < epochs: # and train_data.epoch < best_epoch + 20:
sentences_batch, sentences_length_batch, targets_batch, speakers_batch = train_data.next_batch(batch_size)
preds, _ = sess.run([g['preds'],
g['ts']],
feed_dict={g['x']: np.array(sentences_batch),
g['y']: np.array(targets_batch).reshape(len(targets_batch)),
g['speaker']: np.array(speakers_batch),
g['seqlen']: np.array(sentences_length_batch).reshape(len(targets_batch))})
####################################################################################################################
# Calculate the Train data Confusion Matrix
####################################################################################################################
train_conf_matrix += confusion_matrix(targets_batch, preds, labels=range(data_info['num_classes']))
####################################################################################################################
# Add the end of each training epoch compute the validation results and store the relevant information
####################################################################################################################
if train_data.epoch != epoch:
while val_data.epoch == epoch:
sentences_batch, sentences_length_batch, targets_batch, speakers_batch = val_data.next_batch(batch_size)
preds = sess.run([g['preds']],
feed_dict={g['x']: np.array(sentences_batch),
g['y']: np.array(targets_batch).reshape(len(targets_batch)),
g['speaker']: np.array(speakers_batch),
g['seqlen']: np.array(sentences_length_batch).reshape(
len(targets_batch))})
############################################################################################################
            # Calculate the Validation data Confusion Matrix
############################################################################################################
val_conf_matrix += confusion_matrix(targets_batch, preds[0], labels=range(data_info['num_classes']))
################################################################################################################
# Compute Accuracy, Precision and Recall
################################################################################################################
train_CM_size = len(train_conf_matrix)
total_train = sum(sum(train_conf_matrix))
train_TP = np.diagonal(train_conf_matrix)
train_FP = [sum(train_conf_matrix[:, i]) - train_TP[i] for i in range(train_CM_size)]
train_FN = [sum(train_conf_matrix[i, :]) - train_TP[i] for i in range(train_CM_size)]
        train_TN = total_train - train_TP - train_FP - train_FN  # per-class true negatives
        train_precision = train_TP / (train_TP + train_FP)  # aka Positive Predictive Value
train_recall = train_TP / (train_TP + train_FN)
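        # Accuracy is micro-averaged over all samples; precision and recall below are macro-averaged over the classes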
total_train_correct = sum(train_TP)
total_train_accuracy = total_train_correct / total_train
total_train_precision = sum(train_precision) / train_CM_size
total_train_recall = sum(train_recall) / train_CM_size
val_CM_size = len(val_conf_matrix)
total_val = sum(sum(val_conf_matrix))
val_TP = np.diagonal(val_conf_matrix)
val_FP = [sum(val_conf_matrix[:, i]) - val_TP[i] for i in range(val_CM_size)]
val_FN = [sum(val_conf_matrix[i, :]) - val_TP[i] for i in range(val_CM_size)]
        val_TN = total_val - val_TP - val_FP - val_FN  # per-class true negatives
val_precision = val_TP / (val_TP + val_FP)
val_recall = val_TP / (val_TP + val_FN)
total_val_correct = sum(val_TP)
total_val_accuracy = total_val_correct / total_val
total_val_precision = sum(val_precision) / val_CM_size
total_val_recall = sum(val_recall) / val_CM_size
################################################################################################################
# Store Accuracy Precision Recall
################################################################################################################
train_acc_summary = tf.Summary(
value=[tf.Summary.Value(tag="accuracy", simple_value=total_train_accuracy), ])
train_prec_summary = tf.Summary(
value=[tf.Summary.Value(tag="precision", simple_value=total_train_precision), ])
train_rec_summary = tf.Summary(value=[tf.Summary.Value(tag="recall", simple_value=total_train_recall), ])
val_acc_summary = tf.Summary(value=[tf.Summary.Value(tag="accuracy", simple_value=total_val_accuracy), ])
val_prec_summary = tf.Summary(
value=[tf.Summary.Value(tag="precision", simple_value=total_val_precision), ])
val_rec_summary = tf.Summary(value=[tf.Summary.Value(tag="recall", simple_value=total_val_recall), ])
writer_train.add_summary(train_acc_summary, epoch)
writer_train.add_summary(train_prec_summary, epoch)
writer_train.add_summary(train_rec_summary, epoch)
writer_val.add_summary(val_acc_summary, epoch)
writer_val.add_summary(val_prec_summary, epoch)
writer_val.add_summary(val_rec_summary, epoch)
writer_train.flush()
writer_val.flush()
################################################################################################################
# Print the confusion matrix and store important information
################################################################################################################
print(train_conf_matrix)
print(val_conf_matrix)
if best_acc < total_val_accuracy:
saver.save(sess, log_dir + "acc_best_validation_model.ckpt")
best_acc = total_val_accuracy
best_epoch = epoch
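            # Snapshot the metrics of the new best-validation-accuracy epoch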
store_info = {'epoch': best_epoch,
'train_conf_matrix': list([list(x) for x in train_conf_matrix]),
'train_accuracy': total_train_accuracy,
'train_precision': list(train_precision),
'total_train_precision': total_train_precision,
'train_recall': list(train_recall),
'total_train_recall': total_train_recall,
'val_conf_matrix': list([list(x) for x in val_conf_matrix]),
'val_accuracy': total_val_accuracy,
'val_precision': list(val_precision),
'total_val_precision': total_val_precision,
'val_recall': list(val_recall),
'total_val_recall': total_val_recall}
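        # Always keep the metrics of the most recent epoch; these are written out as the convergence results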
store_convergence_info = {'epoch': train_data.epoch,
'train_conf_matrix': list([list(x) for x in train_conf_matrix]),
'train_accuracy': total_train_accuracy,
'train_precision': list(train_precision),
'total_train_precision': total_train_precision,
'train_recall': list(train_recall),
'total_train_recall': total_train_recall,
'val_conf_matrix': list([list(x) for x in val_conf_matrix]),
'val_accuracy': total_val_accuracy,
'val_precision': list(val_precision),
'total_val_precision': total_val_precision,
'val_recall': list(val_recall),
'total_val_recall': total_val_recall}
################################################################################################################
# Get ready for the next epoch
################################################################################################################
epoch += 1
train_conf_matrix = 0
val_conf_matrix = 0
################################################################################################################
####################################################################################################################
# At the end of training, compute the test results and store the relevant information
####################################################################################################################
while test_data.epoch == 0:
sentences_batch, sentences_length_batch, targets_batch, speakers_batch = test_data.next_batch(batch_size)
preds = sess.run([g['preds']],
feed_dict={g['x']: np.array(sentences_batch),
g['y']: np.array(targets_batch).reshape(len(targets_batch)),
g['speaker']: np.array(speakers_batch),
g['seqlen']: np.array(sentences_length_batch).reshape(
len(targets_batch))})
############################################################################################################
# Calculate the Test data Confusion Matrix
############################################################################################################
test_conf_matrix += confusion_matrix(targets_batch, preds[0], labels=range(data_info['num_classes']))
################################################################################################################
# Compute Accuracy, Precision and Recall
################################################################################################################
test_CM_size = len(test_conf_matrix)
total_test = sum(sum(test_conf_matrix))
test_TP = np.diagonal(test_conf_matrix)
test_FP = [sum(test_conf_matrix[:, i]) - test_TP[i] for i in range(test_CM_size)]
test_FN = [sum(test_conf_matrix[i, :]) - test_TP[i] for i in range(test_CM_size)]
test_TN = total_test - test_TP - test_FP - test_FN  # per-class true negatives
test_precision = test_TP / (test_TP + test_FP)
test_recall = test_TP / (test_TP + test_FN)
total_test_correct = sum(test_TP)
total_test_accuracy = total_test_correct / total_test
total_test_precision = sum(test_precision) / test_CM_size
total_test_recall = sum(test_recall) / test_CM_size
################################################################################################################
# Print the confusion matrix and store important information
################################################################################################################
print(test_conf_matrix)
store_convergence_info['test_conf_matrix'] = list([list(x) for x in test_conf_matrix])
store_convergence_info['test_accuracy'] = total_test_accuracy
store_convergence_info['test_precision'] = list(test_precision)
store_convergence_info['total_test_precision'] = total_test_precision
store_convergence_info['test_recall'] = list(test_recall)
store_convergence_info['total_test_recall'] = total_test_recall
# trick to be able to save numpy.int64 into json
def default(o):
if isinstance(o, np.int64): return int(o)
raise TypeError
with open(log_dir + 'convergence_results.p', 'w') as file:
json.dump(store_convergence_info, file, default=default, indent=4)
saver.save(sess, log_dir + "convergence_model.ckpt")
####################################################################################################################
# At the end of training, compute the test results of the best validation model and store the relevant information
####################################################################################################################
saver.restore(sess, log_dir + "acc_best_validation_model.ckpt")
test_conf_matrix = 0
while test_data.epoch == 1:
sentences_batch, sentences_length_batch, targets_batch, speakers_batch = test_data.next_batch(batch_size)
preds = sess.run([g['preds']],
feed_dict={g['x']: np.array(sentences_batch),
g['y']: np.array(targets_batch).reshape(len(targets_batch)),
g['speaker']: np.array(speakers_batch),
g['seqlen']: np.array(sentences_length_batch).reshape(
len(targets_batch))})
############################################################################################################
# Calculate the Test data Confusion Matrix
############################################################################################################
test_conf_matrix += confusion_matrix(targets_batch, preds[0], labels=range(data_info['num_classes']))
################################################################################################################
# Compute Accuracy, Precision and Recall
################################################################################################################
test_CM_size = len(test_conf_matrix)
total_test = sum(sum(test_conf_matrix))
test_TP = np.diagonal(test_conf_matrix)
test_FP = [sum(test_conf_matrix[:, i]) - test_TP[i] for i in range(test_CM_size)]
test_FN = [sum(test_conf_matrix[i, :]) - test_TP[i] for i in range(test_CM_size)]
test_TN = total_test - test_TP - test_FP - test_FN  # per-class true negatives
test_precision = test_TP / (test_TP + test_FP)
test_recall = test_TP / (test_TP + test_FN)
total_test_correct = sum(test_TP)
total_test_accuracy = total_test_correct / total_test
total_test_precision = sum(test_precision) / test_CM_size
total_test_recall = sum(test_recall) / test_CM_size
################################################################################################################
# Print the confusion matrix and store important information
################################################################################################################
print(test_conf_matrix)
store_info['test_conf_matrix'] = list([list(x) for x in test_conf_matrix])
store_info['test_accuracy'] = total_test_accuracy
store_info['test_precision'] = list(test_precision)
store_info['total_test_precision'] = total_test_precision
store_info['test_recall'] = list(test_recall)
store_info['total_test_recall'] = total_test_recall
with open(log_dir + 'acc_best_validation_results.p', 'w') as file:
json.dump(store_info, file, default=default, indent=4)
|
[
"tensorflow.local_variables_initializer",
"numpy.diagonal",
"os.path.exists",
"os.makedirs",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"IEOMAP_dataset_AC.dataset",
"os.path.dirname",
"IEOMAP_dataset_AC.IeomapSentenceIterator",
"numpy.array",
"tensorflow.Summary.Value",
"models_AC.SentenceModel",
"tensorflow.ConfigProto",
"tensorflow.summary.scalar",
"tensorflow.summary.FileWriter",
"json.dump"
] |
[((280, 425), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'intra_op_parallelism_threads': '(4)', 'inter_op_parallelism_threads': '(4)', 'device_count': "{'CPU': 1, 'GPU': 0}", 'allow_soft_placement': '(True)'}), "(intra_op_parallelism_threads=4, inter_op_parallelism_threads\n =4, device_count={'CPU': 1, 'GPU': 0}, allow_soft_placement=True)\n", (294, 425), True, 'import tensorflow as tf\n'), ((1837, 1914), 'IEOMAP_dataset_AC.dataset', 'dataset', ([], {'mode': '"""sentences"""', 'embedding': 'embedding', 'embedding_size': 'embedding_size'}), "(mode='sentences', embedding=embedding, embedding_size=embedding_size)\n", (1844, 1914), False, 'from IEOMAP_dataset_AC import dataset, IeomapSentenceIterator\n'), ((1933, 2033), 'IEOMAP_dataset_AC.IeomapSentenceIterator', 'IeomapSentenceIterator', (['sentences[0]', 'targets[0]', "data_info['sentences_length'][0]", 'speakers[0]'], {}), "(sentences[0], targets[0], data_info[\n 'sentences_length'][0], speakers[0])\n", (1955, 2033), False, 'from IEOMAP_dataset_AC import dataset, IeomapSentenceIterator\n'), ((2044, 2144), 'IEOMAP_dataset_AC.IeomapSentenceIterator', 'IeomapSentenceIterator', (['sentences[1]', 'targets[1]', "data_info['sentences_length'][1]", 'speakers[1]'], {}), "(sentences[1], targets[1], data_info[\n 'sentences_length'][1], speakers[1])\n", (2066, 2144), False, 'from IEOMAP_dataset_AC import dataset, IeomapSentenceIterator\n'), ((2156, 2256), 'IEOMAP_dataset_AC.IeomapSentenceIterator', 'IeomapSentenceIterator', (['sentences[2]', 'targets[2]', "data_info['sentences_length'][2]", 'speakers[2]'], {}), "(sentences[2], targets[2], data_info[\n 'sentences_length'][2], speakers[2])\n", (2178, 2256), False, 'from IEOMAP_dataset_AC import dataset, IeomapSentenceIterator\n'), ((2538, 2776), 'models_AC.SentenceModel', 'SentenceModel', ([], {'vocab_size': "(data_info['vocabulary_size'] + 1)", 'embedding_size': 'embedding_size', 'first_rnn_size': 'first_rnn_size', 'num_classes': "data_info['num_classes']", 'dropout': 'dropout', 'embedding': 'embedding', 'num_speakers': 'num_speakers'}), "(vocab_size=data_info['vocabulary_size'] + 1, embedding_size=\n embedding_size, first_rnn_size=first_rnn_size, num_classes=data_info[\n 'num_classes'], dropout=dropout, embedding=embedding, num_speakers=\n num_speakers)\n", (2551, 2776), False, 'from models_AC import SentenceModel\n'), ((3308, 3332), 'os.path.dirname', 'os.path.dirname', (['log_dir'], {}), '(log_dir)\n', (3323, 3332), False, 'import os\n'), ((3795, 3820), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (3805, 3820), True, 'import tensorflow as tf\n'), ((3928, 3944), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (3942, 3944), True, 'import tensorflow as tf\n'), ((4369, 4417), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['train_log_dir', 'sess.graph'], {}), '(train_log_dir, sess.graph)\n', (4390, 4417), True, 'import tensorflow as tf\n'), ((4435, 4469), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['val_log_dir'], {}), '(val_log_dir)\n', (4456, 4469), True, 'import tensorflow as tf\n'), ((4489, 4519), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[]'], {}), '(tf.float32, [])\n', (4503, 4519), True, 'import tensorflow as tf\n'), ((4539, 4569), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[]'], {}), '(tf.float32, [])\n', (4553, 4569), True, 'import tensorflow as tf\n'), ((4586, 4616), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[]'], {}), '(tf.float32, [])\n', (4600, 
4616), True, 'import tensorflow as tf\n'), ((4635, 4677), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""accuracy"""', 'accuracy_tf'], {}), "('accuracy', accuracy_tf)\n", (4652, 4677), True, 'import tensorflow as tf\n'), ((4695, 4739), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""precision"""', 'precision_tf'], {}), "('precision', precision_tf)\n", (4712, 4739), True, 'import tensorflow as tf\n'), ((4757, 4795), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""recall"""', 'recall_tf'], {}), "('recall', recall_tf)\n", (4774, 4795), True, 'import tensorflow as tf\n'), ((15755, 15784), 'numpy.diagonal', 'np.diagonal', (['test_conf_matrix'], {}), '(test_conf_matrix)\n', (15766, 15784), True, 'import numpy as np\n'), ((19245, 19274), 'numpy.diagonal', 'np.diagonal', (['test_conf_matrix'], {}), '(test_conf_matrix)\n', (19256, 19274), True, 'import numpy as np\n'), ((3344, 3367), 'os.path.exists', 'os.path.exists', (['dirname'], {}), '(dirname)\n', (3358, 3367), False, 'import os\n'), ((3377, 3397), 'os.makedirs', 'os.makedirs', (['dirname'], {}), '(dirname)\n', (3388, 3397), False, 'import os\n'), ((3462, 3500), 'json.dump', 'json.dump', (['model_setup', 'file'], {'indent': '(4)'}), '(model_setup, file, indent=4)\n', (3471, 3500), False, 'import json\n'), ((3834, 3867), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3865, 3867), True, 'import tensorflow as tf\n'), ((3882, 3914), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (3912, 3914), True, 'import tensorflow as tf\n'), ((17307, 17373), 'json.dump', 'json.dump', (['store_convergence_info', 'file'], {'default': 'default', 'indent': '(4)'}), '(store_convergence_info, file, default=default, indent=4)\n', (17316, 17373), False, 'import json\n'), ((20585, 20639), 'json.dump', 'json.dump', (['store_info', 'file'], {'default': 'default', 'indent': '(4)'}), '(store_info, file, default=default, indent=4)\n', (20594, 20639), False, 'import json\n'), ((8039, 8069), 'numpy.diagonal', 'np.diagonal', (['train_conf_matrix'], {}), '(train_conf_matrix)\n', (8050, 8069), True, 'import numpy as np\n'), ((8864, 8892), 'numpy.diagonal', 'np.diagonal', (['val_conf_matrix'], {}), '(val_conf_matrix)\n', (8875, 8892), True, 'import numpy as np\n'), ((5400, 5425), 'numpy.array', 'np.array', (['sentences_batch'], {}), '(sentences_batch)\n', (5408, 5425), True, 'import numpy as np\n'), ((5580, 5604), 'numpy.array', 'np.array', (['speakers_batch'], {}), '(speakers_batch)\n', (5588, 5604), True, 'import numpy as np\n'), ((14627, 14652), 'numpy.array', 'np.array', (['sentences_batch'], {}), '(sentences_batch)\n', (14635, 14652), True, 'import numpy as np\n'), ((14801, 14825), 'numpy.array', 'np.array', (['speakers_batch'], {}), '(speakers_batch)\n', (14809, 14825), True, 'import numpy as np\n'), ((18117, 18142), 'numpy.array', 'np.array', (['sentences_batch'], {}), '(sentences_batch)\n', (18125, 18142), True, 'import numpy as np\n'), ((18291, 18315), 'numpy.array', 'np.array', (['speakers_batch'], {}), '(speakers_batch)\n', (18299, 18315), True, 'import numpy as np\n'), ((9841, 9908), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""accuracy"""', 'simple_value': 'total_train_accuracy'}), "(tag='accuracy', simple_value=total_train_accuracy)\n", (9857, 9908), True, 'import tensorflow as tf\n'), ((9981, 10050), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""precision"""', 'simple_value': 'total_train_precision'}), 
"(tag='precision', simple_value=total_train_precision)\n", (9997, 10050), True, 'import tensorflow as tf\n'), ((10105, 10168), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""recall"""', 'simple_value': 'total_train_recall'}), "(tag='recall', simple_value=total_train_recall)\n", (10121, 10168), True, 'import tensorflow as tf\n'), ((10222, 10287), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""accuracy"""', 'simple_value': 'total_val_accuracy'}), "(tag='accuracy', simple_value=total_val_accuracy)\n", (10238, 10287), True, 'import tensorflow as tf\n'), ((10358, 10425), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""precision"""', 'simple_value': 'total_val_precision'}), "(tag='precision', simple_value=total_val_precision)\n", (10374, 10425), True, 'import tensorflow as tf\n'), ((10478, 10539), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""recall"""', 'simple_value': 'total_val_recall'}), "(tag='recall', simple_value=total_val_recall)\n", (10494, 10539), True, 'import tensorflow as tf\n'), ((5474, 5497), 'numpy.array', 'np.array', (['targets_batch'], {}), '(targets_batch)\n', (5482, 5497), True, 'import numpy as np\n'), ((5658, 5690), 'numpy.array', 'np.array', (['sentences_length_batch'], {}), '(sentences_length_batch)\n', (5666, 5690), True, 'import numpy as np\n'), ((6795, 6820), 'numpy.array', 'np.array', (['sentences_batch'], {}), '(sentences_batch)\n', (6803, 6820), True, 'import numpy as np\n'), ((6985, 7009), 'numpy.array', 'np.array', (['speakers_batch'], {}), '(speakers_batch)\n', (6993, 7009), True, 'import numpy as np\n'), ((14698, 14721), 'numpy.array', 'np.array', (['targets_batch'], {}), '(targets_batch)\n', (14706, 14721), True, 'import numpy as np\n'), ((14876, 14908), 'numpy.array', 'np.array', (['sentences_length_batch'], {}), '(sentences_length_batch)\n', (14884, 14908), True, 'import numpy as np\n'), ((18188, 18211), 'numpy.array', 'np.array', (['targets_batch'], {}), '(targets_batch)\n', (18196, 18211), True, 'import numpy as np\n'), ((18366, 18398), 'numpy.array', 'np.array', (['sentences_length_batch'], {}), '(sentences_length_batch)\n', (18374, 18398), True, 'import numpy as np\n'), ((6874, 6897), 'numpy.array', 'np.array', (['targets_batch'], {}), '(targets_batch)\n', (6882, 6897), True, 'import numpy as np\n'), ((7068, 7100), 'numpy.array', 'np.array', (['sentences_length_batch'], {}), '(sentences_length_batch)\n', (7076, 7100), True, 'import numpy as np\n')]
|
# Copyright 2017 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the ProjectQ Simulator backend, run against the Qrack simulator
("qrack_simulator_qengine" and "qrack_simulator_qunit") as backends.
"""
import copy
import math
import cmath
import numpy
import pytest
import random
import scipy
import scipy.sparse
import scipy.sparse.linalg
from projectq import MainEngine
from projectq.cengines import (BasicEngine, BasicMapperEngine, DummyEngine,
LocalOptimizer, NotYetMeasuredError)
from projectq.ops import (All, Allocate, BasicGate, BasicMathGate, CNOT, C,
Command, H, Measure, QubitOperator, Rx, Ry, Rz, S,
TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap,
UniformlyControlledRy, UniformlyControlledRz)
from projectq.libs.math import (AddConstant,
AddConstantModN,
SubConstant,
SubConstantModN,
MultiplyByConstantModN)
from projectq.meta import Compute, Uncompute, Control, Dagger, LogicalQubitIDTag
from projectq.types import WeakQubitRef
from projectq.backends import Simulator
tolerance = 1e-6
def test_is_qrack_simulator_present():
_qracksim = pytest.importorskip("projectq.backends._qracksim._qracksim")
import projectq.backends._qracksim._qracksim as _
def get_available_simulators():
result = []
try:
test_is_qrack_simulator_present()
result.append("qrack_simulator_qengine")
result.append("qrack_simulator_qunit")
except:
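        # Qrack backend is not installed; leave the list empty so no simulator fixture is generated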
pass
return result
@pytest.fixture(params=get_available_simulators())
def sim(request):
if request.param == "qrack_simulator_qengine":
from projectq.backends._qracksim._qracksim import QrackSimulator as QrackSim
sim = Simulator()
sim._simulator = QrackSim(1, -1, 1)
elif request.param == "qrack_simulator_qunit":
from projectq.backends._qracksim._qracksim import QrackSimulator as QrackSim
sim = Simulator()
sim._simulator = QrackSim(1, -1, 2)
return sim
@pytest.fixture(params=["mapper", "no_mapper"])
def mapper(request):
"""
Adds a mapper which changes qubit ids by adding 1
"""
if request.param == "mapper":
class TrivialMapper(BasicMapperEngine):
def __init__(self):
BasicEngine.__init__(self)
self.current_mapping = dict()
def receive(self, command_list):
for cmd in command_list:
for qureg in cmd.all_qubits:
for qubit in qureg:
if qubit.id == -1:
continue
elif qubit.id not in self.current_mapping:
previous_map = self.current_mapping
previous_map[qubit.id] = qubit.id + 1
self.current_mapping = previous_map
self._send_cmd_with_mapped_ids(cmd)
return TrivialMapper()
if request.param == "no_mapper":
return None
class Mock1QubitGate(BasicGate):
def __init__(self):
BasicGate.__init__(self)
self.cnt = 0
@property
def matrix(self):
self.cnt += 1
return numpy.matrix([[0, 1],
[1, 0]])
class Mock6QubitGate(BasicGate):
def __init__(self):
BasicGate.__init__(self)
self.cnt = 0
@property
def matrix(self):
self.cnt += 1
return numpy.eye(2 ** 6)
class MockNoMatrixGate(BasicGate):
def __init__(self):
BasicGate.__init__(self)
self.cnt = 0
@property
def matrix(self):
self.cnt += 1
raise AttributeError
def test_simulator_is_available(sim):
backend = DummyEngine(save_commands=True)
eng = MainEngine(backend, [])
qubit = eng.allocate_qubit()
Measure | qubit
qubit[0].__del__()
assert len(backend.received_commands) == 3
# Test that allocate, measure, basic math, and deallocate are available.
for cmd in backend.received_commands:
assert sim.is_available(cmd)
new_cmd = backend.received_commands[-1]
new_cmd.gate = Mock6QubitGate()
assert not sim.is_available(new_cmd)
new_cmd.gate = MockNoMatrixGate()
assert not sim.is_available(new_cmd)
new_cmd.gate = Mock1QubitGate()
assert sim.is_available(new_cmd)
new_cmd = backend.received_commands[-2]
assert len(new_cmd.qubits) == 1
new_cmd.gate = AddConstantModN(1, 2)
assert sim.is_available(new_cmd)
new_cmd.gate = MultiplyByConstantModN(1, 2)
assert sim.is_available(new_cmd)
#new_cmd.gate = DivideByConstantModN(1, 2)
#assert sim.is_available(new_cmd)
def test_simulator_cheat(sim):
# cheat function should return a tuple
assert isinstance(sim.cheat(), tuple)
# first entry is the qubit mapping.
# should be empty:
assert len(sim.cheat()[0]) == 0
# state vector should only have 1 entry:
assert len(sim.cheat()[1]) == 1
eng = MainEngine(sim, [])
qubit = eng.allocate_qubit()
# one qubit has been allocated
assert len(sim.cheat()[0]) == 1
assert sim.cheat()[0][0] == 0
assert len(sim.cheat()[1]) == 2
assert 1. == pytest.approx(abs(sim.cheat()[1][0]))
qubit[0].__del__()
# should be empty:
assert len(sim.cheat()[0]) == 0
# state vector should only have 1 entry:
assert len(sim.cheat()[1]) == 1
def test_simulator_functional_measurement(sim):
eng = MainEngine(sim, [])
qubits = eng.allocate_qureg(5)
# entangle all qubits:
H | qubits[0]
for qb in qubits[1:]:
CNOT | (qubits[0], qb)
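    # All qubits are now perfectly correlated (GHZ state), so the measured bits are either all 0 or all 1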
All(Measure) | qubits
bit_value_sum = sum([int(qubit) for qubit in qubits])
assert bit_value_sum == 0 or bit_value_sum == 5
def test_simulator_measure_mapped_qubit(sim):
eng = MainEngine(sim, [])
qb1 = WeakQubitRef(engine=eng, idx=1)
qb2 = WeakQubitRef(engine=eng, idx=2)
cmd0 = Command(engine=eng, gate=Allocate, qubits=([qb1],))
cmd1 = Command(engine=eng, gate=X, qubits=([qb1],))
cmd2 = Command(engine=eng, gate=Measure, qubits=([qb1],), controls=[],
tags=[LogicalQubitIDTag(2)])
with pytest.raises(NotYetMeasuredError):
int(qb1)
with pytest.raises(NotYetMeasuredError):
int(qb2)
eng.send([cmd0, cmd1, cmd2])
eng.flush()
with pytest.raises(NotYetMeasuredError):
int(qb1)
assert int(qb2) == 1
def test_simulator_kqubit_exception(sim):
m1 = Rx(0.3).matrix
m2 = Rx(0.8).matrix
m3 = Ry(0.1).matrix
m4 = Rz(0.9).matrix.dot(Ry(-0.1).matrix)
m = numpy.kron(m4, numpy.kron(m3, numpy.kron(m2, m1)))
class KQubitGate(BasicGate):
@property
def matrix(self):
return m
eng = MainEngine(sim, [])
qureg = eng.allocate_qureg(3)
with pytest.raises(Exception):
KQubitGate() | qureg
with pytest.raises(Exception):
H | qureg
def test_simulator_swap(sim):
eng = MainEngine(sim, [])
qubits1 = eng.allocate_qureg(1)
qubits2 = eng.allocate_qureg(1)
X | qubits1
Swap | (qubits1, qubits2)
All(Measure) | qubits1
All(Measure) | qubits2
assert (int(qubits1[0]) == 0) and (int(qubits2[0]) == 1)
SqrtSwap | (qubits1, qubits2)
SqrtSwap | (qubits1, qubits2)
All(Measure) | qubits1
All(Measure) | qubits2
assert (int(qubits1[0]) == 1) and (int(qubits2[0]) == 0)
def test_simulator_math(sim):
eng = MainEngine(sim, [])
qubits = eng.allocate_qureg(8)
    AddConstant(1) | qubits
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 1
    AddConstantModN(10, 256) | qubits
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 11
controls = eng.allocate_qureg(1)
# Control is off
C(AddConstantModN(10, 256)) | (controls, qubits)
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 11
# Turn control on
X | controls
C(AddConstantModN(10, 256)) | (controls, qubits)
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 21
    SubConstant(5) | qubits
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 16
C(SubConstantModN(10, 256)) | (controls, qubits)
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 6
# Turn control off
X | controls
C(SubConstantModN(10, 256)) | (controls, qubits)
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 6
    MultiplyByConstantModN(2, 256) | qubits
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 12
# Control is off
C(MultiplyByConstantModN(2, 256)) | (controls, qubits)
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 12
# Turn control on
X | controls
C(MultiplyByConstantModN(10, 256)) | (controls, qubits)
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 120
def test_simulator_probability(sim, mapper):
engine_list = [LocalOptimizer()]
if mapper is not None:
engine_list.append(mapper)
eng = MainEngine(sim, engine_list=engine_list)
qubits = eng.allocate_qureg(6)
All(H) | qubits
eng.flush()
bits = [0, 0, 1, 0, 1, 0]
for i in range(6):
assert (eng.backend.get_probability(bits[:i], qubits[:i]) ==
pytest.approx(0.5**i))
extra_qubit = eng.allocate_qubit()
with pytest.raises(RuntimeError):
eng.backend.get_probability([0], extra_qubit)
del extra_qubit
All(H) | qubits
Ry(2 * math.acos(math.sqrt(0.3))) | qubits[0]
eng.flush()
assert eng.backend.get_probability([0], [qubits[0]]) == pytest.approx(0.3)
Ry(2 * math.acos(math.sqrt(0.4))) | qubits[2]
eng.flush()
assert eng.backend.get_probability([0], [qubits[2]]) == pytest.approx(0.4)
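    # qubits[0] and qubits[2] are unentangled, so joint probabilities factorize: 0.3*0.4 = 0.12, 0.3*0.6 = 0.18, 0.7*0.4 = 0.28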
assert (numpy.isclose(0.12, eng.backend.get_probability([0, 0], qubits[:3:2]), rtol=tolerance, atol=tolerance))
assert (numpy.isclose(0.18, eng.backend.get_probability([0, 1], qubits[:3:2]), rtol=tolerance, atol=tolerance))
assert (numpy.isclose(0.28, eng.backend.get_probability([1, 0], qubits[:3:2]), rtol=tolerance, atol=tolerance))
All(Measure) | qubits
def test_simulator_amplitude(sim, mapper):
engine_list = [LocalOptimizer()]
if mapper is not None:
engine_list.append(mapper)
eng = MainEngine(sim, engine_list=engine_list)
qubits = eng.allocate_qureg(6)
All(X) | qubits
All(H) | qubits
eng.flush()
bits = [0, 0, 1, 0, 1, 0]
polR, polPhi = cmath.polar(eng.backend.get_amplitude(bits, qubits))
while polPhi < 0:
polPhi += 2 * math.pi
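    # X followed by H on all 6 qubits gives a uniform-magnitude superposition: |amplitude| = 2**-3 = 1/8 for every basis state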
assert polR == pytest.approx(1. / 8.)
bits = [0, 0, 0, 0, 1, 0]
polR2, polPhi2 = cmath.polar(eng.backend.get_amplitude(bits, qubits))
while polPhi2 < math.pi:
polPhi2 += 2 * math.pi
assert polR2 == pytest.approx(polR)
assert (polPhi2 - math.pi) == pytest.approx(polPhi)
bits = [0, 1, 1, 0, 1, 0]
polR3, polPhi3 = cmath.polar(eng.backend.get_amplitude(bits, qubits))
while polPhi3 < math.pi:
polPhi3 += 2 * math.pi
assert polR3 == pytest.approx(polR)
assert (polPhi3 - math.pi) == pytest.approx(polPhi)
All(H) | qubits
All(X) | qubits
Ry(2 * math.acos(0.3)) | qubits[0]
eng.flush()
bits = [0] * 6
polR, polPhi = cmath.polar(eng.backend.get_amplitude(bits, qubits))
assert polR == pytest.approx(0.3)
bits[0] = 1
polR, polPhi = cmath.polar(eng.backend.get_amplitude(bits, qubits))
assert (polR ==
pytest.approx(math.sqrt(0.91)))
All(Measure) | qubits
# raises if not all qubits are in the list:
with pytest.raises(RuntimeError):
eng.backend.get_amplitude(bits, qubits[:-1])
# doesn't just check for length:
with pytest.raises(RuntimeError):
eng.backend.get_amplitude(bits, qubits[:-1] + [qubits[0]])
extra_qubit = eng.allocate_qubit()
eng.flush()
# there is a new qubit now!
with pytest.raises(RuntimeError):
eng.backend.get_amplitude(bits, qubits)
def test_simulator_set_wavefunction(sim, mapper):
engine_list = [LocalOptimizer()]
if mapper is not None:
engine_list.append(mapper)
eng = MainEngine(sim, engine_list=engine_list)
qubits = eng.allocate_qureg(2)
wf = [0., 0., math.sqrt(0.2), math.sqrt(0.8)]
with pytest.raises(RuntimeError):
eng.backend.set_wavefunction(wf, qubits)
eng.flush()
eng.backend.set_wavefunction(wf, qubits)
assert pytest.approx(eng.backend.get_probability('1', [qubits[0]])) == .8
assert pytest.approx(eng.backend.get_probability('01', qubits)) == .2
assert pytest.approx(eng.backend.get_probability('1', [qubits[1]])) == 1.
All(Measure) | qubits
def test_simulator_set_wavefunction_always_complex(sim):
""" Checks that wavefunction is always complex """
eng = MainEngine(sim)
qubit = eng.allocate_qubit()
eng.flush()
wf = [1., 0]
eng.backend.set_wavefunction(wf, qubit)
Y | qubit
eng.flush()
amplitude = eng.backend.get_amplitude('1', qubit)
assert amplitude == pytest.approx(1j) or amplitude == pytest.approx(-1j)
def test_simulator_collapse_wavefunction(sim, mapper):
engine_list = [LocalOptimizer()]
if mapper is not None:
engine_list.append(mapper)
eng = MainEngine(sim, engine_list=engine_list)
qubits = eng.allocate_qureg(4)
# unknown qubits: raises
with pytest.raises(RuntimeError):
eng.backend.collapse_wavefunction(qubits, [0] * 4)
eng.flush()
eng.backend.collapse_wavefunction(qubits, [0] * 4)
assert pytest.approx(eng.backend.get_probability([0] * 4, qubits)) == 1.
All(H) | qubits[1:]
eng.flush()
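    # Qubits 1-3 are each in a uniform superposition, so P(0000) = 1 * (1/2)**3 = 0.125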
assert pytest.approx(eng.backend.get_probability([0] * 4, qubits)) == .125
# impossible outcome: raises
with pytest.raises(RuntimeError):
eng.backend.collapse_wavefunction(qubits, [1] + [0] * 3)
eng.backend.collapse_wavefunction(qubits[:-1], [0, 1, 0])
probability = eng.backend.get_probability([0, 1, 0, 1], qubits)
assert probability == pytest.approx(.5)
eng.backend.set_wavefunction([1.] + [0.] * 15, qubits)
H | qubits[0]
CNOT | (qubits[0], qubits[1])
eng.flush()
eng.backend.collapse_wavefunction([qubits[0]], [1])
probability = eng.backend.get_probability([1, 1], qubits[0:2])
assert probability == pytest.approx(1.)
def test_simulator_no_uncompute_exception(sim):
eng = MainEngine(sim, [])
qubit = eng.allocate_qubit()
H | qubit
with pytest.raises(RuntimeError):
qubit[0].__del__()
# If you wanted to keep using the qubit, you shouldn't have deleted it.
assert qubit[0].id == -1
def test_simulator_functional_entangle(sim):
eng = MainEngine(sim, [])
qubits = eng.allocate_qureg(5)
# entangle all qubits:
H | qubits[0]
for qb in qubits[1:]:
CNOT | (qubits[0], qb)
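    # The 5 qubits now form a GHZ state (|00000> + |11111>)/sqrt(2)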
# check the state vector:
assert .5 == pytest.approx(abs(sim.cheat()[1][0])**2, rel=tolerance, abs=tolerance)
assert .5 == pytest.approx(abs(sim.cheat()[1][31])**2, rel=tolerance, abs=tolerance)
for i in range(1, 31):
assert 0. == pytest.approx(abs(sim.cheat()[1][i]), rel=tolerance, abs=tolerance)
# unentangle all except the first 2
for qb in qubits[2:]:
CNOT | (qubits[0], qb)
# entangle using Toffolis
for qb in qubits[2:]:
Toffoli | (qubits[0], qubits[1], qb)
# check the state vector:
assert .5 == pytest.approx(abs(sim.cheat()[1][0])**2, rel=tolerance, abs=tolerance)
assert .5 == pytest.approx(abs(sim.cheat()[1][31])**2, rel=tolerance, abs=tolerance)
for i in range(1, 31):
assert 0. == pytest.approx(abs(sim.cheat()[1][i]), rel=tolerance, abs=tolerance)
# uncompute using multi-controlled NOTs
with Control(eng, qubits[0:-1]):
X | qubits[-1]
with Control(eng, qubits[0:-2]):
X | qubits[-2]
with Control(eng, qubits[0:-3]):
X | qubits[-3]
CNOT | (qubits[0], qubits[1])
H | qubits[0]
# check the state vector:
assert 1. == pytest.approx(abs(sim.cheat()[1][0])**2, rel=tolerance, abs=tolerance)
for i in range(1, 32):
assert 0. == pytest.approx(abs(sim.cheat()[1][i]), rel=tolerance, abs=tolerance)
All(Measure) | qubits
def test_simulator_convert_logical_to_mapped_qubits(sim):
mapper = BasicMapperEngine()
def receive(command_list):
pass
mapper.receive = receive
eng = MainEngine(sim, [mapper])
qubit0 = eng.allocate_qubit()
qubit1 = eng.allocate_qubit()
mapper.current_mapping = {qubit0[0].id: qubit1[0].id,
qubit1[0].id: qubit0[0].id}
assert (sim._convert_logical_to_mapped_qureg(qubit0 + qubit1) ==
qubit1 + qubit0)
def slow_implementation(angles, control_qubits, target_qubit, eng, gate_class):
"""
Assumption is that control_qubits[0] is lowest order bit
We apply angles[0] to state |0>
"""
assert len(angles) == 2**len(control_qubits)
for index in range(2**len(control_qubits)):
with Compute(eng):
for bit_pos in range(len(control_qubits)):
if not (index >> bit_pos) & 1:
X | control_qubits[bit_pos]
with Control(eng, control_qubits):
gate_class(angles[index]) | target_qubit
Uncompute(eng)
@pytest.mark.parametrize("gate_classes", [(Ry, UniformlyControlledRy),
(Rz, UniformlyControlledRz)])
def test_uniformly_controlled_r(sim, gate_classes):
n = 2
random_angles = [3.0, 0.8, 1.2, 0.7]
basis_state_index = 2
basis_state = [0] * 2**(n+1)
basis_state[basis_state_index] = 1.
correct_eng = MainEngine(backend=Simulator())
test_eng = MainEngine(backend=sim)
correct_sim = correct_eng.backend
correct_qb = correct_eng.allocate_qubit()
correct_ctrl_qureg = correct_eng.allocate_qureg(n)
correct_eng.flush()
test_sim = test_eng.backend
test_qb = test_eng.allocate_qubit()
test_ctrl_qureg = test_eng.allocate_qureg(n)
test_eng.flush()
correct_sim.set_wavefunction(basis_state, correct_qb + correct_ctrl_qureg)
test_sim.set_wavefunction(basis_state, test_qb + test_ctrl_qureg)
test_eng.flush()
correct_eng.flush()
gate_classes[1](random_angles) | (test_ctrl_qureg, test_qb)
slow_implementation(angles=random_angles,
control_qubits=correct_ctrl_qureg,
target_qubit=correct_qb,
eng=correct_eng,
gate_class=gate_classes[0])
test_eng.flush()
correct_eng.flush()
for fstate in range(2**(n+1)):
binary_state = format(fstate, '0' + str(n+1) + 'b')
test = test_sim.get_amplitude(binary_state,
test_qb + test_ctrl_qureg)
correct = correct_sim.get_amplitude(binary_state, correct_qb +
correct_ctrl_qureg)
print(test, "==", correct)
assert correct == pytest.approx(test, rel=tolerance, abs=tolerance)
All(Measure) | test_qb + test_ctrl_qureg
All(Measure) | correct_qb + correct_ctrl_qureg
test_eng.flush(deallocate_qubits=True)
correct_eng.flush(deallocate_qubits=True)
def test_qubit_operator(sim):
test_eng = MainEngine(sim)
test_qureg = test_eng.allocate_qureg(1)
test_eng.flush()
qubit_op = QubitOperator("X0 X1", 1)
with pytest.raises(Exception):
sim.get_expectation_value(qubit_op, test_qureg)
test_eng.backend.set_wavefunction([1, 0],
test_qureg)
test_eng.flush()
qubit_op = QubitOperator("X0", 1)
qubit_op | test_qureg[0]
test_eng.flush()
amplitude = test_eng.backend.get_amplitude('0', test_qureg)
assert amplitude == pytest.approx(0.)
amplitude = test_eng.backend.get_amplitude('1', test_qureg)
assert amplitude == pytest.approx(1.)
def test_get_expectation_value(sim):
num_qubits = 2
test_eng = MainEngine(sim)
test_qureg = test_eng.allocate_qureg(num_qubits)
test_eng.flush()
qubit_op = QubitOperator("X0 X1 X2", 1)
with pytest.raises(Exception):
sim.get_expectation_value(qubit_op, test_qureg)
qubit_op = QubitOperator("X0", 1)
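    # (|0> + |1>)/sqrt(2) is the +1 eigenstate of X0; (|0> - |1>)/sqrt(2) is the -1 eigenstate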
test_eng.backend.set_wavefunction([1 / math.sqrt(2), 1 / math.sqrt(2), 0, 0],
test_qureg)
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(1, rel=tolerance, abs=tolerance))
test_eng.backend.set_wavefunction([1 / math.sqrt(2), -1 / math.sqrt(2), 0, 0],
test_qureg)
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(-1, rel=tolerance, abs=tolerance))
qubit_op = QubitOperator("Y0", 1)
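    # (|0> + 1j|1>)/sqrt(2) is the +1 eigenstate of Y0; (|0> - 1j|1>)/sqrt(2) is the -1 eigenstate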
test_eng.backend.set_wavefunction([1 / math.sqrt(2), 1j / math.sqrt(2), 0, 0],
test_qureg)
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(1, rel=tolerance, abs=tolerance))
test_eng.backend.set_wavefunction([1 / math.sqrt(2), -1j / math.sqrt(2), 0, 0],
test_qureg)
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(-1, rel=tolerance, abs=tolerance))
qubit_op = QubitOperator("Z0", 1)
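    # |0> is the +1 eigenstate of Z0 and |1> is the -1 eigenstate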
test_eng.backend.set_wavefunction([1, 0, 0, 0],
test_qureg)
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(1, rel=tolerance, abs=tolerance))
test_eng.backend.set_wavefunction([0, 1, 0, 0],
test_qureg)
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(-1, rel=tolerance, abs=tolerance))
qubit_op = QubitOperator("Z0", 0.25)
test_eng.backend.set_wavefunction([1, 0, 0, 0],
test_qureg)
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(0.25, rel=tolerance, abs=tolerance))
test_eng.backend.set_wavefunction([0, 1, 0, 0],
test_qureg)
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(-0.25, rel=tolerance, abs=tolerance))
qubit_op = QubitOperator("Z0 Z1", 1)
test_eng.backend.set_wavefunction([1, 0, 0, 0],
test_qureg)
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(1, rel=tolerance, abs=tolerance))
X | test_qureg[0]
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(-1, rel=tolerance, abs=tolerance))
X | test_qureg[1]
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(1, rel=tolerance, abs=tolerance))
X | test_qureg[0]
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(-1, rel=tolerance, abs=tolerance))
|
[
"projectq.types.WeakQubitRef",
"math.acos",
"projectq.MainEngine",
"projectq.cengines.DummyEngine",
"math.sqrt",
"projectq.ops.All",
"pytest.fixture",
"projectq.libs.math.SubConstantModN",
"projectq.libs.math.AddConstantModN",
"projectq.ops.Rx",
"projectq.libs.math.SubConstant",
"numpy.eye",
"projectq.meta.Compute",
"projectq.ops.Ry",
"projectq.meta.LogicalQubitIDTag",
"projectq.cengines.BasicMapperEngine",
"numpy.kron",
"projectq.libs.math.AddConstant",
"pytest.raises",
"projectq.libs.math.MultiplyByConstantModN",
"projectq.meta.Control",
"projectq.ops.Rz",
"projectq.backends._qracksim._qracksim.QrackSimulator",
"pytest.approx",
"projectq.ops.QubitOperator",
"projectq.ops.Command",
"projectq.cengines.BasicEngine.__init__",
"projectq.cengines.LocalOptimizer",
"projectq.backends.Simulator",
"pytest.mark.parametrize",
"pytest.importorskip",
"projectq.ops.BasicGate.__init__",
"numpy.matrix",
"projectq.meta.Uncompute"
] |
[((2688, 2734), 'pytest.fixture', 'pytest.fixture', ([], {'params': "['mapper', 'no_mapper']"}), "(params=['mapper', 'no_mapper'])\n", (2702, 2734), False, 'import pytest\n'), ((18744, 18847), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""gate_classes"""', '[(Ry, UniformlyControlledRy), (Rz, UniformlyControlledRz)]'], {}), "('gate_classes', [(Ry, UniformlyControlledRy), (Rz,\n UniformlyControlledRz)])\n", (18767, 18847), False, 'import pytest\n'), ((1832, 1892), 'pytest.importorskip', 'pytest.importorskip', (['"""projectq.backends._qracksim._qracksim"""'], {}), "('projectq.backends._qracksim._qracksim')\n", (1851, 1892), False, 'import pytest\n'), ((4517, 4548), 'projectq.cengines.DummyEngine', 'DummyEngine', ([], {'save_commands': '(True)'}), '(save_commands=True)\n', (4528, 4548), False, 'from projectq.cengines import BasicEngine, BasicMapperEngine, DummyEngine, LocalOptimizer, NotYetMeasuredError\n'), ((4559, 4582), 'projectq.MainEngine', 'MainEngine', (['backend', '[]'], {}), '(backend, [])\n', (4569, 4582), False, 'from projectq import MainEngine\n'), ((5241, 5262), 'projectq.libs.math.AddConstantModN', 'AddConstantModN', (['(1)', '(2)'], {}), '(1, 2)\n', (5256, 5262), False, 'from projectq.libs.math import AddConstant, AddConstantModN, SubConstant, SubConstantModN, MultiplyByConstantModN\n'), ((5320, 5348), 'projectq.libs.math.MultiplyByConstantModN', 'MultiplyByConstantModN', (['(1)', '(2)'], {}), '(1, 2)\n', (5342, 5348), False, 'from projectq.libs.math import AddConstant, AddConstantModN, SubConstant, SubConstantModN, MultiplyByConstantModN\n'), ((5781, 5800), 'projectq.MainEngine', 'MainEngine', (['sim', '[]'], {}), '(sim, [])\n', (5791, 5800), False, 'from projectq import MainEngine\n'), ((6255, 6274), 'projectq.MainEngine', 'MainEngine', (['sim', '[]'], {}), '(sim, [])\n', (6265, 6274), False, 'from projectq import MainEngine\n'), ((6608, 6627), 'projectq.MainEngine', 'MainEngine', (['sim', '[]'], {}), '(sim, [])\n', (6618, 6627), False, 'from projectq import MainEngine\n'), ((6638, 6669), 'projectq.types.WeakQubitRef', 'WeakQubitRef', ([], {'engine': 'eng', 'idx': '(1)'}), '(engine=eng, idx=1)\n', (6650, 6669), False, 'from projectq.types import WeakQubitRef\n'), ((6680, 6711), 'projectq.types.WeakQubitRef', 'WeakQubitRef', ([], {'engine': 'eng', 'idx': '(2)'}), '(engine=eng, idx=2)\n', (6692, 6711), False, 'from projectq.types import WeakQubitRef\n'), ((6723, 6774), 'projectq.ops.Command', 'Command', ([], {'engine': 'eng', 'gate': 'Allocate', 'qubits': '([qb1],)'}), '(engine=eng, gate=Allocate, qubits=([qb1],))\n', (6730, 6774), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((6786, 6830), 'projectq.ops.Command', 'Command', ([], {'engine': 'eng', 'gate': 'X', 'qubits': '([qb1],)'}), '(engine=eng, gate=X, qubits=([qb1],))\n', (6793, 6830), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((7544, 7563), 'projectq.MainEngine', 'MainEngine', (['sim', '[]'], {}), '(sim, [])\n', (7554, 7563), False, 'from projectq import MainEngine\n'), ((7757, 7776), 'projectq.MainEngine', 'MainEngine', (['sim', '[]'], {}), '(sim, [])\n', (7767, 7776), False, 'from projectq import MainEngine\n'), ((8238, 8257), 
'projectq.MainEngine', 'MainEngine', (['sim', '[]'], {}), '(sim, [])\n', (8248, 8257), False, 'from projectq import MainEngine\n'), ((10462, 10502), 'projectq.MainEngine', 'MainEngine', (['sim'], {'engine_list': 'engine_list'}), '(sim, engine_list=engine_list)\n', (10472, 10502), False, 'from projectq import MainEngine\n'), ((11724, 11764), 'projectq.MainEngine', 'MainEngine', (['sim'], {'engine_list': 'engine_list'}), '(sim, engine_list=engine_list)\n', (11734, 11764), False, 'from projectq import MainEngine\n'), ((13589, 13629), 'projectq.MainEngine', 'MainEngine', (['sim'], {'engine_list': 'engine_list'}), '(sim, engine_list=engine_list)\n', (13599, 13629), False, 'from projectq import MainEngine\n'), ((14243, 14258), 'projectq.MainEngine', 'MainEngine', (['sim'], {}), '(sim)\n', (14253, 14258), False, 'from projectq import MainEngine\n'), ((14696, 14736), 'projectq.MainEngine', 'MainEngine', (['sim'], {'engine_list': 'engine_list'}), '(sim, engine_list=engine_list)\n', (14706, 14736), False, 'from projectq import MainEngine\n'), ((15829, 15848), 'projectq.MainEngine', 'MainEngine', (['sim', '[]'], {}), '(sim, [])\n', (15839, 15848), False, 'from projectq import MainEngine\n'), ((16123, 16142), 'projectq.MainEngine', 'MainEngine', (['sim', '[]'], {}), '(sim, [])\n', (16133, 16142), False, 'from projectq import MainEngine\n'), ((17740, 17759), 'projectq.cengines.BasicMapperEngine', 'BasicMapperEngine', ([], {}), '()\n', (17757, 17759), False, 'from projectq.cengines import BasicEngine, BasicMapperEngine, DummyEngine, LocalOptimizer, NotYetMeasuredError\n'), ((17845, 17870), 'projectq.MainEngine', 'MainEngine', (['sim', '[mapper]'], {}), '(sim, [mapper])\n', (17855, 17870), False, 'from projectq import MainEngine\n'), ((19155, 19178), 'projectq.MainEngine', 'MainEngine', ([], {'backend': 'sim'}), '(backend=sim)\n', (19165, 19178), False, 'from projectq import MainEngine\n'), ((20729, 20744), 'projectq.MainEngine', 'MainEngine', (['sim'], {}), '(sim)\n', (20739, 20744), False, 'from projectq import MainEngine\n'), ((20826, 20851), 'projectq.ops.QubitOperator', 'QubitOperator', (['"""X0 X1"""', '(1)'], {}), "('X0 X1', 1)\n", (20839, 20851), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((21076, 21098), 'projectq.ops.QubitOperator', 'QubitOperator', (['"""X0"""', '(1)'], {}), "('X0', 1)\n", (21089, 21098), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((21434, 21449), 'projectq.MainEngine', 'MainEngine', (['sim'], {}), '(sim)\n', (21444, 21449), False, 'from projectq import MainEngine\n'), ((21540, 21568), 'projectq.ops.QubitOperator', 'QubitOperator', (['"""X0 X1 X2"""', '(1)'], {}), "('X0 X1 X2', 1)\n", (21553, 21568), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((21676, 21698), 'projectq.ops.QubitOperator', 'QubitOperator', (['"""X0"""', '(1)'], {}), "('X0', 1)\n", (21689, 21698), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, 
X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((22243, 22265), 'projectq.ops.QubitOperator', 'QubitOperator', (['"""Y0"""', '(1)'], {}), "('Y0', 1)\n", (22256, 22265), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((22812, 22834), 'projectq.ops.QubitOperator', 'QubitOperator', (['"""Z0"""', '(1)'], {}), "('Z0', 1)\n", (22825, 22834), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((23318, 23343), 'projectq.ops.QubitOperator', 'QubitOperator', (['"""Z0"""', '(0.25)'], {}), "('Z0', 0.25)\n", (23331, 23343), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((23833, 23858), 'projectq.ops.QubitOperator', 'QubitOperator', (['"""Z0 Z1"""', '(1)'], {}), "('Z0 Z1', 1)\n", (23846, 23858), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((2408, 2419), 'projectq.backends.Simulator', 'Simulator', ([], {}), '()\n', (2417, 2419), False, 'from projectq.backends import Simulator\n'), ((2445, 2463), 'projectq.backends._qracksim._qracksim.QrackSimulator', 'QrackSim', (['(1)', '(-1)', '(1)'], {}), '(1, -1, 1)\n', (2453, 2463), True, 'from projectq.backends._qracksim._qracksim import QrackSimulator as QrackSim\n'), ((3795, 3819), 'projectq.ops.BasicGate.__init__', 'BasicGate.__init__', (['self'], {}), '(self)\n', (3813, 3819), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((3935, 3965), 'numpy.matrix', 'numpy.matrix', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (3947, 3965), False, 'import numpy\n'), ((4074, 4098), 'projectq.ops.BasicGate.__init__', 'BasicGate.__init__', (['self'], {}), '(self)\n', (4092, 4098), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((4214, 4231), 'numpy.eye', 'numpy.eye', (['(2 ** 6)'], {}), '(2 ** 6)\n', (4223, 4231), False, 'import numpy\n'), ((4309, 4333), 'projectq.ops.BasicGate.__init__', 'BasicGate.__init__', (['self'], {}), '(self)\n', (4327, 4333), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((6417, 6429), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (6420, 6429), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((6963, 6997), 'pytest.raises', 'pytest.raises', 
(['NotYetMeasuredError'], {}), '(NotYetMeasuredError)\n', (6976, 6997), False, 'import pytest\n'), ((7025, 7059), 'pytest.raises', 'pytest.raises', (['NotYetMeasuredError'], {}), '(NotYetMeasuredError)\n', (7038, 7059), False, 'import pytest\n'), ((7136, 7170), 'pytest.raises', 'pytest.raises', (['NotYetMeasuredError'], {}), '(NotYetMeasuredError)\n', (7149, 7170), False, 'import pytest\n'), ((7267, 7274), 'projectq.ops.Rx', 'Rx', (['(0.3)'], {}), '(0.3)\n', (7269, 7274), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((7291, 7298), 'projectq.ops.Rx', 'Rx', (['(0.8)'], {}), '(0.8)\n', (7293, 7298), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((7315, 7322), 'projectq.ops.Ry', 'Ry', (['(0.1)'], {}), '(0.1)\n', (7317, 7322), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((7607, 7631), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (7620, 7631), False, 'import pytest\n'), ((7671, 7695), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (7684, 7695), False, 'import pytest\n'), ((7901, 7913), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (7904, 7913), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((7928, 7940), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (7931, 7940), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((8085, 8097), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (8088, 8097), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((8112, 8124), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (8115, 8124), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((8298, 8312), 'projectq.libs.math.AddConstant', 'AddConstant', (['(1)'], {}), '(1)\n', (8309, 8312), False, 'from projectq.libs.math import AddConstant, AddConstantModN, SubConstant, SubConstantModN, MultiplyByConstantModN\n'), ((8327, 8339), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (8330, 8339), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((8460, 8484), 'projectq.libs.math.AddConstantModN', 'AddConstantModN', (['(10)', '(256)'], {}), '(10, 256)\n', 
(8475, 8484), False, 'from projectq.libs.math import AddConstant, AddConstantModN, SubConstant, SubConstantModN, MultiplyByConstantModN\n'), ((8499, 8511), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (8502, 8511), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((8745, 8757), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (8748, 8757), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((8972, 8984), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (8975, 8984), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((9106, 9120), 'projectq.libs.math.SubConstant', 'SubConstant', (['(5)'], {}), '(5)\n', (9117, 9120), False, 'from projectq.libs.math import AddConstant, AddConstantModN, SubConstant, SubConstantModN, MultiplyByConstantModN\n'), ((9135, 9147), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (9138, 9147), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((9322, 9334), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (9325, 9334), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((9549, 9561), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (9552, 9561), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((9682, 9712), 'projectq.libs.math.MultiplyByConstantModN', 'MultiplyByConstantModN', (['(2)', '(256)'], {}), '(2, 256)\n', (9704, 9712), False, 'from projectq.libs.math import AddConstant, AddConstantModN, SubConstant, SubConstantModN, MultiplyByConstantModN\n'), ((9727, 9739), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (9730, 9739), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((9942, 9954), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (9945, 9954), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((10176, 10188), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (10179, 10188), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((10372, 
10388), 'projectq.cengines.LocalOptimizer', 'LocalOptimizer', ([], {}), '()\n', (10386, 10388), False, 'from projectq.cengines import BasicEngine, BasicMapperEngine, DummyEngine, LocalOptimizer, NotYetMeasuredError\n'), ((10542, 10548), 'projectq.ops.All', 'All', (['H'], {}), '(H)\n', (10545, 10548), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((10783, 10810), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (10796, 10810), False, 'import pytest\n'), ((10890, 10896), 'projectq.ops.All', 'All', (['H'], {}), '(H)\n', (10893, 10896), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((11032, 11050), 'pytest.approx', 'pytest.approx', (['(0.3)'], {}), '(0.3)\n', (11045, 11050), False, 'import pytest\n'), ((11177, 11195), 'pytest.approx', 'pytest.approx', (['(0.4)'], {}), '(0.4)\n', (11190, 11195), False, 'import pytest\n'), ((11548, 11560), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (11551, 11560), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((11634, 11650), 'projectq.cengines.LocalOptimizer', 'LocalOptimizer', ([], {}), '()\n', (11648, 11650), False, 'from projectq.cengines import BasicEngine, BasicMapperEngine, DummyEngine, LocalOptimizer, NotYetMeasuredError\n'), ((11804, 11810), 'projectq.ops.All', 'All', (['X'], {}), '(X)\n', (11807, 11810), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((11824, 11830), 'projectq.ops.All', 'All', (['H'], {}), '(H)\n', (11827, 11830), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((12029, 12053), 'pytest.approx', 'pytest.approx', (['(1.0 / 8.0)'], {}), '(1.0 / 8.0)\n', (12042, 12053), False, 'import pytest\n'), ((12236, 12255), 'pytest.approx', 'pytest.approx', (['polR'], {}), '(polR)\n', (12249, 12255), False, 'import pytest\n'), ((12290, 12311), 'pytest.approx', 'pytest.approx', (['polPhi'], {}), '(polPhi)\n', (12303, 12311), False, 'import pytest\n'), ((12496, 12515), 'pytest.approx', 'pytest.approx', (['polR'], {}), '(polR)\n', (12509, 12515), False, 'import pytest\n'), ((12550, 12571), 'pytest.approx', 'pytest.approx', (['polPhi'], {}), '(polPhi)\n', (12563, 12571), False, 'import pytest\n'), ((12576, 12582), 'projectq.ops.All', 'All', (['H'], {}), '(H)\n', (12579, 12582), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((12596, 12602), 'projectq.ops.All', 'All', (['X'], {}), '(X)\n', (12599, 12602), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, 
QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((12777, 12795), 'pytest.approx', 'pytest.approx', (['(0.3)'], {}), '(0.3)\n', (12790, 12795), False, 'import pytest\n'), ((12952, 12964), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (12955, 12964), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((13031, 13058), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (13044, 13058), False, 'import pytest\n'), ((13159, 13186), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (13172, 13186), False, 'import pytest\n'), ((13351, 13378), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (13364, 13378), False, 'import pytest\n'), ((13499, 13515), 'projectq.cengines.LocalOptimizer', 'LocalOptimizer', ([], {}), '()\n', (13513, 13515), False, 'from projectq.cengines import BasicEngine, BasicMapperEngine, DummyEngine, LocalOptimizer, NotYetMeasuredError\n'), ((13683, 13697), 'math.sqrt', 'math.sqrt', (['(0.2)'], {}), '(0.2)\n', (13692, 13697), False, 'import math\n'), ((13699, 13713), 'math.sqrt', 'math.sqrt', (['(0.8)'], {}), '(0.8)\n', (13708, 13713), False, 'import math\n'), ((13724, 13751), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (13737, 13751), False, 'import pytest\n'), ((14097, 14109), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (14100, 14109), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((14606, 14622), 'projectq.cengines.LocalOptimizer', 'LocalOptimizer', ([], {}), '()\n', (14620, 14622), False, 'from projectq.cengines import BasicEngine, BasicMapperEngine, DummyEngine, LocalOptimizer, NotYetMeasuredError\n'), ((14810, 14837), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (14823, 14837), False, 'import pytest\n'), ((15050, 15056), 'projectq.ops.All', 'All', (['H'], {}), '(H)\n', (15053, 15056), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((15207, 15234), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (15220, 15234), False, 'import pytest\n'), ((15457, 15475), 'pytest.approx', 'pytest.approx', (['(0.5)'], {}), '(0.5)\n', (15470, 15475), False, 'import pytest\n'), ((15751, 15769), 'pytest.approx', 'pytest.approx', (['(1.0)'], {}), '(1.0)\n', (15764, 15769), False, 'import pytest\n'), ((15905, 15932), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (15918, 15932), False, 'import pytest\n'), ((17182, 17208), 'projectq.meta.Control', 'Control', (['eng', 'qubits[0:-1]'], {}), '(eng, qubits[0:-1])\n', (17189, 17208), False, 'from projectq.meta import Compute, Uncompute, Control, Dagger, LogicalQubitIDTag\n'), ((17242, 17268), 'projectq.meta.Control', 'Control', (['eng', 'qubits[0:-2]'], {}), '(eng, qubits[0:-2])\n', (17249, 17268), False, 'from projectq.meta import Compute, Uncompute, Control, Dagger, LogicalQubitIDTag\n'), 
((17302, 17328), 'projectq.meta.Control', 'Control', (['eng', 'qubits[0:-3]'], {}), '(eng, qubits[0:-3])\n', (17309, 17328), False, 'from projectq.meta import Compute, Uncompute, Control, Dagger, LogicalQubitIDTag\n'), ((17645, 17657), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (17648, 17657), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((18726, 18740), 'projectq.meta.Uncompute', 'Uncompute', (['eng'], {}), '(eng)\n', (18735, 18740), False, 'from projectq.meta import Compute, Uncompute, Control, Dagger, LogicalQubitIDTag\n'), ((20502, 20514), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (20505, 20514), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((20547, 20559), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (20550, 20559), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((20861, 20885), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (20874, 20885), False, 'import pytest\n'), ((21238, 21256), 'pytest.approx', 'pytest.approx', (['(0.0)'], {}), '(0.0)\n', (21251, 21256), False, 'import pytest\n'), ((21344, 21362), 'pytest.approx', 'pytest.approx', (['(1.0)'], {}), '(1.0)\n', (21357, 21362), False, 'import pytest\n'), ((21578, 21602), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (21591, 21602), False, 'import pytest\n'), ((21914, 21960), 'pytest.approx', 'pytest.approx', (['(1)'], {'rel': 'tolerance', 'abs': 'tolerance'}), '(1, rel=tolerance, abs=tolerance)\n', (21927, 21960), False, 'import pytest\n'), ((22178, 22225), 'pytest.approx', 'pytest.approx', (['(-1)'], {'rel': 'tolerance', 'abs': 'tolerance'}), '(-1, rel=tolerance, abs=tolerance)\n', (22191, 22225), False, 'import pytest\n'), ((22482, 22528), 'pytest.approx', 'pytest.approx', (['(1)'], {'rel': 'tolerance', 'abs': 'tolerance'}), '(1, rel=tolerance, abs=tolerance)\n', (22495, 22528), False, 'import pytest\n'), ((22747, 22794), 'pytest.approx', 'pytest.approx', (['(-1)'], {'rel': 'tolerance', 'abs': 'tolerance'}), '(-1, rel=tolerance, abs=tolerance)\n', (22760, 22794), False, 'import pytest\n'), ((23020, 23066), 'pytest.approx', 'pytest.approx', (['(1)'], {'rel': 'tolerance', 'abs': 'tolerance'}), '(1, rel=tolerance, abs=tolerance)\n', (23033, 23066), False, 'import pytest\n'), ((23253, 23300), 'pytest.approx', 'pytest.approx', (['(-1)'], {'rel': 'tolerance', 'abs': 'tolerance'}), '(-1, rel=tolerance, abs=tolerance)\n', (23266, 23300), False, 'import pytest\n'), ((23529, 23578), 'pytest.approx', 'pytest.approx', (['(0.25)'], {'rel': 'tolerance', 'abs': 'tolerance'}), '(0.25, rel=tolerance, abs=tolerance)\n', (23542, 23578), False, 'import pytest\n'), ((23765, 23815), 'pytest.approx', 'pytest.approx', (['(-0.25)'], {'rel': 'tolerance', 'abs': 'tolerance'}), '(-0.25, rel=tolerance, abs=tolerance)\n', (23778, 23815), False, 'import pytest\n'), ((24044, 24090), 'pytest.approx', 'pytest.approx', (['(1)'], {'rel': 'tolerance', 'abs': 'tolerance'}), '(1, rel=tolerance, abs=tolerance)\n', 
(24057, 24090), False, 'import pytest\n'), ((24197, 24244), 'pytest.approx', 'pytest.approx', (['(-1)'], {'rel': 'tolerance', 'abs': 'tolerance'}), '(-1, rel=tolerance, abs=tolerance)\n', (24210, 24244), False, 'import pytest\n'), ((24351, 24397), 'pytest.approx', 'pytest.approx', (['(1)'], {'rel': 'tolerance', 'abs': 'tolerance'}), '(1, rel=tolerance, abs=tolerance)\n', (24364, 24397), False, 'import pytest\n'), ((24504, 24551), 'pytest.approx', 'pytest.approx', (['(-1)'], {'rel': 'tolerance', 'abs': 'tolerance'}), '(-1, rel=tolerance, abs=tolerance)\n', (24517, 24551), False, 'import pytest\n'), ((2614, 2625), 'projectq.backends.Simulator', 'Simulator', ([], {}), '()\n', (2623, 2625), False, 'from projectq.backends import Simulator\n'), ((2651, 2669), 'projectq.backends._qracksim._qracksim.QrackSimulator', 'QrackSim', (['(1)', '(-1)', '(2)'], {}), '(1, -1, 2)\n', (2659, 2669), True, 'from projectq.backends._qracksim._qracksim import QrackSimulator as QrackSim\n'), ((7358, 7366), 'projectq.ops.Ry', 'Ry', (['(-0.1)'], {}), '(-0.1)\n', (7360, 7366), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((7413, 7431), 'numpy.kron', 'numpy.kron', (['m2', 'm1'], {}), '(m2, m1)\n', (7423, 7431), False, 'import numpy\n'), ((8694, 8718), 'projectq.libs.math.AddConstantModN', 'AddConstantModN', (['(10)', '(256)'], {}), '(10, 256)\n', (8709, 8718), False, 'from projectq.libs.math import AddConstant, AddConstantModN, SubConstant, SubConstantModN, MultiplyByConstantModN\n'), ((8921, 8945), 'projectq.libs.math.AddConstantModN', 'AddConstantModN', (['(10)', '(256)'], {}), '(10, 256)\n', (8936, 8945), False, 'from projectq.libs.math import AddConstant, AddConstantModN, SubConstant, SubConstantModN, MultiplyByConstantModN\n'), ((9271, 9295), 'projectq.libs.math.SubConstantModN', 'SubConstantModN', (['(10)', '(256)'], {}), '(10, 256)\n', (9286, 9295), False, 'from projectq.libs.math import AddConstant, AddConstantModN, SubConstant, SubConstantModN, MultiplyByConstantModN\n'), ((9498, 9522), 'projectq.libs.math.SubConstantModN', 'SubConstantModN', (['(10)', '(256)'], {}), '(10, 256)\n', (9513, 9522), False, 'from projectq.libs.math import AddConstant, AddConstantModN, SubConstant, SubConstantModN, MultiplyByConstantModN\n'), ((9885, 9915), 'projectq.libs.math.MultiplyByConstantModN', 'MultiplyByConstantModN', (['(2)', '(256)'], {}), '(2, 256)\n', (9907, 9915), False, 'from projectq.libs.math import AddConstant, AddConstantModN, SubConstant, SubConstantModN, MultiplyByConstantModN\n'), ((10118, 10149), 'projectq.libs.math.MultiplyByConstantModN', 'MultiplyByConstantModN', (['(10)', '(256)'], {}), '(10, 256)\n', (10140, 10149), False, 'from projectq.libs.math import AddConstant, AddConstantModN, SubConstant, SubConstantModN, MultiplyByConstantModN\n'), ((10712, 10735), 'pytest.approx', 'pytest.approx', (['(0.5 ** i)'], {}), '(0.5 ** i)\n', (10725, 10735), False, 'import pytest\n'), ((12930, 12945), 'math.sqrt', 'math.sqrt', (['(0.91)'], {}), '(0.91)\n', (12939, 12945), False, 'import math\n'), ((14477, 14496), 'pytest.approx', 'pytest.approx', (['(1.0j)'], {}), '(1.0j)\n', (14490, 14496), False, 'import pytest\n'), ((14511, 14531), 'pytest.approx', 'pytest.approx', (['(-1.0j)'], {}), '(-1.0j)\n', (14524, 14531), False, 'import pytest\n'), ((18458, 18470), 'projectq.meta.Compute', 'Compute', (['eng'], {}), '(eng)\n', (18465, 18470), 
False, 'from projectq.meta import Compute, Uncompute, Control, Dagger, LogicalQubitIDTag\n'), ((18635, 18663), 'projectq.meta.Control', 'Control', (['eng', 'control_qubits'], {}), '(eng, control_qubits)\n', (18642, 18663), False, 'from projectq.meta import Compute, Uncompute, Control, Dagger, LogicalQubitIDTag\n'), ((19127, 19138), 'projectq.backends.Simulator', 'Simulator', ([], {}), '()\n', (19136, 19138), False, 'from projectq.backends import Simulator\n'), ((20447, 20496), 'pytest.approx', 'pytest.approx', (['test'], {'rel': 'tolerance', 'abs': 'tolerance'}), '(test, rel=tolerance, abs=tolerance)\n', (20460, 20496), False, 'import pytest\n'), ((2957, 2983), 'projectq.cengines.BasicEngine.__init__', 'BasicEngine.__init__', (['self'], {}), '(self)\n', (2977, 2983), False, 'from projectq.cengines import BasicEngine, BasicMapperEngine, DummyEngine, LocalOptimizer, NotYetMeasuredError\n'), ((6931, 6951), 'projectq.meta.LogicalQubitIDTag', 'LogicalQubitIDTag', (['(2)'], {}), '(2)\n', (6948, 6951), False, 'from projectq.meta import Compute, Uncompute, Control, Dagger, LogicalQubitIDTag\n'), ((7339, 7346), 'projectq.ops.Rz', 'Rz', (['(0.9)'], {}), '(0.9)\n', (7341, 7346), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((12623, 12637), 'math.acos', 'math.acos', (['(0.3)'], {}), '(0.3)\n', (12632, 12637), False, 'import math\n'), ((21742, 21754), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (21751, 21754), False, 'import math\n'), ((21760, 21772), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (21769, 21772), False, 'import math\n'), ((22005, 22017), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (22014, 22017), False, 'import math\n'), ((22024, 22036), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (22033, 22036), False, 'import math\n'), ((22309, 22321), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (22318, 22321), False, 'import math\n'), ((22328, 22340), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (22337, 22340), False, 'import math\n'), ((22573, 22585), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (22582, 22585), False, 'import math\n'), ((22593, 22605), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (22602, 22605), False, 'import math\n'), ((10927, 10941), 'math.sqrt', 'math.sqrt', (['(0.3)'], {}), '(0.3)\n', (10936, 10941), False, 'import math\n'), ((11072, 11086), 'math.sqrt', 'math.sqrt', (['(0.4)'], {}), '(0.4)\n', (11081, 11086), False, 'import math\n')]
|
"""
Remove Fragments not in Knowledgebase
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright 2019, Hong Kong University of Science and Technology"
__license__ = "3-clause BSD"
from argparse import ArgumentParser
import numpy as np
import pickle
parser = ArgumentParser(description="Build Files")
parser.add_argument("--datadir", type=str, default="Data", help="input - XXX.YYY ")
parser.add_argument("--envNewAcronym", type=str, default="PRT.SNW", help="input - XXX.YYY ")
args = parser.parse_args()
# Check the Bound Fragments
BoundFrags = np.loadtxt("../%s/%s/%s.Homogenised.boundfrags_zeros.txt" %(args.datadir, args.envNewAcronym, args.envNewAcronym), delimiter=',')
normalDF = pickle.load(open("../%s/GrandCID.dict" %(args.datadir), "rb"))
binding = np.full(BoundFrags.shape,-1)
mlength = 0
for r, i in enumerate(BoundFrags):
for c, j in enumerate(i[i!=0]):
try:
# Checks whether the Fragment can be found in the 59k Fragment Base
binding[r,c]=normalDF.index.get_loc(int(j))
except:
continue
temp = binding[r]
if temp[temp!=-1].shape[0] > mlength:
mlength = temp[temp!=-1].shape[0]
print(mlength) #Finds the maximum number of Fragments per environment -> 705
indices = np.empty(binding.shape[0])
red_binding = np.full((binding.shape[0], mlength), -1)
for j, i in enumerate(binding):
indices[j] = i[i!=-1].shape[0]
red_binding[j][:int(indices[j])] = i[i!=-1]
red_binding = np.delete(red_binding, np.where(indices==0), axis=0)
pickle.dump(red_binding, open("../%s/%s/%s.binding.mtr" %(args.datadir, args.envNewAcronym, args.envNewAcronym), "wb"))
# Removes environments without binding Fragments
Features_all = pickle.load(open("../%s/%s/%s.Homogenised.property.pvar" %(args.datadir, args.envNewAcronym, args.envNewAcronym), "rb"))
Features_all = np.delete(Features_all, np.where(indices==0), axis=0)
pickle.dump(Features_all, open("../%s/%s/%s.Homogenised.property.pvar" %(args.datadir, args.envNewAcronym, args.envNewAcronym), "wb"))
# Removes environment annotations without binding fragments
with open("../%s/%s/%s.Homogenised.annotation.txt" %(args.datadir, args.envNewAcronym, args.envNewAcronym), "r+") as f:
lines = f.readlines()
for i in np.where(indices==0)[0][::-1]:
del lines[i]
f.seek(0)
f.truncate()
f.writelines(lines)
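# Hedged invocation example (illustrative only): the flag values below are just the
# argparse defaults, and the script filename is an assumption, not stated in this module.
#   python remove_fragments.py --datadir Data --envNewAcronym PRT.SNW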
|
[
"argparse.ArgumentParser",
"numpy.where",
"numpy.empty",
"numpy.full",
"numpy.loadtxt"
] |
[((281, 322), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Build Files"""'}), "(description='Build Files')\n", (295, 322), False, 'from argparse import ArgumentParser\n'), ((572, 706), 'numpy.loadtxt', 'np.loadtxt', (["('../%s/%s/%s.Homogenised.boundfrags_zeros.txt' % (args.datadir, args.\n envNewAcronym, args.envNewAcronym))"], {'delimiter': '""","""'}), "('../%s/%s/%s.Homogenised.boundfrags_zeros.txt' % (args.datadir,\n args.envNewAcronym, args.envNewAcronym), delimiter=',')\n", (582, 706), True, 'import numpy as np\n'), ((788, 817), 'numpy.full', 'np.full', (['BoundFrags.shape', '(-1)'], {}), '(BoundFrags.shape, -1)\n', (795, 817), True, 'import numpy as np\n'), ((1296, 1322), 'numpy.empty', 'np.empty', (['binding.shape[0]'], {}), '(binding.shape[0])\n', (1304, 1322), True, 'import numpy as np\n'), ((1337, 1377), 'numpy.full', 'np.full', (['(binding.shape[0], mlength)', '(-1)'], {}), '((binding.shape[0], mlength), -1)\n', (1344, 1377), True, 'import numpy as np\n'), ((1530, 1552), 'numpy.where', 'np.where', (['(indices == 0)'], {}), '(indices == 0)\n', (1538, 1552), True, 'import numpy as np\n'), ((1908, 1930), 'numpy.where', 'np.where', (['(indices == 0)'], {}), '(indices == 0)\n', (1916, 1930), True, 'import numpy as np\n'), ((2294, 2316), 'numpy.where', 'np.where', (['(indices == 0)'], {}), '(indices == 0)\n', (2302, 2316), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sat May 25 13:17:49 2019
@author: Toonw
"""
import numpy as np
def vlen(a):
return (a[0]**2 + a[1]**2)**0.5
def add(v1,v2):
return (v1[0]+v2[0], v1[1]+v2[1])
def sub(v1,v2):
return (v1[0]-v2[0], v1[1]-v2[1])
def unit_vector(v):
vu = v / np.linalg.norm(v)
return (vu[0], vu[1])
def angle_between(v1, v2):
angle = np.arccos(np.dot(v1,v2)/(vlen(v1)*vlen(v2)))
return angle
# Similarity measure of article
## https://pdfs.semanticscholar.org/60b5/aca20ba34d424f4236359bd5e6aa30487682.pdf
def sim_measure(A, B): # similarity between two shapes A and B
# print(A)
# print(B)
return 1 - (sum([(vlen(unit_vector(a))+vlen(unit_vector(b)))*angle_between(a,b) for a,b in zip(A,B)]))/(np.pi*(len(A)+len(B)))
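# Hedged usage sketch (illustrative, not part of the original module): compare two shapes
# given as equal-length lists of 2D edge vectors. The vectors below are made-up data.
if __name__ == "__main__":
    shape_a = [(1, 0), (0, 1), (-1, 0), (0, -1)]   # unit square traversed edge by edge
    shape_b = [(2, 0), (0, 2), (-2, 0), (0, -2)]   # the same square scaled by 2
    # Every vector pair points in the same direction, so each angle is 0 and the
    # measure evaluates to 1.0 (identical shape up to scale).
    print(sim_measure(shape_a, shape_b))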
|
[
"numpy.dot",
"numpy.linalg.norm"
] |
[((300, 317), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (314, 317), True, 'import numpy as np\n'), ((394, 408), 'numpy.dot', 'np.dot', (['v1', 'v2'], {}), '(v1, v2)\n', (400, 408), True, 'import numpy as np\n')]
|
import numpy as np
import tensorflow as tf
import os
from scipy.io import savemat
from scipy.io import loadmat
from scipy.misc import imread
from scipy.misc import imsave
from alexnet_face_classifier import *
import matplotlib.pyplot as plt
plt.switch_backend('agg')
class backprop_graph:
def __init__(self, num_classes, nhid, cnn):
self.num_classes = num_classes
self.inputs = tf.placeholder(tf.float32, shape = [None, 227, 227, 3], name='input')
self.labels_1hot = tf.placeholder(tf.float32, shape=[None, self.num_classes])
self.cnn = cnn(self.inputs, None, self.num_classes)
self.cnn.preprocess()
self.cnn.convlayers()
self.cnn.fc_layers(transfer_learning=False, nhid=nhid)
def classifier_graph(self, temp=3.0):
self.probabilities = tf.nn.softmax(self.cnn.fc2/temp)
self.probability = tf.tensordot(self.probabilities, self.labels_1hot, axes=[[1],[1]])
self.log_probability = tf.log(self.probability)
def guided_backprop_graph(self):
self.grad_fc2 = tf.nn.relu(tf.gradients(self.probability, self.cnn.fc2)[0])
self.grad_fc1 = tf.nn.relu(tf.gradients(self.cnn.fc2, self.cnn.fc1, grad_ys=self.grad_fc2)[0])
self.grad_conv5 = tf.nn.relu(tf.gradients(self.cnn.fc1, self.cnn.conv5, grad_ys=self.grad_fc1)[0])
self.grad_conv4 = tf.nn.relu(tf.gradients(self.cnn.conv5, self.cnn.conv4, grad_ys=self.grad_conv5)[0])
self.grad_conv3 = tf.nn.relu(tf.gradients(self.cnn.conv4, self.cnn.conv3, grad_ys=self.grad_conv4)[0])
self.grad_conv2 = tf.nn.relu(tf.gradients(self.cnn.conv3, self.cnn.conv2, grad_ys=self.grad_conv3)[0])
self.grad_conv1 = tf.nn.relu(tf.gradients(self.cnn.conv2, self.cnn.conv1, grad_ys=self.grad_conv2)[0])
self.grad_image = tf.nn.relu(tf.gradients(self.cnn.conv1, self.inputs, grad_ys=self.grad_conv1)[0])
###
def guided_backprop(graph, image, one_hot, sess):
image = np.expand_dims(image, 0)
one_hot = np.expand_dims(one_hot, 0)
saliency_map = sess.run(graph.grad_image, feed_dict={graph.inputs:image, graph.labels_1hot:one_hot})[0]
scaling_adjustment = 1E-20
saliency_map_scaled = saliency_map/(np.max(saliency_map)+scaling_adjustment)
return saliency_map_scaled
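# Hedged usage sketch (illustrative only): the CNN class name, the weight initialization
# and the image path below are assumptions, not taken from this module or from
# alexnet_face_classifier.
#
#   graph = backprop_graph(num_classes=10, nhid=512, cnn=alexnet_face_classifier)
#   graph.classifier_graph()
#   graph.guided_backprop_graph()
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())   # or restore trained weights instead
#       image = imread('face.png')                    # expects a 227x227x3 input image
#       one_hot = np.eye(10)[3]                       # target class 3 as a one-hot vector
#       saliency = guided_backprop(graph, image, one_hot, sess)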
|
[
"tensorflow.tensordot",
"tensorflow.placeholder",
"matplotlib.pyplot.switch_backend",
"numpy.max",
"tensorflow.gradients",
"tensorflow.nn.softmax",
"numpy.expand_dims",
"tensorflow.log"
] |
[((243, 268), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (261, 268), True, 'import matplotlib.pyplot as plt\n'), ((1967, 1991), 'numpy.expand_dims', 'np.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (1981, 1991), True, 'import numpy as np\n'), ((2006, 2032), 'numpy.expand_dims', 'np.expand_dims', (['one_hot', '(0)'], {}), '(one_hot, 0)\n', (2020, 2032), True, 'import numpy as np\n'), ((402, 469), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 227, 227, 3]', 'name': '"""input"""'}), "(tf.float32, shape=[None, 227, 227, 3], name='input')\n", (416, 469), True, 'import tensorflow as tf\n'), ((499, 557), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.num_classes]'}), '(tf.float32, shape=[None, self.num_classes])\n', (513, 557), True, 'import tensorflow as tf\n'), ((819, 853), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['(self.cnn.fc2 / temp)'], {}), '(self.cnn.fc2 / temp)\n', (832, 853), True, 'import tensorflow as tf\n'), ((879, 946), 'tensorflow.tensordot', 'tf.tensordot', (['self.probabilities', 'self.labels_1hot'], {'axes': '[[1], [1]]'}), '(self.probabilities, self.labels_1hot, axes=[[1], [1]])\n', (891, 946), True, 'import tensorflow as tf\n'), ((977, 1001), 'tensorflow.log', 'tf.log', (['self.probability'], {}), '(self.probability)\n', (983, 1001), True, 'import tensorflow as tf\n'), ((2216, 2236), 'numpy.max', 'np.max', (['saliency_map'], {}), '(saliency_map)\n', (2222, 2236), True, 'import numpy as np\n'), ((1079, 1123), 'tensorflow.gradients', 'tf.gradients', (['self.probability', 'self.cnn.fc2'], {}), '(self.probability, self.cnn.fc2)\n', (1091, 1123), True, 'import tensorflow as tf\n'), ((1163, 1226), 'tensorflow.gradients', 'tf.gradients', (['self.cnn.fc2', 'self.cnn.fc1'], {'grad_ys': 'self.grad_fc2'}), '(self.cnn.fc2, self.cnn.fc1, grad_ys=self.grad_fc2)\n', (1175, 1226), True, 'import tensorflow as tf\n'), ((1268, 1333), 'tensorflow.gradients', 'tf.gradients', (['self.cnn.fc1', 'self.cnn.conv5'], {'grad_ys': 'self.grad_fc1'}), '(self.cnn.fc1, self.cnn.conv5, grad_ys=self.grad_fc1)\n', (1280, 1333), True, 'import tensorflow as tf\n'), ((1375, 1444), 'tensorflow.gradients', 'tf.gradients', (['self.cnn.conv5', 'self.cnn.conv4'], {'grad_ys': 'self.grad_conv5'}), '(self.cnn.conv5, self.cnn.conv4, grad_ys=self.grad_conv5)\n', (1387, 1444), True, 'import tensorflow as tf\n'), ((1486, 1555), 'tensorflow.gradients', 'tf.gradients', (['self.cnn.conv4', 'self.cnn.conv3'], {'grad_ys': 'self.grad_conv4'}), '(self.cnn.conv4, self.cnn.conv3, grad_ys=self.grad_conv4)\n', (1498, 1555), True, 'import tensorflow as tf\n'), ((1597, 1666), 'tensorflow.gradients', 'tf.gradients', (['self.cnn.conv3', 'self.cnn.conv2'], {'grad_ys': 'self.grad_conv3'}), '(self.cnn.conv3, self.cnn.conv2, grad_ys=self.grad_conv3)\n', (1609, 1666), True, 'import tensorflow as tf\n'), ((1708, 1777), 'tensorflow.gradients', 'tf.gradients', (['self.cnn.conv2', 'self.cnn.conv1'], {'grad_ys': 'self.grad_conv2'}), '(self.cnn.conv2, self.cnn.conv1, grad_ys=self.grad_conv2)\n', (1720, 1777), True, 'import tensorflow as tf\n'), ((1819, 1885), 'tensorflow.gradients', 'tf.gradients', (['self.cnn.conv1', 'self.inputs'], {'grad_ys': 'self.grad_conv1'}), '(self.cnn.conv1, self.inputs, grad_ys=self.grad_conv1)\n', (1831, 1885), True, 'import tensorflow as tf\n')]
|
import copy
import unittest
import networkx as nx
import numpy as np
from scipy.special import erf
from dfn import Fluid, FractureNetworkThermal
class TestFractureNetworkThermal(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestFractureNetworkThermal, self).__init__(*args, **kwargs)
# fluid properties
cp_w = 4300.0
rho_w = 1000.0
mu_w = 1E-3
self.fluid = Fluid(density=rho_w, viscosity=mu_w, heat_capacity=cp_w)
# reservoir properties
k_r = 2.9
cp_r = 1050.0
rho_r = 2700.0
alpha_r = k_r / (rho_r * cp_r)
# first network
conn_1 = [(0, 1), (1, 2), (1, 3), (2, 4), (3, 4), (4, 5)]
L_1 = [100, 500, 500, 500, 500, 100]
H_1 = [500, 500, 500, 500, 500, 500]
w_1 = [1E-3, 1E-3, 1E-3, 1E-3, 1E-3, 1E-3]
self.network_1 = FractureNetworkThermal(conn_1, L_1, H_1, w_1, k_r,
alpha_r)
# second network
conn_2 = [(0, 1), (1, 2), (2, 3), (1, 4), (2, 5), (3, 6), (4, 5),
(5, 6), (4, 7), (5, 8), (6, 9), (7, 8), (8, 9), (9, 10)]
L_2 = 250 * np.ones(len(conn_2))
L_2[0] = 100
L_2[-1] = 100
H_2 = 500 * np.ones(len(conn_2))
w_2 = 1E-3 * np.ones(len(conn_2))
self.network_2 = FractureNetworkThermal(conn_2, L_2, H_2, w_2, k_r,
alpha_r)
def copy_networks(self):
"""Return a copy of the fracture networks."""
return copy.copy(self.network_1), copy.copy(self.network_2)
def networks_with_flow(self):
"""Return networks with the mass flow calculated."""
network_1, network_2 = self.copy_networks()
P_0 = 0.0
m_inj = 50.0
network_1.calculate_flow(self.fluid, {0: P_0}, {5: -m_inj})
network_2.calculate_flow(self.fluid, {0: P_0}, {10: -m_inj})
return network_1, network_2
def reverse_nodes(self, network, segments):
"""Reverse the node order for given segments."""
conn = network.connectivity
for seg in segments:
inlet, outlet = conn[seg]
conn[seg, :] = outlet, inlet
network.connectivity = conn
return network
def test_no_mass_flow(self):
"""Test if TypeError is raised for networks without flow calculated."""
with self.assertRaises(TypeError):
self.network_1._check_if_calculated()
with self.assertRaises(TypeError):
self.network_2._check_if_calculated()
def test_neg_mass_flow(self):
"""Test if valueError is raised for networks with negative flow."""
network_1, network_2 = self.networks_with_flow()
network_1 = self.reverse_nodes(network_1, [1])
network_2 = self.reverse_nodes(network_2, [1])
network_1.calculate_flow(self.fluid, {0: 0}, {5: -1.0})
network_2.calculate_flow(self.fluid, {0: 0}, {10: -1.0})
with self.assertRaises(ValueError):
network_1.calculate_temperature(self.fluid, 0, [0], [1])
with self.assertRaises(ValueError):
network_2.calculate_temperature(self.fluid, 0, [0], [1])
def test_construct_graph(self):
"""Test _construct_graph method."""
network_1, network_2 = self.networks_with_flow()
network_1._construct_graph()
network_2._construct_graph()
# construct graph for network 1
G_1 = nx.MultiDiGraph()
edge_data_1 = [(0, 1, {'index': 0}), (1, 2, {'index': 1}),
(1, 3, {'index': 2}), (2, 4, {'index': 3}),
(3, 4, {'index': 4}), (4, 5, {'index': 5})]
G_1.add_edges_from(edge_data_1)
# construct graph for network 2
G_2 = nx.MultiDiGraph()
edge_data_2 = [(0, 1, {'index': 0}), (1, 2, {'index': 1}),
(2, 3, {'index': 2}), (1, 4, {'index': 3}),
(2, 5, {'index': 4}), (3, 6, {'index': 5}),
(4, 5, {'index': 6}), (5, 6, {'index': 7}),
(4, 7, {'index': 8}), (5, 8, {'index': 9}),
(6, 9, {'index': 10}), (7, 8, {'index': 11}),
(8, 9, {'index': 12}), (9, 10, {'index': 13})]
G_2.add_edges_from(edge_data_2)
# return True if graphs are the same
is_isomorphic_1 = nx.is_isomorphic(network_1.graph, G_1)
is_isomorphic_2 = nx.is_isomorphic(network_2.graph, G_2)
self.assertTrue(is_isomorphic_1)
self.assertTrue(is_isomorphic_2)
def test_find_injection_nodes(self):
"""Test _find_injection_nodes method."""
network_1, network_2 = self.networks_with_flow()
network_1._construct_graph()
network_2._construct_graph()
self.assertEqual(network_1._find_injection_nodes(), [0])
self.assertEqual(network_2._find_injection_nodes(), [0])
def test_mass_contribution(self):
"""Test _mass_contribution method."""
network_1, network_2 = self.networks_with_flow()
chi_1 = network_1._mass_contribution()
chi_2 = network_2._mass_contribution()
# first network
for i in (0, 1, 2, 5):
self.assertAlmostEqual(chi_1[i], 1.0, 12)
self.assertAlmostEqual(chi_1[3] + chi_1[4], 1.0, 12)
# second network
for i in (0, 1, 2, 3, 8, 13):
self.assertAlmostEqual(chi_2[i], 1.0, 12)
for i, j in [(4, 6), (5, 7), (9, 11), (10, 12)]:
self.assertAlmostEqual(chi_2[i] + chi_2[j], 1.0, 12)
def test_find_paths(self):
"""Test find_paths method."""
# .find_paths method calls .construct_graph if needed. Manually call
# .construct_graph() on one network for testing both True and False
# conditions
network_1, network_2 = self.networks_with_flow()
network_1._construct_graph()
path_1 = {(0, 1, 3), (0, 2, 4)}
path_2 = {(0, 1, 2, 5, 10), (0, 1, 4, 7, 10), (0, 3, 6, 7, 10),
(0, 3, 6, 9, 12), (0, 3, 8, 11, 12), (0, 1, 4, 9, 12)}
self.assertEqual(path_1, set(network_1.find_paths(0, 4)))
self.assertEqual(path_2, set(network_2.find_paths(0, 9)))
def test_calculate_temperature_inlet_segment(self):
"""Test calculate_temperature ability to handle the inlet segment."""
# operational parameters for temperature
t_end = 86400 * 365.25 * 20
time = t_end * np.linspace(1.0 / 100, 1.0, 100)
distance = np.linspace(0.0, 100.0, 100)
z, t = np.meshgrid(distance, time)
network_1, network_2 = self.networks_with_flow()
# create parameters for temperature manually
m_1 = network_1.mass_flow[0]
m_2 = network_2.mass_flow[0]
beta_1 = 2 * network_1.thermal_cond * network_1.thickness[0] / \
(m_1 * network_1.fluid.c_f)
beta_2 = 2 * network_2.thermal_cond * network_2.thickness[0] / \
(m_2 * network_2.fluid.c_f)
xi_1 = beta_1 * z / (2 * np.sqrt(network_1.thermal_diff * t))
xi_2 = beta_2 * z / (2 * np.sqrt(network_2.thermal_diff * t))
Theta_1 = erf(xi_1)
Theta_2 = erf(xi_2)
# difference between manual and automatic construction
diff_1 = Theta_1 - network_1.calculate_temperature(self.fluid, 0,
distance, time)
diff_2 = Theta_2 - network_2.calculate_temperature(self.fluid, 0,
distance, time)
self.assertAlmostEqual((diff_1**2).sum() / (Theta_1**2).sum(), 0, 12)
self.assertAlmostEqual((diff_2**2).sum() / (Theta_2**2).sum(), 0, 12)
def test_calculate_temperature(self):
"""Test calculate_temperature by constructing manual the equations."""
# operational parameters for temperature
t_end = 86400 * 365.25 * 20
time = t_end * np.linspace(1.0 / 100, 1.0, 100)
distance = np.linspace(0.0, 100.0, 100)
z, t = np.meshgrid(distance, time)
network_1, network_2 = self.networks_with_flow()
# create parameters for temperature manually
chi_1 = np.array([1.0, 1.0, 1.0, 0.5, 0.5, 1.0])
chi_2 = np.ones(network_2.n_segments)
chi_2[4:8] = 0.5
chi_2[9:13] = 0.5
m_1 = network_1.mass_flow
m_2 = network_2.mass_flow
beta_1 = 2 * network_1.thermal_cond * network_1.thickness / \
(m_1 * network_1.fluid.c_f)
beta_2 = 2 * network_2.thermal_cond * network_2.thickness / \
(m_2 * network_2.fluid.c_f)
xi_1 = np.einsum('i,jk->ijk', beta_1 * network_1.length,
1 / (2 * np.sqrt(network_1.thermal_diff * t)))
xi_2 = np.einsum('i,jk->ijk', beta_2 * network_2.length,
1 / (2 * np.sqrt(network_2.thermal_diff * t)))
a = xi_1[[0, 2, 4], :, :].sum(axis=0)
b = xi_1[[0, 1, 3], :, :].sum(axis=0)
xi_seg = beta_1[-1] * z / (2 * np.sqrt(network_1.thermal_diff * t))
Theta_1 = chi_1[0] * chi_1[2] * chi_1[4] * erf(a + xi_seg) + \
chi_1[0] * chi_1[1] * chi_1[3] * erf(b + xi_seg)
a = xi_2[[0, 1, 2, 5, 10], :, :].sum(axis=0)
b = xi_2[[0, 1, 4, 7, 10], :, :].sum(axis=0)
c = xi_2[[0, 3, 6, 7, 10], :, :].sum(axis=0)
d = xi_2[[0, 3, 6, 9, 12], :, :].sum(axis=0)
e = xi_2[[0, 3, 8, 11, 12], :, :].sum(axis=0)
f = xi_2[[0, 1, 4, 9, 12], :, :].sum(axis=0)
C_1 = chi_2[0] * chi_2[1] * chi_2[2] * chi_2[5] * chi_2[10]
C_2 = chi_2[0] * chi_2[1] * chi_2[4] * chi_2[7] * chi_2[10]
C_3 = chi_2[0] * chi_2[3] * chi_2[6] * chi_2[7] * chi_2[10]
C_4 = chi_2[0] * chi_2[3] * chi_2[6] * chi_2[9] * chi_2[12]
C_5 = chi_2[0] * chi_2[3] * chi_2[8] * chi_2[11] * chi_2[12]
C_6 = chi_2[0] * chi_2[1] * chi_2[4] * chi_2[9] * chi_2[12]
xi_seg = beta_2[-1] * z / (2 * np.sqrt(network_2.thermal_diff * t))
Theta_2 = C_1 * erf(a + xi_seg) + C_2 * erf(b + xi_seg) + \
C_3 * erf(c + xi_seg) + C_4 * erf(d + xi_seg) + \
C_5 * erf(e + xi_seg) + C_6 * erf(f + xi_seg)
# difference between manual and automatic construction
diff_1 = Theta_1 - network_1.calculate_temperature(self.fluid, 5,
distance, time)
diff_2 = Theta_2 - network_2.calculate_temperature(self.fluid, 13,
distance, time)
self.assertAlmostEqual((diff_1**2).sum() / (Theta_1**2).sum(), 0, 12)
self.assertAlmostEqual((diff_2**2).sum() / (Theta_2**2).sum(), 0, 12)
if __name__ == '__main__':
unittest.main()
|
[
"networkx.MultiDiGraph",
"networkx.is_isomorphic",
"numpy.sqrt",
"numpy.ones",
"dfn.FractureNetworkThermal",
"numpy.array",
"numpy.linspace",
"scipy.special.erf",
"dfn.Fluid",
"unittest.main",
"numpy.meshgrid",
"copy.copy"
] |
[((10801, 10816), 'unittest.main', 'unittest.main', ([], {}), '()\n', (10814, 10816), False, 'import unittest\n'), ((432, 488), 'dfn.Fluid', 'Fluid', ([], {'density': 'rho_w', 'viscosity': 'mu_w', 'heat_capacity': 'cp_w'}), '(density=rho_w, viscosity=mu_w, heat_capacity=cp_w)\n', (437, 488), False, 'from dfn import Fluid, FractureNetworkThermal\n'), ((880, 939), 'dfn.FractureNetworkThermal', 'FractureNetworkThermal', (['conn_1', 'L_1', 'H_1', 'w_1', 'k_r', 'alpha_r'], {}), '(conn_1, L_1, H_1, w_1, k_r, alpha_r)\n', (902, 939), False, 'from dfn import Fluid, FractureNetworkThermal\n'), ((1355, 1414), 'dfn.FractureNetworkThermal', 'FractureNetworkThermal', (['conn_2', 'L_2', 'H_2', 'w_2', 'k_r', 'alpha_r'], {}), '(conn_2, L_2, H_2, w_2, k_r, alpha_r)\n', (1377, 1414), False, 'from dfn import Fluid, FractureNetworkThermal\n'), ((3500, 3517), 'networkx.MultiDiGraph', 'nx.MultiDiGraph', ([], {}), '()\n', (3515, 3517), True, 'import networkx as nx\n'), ((3814, 3831), 'networkx.MultiDiGraph', 'nx.MultiDiGraph', ([], {}), '()\n', (3829, 3831), True, 'import networkx as nx\n'), ((4418, 4456), 'networkx.is_isomorphic', 'nx.is_isomorphic', (['network_1.graph', 'G_1'], {}), '(network_1.graph, G_1)\n', (4434, 4456), True, 'import networkx as nx\n'), ((4483, 4521), 'networkx.is_isomorphic', 'nx.is_isomorphic', (['network_2.graph', 'G_2'], {}), '(network_2.graph, G_2)\n', (4499, 4521), True, 'import networkx as nx\n'), ((6565, 6593), 'numpy.linspace', 'np.linspace', (['(0.0)', '(100.0)', '(100)'], {}), '(0.0, 100.0, 100)\n', (6576, 6593), True, 'import numpy as np\n'), ((6609, 6636), 'numpy.meshgrid', 'np.meshgrid', (['distance', 'time'], {}), '(distance, time)\n', (6620, 6636), True, 'import numpy as np\n'), ((7209, 7218), 'scipy.special.erf', 'erf', (['xi_1'], {}), '(xi_1)\n', (7212, 7218), False, 'from scipy.special import erf\n'), ((7237, 7246), 'scipy.special.erf', 'erf', (['xi_2'], {}), '(xi_2)\n', (7240, 7246), False, 'from scipy.special import erf\n'), ((8050, 8078), 'numpy.linspace', 'np.linspace', (['(0.0)', '(100.0)', '(100)'], {}), '(0.0, 100.0, 100)\n', (8061, 8078), True, 'import numpy as np\n'), ((8094, 8121), 'numpy.meshgrid', 'np.meshgrid', (['distance', 'time'], {}), '(distance, time)\n', (8105, 8121), True, 'import numpy as np\n'), ((8250, 8290), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 0.5, 0.5, 1.0]'], {}), '([1.0, 1.0, 1.0, 0.5, 0.5, 1.0])\n', (8258, 8290), True, 'import numpy as np\n'), ((8307, 8336), 'numpy.ones', 'np.ones', (['network_2.n_segments'], {}), '(network_2.n_segments)\n', (8314, 8336), True, 'import numpy as np\n'), ((1562, 1587), 'copy.copy', 'copy.copy', (['self.network_1'], {}), '(self.network_1)\n', (1571, 1587), False, 'import copy\n'), ((1589, 1614), 'copy.copy', 'copy.copy', (['self.network_2'], {}), '(self.network_2)\n', (1598, 1614), False, 'import copy\n'), ((6512, 6544), 'numpy.linspace', 'np.linspace', (['(1.0 / 100)', '(1.0)', '(100)'], {}), '(1.0 / 100, 1.0, 100)\n', (6523, 6544), True, 'import numpy as np\n'), ((7997, 8029), 'numpy.linspace', 'np.linspace', (['(1.0 / 100)', '(1.0)', '(100)'], {}), '(1.0 / 100, 1.0, 100)\n', (8008, 8029), True, 'import numpy as np\n'), ((7083, 7118), 'numpy.sqrt', 'np.sqrt', (['(network_1.thermal_diff * t)'], {}), '(network_1.thermal_diff * t)\n', (7090, 7118), True, 'import numpy as np\n'), ((7153, 7188), 'numpy.sqrt', 'np.sqrt', (['(network_2.thermal_diff * t)'], {}), '(network_2.thermal_diff * t)\n', (7160, 7188), True, 'import numpy as np\n'), ((9083, 9118), 'numpy.sqrt', 'np.sqrt', (['(network_1.thermal_diff * 
t)'], {}), '(network_1.thermal_diff * t)\n', (9090, 9118), True, 'import numpy as np\n'), ((9172, 9187), 'scipy.special.erf', 'erf', (['(a + xi_seg)'], {}), '(a + xi_seg)\n', (9175, 9187), False, 'from scipy.special import erf\n'), ((9237, 9252), 'scipy.special.erf', 'erf', (['(b + xi_seg)'], {}), '(b + xi_seg)\n', (9240, 9252), False, 'from scipy.special import erf\n'), ((10022, 10057), 'numpy.sqrt', 'np.sqrt', (['(network_2.thermal_diff * t)'], {}), '(network_2.thermal_diff * t)\n', (10029, 10057), True, 'import numpy as np\n'), ((10233, 10248), 'scipy.special.erf', 'erf', (['(f + xi_seg)'], {}), '(f + xi_seg)\n', (10236, 10248), False, 'from scipy.special import erf\n'), ((8777, 8812), 'numpy.sqrt', 'np.sqrt', (['(network_1.thermal_diff * t)'], {}), '(network_1.thermal_diff * t)\n', (8784, 8812), True, 'import numpy as np\n'), ((8914, 8949), 'numpy.sqrt', 'np.sqrt', (['(network_2.thermal_diff * t)'], {}), '(network_2.thermal_diff * t)\n', (8921, 8949), True, 'import numpy as np\n'), ((10209, 10224), 'scipy.special.erf', 'erf', (['(e + xi_seg)'], {}), '(e + xi_seg)\n', (10212, 10224), False, 'from scipy.special import erf\n'), ((10171, 10186), 'scipy.special.erf', 'erf', (['(d + xi_seg)'], {}), '(d + xi_seg)\n', (10174, 10186), False, 'from scipy.special import erf\n'), ((10147, 10162), 'scipy.special.erf', 'erf', (['(c + xi_seg)'], {}), '(c + xi_seg)\n', (10150, 10162), False, 'from scipy.special import erf\n'), ((10085, 10100), 'scipy.special.erf', 'erf', (['(a + xi_seg)'], {}), '(a + xi_seg)\n', (10088, 10100), False, 'from scipy.special import erf\n'), ((10109, 10124), 'scipy.special.erf', 'erf', (['(b + xi_seg)'], {}), '(b + xi_seg)\n', (10112, 10124), False, 'from scipy.special import erf\n')]
|
import numpy as np
import math
from scipy.optimize import curve_fit
def calc_lorentzian(CestCurveS, x_calcentires, mask, config):
(rows, colums, z_slices, entires) = CestCurveS.shape
lorenzian = {key: np.zeros((rows, colums, z_slices), dtype=float) for key in config.lorenzian_keys}
for k in range(z_slices):
for i in range(rows):
for j in range(colums):
if mask[i, j, k] != 0:
params = calc_lorenzian_pixel(CestCurveS[i, j, k, :], x_calcentires, config.Lorenzian['MT_f'],
config.Lorenzian['NOE1_f'], config.Lorenzian['NOE2_f'],
config.Lorenzian['OH_f'], config.Lorenzian['NH_f'])
if params is None:
continue
dic = {
'OH_a': params[3],
'OH_w': params[4],
'NH_a': params[5],
'NH_w': params[6],
'NOE1_a': params[7],
'NOE1_w': params[8],
'NOE2_a': params[9],
'NOE2_w': params[10],
'MT_a': params[11],
'MT_w': params[12],
}
for key in config.lorenzian_keys:
lorenzian[key][i, j, k] = dic[key]
return lorenzian
def calc_lorenzian_pixel(values, x_calcentires, MT_f, NOE1_f, NOE2_f, OH_f, NH_f):
    # wassr_offset, since the Z-spectra were already corrected beforehand
fit = lorenz_like_matlab(wassr_offset=0, MT_f=MT_f, NOE1_f=NOE1_f, NOE2_f=NOE2_f, OH_f=OH_f, NH_f=NH_f)
try:
param, param_cov = curve_fit(fit, x_calcentires, values, bounds=([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10,
10]))
except RuntimeError:
param = None
return param
def lorenz_like_matlab(wassr_offset, MT_f: float = - 2.43, NOE1_f: float = - 1, NOE2_f: float = - 2.6,
OH_f: float = + 1.4, NH_f: float = + 3.2):
    # X_f = frequency of X
#ret = (a + ak) - (a * ((b ** 2) / 4) / (((b ** 2) / 4) + (x - wassr_offset) ** 2))
pass
def one_lorenz(x, amplitude, width, wassr_offset, frequenz):
return amplitude * ((width ** 2) / 4) / (((width ** 2) / 4) + (x - (wassr_offset + frequenz)) ** 2)
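# Hedged completion sketch (not the original implementation): lorenz_like_matlab above
# still ends in `pass`, so curve_fit would receive None. One way to build the 13-parameter
# model expected by calc_lorenzian_pixel is to sum one_lorenz terms for the water pool and
# the five CEST/MT pools; the parameter order below is an assumption inferred from the
# params[3..12] indexing in calc_lorentzian (0: constant offset, 1-2: water pool).
#
#   def lorenz_like_matlab(wassr_offset, MT_f=-2.43, NOE1_f=-1, NOE2_f=-2.6, OH_f=1.4, NH_f=3.2):
#       def fit(x, offset, water_a, water_w, OH_a, OH_w, NH_a, NH_w,
#               NOE1_a, NOE1_w, NOE2_a, NOE2_w, MT_a, MT_w):
#           return offset - (one_lorenz(x, water_a, water_w, wassr_offset, 0)
#                            + one_lorenz(x, OH_a, OH_w, wassr_offset, OH_f)
#                            + one_lorenz(x, NH_a, NH_w, wassr_offset, NH_f)
#                            + one_lorenz(x, NOE1_a, NOE1_w, wassr_offset, NOE1_f)
#                            + one_lorenz(x, NOE2_a, NOE2_w, wassr_offset, NOE2_f)
#                            + one_lorenz(x, MT_a, MT_w, wassr_offset, MT_f))
#       return fit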
|
[
"scipy.optimize.curve_fit",
"numpy.zeros"
] |
[((212, 259), 'numpy.zeros', 'np.zeros', (['(rows, colums, z_slices)'], {'dtype': 'float'}), '((rows, colums, z_slices), dtype=float)\n', (220, 259), True, 'import numpy as np\n'), ((1741, 1886), 'scipy.optimize.curve_fit', 'curve_fit', (['fit', 'x_calcentires', 'values'], {'bounds': '([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [10, 10, 10, 10, 10, 10, 10, 10, \n 10, 10, 10, 10, 10])'}), '(fit, x_calcentires, values, bounds=([0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0], [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10]))\n', (1750, 1886), False, 'from scipy.optimize import curve_fit\n')]
|
import backend as F
import numpy as np
import scipy as sp
import dgl
from dgl import utils
import unittest
from numpy.testing import assert_array_equal
np.random.seed(42)
def generate_rand_graph(n):
arr = (sp.sparse.random(n, n, density=0.1, format='coo') != 0).astype(np.int64)
return dgl.DGLGraph(arr, readonly=True)
def test_create_full():
g = generate_rand_graph(100)
full_nf = dgl.contrib.sampling.sampler.create_full_nodeflow(g, 5)
assert full_nf.number_of_nodes() == g.number_of_nodes() * 6
assert full_nf.number_of_edges() == g.number_of_edges() * 5
def test_1neighbor_sampler_all():
g = generate_rand_graph(100)
# In this case, NeighborSampling simply gets the neighborhood of a single vertex.
for i, subg in enumerate(dgl.contrib.sampling.NeighborSampler(
g, 1, g.number_of_nodes(), neighbor_type='in', num_workers=4)):
seed_ids = subg.layer_parent_nid(-1)
assert len(seed_ids) == 1
src, dst, eid = g.in_edges(seed_ids, form='all')
assert subg.number_of_nodes() == len(src) + 1
assert subg.number_of_edges() == len(src)
assert seed_ids == subg.layer_parent_nid(-1)
child_src, child_dst, child_eid = subg.in_edges(subg.layer_nid(-1), form='all')
assert F.array_equal(child_src, subg.layer_nid(0))
src1 = subg.map_to_parent_nid(child_src)
assert F.array_equal(src1, src)
def is_sorted(arr):
return np.sum(np.sort(arr) == arr, 0) == len(arr)
def verify_subgraph(g, subg, seed_id):
seed_id = F.asnumpy(seed_id)
seeds = F.asnumpy(subg.map_to_parent_nid(subg.layer_nid(-1)))
assert seed_id in seeds
child_seed = F.asnumpy(subg.layer_nid(-1))[seeds == seed_id]
src, dst, eid = g.in_edges(seed_id, form='all')
child_src, child_dst, child_eid = subg.in_edges(child_seed, form='all')
child_src = F.asnumpy(child_src)
# We don't allow duplicate elements in the neighbor list.
assert(len(np.unique(child_src)) == len(child_src))
# The neighbor list also needs to be sorted.
assert(is_sorted(child_src))
    # A neighbor in the subgraph must also exist in the parent graph.
src = F.asnumpy(src)
for i in subg.map_to_parent_nid(child_src):
assert F.asnumpy(i) in src
def test_1neighbor_sampler():
g = generate_rand_graph(100)
# In this case, NeighborSampling simply gets the neighborhood of a single vertex.
for subg in dgl.contrib.sampling.NeighborSampler(g, 1, 5, neighbor_type='in',
num_workers=4):
seed_ids = subg.layer_parent_nid(-1)
assert len(seed_ids) == 1
assert subg.number_of_nodes() <= 6
assert subg.number_of_edges() <= 5
verify_subgraph(g, subg, seed_ids)
def test_prefetch_neighbor_sampler():
g = generate_rand_graph(100)
# In this case, NeighborSampling simply gets the neighborhood of a single vertex.
for subg in dgl.contrib.sampling.NeighborSampler(g, 1, 5, neighbor_type='in',
num_workers=4, prefetch=True):
seed_ids = subg.layer_parent_nid(-1)
assert len(seed_ids) == 1
assert subg.number_of_nodes() <= 6
assert subg.number_of_edges() <= 5
verify_subgraph(g, subg, seed_ids)
def test_10neighbor_sampler_all():
g = generate_rand_graph(100)
# In this case, NeighborSampling simply gets the neighborhood of a single vertex.
for subg in dgl.contrib.sampling.NeighborSampler(g, 10, g.number_of_nodes(),
neighbor_type='in', num_workers=4):
seed_ids = subg.layer_parent_nid(-1)
assert F.array_equal(seed_ids, subg.map_to_parent_nid(subg.layer_nid(-1)))
src, dst, eid = g.in_edges(seed_ids, form='all')
child_src, child_dst, child_eid = subg.in_edges(subg.layer_nid(-1), form='all')
src1 = subg.map_to_parent_nid(child_src)
assert F.array_equal(src1, src)
def check_10neighbor_sampler(g, seeds):
# In this case, NeighborSampling simply gets the neighborhood of a single vertex.
for subg in dgl.contrib.sampling.NeighborSampler(g, 10, 5, neighbor_type='in',
num_workers=4, seed_nodes=seeds):
seed_ids = subg.layer_parent_nid(-1)
assert subg.number_of_nodes() <= 6 * len(seed_ids)
assert subg.number_of_edges() <= 5 * len(seed_ids)
for seed_id in seed_ids:
verify_subgraph(g, subg, seed_id)
def test_10neighbor_sampler():
g = generate_rand_graph(100)
check_10neighbor_sampler(g, None)
check_10neighbor_sampler(g, seeds=np.unique(np.random.randint(0, g.number_of_nodes(),
size=int(g.number_of_nodes() / 10))))
def _test_layer_sampler(prefetch=False):
g = generate_rand_graph(100)
nid = g.nodes()
src, dst, eid = g.all_edges(form='all', order='eid')
n_batches = 5
batch_size = 50
seed_batches = [np.sort(np.random.choice(F.asnumpy(nid), batch_size, replace=False))
for i in range(n_batches)]
seed_nodes = np.hstack(seed_batches)
layer_sizes = [50] * 3
LayerSampler = getattr(dgl.contrib.sampling, 'LayerSampler')
sampler = LayerSampler(g, batch_size, layer_sizes, 'in',
seed_nodes=seed_nodes, num_workers=4, prefetch=prefetch)
for sub_g in sampler:
assert all(sub_g.layer_size(i) < size for i, size in enumerate(layer_sizes))
sub_nid = F.arange(0, sub_g.number_of_nodes())
assert all(np.all(np.isin(F.asnumpy(sub_g.layer_nid(i)), F.asnumpy(sub_nid)))
for i in range(sub_g.num_layers))
assert np.all(np.isin(F.asnumpy(sub_g.map_to_parent_nid(sub_nid)),
F.asnumpy(nid)))
sub_eid = F.arange(0, sub_g.number_of_edges())
assert np.all(np.isin(F.asnumpy(sub_g.map_to_parent_eid(sub_eid)),
F.asnumpy(eid)))
assert any(np.all(np.sort(F.asnumpy(sub_g.layer_parent_nid(-1))) == seed_batch)
for seed_batch in seed_batches)
sub_src, sub_dst = sub_g.all_edges(order='eid')
for i in range(sub_g.num_blocks):
block_eid = sub_g.block_eid(i)
block_src = sub_g.map_to_parent_nid(F.gather_row(sub_src, block_eid))
block_dst = sub_g.map_to_parent_nid(F.gather_row(sub_dst, block_eid))
block_parent_eid = sub_g.block_parent_eid(i)
block_parent_src = F.gather_row(src, block_parent_eid)
block_parent_dst = F.gather_row(dst, block_parent_eid)
assert np.all(F.asnumpy(block_src == block_parent_src))
n_layers = sub_g.num_layers
sub_n = sub_g.number_of_nodes()
assert sum(F.shape(sub_g.layer_nid(i))[0] for i in range(n_layers)) == sub_n
n_blocks = sub_g.num_blocks
sub_m = sub_g.number_of_edges()
assert sum(F.shape(sub_g.block_eid(i))[0] for i in range(n_blocks)) == sub_m
def test_layer_sampler():
_test_layer_sampler()
_test_layer_sampler(prefetch=True)
@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="Error occurred when multiprocessing")
def test_nonuniform_neighbor_sampler():
# Construct a graph with
# (1) A path (0, 1, ..., 99) with weight 1
# (2) A bunch of random edges with weight 0.
edges = []
for i in range(99):
edges.append((i, i + 1))
for i in range(1000):
edge = (np.random.randint(100), np.random.randint(100))
if edge not in edges:
edges.append(edge)
src, dst = zip(*edges)
g = dgl.DGLGraph()
g.add_nodes(100)
g.add_edges(src, dst)
g.readonly()
g.edata['w'] = F.cat([
F.ones((99,), F.float64, F.cpu()),
F.zeros((len(edges) - 99,), F.float64, F.cpu())], 0)
# Test 1-neighbor NodeFlow with 99 as target node.
# The generated NodeFlow should only contain node i on layer i.
sampler = dgl.contrib.sampling.NeighborSampler(
g, 1, 1, 99, 'in', transition_prob='w', seed_nodes=[99])
nf = next(iter(sampler))
assert nf.num_layers == 100
for i in range(nf.num_layers):
assert nf.layer_size(i) == 1
assert F.asnumpy(nf.layer_parent_nid(i)[0]) == i
# Test the reverse direction
sampler = dgl.contrib.sampling.NeighborSampler(
g, 1, 1, 99, 'out', transition_prob='w', seed_nodes=[0])
nf = next(iter(sampler))
assert nf.num_layers == 100
for i in range(nf.num_layers):
assert nf.layer_size(i) == 1
assert F.asnumpy(nf.layer_parent_nid(i)[0]) == 99 - i
def test_setseed():
g = generate_rand_graph(100)
nids = []
dgl.random.seed(42)
for subg in dgl.contrib.sampling.NeighborSampler(
g, 5, 3, num_hops=2, neighbor_type='in', num_workers=1):
nids.append(
tuple(tuple(F.asnumpy(subg.layer_parent_nid(i))) for i in range(3)))
# reinitialize
dgl.random.seed(42)
for i, subg in enumerate(dgl.contrib.sampling.NeighborSampler(
g, 5, 3, num_hops=2, neighbor_type='in', num_workers=1)):
item = tuple(tuple(F.asnumpy(subg.layer_parent_nid(i))) for i in range(3))
assert item == nids[i]
for i, subg in enumerate(dgl.contrib.sampling.NeighborSampler(
g, 5, 3, num_hops=2, neighbor_type='in', num_workers=4)):
pass
def check_head_tail(g):
lsrc, ldst, leid = g.all_edges(form='all', order='eid')
lsrc = np.unique(F.asnumpy(lsrc))
head_nid = np.unique(F.asnumpy(g.head_nid))
assert len(head_nid) == len(g.head_nid)
np.testing.assert_equal(lsrc, head_nid)
ldst = np.unique(F.asnumpy(ldst))
tail_nid = np.unique(F.asnumpy(g.tail_nid))
assert len(tail_nid) == len(g.tail_nid)
np.testing.assert_equal(tail_nid, ldst)
def check_negative_sampler(mode, exclude_positive, neg_size):
g = generate_rand_graph(100)
num_edges = g.number_of_edges()
etype = np.random.randint(0, 10, size=g.number_of_edges(), dtype=np.int64)
g.edata['etype'] = F.copy_to(F.tensor(etype), F.cpu())
pos_gsrc, pos_gdst, pos_geid = g.all_edges(form='all', order='eid')
pos_map = {}
for i in range(len(pos_geid)):
pos_d = int(F.asnumpy(pos_gdst[i]))
pos_e = int(F.asnumpy(pos_geid[i]))
pos_map[(pos_d, pos_e)] = int(F.asnumpy(pos_gsrc[i]))
EdgeSampler = getattr(dgl.contrib.sampling, 'EdgeSampler')
# Test the homogeneous graph.
batch_size = 50
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
negative_mode=mode,
reset=False,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
pos_lsrc, pos_ldst, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert_array_equal(F.asnumpy(F.gather_row(pos_edges.parent_eid, pos_leid)),
F.asnumpy(g.edge_ids(F.gather_row(pos_edges.parent_nid, pos_lsrc),
F.gather_row(pos_edges.parent_nid, pos_ldst))))
neg_lsrc, neg_ldst, neg_leid = neg_edges.all_edges(form='all', order='eid')
neg_src = F.gather_row(neg_edges.parent_nid, neg_lsrc)
neg_dst = F.gather_row(neg_edges.parent_nid, neg_ldst)
neg_eid = F.gather_row(neg_edges.parent_eid, neg_leid)
for i in range(len(neg_eid)):
neg_d = int(F.asnumpy(neg_dst)[i])
neg_e = int(F.asnumpy(neg_eid)[i])
assert (neg_d, neg_e) in pos_map
if exclude_positive:
assert int(F.asnumpy(neg_src[i])) != pos_map[(neg_d, neg_e)]
check_head_tail(neg_edges)
pos_tails = F.gather_row(pos_edges.parent_nid, pos_edges.tail_nid)
neg_tails = F.gather_row(neg_edges.parent_nid, neg_edges.tail_nid)
pos_tails = np.sort(F.asnumpy(pos_tails))
neg_tails = np.sort(F.asnumpy(neg_tails))
np.testing.assert_equal(pos_tails, neg_tails)
exist = neg_edges.edata['false_neg']
if exclude_positive:
assert np.sum(F.asnumpy(exist) == 0) == len(exist)
else:
assert F.array_equal(g.has_edges_between(neg_src, neg_dst), exist)
total_samples += batch_size
assert total_samples <= num_edges
# check replacement = True
# with reset = False (default setting)
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=True,
reset=False,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) == batch_size
total_samples += len(pos_leid)
assert total_samples == num_edges
# check replacement = False
# with reset = False (default setting)
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=False,
reset=False,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) == batch_size
total_samples += len(pos_leid)
assert total_samples == num_edges
# check replacement = True
# with reset = True
total_samples = 0
max_samples = 2 * num_edges
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=True,
reset=True,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) <= batch_size
total_samples += len(pos_leid)
if (total_samples >= max_samples):
break
assert total_samples >= max_samples
# check replacement = False
# with reset = True
total_samples = 0
max_samples = 2 * num_edges
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=False,
reset=True,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) <= batch_size
total_samples += len(pos_leid)
if (total_samples >= max_samples):
break
assert total_samples >= max_samples
# Test the knowledge graph.
total_samples = 0
for _, neg_edges in EdgeSampler(g, batch_size,
negative_mode=mode,
reset=False,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
relations=g.edata['etype'],
return_false_neg=True):
neg_lsrc, neg_ldst, neg_leid = neg_edges.all_edges(form='all', order='eid')
neg_src = F.gather_row(neg_edges.parent_nid, neg_lsrc)
neg_dst = F.gather_row(neg_edges.parent_nid, neg_ldst)
neg_eid = F.gather_row(neg_edges.parent_eid, neg_leid)
exists = neg_edges.edata['false_neg']
neg_edges.edata['etype'] = F.gather_row(g.edata['etype'], neg_eid)
for i in range(len(neg_eid)):
u, v = F.asnumpy(neg_src[i]), F.asnumpy(neg_dst[i])
if g.has_edge_between(u, v):
eid = g.edge_id(u, v)
etype = g.edata['etype'][eid]
exist = neg_edges.edata['etype'][i] == etype
assert F.asnumpy(exists[i]) == F.asnumpy(exist)
total_samples += batch_size
assert total_samples <= num_edges
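# A minimal usage sketch, not part of the test suite and never called: it
# assumes the same dgl.contrib.sampling.EdgeSampler API exercised above and
# simply iterates positive/negative minibatches over a small random graph.
def _edge_sampler_usage_sketch():
    g = generate_rand_graph(100)
    sampler = getattr(dgl.contrib.sampling, 'EdgeSampler')(
        g, 50,                    # 50 positive edges per batch
        negative_mode='head',     # corrupt the head node of each positive edge
        neg_sample_size=10,       # 10 negatives per positive edge
        exclude_positive=False,
        return_false_neg=True)
    for pos_edges, neg_edges in sampler:
        # 'false_neg' flags sampled negatives that are real edges in g.
        assert 'false_neg' in neg_edges.edata
        break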
def check_weighted_negative_sampler(mode, exclude_positive, neg_size):
g = generate_rand_graph(100)
num_edges = g.number_of_edges()
num_nodes = g.number_of_nodes()
edge_weight = F.copy_to(F.tensor(np.full((num_edges,), 1, dtype=np.float32)), F.cpu())
node_weight = F.copy_to(F.tensor(np.full((num_nodes,), 1, dtype=np.float32)), F.cpu())
etype = np.random.randint(0, 10, size=num_edges, dtype=np.int64)
g.edata['etype'] = F.copy_to(F.tensor(etype), F.cpu())
pos_gsrc, pos_gdst, pos_geid = g.all_edges(form='all', order='eid')
pos_map = {}
for i in range(len(pos_geid)):
pos_d = int(F.asnumpy(pos_gdst[i]))
pos_e = int(F.asnumpy(pos_geid[i]))
pos_map[(pos_d, pos_e)] = int(F.asnumpy(pos_gsrc[i]))
EdgeSampler = getattr(dgl.contrib.sampling, 'EdgeSampler')
# Correctness check
# Test the homogeneous graph.
batch_size = 50
    # Test the homogeneous graph with edge weight provided.
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
reset=False,
edge_weight=edge_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
pos_lsrc, pos_ldst, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert_array_equal(F.asnumpy(F.gather_row(pos_edges.parent_eid, pos_leid)),
F.asnumpy(g.edge_ids(F.gather_row(pos_edges.parent_nid, pos_lsrc),
F.gather_row(pos_edges.parent_nid, pos_ldst))))
neg_lsrc, neg_ldst, neg_leid = neg_edges.all_edges(form='all', order='eid')
neg_src = F.gather_row(neg_edges.parent_nid, neg_lsrc)
neg_dst = F.gather_row(neg_edges.parent_nid, neg_ldst)
neg_eid = F.gather_row(neg_edges.parent_eid, neg_leid)
for i in range(len(neg_eid)):
neg_d = int(F.asnumpy(neg_dst[i]))
neg_e = int(F.asnumpy(neg_eid[i]))
assert (neg_d, neg_e) in pos_map
if exclude_positive:
assert int(F.asnumpy(neg_src[i])) != pos_map[(neg_d, neg_e)]
check_head_tail(neg_edges)
pos_tails = F.gather_row(pos_edges.parent_nid, pos_edges.tail_nid)
neg_tails = F.gather_row(neg_edges.parent_nid, neg_edges.tail_nid)
pos_tails = np.sort(F.asnumpy(pos_tails))
neg_tails = np.sort(F.asnumpy(neg_tails))
np.testing.assert_equal(pos_tails, neg_tails)
exist = neg_edges.edata['false_neg']
if exclude_positive:
assert np.sum(F.asnumpy(exist) == 0) == len(exist)
else:
assert F.array_equal(g.has_edges_between(neg_src, neg_dst), exist)
total_samples += batch_size
assert total_samples <= num_edges
    # Test the knowledge graph with edge weight provided.
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
reset=False,
edge_weight=edge_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
relations=g.edata['etype'],
return_false_neg=True):
neg_lsrc, neg_ldst, neg_leid = neg_edges.all_edges(form='all', order='eid')
neg_src = F.gather_row(neg_edges.parent_nid, neg_lsrc)
neg_dst = F.gather_row(neg_edges.parent_nid, neg_ldst)
neg_eid = F.gather_row(neg_edges.parent_eid, neg_leid)
exists = neg_edges.edata['false_neg']
neg_edges.edata['etype'] = F.gather_row(g.edata['etype'], neg_eid)
for i in range(len(neg_eid)):
u, v = F.asnumpy(neg_src[i]), F.asnumpy(neg_dst[i])
if g.has_edge_between(u, v):
eid = g.edge_id(u, v)
etype = g.edata['etype'][eid]
exist = neg_edges.edata['etype'][i] == etype
assert F.asnumpy(exists[i]) == F.asnumpy(exist)
total_samples += batch_size
assert total_samples <= num_edges
    # Test the knowledge graph with edge/node weight provided.
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
reset=False,
edge_weight=edge_weight,
node_weight=node_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
relations=g.edata['etype'],
return_false_neg=True):
neg_lsrc, neg_ldst, neg_leid = neg_edges.all_edges(form='all', order='eid')
neg_src = F.gather_row(neg_edges.parent_nid, neg_lsrc)
neg_dst = F.gather_row(neg_edges.parent_nid, neg_ldst)
neg_eid = F.gather_row(neg_edges.parent_eid, neg_leid)
exists = neg_edges.edata['false_neg']
neg_edges.edata['etype'] = F.gather_row(g.edata['etype'], neg_eid)
for i in range(len(neg_eid)):
u, v = F.asnumpy(neg_src[i]), F.asnumpy(neg_dst[i])
if g.has_edge_between(u, v):
eid = g.edge_id(u, v)
etype = g.edata['etype'][eid]
exist = neg_edges.edata['etype'][i] == etype
assert F.asnumpy(exists[i]) == F.asnumpy(exist)
total_samples += batch_size
assert total_samples <= num_edges
    # check replacement = True with non-uniform positive edge sampling
# with reset = False
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=True,
reset=False,
edge_weight=edge_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) == batch_size
total_samples += len(pos_leid)
assert total_samples == num_edges
    # check replacement = True with non-uniform positive edge sampling
# with reset = True
total_samples = 0
max_samples = 4 * num_edges
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=True,
reset=True,
edge_weight=edge_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) == batch_size
total_samples += len(pos_leid)
if total_samples >= max_samples:
break
assert total_samples == max_samples
    # check replacement = False with non-uniform positive/negative edge sampling
# reset = False
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=False,
reset=False,
edge_weight=edge_weight,
node_weight=node_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
relations=g.edata['etype'],
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) == batch_size
total_samples += len(pos_leid)
assert total_samples == num_edges
    # check replacement = False with non-uniform positive/negative edge sampling
# reset = True
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=False,
reset=True,
edge_weight=edge_weight,
node_weight=node_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
relations=g.edata['etype'],
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) == batch_size
total_samples += len(pos_leid)
if total_samples >= max_samples:
break
assert total_samples == max_samples
# Check Rate
dgl.random.seed(0)
g = generate_rand_graph(1000)
num_edges = g.number_of_edges()
num_nodes = g.number_of_nodes()
edge_weight = F.copy_to(F.tensor(np.full((num_edges,), 1, dtype=np.float32)), F.cpu())
edge_weight[0] = F.sum(edge_weight, dim=0)
node_weight = F.copy_to(F.tensor(np.full((num_nodes,), 1, dtype=np.float32)), F.cpu())
node_weight[-1] = F.sum(node_weight, dim=0) / 200
etype = np.random.randint(0, 20, size=num_edges, dtype=np.int64)
g.edata['etype'] = F.copy_to(F.tensor(etype), F.cpu())
# Test w/o node weight.
max_samples = num_edges // 5
total_samples = 0
    # Test the knowledge graph with edge weight provided.
edge_sampled = np.full((num_edges,), 0, dtype=np.int32)
node_sampled = np.full((num_nodes,), 0, dtype=np.int32)
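    # Expected rates for the loop below (no node_weight is passed, so negative
    # nodes are drawn uniformly): edge 0 carries a weight equal to the sum of
    # all other edge weights, i.e. roughly half of the total mass, so it should
    # make up ~50% of the positive samples, while the upper half of the edge
    # ids keep weight 1 each and together account for ~25%.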
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=True,
edge_weight=edge_weight,
shuffle=True,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=False,
relations=g.edata['etype'],
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
neg_lsrc, neg_ldst, _ = neg_edges.all_edges(form='all', order='eid')
if 'head' in mode:
neg_src = neg_edges.parent_nid[neg_lsrc]
np.add.at(node_sampled, F.asnumpy(neg_src), 1)
else:
neg_dst = neg_edges.parent_nid[neg_ldst]
np.add.at(node_sampled, F.asnumpy(neg_dst), 1)
np.add.at(edge_sampled, F.asnumpy(pos_edges.parent_eid[pos_leid]), 1)
total_samples += batch_size
if total_samples > max_samples:
break
# Check rate here
edge_rate_0 = edge_sampled[0] / edge_sampled.sum()
edge_tail_half_cnt = edge_sampled[edge_sampled.shape[0] // 2:-1].sum()
edge_rate_tail_half = edge_tail_half_cnt / edge_sampled.sum()
assert np.allclose(edge_rate_0, 0.5, atol=0.05)
assert np.allclose(edge_rate_tail_half, 0.25, atol=0.05)
node_rate_0 = node_sampled[0] / node_sampled.sum()
node_tail_half_cnt = node_sampled[node_sampled.shape[0] // 2:-1].sum()
node_rate_tail_half = node_tail_half_cnt / node_sampled.sum()
assert node_rate_0 < 0.02
assert np.allclose(node_rate_tail_half, 0.5, atol=0.02)
    # Test the knowledge graph with edge/node weight provided.
edge_sampled = np.full((num_edges,), 0, dtype=np.int32)
node_sampled = np.full((num_nodes,), 0, dtype=np.int32)
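    # With node_weight supplied below, the last node has weight
    # num_nodes / 200 = 5, so it should be drawn roughly 5x as often as an
    # average node; the node_rate assertions after the loop verify this.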
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=True,
edge_weight=edge_weight,
node_weight=node_weight,
shuffle=True,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=False,
relations=g.edata['etype'],
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
neg_lsrc, neg_ldst, _ = neg_edges.all_edges(form='all', order='eid')
if 'head' in mode:
neg_src = F.gather_row(neg_edges.parent_nid, neg_lsrc)
np.add.at(node_sampled, F.asnumpy(neg_src), 1)
else:
neg_dst = F.gather_row(neg_edges.parent_nid, neg_ldst)
np.add.at(node_sampled, F.asnumpy(neg_dst), 1)
np.add.at(edge_sampled, F.asnumpy(pos_edges.parent_eid[pos_leid]), 1)
total_samples += batch_size
if total_samples > max_samples:
break
# Check rate here
edge_rate_0 = edge_sampled[0] / edge_sampled.sum()
edge_tail_half_cnt = edge_sampled[edge_sampled.shape[0] // 2:-1].sum()
edge_rate_tail_half = edge_tail_half_cnt / edge_sampled.sum()
assert np.allclose(edge_rate_0, 0.5, atol=0.05)
assert np.allclose(edge_rate_tail_half, 0.25, atol=0.05)
node_rate = node_sampled[-1] / node_sampled.sum()
node_rate_a = np.average(node_sampled[:50]) / node_sampled.sum()
node_rate_b = np.average(node_sampled[50:100]) / node_sampled.sum()
    # As negative sampling does not produce duplicate nodes,
    # this test allows some variation in the sample rate.
assert np.allclose(node_rate, node_rate_a * 5, atol=0.002)
assert np.allclose(node_rate_a, node_rate_b, atol=0.0002)
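# A second sketch, also never called by the tests: non-uniform positive edge
# sampling through the edge_weight argument used in the checks above. The
# weight values are illustrative only.
def _weighted_edge_sampler_sketch():
    g = generate_rand_graph(100)
    num_edges = g.number_of_edges()
    edge_weight = F.copy_to(F.tensor(np.full((num_edges,), 1, dtype=np.float32)), F.cpu())
    # Give edge 0 half of the total weight so it is drawn far more often.
    edge_weight[0] = F.sum(edge_weight, dim=0)
    sampler = getattr(dgl.contrib.sampling, 'EdgeSampler')(
        g, 50,
        replacement=True,
        edge_weight=edge_weight,
        negative_mode='head',
        neg_sample_size=10,
        exclude_positive=False,
        return_false_neg=True)
    for pos_edges, _ in sampler:
        _, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
        assert len(pos_leid) == 50
        break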
def check_positive_edge_sampler():
g = generate_rand_graph(1000)
num_edges = g.number_of_edges()
edge_weight = F.copy_to(F.tensor(np.full((num_edges,), 1, dtype=np.float32)), F.cpu())
edge_weight[num_edges-1] = num_edges ** 3
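    # Even though the last edge receives an overwhelmingly large weight, the
    # assertions below expect every edge to be sampled exactly once per pass:
    # with reset=False the sampler is expected to exhaust the edge set, so the
    # weight only influences the order in which edges are drawn.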
EdgeSampler = getattr(dgl.contrib.sampling, 'EdgeSampler')
# Correctness check
# Test the homogeneous graph.
batch_size = 128
edge_sampled = np.full((num_edges,), 0, dtype=np.int32)
for pos_edges in EdgeSampler(g, batch_size,
reset=False,
edge_weight=edge_weight):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
np.add.at(edge_sampled, F.asnumpy(pos_edges.parent_eid[pos_leid]), 1)
truth = np.full((num_edges,), 1, dtype=np.int32)
edge_sampled = edge_sampled[:num_edges]
assert np.array_equal(truth, edge_sampled)
edge_sampled = np.full((num_edges,), 0, dtype=np.int32)
for pos_edges in EdgeSampler(g, batch_size,
reset=False,
shuffle=True,
edge_weight=edge_weight):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
np.add.at(edge_sampled, F.asnumpy(pos_edges.parent_eid[pos_leid]), 1)
truth = np.full((num_edges,), 1, dtype=np.int32)
edge_sampled = edge_sampled[:num_edges]
assert np.array_equal(truth, edge_sampled)
@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="TF doesn't support item assignment")
def test_negative_sampler():
check_negative_sampler('chunk-head', False, 10)
check_negative_sampler('head', True, 10)
check_negative_sampler('head', False, 10)
check_weighted_negative_sampler('chunk-head', False, 10)
check_weighted_negative_sampler('head', True, 10)
check_weighted_negative_sampler('head', False, 10)
check_positive_edge_sampler()
    # Disable this check for now. It might take too long.
#check_negative_sampler('head', False, 100)
if __name__ == '__main__':
test_create_full()
test_1neighbor_sampler_all()
test_10neighbor_sampler_all()
test_1neighbor_sampler()
test_10neighbor_sampler()
test_layer_sampler()
test_nonuniform_neighbor_sampler()
test_setseed()
test_negative_sampler()
|
[
"numpy.testing.assert_equal",
"numpy.hstack",
"unittest.skipIf",
"backend.array_equal",
"backend.sum",
"dgl.random.seed",
"numpy.sort",
"scipy.sparse.random",
"numpy.random.seed",
"dgl.DGLGraph",
"dgl.contrib.sampling.NeighborSampler",
"backend.tensor",
"dgl.contrib.sampling.sampler.create_full_nodeflow",
"numpy.allclose",
"backend.asnumpy",
"numpy.average",
"numpy.unique",
"numpy.random.randint",
"numpy.array_equal",
"backend.gather_row",
"numpy.full",
"backend.cpu"
] |
[((153, 171), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (167, 171), True, 'import numpy as np\n'), ((7157, 7264), 'unittest.skipIf', 'unittest.skipIf', (["(dgl.backend.backend_name == 'tensorflow')"], {'reason': '"""Error occured when multiprocessing"""'}), "(dgl.backend.backend_name == 'tensorflow', reason=\n 'Error occured when multiprocessing')\n", (7172, 7264), False, 'import unittest\n'), ((32993, 33100), 'unittest.skipIf', 'unittest.skipIf', (["(dgl.backend.backend_name == 'tensorflow')"], {'reason': '"""TF doesn\'t support item assignment"""'}), '(dgl.backend.backend_name == \'tensorflow\', reason=\n "TF doesn\'t support item assignment")\n', (33008, 33100), False, 'import unittest\n'), ((296, 328), 'dgl.DGLGraph', 'dgl.DGLGraph', (['arr'], {'readonly': '(True)'}), '(arr, readonly=True)\n', (308, 328), False, 'import dgl\n'), ((401, 456), 'dgl.contrib.sampling.sampler.create_full_nodeflow', 'dgl.contrib.sampling.sampler.create_full_nodeflow', (['g', '(5)'], {}), '(g, 5)\n', (450, 456), False, 'import dgl\n'), ((1542, 1560), 'backend.asnumpy', 'F.asnumpy', (['seed_id'], {}), '(seed_id)\n', (1551, 1560), True, 'import backend as F\n'), ((1865, 1885), 'backend.asnumpy', 'F.asnumpy', (['child_src'], {}), '(child_src)\n', (1874, 1885), True, 'import backend as F\n'), ((2163, 2177), 'backend.asnumpy', 'F.asnumpy', (['src'], {}), '(src)\n', (2172, 2177), True, 'import backend as F\n'), ((2427, 2512), 'dgl.contrib.sampling.NeighborSampler', 'dgl.contrib.sampling.NeighborSampler', (['g', '(1)', '(5)'], {'neighbor_type': '"""in"""', 'num_workers': '(4)'}), "(g, 1, 5, neighbor_type='in', num_workers=4\n )\n", (2463, 2512), False, 'import dgl\n'), ((2944, 3043), 'dgl.contrib.sampling.NeighborSampler', 'dgl.contrib.sampling.NeighborSampler', (['g', '(1)', '(5)'], {'neighbor_type': '"""in"""', 'num_workers': '(4)', 'prefetch': '(True)'}), "(g, 1, 5, neighbor_type='in',\n num_workers=4, prefetch=True)\n", (2980, 3043), False, 'import dgl\n'), ((4133, 4236), 'dgl.contrib.sampling.NeighborSampler', 'dgl.contrib.sampling.NeighborSampler', (['g', '(10)', '(5)'], {'neighbor_type': '"""in"""', 'num_workers': '(4)', 'seed_nodes': 'seeds'}), "(g, 10, 5, neighbor_type='in',\n num_workers=4, seed_nodes=seeds)\n", (4169, 4236), False, 'import dgl\n'), ((5169, 5192), 'numpy.hstack', 'np.hstack', (['seed_batches'], {}), '(seed_batches)\n', (5178, 5192), True, 'import numpy as np\n'), ((7683, 7697), 'dgl.DGLGraph', 'dgl.DGLGraph', ([], {}), '()\n', (7695, 7697), False, 'import dgl\n'), ((8032, 8129), 'dgl.contrib.sampling.NeighborSampler', 'dgl.contrib.sampling.NeighborSampler', (['g', '(1)', '(1)', '(99)', '"""in"""'], {'transition_prob': '"""w"""', 'seed_nodes': '[99]'}), "(g, 1, 1, 99, 'in', transition_prob='w',\n seed_nodes=[99])\n", (8068, 8129), False, 'import dgl\n'), ((8374, 8472), 'dgl.contrib.sampling.NeighborSampler', 'dgl.contrib.sampling.NeighborSampler', (['g', '(1)', '(1)', '(99)', '"""out"""'], {'transition_prob': '"""w"""', 'seed_nodes': '[0]'}), "(g, 1, 1, 99, 'out', transition_prob=\n 'w', seed_nodes=[0])\n", (8410, 8472), False, 'import dgl\n'), ((8747, 8766), 'dgl.random.seed', 'dgl.random.seed', (['(42)'], {}), '(42)\n', (8762, 8766), False, 'import dgl\n'), ((8783, 8880), 'dgl.contrib.sampling.NeighborSampler', 'dgl.contrib.sampling.NeighborSampler', (['g', '(5)', '(3)'], {'num_hops': '(2)', 'neighbor_type': '"""in"""', 'num_workers': '(1)'}), "(g, 5, 3, num_hops=2, neighbor_type=\n 'in', num_workers=1)\n", (8819, 8880), False, 'import dgl\n'), ((9016, 9035), 
'dgl.random.seed', 'dgl.random.seed', (['(42)'], {}), '(42)\n', (9031, 9035), False, 'import dgl\n'), ((9658, 9697), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['lsrc', 'head_nid'], {}), '(lsrc, head_nid)\n', (9681, 9697), True, 'import numpy as np\n'), ((9833, 9872), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['tail_nid', 'ldst'], {}), '(tail_nid, ldst)\n', (9856, 9872), True, 'import numpy as np\n'), ((17319, 17375), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)'], {'size': 'num_edges', 'dtype': 'np.int64'}), '(0, 10, size=num_edges, dtype=np.int64)\n', (17336, 17375), True, 'import numpy as np\n'), ((26687, 26705), 'dgl.random.seed', 'dgl.random.seed', (['(0)'], {}), '(0)\n', (26702, 26705), False, 'import dgl\n'), ((26924, 26949), 'backend.sum', 'F.sum', (['edge_weight'], {'dim': '(0)'}), '(edge_weight, dim=0)\n', (26929, 26949), True, 'import backend as F\n'), ((27107, 27163), 'numpy.random.randint', 'np.random.randint', (['(0)', '(20)'], {'size': 'num_edges', 'dtype': 'np.int64'}), '(0, 20, size=num_edges, dtype=np.int64)\n', (27124, 27163), True, 'import numpy as np\n'), ((27383, 27423), 'numpy.full', 'np.full', (['(num_edges,)', '(0)'], {'dtype': 'np.int32'}), '((num_edges,), 0, dtype=np.int32)\n', (27390, 27423), True, 'import numpy as np\n'), ((27443, 27483), 'numpy.full', 'np.full', (['(num_nodes,)', '(0)'], {'dtype': 'np.int32'}), '((num_nodes,), 0, dtype=np.int32)\n', (27450, 27483), True, 'import numpy as np\n'), ((28888, 28928), 'numpy.allclose', 'np.allclose', (['edge_rate_0', '(0.5)'], {'atol': '(0.05)'}), '(edge_rate_0, 0.5, atol=0.05)\n', (28899, 28928), True, 'import numpy as np\n'), ((28940, 28989), 'numpy.allclose', 'np.allclose', (['edge_rate_tail_half', '(0.25)'], {'atol': '(0.05)'}), '(edge_rate_tail_half, 0.25, atol=0.05)\n', (28951, 28989), True, 'import numpy as np\n'), ((29228, 29276), 'numpy.allclose', 'np.allclose', (['node_rate_tail_half', '(0.5)'], {'atol': '(0.02)'}), '(node_rate_tail_half, 0.5, atol=0.02)\n', (29239, 29276), True, 'import numpy as np\n'), ((29359, 29399), 'numpy.full', 'np.full', (['(num_edges,)', '(0)'], {'dtype': 'np.int32'}), '((num_edges,), 0, dtype=np.int32)\n', (29366, 29399), True, 'import numpy as np\n'), ((29419, 29459), 'numpy.full', 'np.full', (['(num_nodes,)', '(0)'], {'dtype': 'np.int32'}), '((num_nodes,), 0, dtype=np.int32)\n', (29426, 29459), True, 'import numpy as np\n'), ((30983, 31023), 'numpy.allclose', 'np.allclose', (['edge_rate_0', '(0.5)'], {'atol': '(0.05)'}), '(edge_rate_0, 0.5, atol=0.05)\n', (30994, 31023), True, 'import numpy as np\n'), ((31035, 31084), 'numpy.allclose', 'np.allclose', (['edge_rate_tail_half', '(0.25)'], {'atol': '(0.05)'}), '(edge_rate_tail_half, 0.25, atol=0.05)\n', (31046, 31084), True, 'import numpy as np\n'), ((31416, 31467), 'numpy.allclose', 'np.allclose', (['node_rate', '(node_rate_a * 5)'], {'atol': '(0.002)'}), '(node_rate, node_rate_a * 5, atol=0.002)\n', (31427, 31467), True, 'import numpy as np\n'), ((31479, 31529), 'numpy.allclose', 'np.allclose', (['node_rate_a', 'node_rate_b'], {'atol': '(0.0002)'}), '(node_rate_a, node_rate_b, atol=0.0002)\n', (31490, 31529), True, 'import numpy as np\n'), ((31936, 31976), 'numpy.full', 'np.full', (['(num_edges,)', '(0)'], {'dtype': 'np.int32'}), '((num_edges,), 0, dtype=np.int32)\n', (31943, 31976), True, 'import numpy as np\n'), ((32296, 32336), 'numpy.full', 'np.full', (['(num_edges,)', '(1)'], {'dtype': 'np.int32'}), '((num_edges,), 1, dtype=np.int32)\n', (32303, 32336), True, 'import numpy as 
np\n'), ((32392, 32427), 'numpy.array_equal', 'np.array_equal', (['truth', 'edge_sampled'], {}), '(truth, edge_sampled)\n', (32406, 32427), True, 'import numpy as np\n'), ((32448, 32488), 'numpy.full', 'np.full', (['(num_edges,)', '(0)'], {'dtype': 'np.int32'}), '((num_edges,), 0, dtype=np.int32)\n', (32455, 32488), True, 'import numpy as np\n'), ((32858, 32898), 'numpy.full', 'np.full', (['(num_edges,)', '(1)'], {'dtype': 'np.int32'}), '((num_edges,), 1, dtype=np.int32)\n', (32865, 32898), True, 'import numpy as np\n'), ((32954, 32989), 'numpy.array_equal', 'np.array_equal', (['truth', 'edge_sampled'], {}), '(truth, edge_sampled)\n', (32968, 32989), True, 'import numpy as np\n'), ((1388, 1412), 'backend.array_equal', 'F.array_equal', (['src1', 'src'], {}), '(src1, src)\n', (1401, 1412), True, 'import backend as F\n'), ((3965, 3989), 'backend.array_equal', 'F.array_equal', (['src1', 'src'], {}), '(src1, src)\n', (3978, 3989), True, 'import backend as F\n'), ((9065, 9162), 'dgl.contrib.sampling.NeighborSampler', 'dgl.contrib.sampling.NeighborSampler', (['g', '(5)', '(3)'], {'num_hops': '(2)', 'neighbor_type': '"""in"""', 'num_workers': '(1)'}), "(g, 5, 3, num_hops=2, neighbor_type=\n 'in', num_workers=1)\n", (9101, 9162), False, 'import dgl\n'), ((9317, 9414), 'dgl.contrib.sampling.NeighborSampler', 'dgl.contrib.sampling.NeighborSampler', (['g', '(5)', '(3)'], {'num_hops': '(2)', 'neighbor_type': '"""in"""', 'num_workers': '(4)'}), "(g, 5, 3, num_hops=2, neighbor_type=\n 'in', num_workers=4)\n", (9353, 9414), False, 'import dgl\n'), ((9545, 9560), 'backend.asnumpy', 'F.asnumpy', (['lsrc'], {}), '(lsrc)\n', (9554, 9560), True, 'import backend as F\n'), ((9587, 9608), 'backend.asnumpy', 'F.asnumpy', (['g.head_nid'], {}), '(g.head_nid)\n', (9596, 9608), True, 'import backend as F\n'), ((9720, 9735), 'backend.asnumpy', 'F.asnumpy', (['ldst'], {}), '(ldst)\n', (9729, 9735), True, 'import backend as F\n'), ((9762, 9783), 'backend.asnumpy', 'F.asnumpy', (['g.tail_nid'], {}), '(g.tail_nid)\n', (9771, 9783), True, 'import backend as F\n'), ((10118, 10133), 'backend.tensor', 'F.tensor', (['etype'], {}), '(etype)\n', (10126, 10133), True, 'import backend as F\n'), ((10135, 10142), 'backend.cpu', 'F.cpu', ([], {}), '()\n', (10140, 10142), True, 'import backend as F\n'), ((11419, 11463), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_nid', 'neg_lsrc'], {}), '(neg_edges.parent_nid, neg_lsrc)\n', (11431, 11463), True, 'import backend as F\n'), ((11482, 11526), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_nid', 'neg_ldst'], {}), '(neg_edges.parent_nid, neg_ldst)\n', (11494, 11526), True, 'import backend as F\n'), ((11545, 11589), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_eid', 'neg_leid'], {}), '(neg_edges.parent_eid, neg_leid)\n', (11557, 11589), True, 'import backend as F\n'), ((11933, 11987), 'backend.gather_row', 'F.gather_row', (['pos_edges.parent_nid', 'pos_edges.tail_nid'], {}), '(pos_edges.parent_nid, pos_edges.tail_nid)\n', (11945, 11987), True, 'import backend as F\n'), ((12008, 12062), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_nid', 'neg_edges.tail_nid'], {}), '(neg_edges.parent_nid, neg_edges.tail_nid)\n', (12020, 12062), True, 'import backend as F\n'), ((12171, 12216), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['pos_tails', 'neg_tails'], {}), '(pos_tails, neg_tails)\n', (12194, 12216), True, 'import numpy as np\n'), ((16230, 16274), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_nid', 'neg_lsrc'], {}), 
'(neg_edges.parent_nid, neg_lsrc)\n', (16242, 16274), True, 'import backend as F\n'), ((16293, 16337), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_nid', 'neg_ldst'], {}), '(neg_edges.parent_nid, neg_ldst)\n', (16305, 16337), True, 'import backend as F\n'), ((16356, 16400), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_eid', 'neg_leid'], {}), '(neg_edges.parent_eid, neg_leid)\n', (16368, 16400), True, 'import backend as F\n'), ((16482, 16521), 'backend.gather_row', 'F.gather_row', (["g.edata['etype']", 'neg_eid'], {}), "(g.edata['etype'], neg_eid)\n", (16494, 16521), True, 'import backend as F\n'), ((17207, 17214), 'backend.cpu', 'F.cpu', ([], {}), '()\n', (17212, 17214), True, 'import backend as F\n'), ((17298, 17305), 'backend.cpu', 'F.cpu', ([], {}), '()\n', (17303, 17305), True, 'import backend as F\n'), ((17409, 17424), 'backend.tensor', 'F.tensor', (['etype'], {}), '(etype)\n', (17417, 17424), True, 'import backend as F\n'), ((17426, 17433), 'backend.cpu', 'F.cpu', ([], {}), '()\n', (17431, 17433), True, 'import backend as F\n'), ((18858, 18902), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_nid', 'neg_lsrc'], {}), '(neg_edges.parent_nid, neg_lsrc)\n', (18870, 18902), True, 'import backend as F\n'), ((18921, 18965), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_nid', 'neg_ldst'], {}), '(neg_edges.parent_nid, neg_ldst)\n', (18933, 18965), True, 'import backend as F\n'), ((18984, 19028), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_eid', 'neg_leid'], {}), '(neg_edges.parent_eid, neg_leid)\n', (18996, 19028), True, 'import backend as F\n'), ((19372, 19426), 'backend.gather_row', 'F.gather_row', (['pos_edges.parent_nid', 'pos_edges.tail_nid'], {}), '(pos_edges.parent_nid, pos_edges.tail_nid)\n', (19384, 19426), True, 'import backend as F\n'), ((19447, 19501), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_nid', 'neg_edges.tail_nid'], {}), '(neg_edges.parent_nid, neg_edges.tail_nid)\n', (19459, 19501), True, 'import backend as F\n'), ((19610, 19655), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['pos_tails', 'neg_tails'], {}), '(pos_tails, neg_tails)\n', (19633, 19655), True, 'import numpy as np\n'), ((20681, 20725), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_nid', 'neg_lsrc'], {}), '(neg_edges.parent_nid, neg_lsrc)\n', (20693, 20725), True, 'import backend as F\n'), ((20744, 20788), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_nid', 'neg_ldst'], {}), '(neg_edges.parent_nid, neg_ldst)\n', (20756, 20788), True, 'import backend as F\n'), ((20807, 20851), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_eid', 'neg_leid'], {}), '(neg_edges.parent_eid, neg_leid)\n', (20819, 20851), True, 'import backend as F\n'), ((20933, 20972), 'backend.gather_row', 'F.gather_row', (["g.edata['etype']", 'neg_eid'], {}), "(g.edata['etype'], neg_eid)\n", (20945, 20972), True, 'import backend as F\n'), ((22193, 22237), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_nid', 'neg_lsrc'], {}), '(neg_edges.parent_nid, neg_lsrc)\n', (22205, 22237), True, 'import backend as F\n'), ((22256, 22300), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_nid', 'neg_ldst'], {}), '(neg_edges.parent_nid, neg_ldst)\n', (22268, 22300), True, 'import backend as F\n'), ((22319, 22363), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_eid', 'neg_leid'], {}), '(neg_edges.parent_eid, neg_leid)\n', (22331, 22363), True, 'import backend as F\n'), ((22445, 22484), 'backend.gather_row', 
'F.gather_row', (["g.edata['etype']", 'neg_eid'], {}), "(g.edata['etype'], neg_eid)\n", (22457, 22484), True, 'import backend as F\n'), ((26894, 26901), 'backend.cpu', 'F.cpu', ([], {}), '()\n', (26899, 26901), True, 'import backend as F\n'), ((27032, 27039), 'backend.cpu', 'F.cpu', ([], {}), '()\n', (27037, 27039), True, 'import backend as F\n'), ((27063, 27088), 'backend.sum', 'F.sum', (['node_weight'], {'dim': '(0)'}), '(node_weight, dim=0)\n', (27068, 27088), True, 'import backend as F\n'), ((27197, 27212), 'backend.tensor', 'F.tensor', (['etype'], {}), '(etype)\n', (27205, 27212), True, 'import backend as F\n'), ((27214, 27221), 'backend.cpu', 'F.cpu', ([], {}), '()\n', (27219, 27221), True, 'import backend as F\n'), ((31158, 31187), 'numpy.average', 'np.average', (['node_sampled[:50]'], {}), '(node_sampled[:50])\n', (31168, 31187), True, 'import numpy as np\n'), ((31227, 31259), 'numpy.average', 'np.average', (['node_sampled[50:100]'], {}), '(node_sampled[50:100])\n', (31237, 31259), True, 'import numpy as np\n'), ((31718, 31725), 'backend.cpu', 'F.cpu', ([], {}), '()\n', (31723, 31725), True, 'import backend as F\n'), ((1963, 1983), 'numpy.unique', 'np.unique', (['child_src'], {}), '(child_src)\n', (1972, 1983), True, 'import numpy as np\n'), ((2241, 2253), 'backend.asnumpy', 'F.asnumpy', (['i'], {}), '(i)\n', (2250, 2253), True, 'import backend as F\n'), ((6568, 6603), 'backend.gather_row', 'F.gather_row', (['src', 'block_parent_eid'], {}), '(src, block_parent_eid)\n', (6580, 6603), True, 'import backend as F\n'), ((6635, 6670), 'backend.gather_row', 'F.gather_row', (['dst', 'block_parent_eid'], {}), '(dst, block_parent_eid)\n', (6647, 6670), True, 'import backend as F\n'), ((7539, 7561), 'numpy.random.randint', 'np.random.randint', (['(100)'], {}), '(100)\n', (7556, 7561), True, 'import numpy as np\n'), ((7563, 7585), 'numpy.random.randint', 'np.random.randint', (['(100)'], {}), '(100)\n', (7580, 7585), True, 'import numpy as np\n'), ((10289, 10311), 'backend.asnumpy', 'F.asnumpy', (['pos_gdst[i]'], {}), '(pos_gdst[i])\n', (10298, 10311), True, 'import backend as F\n'), ((10333, 10355), 'backend.asnumpy', 'F.asnumpy', (['pos_geid[i]'], {}), '(pos_geid[i])\n', (10342, 10355), True, 'import backend as F\n'), ((10395, 10417), 'backend.asnumpy', 'F.asnumpy', (['pos_gsrc[i]'], {}), '(pos_gsrc[i])\n', (10404, 10417), True, 'import backend as F\n'), ((12091, 12111), 'backend.asnumpy', 'F.asnumpy', (['pos_tails'], {}), '(pos_tails)\n', (12100, 12111), True, 'import backend as F\n'), ((12141, 12161), 'backend.asnumpy', 'F.asnumpy', (['neg_tails'], {}), '(neg_tails)\n', (12150, 12161), True, 'import backend as F\n'), ((17162, 17204), 'numpy.full', 'np.full', (['(num_edges,)', '(1)'], {'dtype': 'np.float32'}), '((num_edges,), 1, dtype=np.float32)\n', (17169, 17204), True, 'import numpy as np\n'), ((17253, 17295), 'numpy.full', 'np.full', (['(num_nodes,)', '(1)'], {'dtype': 'np.float32'}), '((num_nodes,), 1, dtype=np.float32)\n', (17260, 17295), True, 'import numpy as np\n'), ((17580, 17602), 'backend.asnumpy', 'F.asnumpy', (['pos_gdst[i]'], {}), '(pos_gdst[i])\n', (17589, 17602), True, 'import backend as F\n'), ((17624, 17646), 'backend.asnumpy', 'F.asnumpy', (['pos_geid[i]'], {}), '(pos_geid[i])\n', (17633, 17646), True, 'import backend as F\n'), ((17686, 17708), 'backend.asnumpy', 'F.asnumpy', (['pos_gsrc[i]'], {}), '(pos_gsrc[i])\n', (17695, 17708), True, 'import backend as F\n'), ((19530, 19550), 'backend.asnumpy', 'F.asnumpy', (['pos_tails'], {}), '(pos_tails)\n', (19539, 19550), True, 
'import backend as F\n'), ((19580, 19600), 'backend.asnumpy', 'F.asnumpy', (['neg_tails'], {}), '(neg_tails)\n', (19589, 19600), True, 'import backend as F\n'), ((26849, 26891), 'numpy.full', 'np.full', (['(num_edges,)', '(1)'], {'dtype': 'np.float32'}), '((num_edges,), 1, dtype=np.float32)\n', (26856, 26891), True, 'import numpy as np\n'), ((26987, 27029), 'numpy.full', 'np.full', (['(num_nodes,)', '(1)'], {'dtype': 'np.float32'}), '((num_nodes,), 1, dtype=np.float32)\n', (26994, 27029), True, 'import numpy as np\n'), ((28518, 28559), 'backend.asnumpy', 'F.asnumpy', (['pos_edges.parent_eid[pos_leid]'], {}), '(pos_edges.parent_eid[pos_leid])\n', (28527, 28559), True, 'import backend as F\n'), ((30337, 30381), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_nid', 'neg_lsrc'], {}), '(neg_edges.parent_nid, neg_lsrc)\n', (30349, 30381), True, 'import backend as F\n'), ((30477, 30521), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_nid', 'neg_ldst'], {}), '(neg_edges.parent_nid, neg_ldst)\n', (30489, 30521), True, 'import backend as F\n'), ((30613, 30654), 'backend.asnumpy', 'F.asnumpy', (['pos_edges.parent_eid[pos_leid]'], {}), '(pos_edges.parent_eid[pos_leid])\n', (30622, 30654), True, 'import backend as F\n'), ((31673, 31715), 'numpy.full', 'np.full', (['(num_edges,)', '(1)'], {'dtype': 'np.float32'}), '((num_edges,), 1, dtype=np.float32)\n', (31680, 31715), True, 'import numpy as np\n'), ((32238, 32279), 'backend.asnumpy', 'F.asnumpy', (['pos_edges.parent_eid[pos_leid]'], {}), '(pos_edges.parent_eid[pos_leid])\n', (32247, 32279), True, 'import backend as F\n'), ((32800, 32841), 'backend.asnumpy', 'F.asnumpy', (['pos_edges.parent_eid[pos_leid]'], {}), '(pos_edges.parent_eid[pos_leid])\n', (32809, 32841), True, 'import backend as F\n'), ((212, 261), 'scipy.sparse.random', 'sp.sparse.random', (['n', 'n'], {'density': '(0.1)', 'format': '"""coo"""'}), "(n, n, density=0.1, format='coo')\n", (228, 261), True, 'import scipy as sp\n'), ((1452, 1464), 'numpy.sort', 'np.sort', (['arr'], {}), '(arr)\n', (1459, 1464), True, 'import numpy as np\n'), ((5061, 5075), 'backend.asnumpy', 'F.asnumpy', (['nid'], {}), '(nid)\n', (5070, 5075), True, 'import backend as F\n'), ((5840, 5854), 'backend.asnumpy', 'F.asnumpy', (['nid'], {}), '(nid)\n', (5849, 5854), True, 'import backend as F\n'), ((6017, 6031), 'backend.asnumpy', 'F.asnumpy', (['eid'], {}), '(eid)\n', (6026, 6031), True, 'import backend as F\n'), ((6363, 6395), 'backend.gather_row', 'F.gather_row', (['sub_src', 'block_eid'], {}), '(sub_src, block_eid)\n', (6375, 6395), True, 'import backend as F\n'), ((6445, 6477), 'backend.gather_row', 'F.gather_row', (['sub_dst', 'block_eid'], {}), '(sub_dst, block_eid)\n', (6457, 6477), True, 'import backend as F\n'), ((6698, 6738), 'backend.asnumpy', 'F.asnumpy', (['(block_src == block_parent_src)'], {}), '(block_src == block_parent_src)\n', (6707, 6738), True, 'import backend as F\n'), ((7823, 7830), 'backend.cpu', 'F.cpu', ([], {}), '()\n', (7828, 7830), True, 'import backend as F\n'), ((7880, 7887), 'backend.cpu', 'F.cpu', ([], {}), '()\n', (7885, 7887), True, 'import backend as F\n'), ((11078, 11122), 'backend.gather_row', 'F.gather_row', (['pos_edges.parent_eid', 'pos_leid'], {}), '(pos_edges.parent_eid, pos_leid)\n', (11090, 11122), True, 'import backend as F\n'), ((16579, 16600), 'backend.asnumpy', 'F.asnumpy', (['neg_src[i]'], {}), '(neg_src[i])\n', (16588, 16600), True, 'import backend as F\n'), ((16602, 16623), 'backend.asnumpy', 'F.asnumpy', (['neg_dst[i]'], {}), '(neg_dst[i])\n', 
(16611, 16623), True, 'import backend as F\n'), ((18518, 18562), 'backend.gather_row', 'F.gather_row', (['pos_edges.parent_eid', 'pos_leid'], {}), '(pos_edges.parent_eid, pos_leid)\n', (18530, 18562), True, 'import backend as F\n'), ((19091, 19112), 'backend.asnumpy', 'F.asnumpy', (['neg_dst[i]'], {}), '(neg_dst[i])\n', (19100, 19112), True, 'import backend as F\n'), ((19138, 19159), 'backend.asnumpy', 'F.asnumpy', (['neg_eid[i]'], {}), '(neg_eid[i])\n', (19147, 19159), True, 'import backend as F\n'), ((21030, 21051), 'backend.asnumpy', 'F.asnumpy', (['neg_src[i]'], {}), '(neg_src[i])\n', (21039, 21051), True, 'import backend as F\n'), ((21053, 21074), 'backend.asnumpy', 'F.asnumpy', (['neg_dst[i]'], {}), '(neg_dst[i])\n', (21062, 21074), True, 'import backend as F\n'), ((22542, 22563), 'backend.asnumpy', 'F.asnumpy', (['neg_src[i]'], {}), '(neg_src[i])\n', (22551, 22563), True, 'import backend as F\n'), ((22565, 22586), 'backend.asnumpy', 'F.asnumpy', (['neg_dst[i]'], {}), '(neg_dst[i])\n', (22574, 22586), True, 'import backend as F\n'), ((28337, 28355), 'backend.asnumpy', 'F.asnumpy', (['neg_src'], {}), '(neg_src)\n', (28346, 28355), True, 'import backend as F\n'), ((28463, 28481), 'backend.asnumpy', 'F.asnumpy', (['neg_dst'], {}), '(neg_dst)\n', (28472, 28481), True, 'import backend as F\n'), ((30418, 30436), 'backend.asnumpy', 'F.asnumpy', (['neg_src'], {}), '(neg_src)\n', (30427, 30436), True, 'import backend as F\n'), ((30558, 30576), 'backend.asnumpy', 'F.asnumpy', (['neg_dst'], {}), '(neg_dst)\n', (30567, 30576), True, 'import backend as F\n'), ((11173, 11217), 'backend.gather_row', 'F.gather_row', (['pos_edges.parent_nid', 'pos_lsrc'], {}), '(pos_edges.parent_nid, pos_lsrc)\n', (11185, 11217), True, 'import backend as F\n'), ((11267, 11311), 'backend.gather_row', 'F.gather_row', (['pos_edges.parent_nid', 'pos_ldst'], {}), '(pos_edges.parent_nid, pos_ldst)\n', (11279, 11311), True, 'import backend as F\n'), ((11652, 11670), 'backend.asnumpy', 'F.asnumpy', (['neg_dst'], {}), '(neg_dst)\n', (11661, 11670), True, 'import backend as F\n'), ((11699, 11717), 'backend.asnumpy', 'F.asnumpy', (['neg_eid'], {}), '(neg_eid)\n', (11708, 11717), True, 'import backend as F\n'), ((16833, 16853), 'backend.asnumpy', 'F.asnumpy', (['exists[i]'], {}), '(exists[i])\n', (16842, 16853), True, 'import backend as F\n'), ((16857, 16873), 'backend.asnumpy', 'F.asnumpy', (['exist'], {}), '(exist)\n', (16866, 16873), True, 'import backend as F\n'), ((18613, 18657), 'backend.gather_row', 'F.gather_row', (['pos_edges.parent_nid', 'pos_lsrc'], {}), '(pos_edges.parent_nid, pos_lsrc)\n', (18625, 18657), True, 'import backend as F\n'), ((18707, 18751), 'backend.gather_row', 'F.gather_row', (['pos_edges.parent_nid', 'pos_ldst'], {}), '(pos_edges.parent_nid, pos_ldst)\n', (18719, 18751), True, 'import backend as F\n'), ((21284, 21304), 'backend.asnumpy', 'F.asnumpy', (['exists[i]'], {}), '(exists[i])\n', (21293, 21304), True, 'import backend as F\n'), ((21308, 21324), 'backend.asnumpy', 'F.asnumpy', (['exist'], {}), '(exist)\n', (21317, 21324), True, 'import backend as F\n'), ((22796, 22816), 'backend.asnumpy', 'F.asnumpy', (['exists[i]'], {}), '(exists[i])\n', (22805, 22816), True, 'import backend as F\n'), ((22820, 22836), 'backend.asnumpy', 'F.asnumpy', (['exist'], {}), '(exist)\n', (22829, 22836), True, 'import backend as F\n'), ((5661, 5679), 'backend.asnumpy', 'F.asnumpy', (['sub_nid'], {}), '(sub_nid)\n', (5670, 5679), True, 'import backend as F\n'), ((11827, 11848), 'backend.asnumpy', 'F.asnumpy', 
(['neg_src[i]'], {}), '(neg_src[i])\n', (11836, 11848), True, 'import backend as F\n'), ((12318, 12334), 'backend.asnumpy', 'F.asnumpy', (['exist'], {}), '(exist)\n', (12327, 12334), True, 'import backend as F\n'), ((19266, 19287), 'backend.asnumpy', 'F.asnumpy', (['neg_src[i]'], {}), '(neg_src[i])\n', (19275, 19287), True, 'import backend as F\n'), ((19757, 19773), 'backend.asnumpy', 'F.asnumpy', (['exist'], {}), '(exist)\n', (19766, 19773), True, 'import backend as F\n')]
|